import os
import glob
import json
import argparse
from typing import Dict, List, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from safetensors.torch import load_file
from huggingface_hub import hf_hub_download
from transformers import AutoImageProcessor, AutoModelForCausalLM, AutoTokenizer

def load_jsonl(path: str) -> List[dict]:
    """Read a JSONL file into a list of dicts, skipping blank lines."""
    data = []
    with open(path, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            data.append(json.loads(line))
    return data

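# Expected item schema (inferred from the fields eval_view_retrieval reads):
# {"scan_id": "...", "utterance": "...", "view_ground_truth": [<int view index>, ...]}
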
def load_safetensor_from_hf(repo_id: str, filename: str, repo_type: str = "dataset") -> Dict[str, torch.Tensor]:
    """Fetch a safetensors file from the Hugging Face Hub (reusing the local cache when available) and load it."""
    cached_path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        repo_type=repo_type,
        local_files_only=False,
    )
    return load_file(cached_path)

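# Usage sketch (the scan id below is illustrative; real ids come from the jsonl):
# data = load_safetensor_from_hf("MatchLab/ScenePoint", "light_scannet/scene0000_00.safetensors")
# data["point_map"]  # per-view point maps; see to_vchw() for accepted layouts
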
def to_vchw(point_map: torch.Tensor) -> torch.Tensor:
    """
    Convert point_map to a (V, 3, H, W) float tensor.

    Accepts common layouts:
        (V, 3, H, W) -> returned as-is
        (V, H, W, 3) -> permuted to channels-first
    """
    if point_map.dim() != 4:
        raise ValueError(f"Expected point_map to be 4D (V,*,*,*), got shape={tuple(point_map.shape)}")

    V, a, b, c = point_map.shape

    if a == 3:
        # Already channels-first.
        out = point_map
    elif c == 3:
        # Channels-last: move the coordinate channel to dim 1.
        out = point_map.permute(0, 3, 1, 2).contiguous()
    else:
        raise ValueError(f"Unrecognized point_map layout: shape={tuple(point_map.shape)}")

    return out.float()

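# Example: to_vchw(torch.zeros(8, 480, 640, 3)).shape == (8, 3, 480, 640),
# while a (8, 3, 480, 640) input passes through unchanged.
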
def load_pretrain(model, pretrain_ckpt_path):
    print(f"📂 Loading pretrained weights from: {pretrain_ckpt_path}")

    model_weight_path_pattern = os.path.join(pretrain_ckpt_path, "model*.safetensors")
    model_weight_paths = glob.glob(model_weight_path_pattern)

    if len(model_weight_paths) == 0:
        raise FileNotFoundError(f"❌ Cannot find any model*.safetensors file in {pretrain_ckpt_path}")

    # A sharded checkpoint spans several files; merge them into one state dict.
    weights = {}
    for model_weight_path in sorted(model_weight_paths):
        print(f"📥 Loading weights from: {model_weight_path}")
        weights.update(load_file(model_weight_path, device="cpu"))

    # strict=False: the checkpoint may cover only part of the model (and vice versa).
    result = model.load_state_dict(weights, strict=False)

    model_keys = set(model.state_dict().keys())
    loaded_keys = model_keys.intersection(weights.keys())
    print(f"✅ Loaded keys: {len(loaded_keys)} / {len(model_keys)}")
    print(f"❌ Missing keys: {len(result.missing_keys)}")
    print(f"⚠️ Unexpected keys: {len(result.unexpected_keys)}")

class _GlobalViewAttnBlock(nn.Module):
    """One pre-norm Transformer-style block over view tokens (B, V, D)."""

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float,
        dropout: float,
        zero_init_residual: bool,
        zero_init_attn_out: bool,
    ):
        super().__init__()
        self.zero_init_residual = zero_init_residual
        self.zero_init_attn_out = zero_init_attn_out

        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(
            embed_dim=dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True,
            bias=True,
        )

        self.norm2 = nn.LayerNorm(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        )

        self._init_weights()
    def forward(self, x, key_padding_mask=None):
        # Pre-norm self-attention over the view axis, then a pre-norm MLP,
        # each added back through a plain residual connection.
        h = self.norm1(x)
        attn_out, _ = self.attn(
            h, h, h,
            key_padding_mask=key_padding_mask,
            need_weights=False,
        )
        x = x + attn_out
        x = x + self.mlp(self.norm2(x))
        return x
    @torch.no_grad()
    def _init_weights(self):
        # LayerNorms: identity scale, zero shift.
        for ln in (self.norm1, self.norm2):
            nn.init.ones_(ln.weight)
            nn.init.zeros_(ln.bias)

        # Packed QKV input projection (present when q/k/v dims match).
        if getattr(self.attn, "in_proj_weight", None) is not None:
            nn.init.xavier_uniform_(self.attn.in_proj_weight)
        if getattr(self.attn, "in_proj_bias", None) is not None:
            nn.init.zeros_(self.attn.in_proj_bias)

        # Attention output projection.
        nn.init.xavier_uniform_(self.attn.out_proj.weight)
        if self.attn.out_proj.bias is not None:
            nn.init.zeros_(self.attn.out_proj.bias)

        # Optionally zero the output projection so the attention residual
        # contributes nothing at init.
        if self.zero_init_attn_out:
            nn.init.zeros_(self.attn.out_proj.weight)
            if self.attn.out_proj.bias is not None:
                nn.init.zeros_(self.attn.out_proj.bias)

        # MLP projections.
        fc1: nn.Linear = self.mlp[0]
        fc2: nn.Linear = self.mlp[3]

        nn.init.xavier_uniform_(fc1.weight)
        if fc1.bias is not None:
            nn.init.zeros_(fc1.bias)

        # Optionally zero the last MLP layer so the MLP residual starts as identity.
        if self.zero_init_residual:
            nn.init.zeros_(fc2.weight)
            if fc2.bias is not None:
                nn.init.zeros_(fc2.bias)
        else:
            nn.init.xavier_uniform_(fc2.weight)
            if fc2.bias is not None:
                nn.init.zeros_(fc2.bias)

class _GlobalViewGatedAttnBlock(nn.Module):
    """Pre-norm Transformer block over view tokens (B, V, D) with gated residuals."""

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float,
        dropout: float,
        zero_init_residual: bool,
        zero_init_attn_out: bool,
        gate_bias_init: float = -2.0,
    ):
        super().__init__()
        self.zero_init_residual = zero_init_residual
        self.zero_init_attn_out = zero_init_attn_out

        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(
            embed_dim=dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True,
            bias=True,
        )

        # Per-channel sigmoid gate on the attention residual, computed from the normed input.
        self.attn_gate = nn.Linear(dim, dim, bias=True)

        self.norm2 = nn.LayerNorm(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        )

        # Per-channel sigmoid gate on the MLP residual.
        self.mlp_gate = nn.Linear(dim, dim, bias=True)

        self._init_weights(gate_bias_init=gate_bias_init)
    def forward(self, x: torch.Tensor, key_padding_mask=None) -> torch.Tensor:
        # Attention branch, scaled element-wise by a learned sigmoid gate.
        h1 = self.norm1(x)
        attn_out, _ = self.attn(
            h1, h1, h1,
            key_padding_mask=key_padding_mask,
            need_weights=False,
        )
        g_attn = torch.sigmoid(self.attn_gate(h1))
        x = x + g_attn * attn_out

        # MLP branch with its own gate.
        h2 = self.norm2(x)
        mlp_out = self.mlp(h2)
        g_mlp = torch.sigmoid(self.mlp_gate(h2))
        x = x + g_mlp * mlp_out
        return x
    @torch.no_grad()
    def _init_weights(self, gate_bias_init: float):
        # LayerNorms: identity scale, zero shift.
        for ln in (self.norm1, self.norm2):
            nn.init.ones_(ln.weight)
            nn.init.zeros_(ln.bias)

        # Packed QKV input projection (present when q/k/v dims match).
        if getattr(self.attn, "in_proj_weight", None) is not None:
            nn.init.xavier_uniform_(self.attn.in_proj_weight)
        if getattr(self.attn, "in_proj_bias", None) is not None:
            nn.init.zeros_(self.attn.in_proj_bias)

        # Attention output projection.
        nn.init.xavier_uniform_(self.attn.out_proj.weight)
        if self.attn.out_proj.bias is not None:
            nn.init.zeros_(self.attn.out_proj.bias)

        # Optionally zero the output projection so the attention residual
        # contributes nothing at init.
        if self.zero_init_attn_out:
            nn.init.zeros_(self.attn.out_proj.weight)
            if self.attn.out_proj.bias is not None:
                nn.init.zeros_(self.attn.out_proj.bias)

        # MLP projections.
        fc1: nn.Linear = self.mlp[0]
        fc2: nn.Linear = self.mlp[3]
        nn.init.xavier_uniform_(fc1.weight)
        if fc1.bias is not None:
            nn.init.zeros_(fc1.bias)

        # Optionally zero the last MLP layer so the MLP residual starts as identity.
        if self.zero_init_residual:
            nn.init.zeros_(fc2.weight)
            if fc2.bias is not None:
                nn.init.zeros_(fc2.bias)
        else:
            nn.init.xavier_uniform_(fc2.weight)
            if fc2.bias is not None:
                nn.init.zeros_(fc2.bias)

        # Gates: zero weight plus a constant negative bias, so every gate starts
        # at sigmoid(gate_bias_init); the default -2.0 gives ≈ 0.12, i.e. both
        # residual branches start nearly closed.
        nn.init.zeros_(self.attn_gate.weight)
        nn.init.constant_(self.attn_gate.bias, gate_bias_init)

        nn.init.zeros_(self.mlp_gate.weight)
        nn.init.constant_(self.mlp_gate.bias, gate_bias_init)

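# Sketch of the init behaviour (dims are illustrative): with zero_init_attn_out=True
# and zero_init_residual=True both residual branches output zeros, so the block
# starts as an exact identity and the gates control how quickly each branch opens.
# blk = _GlobalViewGatedAttnBlock(dim=64, num_heads=4, mlp_ratio=4.0, dropout=0.0,
#                                 zero_init_residual=True, zero_init_attn_out=True)
# x = torch.randn(2, 5, 64)
# assert torch.allclose(blk(x), x)
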
class GlobalViewAttention(nn.Module):
    """
    Multi-layer global self-attention over multi-view tokens.

    Input:  x ∈ (B, V, D)
    Output: x' ∈ (B, V, D)
    """

    def __init__(
        self,
        dim: int,
        num_layers: int = 1,
        num_heads: int = 8,
        mlp_ratio: float = 4.0,
        dropout: float = 0.0,
        zero_init_residual: bool = True,
        zero_init_attn_out: bool = False,
    ):
        super().__init__()
        assert num_layers >= 1, "num_layers must be >= 1"

        self.dim = dim
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.layers = nn.ModuleList([
            _GlobalViewAttnBlock(
                dim=dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                dropout=dropout,
                zero_init_residual=zero_init_residual,
                zero_init_attn_out=zero_init_attn_out,
            )
            for _ in range(num_layers)
        ])

    def forward(self, x, key_padding_mask=None):
        """
        x: (B, V, D)
        key_padding_mask: (B, V), True = ignore (padding)
        """
        for layer in self.layers:
            x = layer(x, key_padding_mask=key_padding_mask)
        return x

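# Usage sketch (shapes only; sizes are illustrative):
# attn = GlobalViewAttention(dim=512, num_layers=2, num_heads=8)
# x = torch.randn(2, 5, 512)                 # B=2 scenes, V=5 views, D=512
# pad = torch.zeros(2, 5, dtype=torch.bool)  # True marks padded views
# y = attn(x, key_padding_mask=pad)          # -> (2, 5, 512)
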
class RepModel(nn.Module):
    def __init__(self, model_root: str = "fg-clip-base"):
        super().__init__()

        self.pm_encoder = AutoModelForCausalLM.from_pretrained(f"../{model_root}", trust_remote_code=True)
        self.tokenizer = AutoTokenizer.from_pretrained(f"../{model_root}", trust_remote_code=True, use_fast=True)
        self.image_processor = AutoImageProcessor.from_pretrained(f"../{model_root}")

        # Only PEFT-wrapped models expose this helper; plain models raise.
        try:
            self.pm_encoder.print_trainable_parameters()
        except Exception:
            pass

    @torch.no_grad()
    def encode_views(self, pm_batched: torch.Tensor) -> torch.Tensor:
        # The remote-code encoder returns (token features, pooled features);
        # keep the pooled per-view embedding and L2-normalize it.
        _, feats = self.pm_encoder.get_image_features(pm_batched)
        feats = F.normalize(feats.float(), dim=-1)
        return feats

    @torch.no_grad()
    def encode_text(self, texts) -> torch.Tensor:
        # Tokenize onto the encoder's own device instead of hard-coding "cuda".
        device = next(self.pm_encoder.parameters()).device
        tok = self.tokenizer(
            texts, padding="max_length", truncation=True, max_length=248, return_tensors="pt"
        ).to(device)
        feats = self.pm_encoder.get_text_features(tok["input_ids"], walk_short_pos=False)
        feats = F.normalize(feats.float(), dim=-1)
        return feats

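# Usage sketch (checkpoint location and input resolution are illustrative):
# model = RepModel("fg-clip-base").eval().to("cuda")
# txt = model.encode_text(["the chair next to the window"])       # (1, D)
# views = model.encode_views(torch.randn(4, 3, 224, 224).cuda())  # (4, D)
# sims = views @ txt.squeeze(0)                                   # cosine similarities
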
@torch.no_grad()
def eval_view_retrieval(
    model: RepModel,
    items: List[dict],
    scan_root: str,
    device: str = "cuda",
    batch_views: int = 32,
    recall_ks: Tuple[int, ...] = (1, 5, 10),
) -> Dict[str, float]:
    """Rank each scan's views against the utterance; report top-1 accuracy and recall@k."""
    model.eval()
    model.to(device)

    # Per-scan view features, computed once and reused across utterances.
    scan_cache: Dict[str, torch.Tensor] = {}

    total = 0
    top1_correct = 0
    recall_correct = {k: 0 for k in recall_ks}

    for it in items:
        scan_id = it["scan_id"]
        utter = it["utterance"]
        gt_views = it.get("view_ground_truth", None)
        if not gt_views:
            continue
        gt = int(gt_views[0])

        if scan_id not in scan_cache:
            # Prefer a local copy under scan_root; fall back to the Hub otherwise.
            local_path = os.path.join(scan_root, f"{scan_id}.safetensors")
            if os.path.isfile(local_path):
                data = load_file(local_path)
            else:
                filename = f"light_scannet/{scan_id}.safetensors"
                data = load_safetensor_from_hf("MatchLab/ScenePoint", filename, repo_type="dataset")

            pm = to_vchw(data["point_map"])

            # Encode views in chunks of batch_views to bound peak memory.
            feats = torch.cat([
                model.encode_views(pm[i : i + batch_views].to(device, non_blocking=True))
                for i in range(0, pm.shape[0], batch_views)
            ], dim=0)
            scan_cache[scan_id] = feats

        view_feats = scan_cache[scan_id]
        V = view_feats.shape[0]
        if gt < 0 or gt >= V:
            # Ground-truth view index out of range for this scan; skip the item.
            continue

        text_feat = model.encode_text([utter]).squeeze(0)  # (D,)

        # Cosine similarity of each view to the utterance (both sides are L2-normalized).
        sims = view_feats @ text_feat  # (V,)

        ranked = torch.argsort(sims, descending=True)

        pred = int(ranked[0].item())
        total += 1

        if pred == gt:
            top1_correct += 1
        else:
            # Debug trace for misranked items.
            print(f"GT: {gt}, Pred: {pred}, Utterance: {utter}")

        for k in recall_ks:
            k_eff = min(k, V)
            if (ranked[:k_eff] == gt).any().item():
                recall_correct[k] += 1

    if total == 0:
        return {"n": 0}

    out = {"n": total}
    out["top1_acc"] = top1_correct / total
    for k in recall_ks:
        out[f"recall@{k}"] = recall_correct[k] / total

    return out

def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("--jsonl", type=str, required=True, help="SR3D-style jsonl file")
    ap.add_argument("--scan_root", type=str, required=True, help="Root dir containing scan safetensors")
    ap.add_argument("--ckpt", type=str, default="", help="Optional: dir containing model*.safetensors shards")
    ap.add_argument("--model_root", type=str, default="fg-clip-base")
    ap.add_argument("--device", type=str, default="cuda")
    ap.add_argument("--batch_views", type=int, default=32)
    ap.add_argument("--max_items", type=int, default=-1, help="Evaluate only the first N items (-1 = all)")
    args = ap.parse_args()

    items = load_jsonl(args.jsonl)
    if args.max_items > 0:
        items = items[: args.max_items]

    model = RepModel(model_root=args.model_root)
    if args.ckpt:
        load_pretrain(model, args.ckpt)

    metrics = eval_view_retrieval(
        model=model,
        items=items,
        scan_root=args.scan_root,
        device=args.device,
        batch_views=args.batch_views,
        recall_ks=(1, 5, 10),
    )

    print("\n=== View Retrieval Results ===")
    for k, v in metrics.items():
        if isinstance(v, float):
            print(f"{k:>10}: {v:.4f}")
        else:
            print(f"{k:>10}: {v}")

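# Example invocation (paths and script name are illustrative):
#   python eval_view_retrieval.py --jsonl data/sr3d_test.jsonl \
#       --scan_root data/light_scannet --ckpt checkpoints/run1 --device cuda
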
if __name__ == "__main__":
    main()