| |
| """ |
| Experiment 2-A Swap Analysis: Minimal Pair Probing for Spatial Representations |
| |
| Creates minimal pairs by swapping obj1↔obj2 in spatial questions: |
| Original: "Is A to the left or right of B?" → left |
| Swapped: "Is B to the left or right of A?" → right |
| |
| Analyses: |
| 1. Difference vectors: Δ = feature(swapped) - feature(original) |
| 2. Within-group Δ consistency (do all left→right swaps point in the same direction?) |
| 3. Cross-group Δ alignment (Δ_vertical vs Δ_distance) for perspective bias |
| 4. PCA visualization of per-sample embeddings |
| 5. Scaling effects on all of the above |
| |
| Cross-group analysis (perspective bias hypothesis): |
| For far/close samples, use bbox to determine vertical relationship. |
| Create vertical swap pairs for the same image+objects. |
| Measure cos(Δ_vertical, Δ_distance) — high = entangled, low = disentangled. |
| Expect: vanilla = high alignment, scaled = lower alignment. |
| |
| Usage: |
| # Single scale (for parallel execution) |
| python exp2a_swap_analysis.py --model_type molmo --scales vanilla --device cuda |
| |
| # Merge after all scales finish |
| python exp2a_swap_analysis.py --model_type molmo --merge |
| """ |
|
|
| import os |
| import sys |
| import json |
| import argparse |
| import base64 |
| import logging |
| import random |
| import re |
| from io import BytesIO |
| from collections import defaultdict |
| from typing import Dict, List, Tuple, Optional, Any |
| from abc import ABC, abstractmethod |
|
|
| import torch |
| import numpy as np |
| import pandas as pd |
| from PIL import Image |
| from tqdm import tqdm |
| import matplotlib |
| matplotlib.use('Agg') |
| import matplotlib.pyplot as plt |
| import seaborn as sns |
| from sklearn.metrics.pairwise import cosine_similarity |
| from sklearn.decomposition import PCA |
|
|
| logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') |
| logger = logging.getLogger(__name__) |
|
|
| # --------------------------------------------------------------------------- |
| # Constants |
| # --------------------------------------------------------------------------- |
|
|
| CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 'close'] |
|
|
| OPPOSITE_MAP = { |
| 'left': 'right', 'right': 'left', |
| 'above': 'under', 'under': 'above', |
| 'far': 'close', 'close': 'far', |
| } |
|
|
| GROUP_MAP = { |
| 'left': 'horizontal', 'right': 'horizontal', |
| 'above': 'vertical', 'under': 'vertical', |
| 'far': 'distance', 'close': 'distance', |
| } |
|
|
| GROUP_ORDER = ['horizontal', 'vertical', 'distance'] |
|
|
| SCALE_COLORS = { |
| 'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c', |
| '800k': '#d62728', '2m': '#9467bd', 'roborefer': '#8c564b', |
| } |
|
|
| MODEL_CONFIGS = { |
| 'molmo': { |
| 'vanilla': 'allenai/Molmo-7B-O-0924', |
| '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared', |
| '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared', |
| '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared', |
| '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared', |
| }, |
| 'nvila': { |
| 'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B', |
| '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221', |
| '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221', |
| '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221', |
| '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632', |
| 'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model', |
| }, |
| 'qwen': { |
| 'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct', |
| '80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221', |
| '400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221', |
| '800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221', |
| '2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517', |
| }, |
| } |
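|
| # 'vanilla' entries are Hugging Face hub ids; every other scale points at a local |
| # checkpoint, and main() skips any local path that is missing on this machine. |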
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Data loading & swap pair construction |
| # --------------------------------------------------------------------------- |
|
|
| OBJECT_PATTERNS = [ |
| re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), |
| re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), |
| re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE), |
| re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE), |
| re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), |
| ] |
|
|
|
|
| def extract_objects(question: str) -> Tuple[str, str]: |
| """Pull the two object mentions (obj1, obj2) out of a templated spatial question.""" |
| for pattern in OBJECT_PATTERNS: |
| m = pattern.search(question) |
| if m: |
| return m.group(1).strip(), m.group(2).strip() |
| raise ValueError(f"Could not extract objects from: {question}") |
|
|
|
|
| def decode_base64_image(base64_str: str) -> Image.Image: |
| image_data = base64.b64decode(base64_str) |
| return Image.open(BytesIO(image_data)).convert('RGB') |
|
|
|
|
| def check_answer(generated_text: str, expected_category: str) -> bool: |
| """First-mention match: the expected keyword must appear, and before its opposite if both occur.""" |
| if not generated_text or not generated_text.strip(): |
| return False |
| text = generated_text.strip().lower() |
| expected = expected_category.lower() |
| opposite = OPPOSITE_MAP[expected] |
| pos_exp = text.find(expected) |
| pos_opp = text.find(opposite) |
| if pos_exp == -1: |
| return False |
| if pos_opp == -1: |
| return True |
| return pos_exp < pos_opp |
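|
|
| # Behavior sketch for the first-mention rule above (toy strings, not model output): |
| # the expected keyword must occur, and must precede its opposite when both appear. |
| def _demo_check_answer(): |
|     assert check_answer("It is to the left.", "left") |
|     assert check_answer("left of the box, not right", "left") |
|     assert not check_answer("right, not left", "left") |
|     assert not check_answer("", "left") |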
|
|
|
|
| def load_swap_pairs(tsv_path: str, seed: int = 42) -> List[dict]: |
| """Load EmbSpatialBench TSV and create swap pairs for all samples.""" |
| rng = random.Random(seed) |
| df = pd.read_csv(tsv_path, sep='\t') |
|
|
| pairs = [] |
| stats = defaultdict(lambda: {'total': 0, 'success': 0}) |
|
|
| for _, row in df.iterrows(): |
| category = row['category'] |
| stats[category]['total'] += 1 |
|
|
| try: |
| if category in ['left', 'right', 'above', 'under']: |
| obj1, obj2 = extract_objects(row['question']) |
| if category in ['left', 'right']: |
| template = "Is the {} to the left or right of the {}?" |
| else: |
| template = "Is the {} above or under the {}?" |
|
|
| pair = { |
| 'index': row['index'], |
| 'image_base64': row['image'], |
| 'original_question': template.format(obj1, obj2), |
| 'swapped_question': template.format(obj2, obj1), |
| 'original_answer': category, |
| 'swapped_answer': OPPOSITE_MAP[category], |
| 'group': GROUP_MAP[category], |
| 'category': category, |
| 'obj1': obj1, |
| 'obj2': obj2, |
| } |
|
|
| elif category in ['far', 'close']: |
| answer_key = row['answer'] |
| options = {k: row[k] for k in ['A', 'B', 'C', 'D']} |
| target_object = options[answer_key] |
| candidates = [v for k, v in options.items() if k != answer_key] |
| reference_object = rng.choice(candidates) |
|
|
| pair = { |
| 'index': row['index'], |
| 'image_base64': row['image'], |
| 'original_question': f"Compared to {reference_object}, is {target_object} far or close from you?", |
| 'swapped_question': f"Compared to {target_object}, is {reference_object} far or close from you?", |
| 'original_answer': category, |
| 'swapped_answer': OPPOSITE_MAP[category], |
| 'group': 'distance', |
| 'category': category, |
| 'target_object': target_object, |
| 'reference_object': reference_object, |
| } |
| else: |
| continue |
|
|
| pairs.append(pair) |
| stats[category]['success'] += 1 |
|
|
| except Exception as e: |
| logger.warning(f"Failed to create swap pair for index {row['index']}: {e}") |
| continue |
|
|
| logger.info("Swap pair creation stats:") |
| for cat in CATEGORY_ORDER: |
| s = stats[cat] |
| logger.info(f" {cat}: {s['success']}/{s['total']}") |
| logger.info(f" Total pairs: {len(pairs)}") |
|
|
| return pairs |
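|
|
| # Shape of one horizontal pair produced above (values illustrative, not real rows): |
| # {'original_question': 'Is the cup to the left or right of the plate?', |
| #  'swapped_question': 'Is the plate to the left or right of the cup?', |
| #  'original_answer': 'left', 'swapped_answer': 'right', |
| #  'group': 'horizontal', 'category': 'left', 'obj1': 'the cup', 'obj2': 'the plate'} |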
|
|
|
|
| def build_hf_bbox_cache(hf_dataset_name: str = 'FlagEval/EmbSpatial-Bench') -> Dict[int, dict]: |
| """Load HF dataset and build bbox lookup cache keyed by question_id.""" |
| from datasets import load_dataset |
| logger.info(f"Loading HF dataset: {hf_dataset_name}") |
| ds = load_dataset(hf_dataset_name, split='test') |
|
|
| cache = {} |
| for item in ds: |
| qid = item['question_id'] |
| cache[qid] = { |
| 'objects': item['objects'], |
| 'relation': item['relation'], |
| 'data_source': item['data_source'], |
| 'answer': item['answer'], |
| 'answer_options': item['answer_options'], |
| } |
|
|
| logger.info(f"Built bbox cache: {len(cache)} entries") |
| return cache |
|
|
|
|
| def get_bbox_center_y(bbox: list) -> float: |
| """BBox [x, y, width, height] -> center y coordinate.""" |
| return bbox[1] + bbox[3] / 2 |
|
|
|
|
| def create_cross_group_quads( |
| swap_pairs: List[dict], |
| hf_cache: Dict[int, dict], |
| threshold_ratio: float = 0.05, |
| ) -> List[dict]: |
| """ |
| For far/close swap pairs, create additional vertical queries using bbox. |
| |
| Returns quads: each combines a distance swap pair and a vertical swap pair for the same image and objects. |
| Only includes samples where vertical relationship is unambiguous. |
| """ |
| IMAGE_HEIGHTS = {'ai2thor': 300, 'mp3d': 480, 'scannet': 968} |
|
|
| quads = [] |
| stats = {'total': 0, 'matched': 0, 'ambiguous': 0, 'no_bbox': 0} |
|
|
| distance_pairs = [p for p in swap_pairs if p['group'] == 'distance'] |
|
|
| for pair in distance_pairs: |
| stats['total'] += 1 |
| idx = pair['index'] |
|
|
| if idx not in hf_cache: |
| stats['no_bbox'] += 1 |
| continue |
|
|
| hf_item = hf_cache[idx] |
| names = hf_item['objects']['name'] |
| bboxes = hf_item['objects']['bbox'] |
|
|
| target = pair['target_object'] |
| reference = pair['reference_object'] |
|
|
| # Locate bbox center-y for both the target and the reference object |
| target_bbox_y, ref_bbox_y = None, None |
| for name, bbox in zip(names, bboxes): |
| if name == target: |
| target_bbox_y = get_bbox_center_y(bbox) |
| if name == reference: |
| ref_bbox_y = get_bbox_center_y(bbox) |
|
|
| if target_bbox_y is None or ref_bbox_y is None: |
| stats['no_bbox'] += 1 |
| continue |
|
|
| # Require a clear vertical gap, scaled to the source dataset's image height |
| image_height = IMAGE_HEIGHTS.get(hf_item['data_source'], 480) |
| threshold = image_height * threshold_ratio |
| y_diff = target_bbox_y - ref_bbox_y |
|
|
| if abs(y_diff) < threshold: |
| stats['ambiguous'] += 1 |
| continue |
|
|
| # Image-coordinate convention: y grows downward, so a smaller center-y |
| # means the object sits higher in the frame ('above'). |
| if target_bbox_y < ref_bbox_y: |
| vert_original_answer = 'above' |
| else: |
| vert_original_answer = 'under' |
|
|
| vert_original_q = f"Is the {target} above or under the {reference}?" |
| vert_swapped_q = f"Is the {reference} above or under the {target}?" |
|
|
| quad = { |
| 'index': idx, |
| 'image_base64': pair['image_base64'], |
| # distance swap pair |
| 'dist_original_q': pair['original_question'], |
| 'dist_swapped_q': pair['swapped_question'], |
| 'dist_original_answer': pair['original_answer'], |
| 'dist_swapped_answer': pair['swapped_answer'], |
| # vertical swap pair (derived from bbox geometry) |
| 'vert_original_q': vert_original_q, |
| 'vert_swapped_q': vert_swapped_q, |
| 'vert_original_answer': vert_original_answer, |
| 'vert_swapped_answer': OPPOSITE_MAP[vert_original_answer], |
| # metadata for filtering and debugging |
| 'target_object': target, |
| 'reference_object': reference, |
| 'target_bbox_y': target_bbox_y, |
| 'ref_bbox_y': ref_bbox_y, |
| 'y_diff': y_diff, |
| 'data_source': hf_item['data_source'], |
| } |
| quads.append(quad) |
| stats['matched'] += 1 |
|
|
| logger.info(f"Cross-group quads: {stats['matched']}/{stats['total']} " |
| f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})") |
| return quads |
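|
|
| # Sanity sketch for the above/under rule (toy boxes, never called by the pipeline; |
| # image y grows downward, heights/ratio follow the defaults used above): |
| def _demo_vertical_rule(): |
|     target_y = get_bbox_center_y([100, 100, 40, 40])  # 120.0 |
|     ref_y = get_bbox_center_y([100, 300, 40, 40])     # 320.0 |
|     threshold = 480 * 0.05  # mp3d image height x default threshold_ratio = 24 px |
|     assert abs(target_y - ref_y) >= threshold |
|     assert target_y < ref_y  # smaller center-y = higher in frame = 'above' |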
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Hidden-state extractors: shared base |
| # --------------------------------------------------------------------------- |
|
|
| class BaseHiddenStateExtractor(ABC): |
| def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None): |
| self.model_path = model_path |
| self.device = device |
| self.hidden_states = {} |
| self.hooks = [] |
| self._load_model() |
| num_layers = self._get_num_layers() |
| if target_layers is None: |
| self.target_layers = list(range(num_layers)) |
| logger.info(f"Model has {num_layers} layers. Extracting ALL.") |
| else: |
| self.target_layers = target_layers |
| self._register_hooks() |
|
|
| def _register_hooks(self): |
| for layer_idx in self.target_layers: |
| module = self._get_layer_module(layer_idx) |
| if module is not None: |
| hook = module.register_forward_hook(self._make_hook(layer_idx)) |
| self.hooks.append(hook) |
|
|
| def _make_hook(self, layer_idx: int): |
| def hook_fn(module, input, output): |
| if isinstance(output, tuple): |
| hidden = output[0] |
| else: |
| hidden = output |
| # Record only during the prompt (prefill) pass, where seq_len > 1; single-token |
| # decode steps would otherwise overwrite the prompt's last-token state. |
| if hidden.shape[1] > 1: |
| last_token = hidden[:, -1, :].detach().cpu().float() |
| self.hidden_states[layer_idx] = last_token.squeeze(0) |
| return hook_fn |
|
|
| @abstractmethod |
| def _load_model(self): pass |
| @abstractmethod |
| def _get_num_layers(self) -> int: pass |
| @abstractmethod |
| def _get_layer_module(self, layer_idx: int): pass |
| @abstractmethod |
| def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass |
|
|
| def cleanup(self): |
| for hook in self.hooks: |
| hook.remove() |
| self.hooks = [] |
| if hasattr(self, 'model'): |
| del self.model |
| if hasattr(self, 'processor'): |
| del self.processor |
| torch.cuda.empty_cache() |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Molmo |
| # --------------------------------------------------------------------------- |
|
|
| class MolmoExtractor(BaseHiddenStateExtractor): |
| def _load_model(self): |
| config_path = os.path.join(self.model_path, "config.yaml") |
| checkpoint_path = os.path.join(self.model_path, "model.pt") |
| if os.path.exists(config_path) and os.path.exists(checkpoint_path): |
| self._load_native_model() |
| self.is_native = True |
| else: |
| self._load_hf_model() |
| self.is_native = False |
|
|
| def _load_native_model(self): |
| from olmo.config import ModelConfig |
| from olmo.model import Molmo as NativeMolmoModel |
| from olmo.data.model_preprocessor import MultiModalPreprocessor |
| from olmo.data.data_formatter import DataFormatter |
|
|
| # PyTorch >= 2.6 defaults torch.load(weights_only=True), which rejects the |
| # non-tensor pickled objects in native Molmo checkpoints; wrap torch.load to |
| # restore full unpickling. The patch is process-global and is not undone. |
| _original_load = torch.load |
| def _unsafe_load_wrapper(*args, **kwargs): |
| if 'weights_only' not in kwargs: |
| kwargs['weights_only'] = False |
| return _original_load(*args, **kwargs) |
| torch.load = _unsafe_load_wrapper |
|
|
| cfg = ModelConfig.load( |
| os.path.join(self.model_path, "config.yaml"), |
| key="model", validate_paths=False |
| ) |
| cfg.init_device = "cpu" |
| self.model = NativeMolmoModel(cfg) |
| state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu") |
| self.model.load_state_dict(state_dict) |
| self.model = self.model.to(self.device, dtype=torch.bfloat16).eval() |
| self.tokenizer = cfg.get_tokenizer() |
|
|
| v_cfg = cfg.vision_backbone |
| h, w = cfg.llm_patches_per_crop() |
| image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None) |
|
|
| # Some checkpoints leave the prompt style unset; default it to "User" |
| class SafeDataFormatter(DataFormatter): |
| def get_system_prompt(self, style, for_inference, messages, rng=None): |
| if style is None: |
| style = "User" |
| return super().get_system_prompt(style, for_inference, messages, rng) |
|
|
| self.formatter = SafeDataFormatter( |
| prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting, |
| system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space, |
| default_inference_len=cfg.default_inference_len |
| ) |
| self.preprocessor = MultiModalPreprocessor( |
| tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type), |
| crop_mode=cfg.crop_mode, max_crops=cfg.max_crops, |
| overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode, |
| use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size, |
| image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h, |
| image_token_length_w=w, image_token_length_h=h, |
| image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask, |
| pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting, |
| ) |
| logger.info(f"Loaded native Molmo from {self.model_path}") |
|
|
| def _load_hf_model(self): |
| from transformers import AutoModelForCausalLM, AutoProcessor |
| self.model = AutoModelForCausalLM.from_pretrained( |
| self.model_path, torch_dtype=torch.bfloat16, |
| trust_remote_code=True, device_map=self.device |
| ).eval() |
| self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) |
| logger.info(f"Loaded HF Molmo from {self.model_path}") |
|
|
| def _get_num_layers(self) -> int: |
| if self.is_native: |
| return len(self.model.transformer.blocks) |
| if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'): |
| return len(self.model.model.transformer.blocks) |
| return 32 |
|
|
| def _get_layer_module(self, layer_idx: int): |
| if self.is_native: |
| return self.model.transformer.blocks[layer_idx] |
| return self.model.model.transformer.blocks[layer_idx] |
|
|
| def extract_and_predict(self, image, question): |
| self.hidden_states = {} |
| if self.is_native: |
| example = {"messages": [question], "image": image} |
| messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random) |
| batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True) |
| # Some preprocessor versions name the token ids 'input_tokens' rather than 'input_ids' |
| if 'input_ids' not in batch and 'input_tokens' in batch: |
| batch['input_ids'] = batch['input_tokens'] |
|
|
| def to_t(x): |
| return torch.from_numpy(x) if isinstance(x, np.ndarray) else x |
|
|
| input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long() |
| images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16) |
| image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16) |
| image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device) |
|
|
| with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): |
| gen = self.model.generate( |
| input_ids=input_ids, images=images_t, |
| image_masks=image_masks, image_input_idx=image_input_idx, |
| max_steps=20, beam_size=1, |
| ) |
| generated_ids = gen.token_ids[0, 0] |
| answer = self.tokenizer.decode(generated_ids.tolist()).strip() |
| for eos in ['<|endoftext|>', '</s>', '<|end|>']: |
| answer = answer.replace(eos, '').strip() |
| else: |
| from transformers import GenerationConfig |
| inputs = self.processor.process(images=[image], text=question) |
| processed = {} |
| for k, v in inputs.items(): |
| v = v.to(self.device).unsqueeze(0) |
| if v.dtype == torch.float32: |
| v = v.to(dtype=torch.bfloat16) |
| processed[k] = v |
| with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16): |
| output = self.model.generate_from_batch( |
| processed, |
| GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"), |
| tokenizer=self.processor.tokenizer, |
| ) |
| input_len = processed['input_ids'].shape[1] |
| answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip() |
|
|
| return self.hidden_states.copy(), answer |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # NVILA / RoboRefer |
| # --------------------------------------------------------------------------- |
|
|
| class NVILAExtractor(BaseHiddenStateExtractor): |
| def _load_model(self): |
| original_sys_path = sys.path.copy() |
| sys.path = [p for p in sys.path if 'RoboRefer' not in p] |
| modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] |
| removed = {m: sys.modules.pop(m) for m in modules_to_remove} |
| try: |
| import llava |
| from llava.media import Image as LLaVAImage |
| from llava import conversation as clib |
| except Exception as err: |
| sys.path = original_sys_path |
| for m, mod in removed.items(): |
| sys.modules[m] = mod |
| raise RuntimeError(f"Failed to import llava: {err}") |
| sys.path = original_sys_path |
| self.LLaVAImage = LLaVAImage |
| self.clib = clib |
| self.model = llava.load(self.model_path, model_base=None) |
| self._find_llm_backbone() |
| logger.info(f"Loaded NVILA from {self.model_path}") |
|
|
| def _find_llm_backbone(self): |
| # Probe common layouts in priority order; the first match wins |
| candidates = [] |
| if hasattr(self.model, 'llm'): |
| if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'): |
| candidates.append(self.model.llm.model.layers) |
| if hasattr(self.model.llm, 'layers'): |
| candidates.append(self.model.llm.layers) |
| if hasattr(self.model, 'model'): |
| if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'): |
| candidates.append(self.model.model.model.layers) |
| if hasattr(self.model.model, 'layers'): |
| candidates.append(self.model.model.layers) |
| for name, module in self.model.named_modules(): |
| if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0: |
| candidates.append(module) |
| if candidates: |
| self.llm_backbone = candidates[0] |
| else: |
| raise ValueError("Could not locate transformer layers in NVILA model") |
|
|
| def _get_num_layers(self) -> int: |
| return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24 |
|
|
| def _get_layer_module(self, layer_idx: int): |
| return self.llm_backbone[layer_idx] |
|
|
| def extract_and_predict(self, image, question): |
| self.hidden_states = {} |
| import tempfile |
| with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f: |
| temp_path = f.name |
| image.save(temp_path) |
| try: |
| prompt = [self.LLaVAImage(temp_path), question] |
| from transformers import GenerationConfig |
| response = self.model.generate_content( |
| prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False) |
| ) |
| finally: |
| os.unlink(temp_path) |
| answer = str(response[0] if isinstance(response, list) else response).strip() |
| return self.hidden_states.copy(), answer |
|
|
|
|
| class RoboReferExtractor(NVILAExtractor): |
| ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer' |
|
|
| def _load_model(self): |
| original_sys_path = sys.path.copy() |
| if self.ROBOREFER_PATH not in sys.path: |
| sys.path.insert(0, self.ROBOREFER_PATH) |
| modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] |
| removed = {m: sys.modules.pop(m) for m in modules_to_remove} |
| try: |
| import llava |
| from llava.media import Image as LLaVAImage |
| from llava import conversation as clib |
| except Exception as err: |
| sys.path = original_sys_path |
| for m, mod in removed.items(): |
| sys.modules[m] = mod |
| raise RuntimeError(f"Failed to import RoboRefer llava: {err}") |
| sys.path = original_sys_path |
| self.LLaVAImage = LLaVAImage |
| self.clib = clib |
| self.model = llava.load(self.model_path, model_base=None) |
| self._find_llm_backbone() |
| logger.info(f"Loaded RoboRefer from {self.model_path}") |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Qwen2.5-VL |
| # --------------------------------------------------------------------------- |
|
|
| class Qwen25VLExtractor(BaseHiddenStateExtractor): |
| BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct" |
|
|
| def _load_model(self): |
| from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor |
| try: |
| self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( |
| self.model_path, torch_dtype=torch.bfloat16, device_map=self.device |
| ) |
| except ImportError: |
| self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( |
| self.model_path, torch_dtype=torch.bfloat16 |
| ).to(self.device) |
| self.model.eval() |
| if self.model_path.startswith('/'): |
| self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL) |
| else: |
| self.processor = AutoProcessor.from_pretrained(self.model_path) |
| logger.info(f"Loaded Qwen2.5-VL from {self.model_path}") |
|
|
| def _get_num_layers(self) -> int: |
| return len(self._decoder_layers()) |
|
|
| def _get_layer_module(self, layer_idx: int): |
| return self._decoder_layers()[layer_idx] |
|
|
| def _decoder_layers(self): |
| # Newer transformers releases nest the text stack under model.language_model; |
| # older ones expose the decoder layers directly on model.model. |
| m = self.model.model |
| return m.language_model.layers if hasattr(m, 'language_model') else m.layers |
|
|
| def extract_and_predict(self, image, question): |
| self.hidden_states = {} |
| messages = [{"role": "user", "content": [ |
| {"type": "image", "image": image}, |
| {"type": "text", "text": question} |
| ]}] |
| text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) |
| from qwen_vl_utils import process_vision_info |
| image_inputs, video_inputs = process_vision_info(messages) |
| inputs = self.processor( |
| text=[text], images=image_inputs, videos=video_inputs, |
| padding=True, return_tensors="pt" |
| ).to(self.device) |
| with torch.no_grad(): |
| output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) |
| input_len = inputs['input_ids'].shape[1] |
| answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip() |
| return self.hidden_states.copy(), answer |
|
|
|
|
| def get_extractor(model_type: str, model_path: str, scale: str = None, **kwargs): |
| if model_type == 'nvila' and scale == 'roborefer': |
| return RoboReferExtractor(model_path, **kwargs) |
| extractors = {'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor} |
| return extractors[model_type](model_path, **kwargs) |
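|
|
| # Example routing (paths come from MODEL_CONFIGS above): |
| # get_extractor('nvila', MODEL_CONFIGS['nvila']['roborefer'], scale='roborefer') |
| # returns a RoboReferExtractor; every other (model_type, scale) combination uses |
| # the class registered for its model_type. |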
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Feature extraction |
| # --------------------------------------------------------------------------- |
|
|
| def run_single_query(extractor, image, question): |
| """Run a single forward pass and return (hidden_states_dict, predicted_text).""" |
| hidden_states, predicted = extractor.extract_and_predict(image, question) |
| result = {} |
| for layer_idx in extractor.target_layers: |
| if layer_idx in hidden_states: |
| state = hidden_states[layer_idx].numpy().flatten() |
| if state.size > 0: |
| result[layer_idx] = state |
| return result, predicted |
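|
|
| # The returned dict maps layer index -> 1-D float32 vector of length hidden_dim |
| # (the last prompt token's hidden state), alongside the decoded answer text. |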
|
|
|
|
| def extract_swap_features( |
| extractor: BaseHiddenStateExtractor, |
| swap_pairs: List[dict], |
| max_samples_per_category: int = 0, |
| ) -> List[dict]: |
| """Extract features for all swap pairs. Returns per-sample records with delta vectors.""" |
| rng = random.Random(42) |
|
|
| # Optionally cap samples per category (deterministic via the fixed-seed rng) |
| if max_samples_per_category > 0: |
| grouped = defaultdict(list) |
| for p in swap_pairs: |
| grouped[p['category']].append(p) |
| limited = [] |
| for cat in CATEGORY_ORDER: |
| samples = grouped[cat] |
| if len(samples) > max_samples_per_category: |
| samples = rng.sample(samples, max_samples_per_category) |
| limited.extend(samples) |
| swap_pairs = limited |
|
|
| records = [] |
| for pair in tqdm(swap_pairs, desc="Swap pairs"): |
| try: |
| image = decode_base64_image(pair['image_base64']) |
|
|
| hs_orig, pred_orig = run_single_query(extractor, image, pair['original_question']) |
| hs_swap, pred_swap = run_single_query(extractor, image, pair['swapped_question']) |
|
|
| is_correct_orig = check_answer(pred_orig, pair['original_answer']) |
| is_correct_swap = check_answer(pred_swap, pair['swapped_answer']) |
|
|
| # Δ = feature(swapped) - feature(original), per layer |
| delta = {} |
| for layer_idx in extractor.target_layers: |
| if layer_idx in hs_orig and layer_idx in hs_swap: |
| delta[layer_idx] = hs_swap[layer_idx] - hs_orig[layer_idx] |
|
|
| record = { |
| 'index': pair['index'], |
| 'group': pair['group'], |
| 'category': pair['category'], |
| 'original_answer': pair['original_answer'], |
| 'swapped_answer': pair['swapped_answer'], |
| 'pred_orig': pred_orig, |
| 'pred_swap': pred_swap, |
| 'is_correct_orig': is_correct_orig, |
| 'is_correct_swap': is_correct_swap, |
| 'hs_orig': hs_orig, |
| 'hs_swap': hs_swap, |
| 'delta': delta, |
| } |
| records.append(record) |
|
|
| mark_o = "O" if is_correct_orig else "X" |
| mark_s = "O" if is_correct_swap else "X" |
| tqdm.write(f" #{pair['index']:<6} {pair['category']:<6} " |
| f"orig[{mark_o}]=\"{pred_orig[:40]}\" swap[{mark_s}]=\"{pred_swap[:40]}\"") |
|
|
| except Exception as e: |
| logger.warning(f"Error on index {pair['index']}: {e}") |
| continue |
|
|
| logger.info(f"Extracted {len(records)} swap pair records") |
| for group in GROUP_ORDER: |
| n = sum(1 for r in records if r['group'] == group) |
| logger.info(f" {group}: {n}") |
| return records |
|
|
|
|
| def extract_cross_group_features( |
| extractor: BaseHiddenStateExtractor, |
| quads: List[dict], |
| ) -> List[dict]: |
| """Extract features for cross-group quads (4 forward passes each).""" |
| records = [] |
|
|
| for quad in tqdm(quads, desc="Cross-group quads"): |
| try: |
| image = decode_base64_image(quad['image_base64']) |
|
|
| hs_d_orig, pred_d_orig = run_single_query(extractor, image, quad['dist_original_q']) |
| hs_d_swap, pred_d_swap = run_single_query(extractor, image, quad['dist_swapped_q']) |
| hs_v_orig, pred_v_orig = run_single_query(extractor, image, quad['vert_original_q']) |
| hs_v_swap, pred_v_swap = run_single_query(extractor, image, quad['vert_swapped_q']) |
|
|
| delta_dist, delta_vert = {}, {} |
| for layer_idx in extractor.target_layers: |
| if layer_idx in hs_d_orig and layer_idx in hs_d_swap: |
| delta_dist[layer_idx] = hs_d_swap[layer_idx] - hs_d_orig[layer_idx] |
| if layer_idx in hs_v_orig and layer_idx in hs_v_swap: |
| delta_vert[layer_idx] = hs_v_swap[layer_idx] - hs_v_orig[layer_idx] |
|
|
| record = { |
| 'index': quad['index'], |
| 'delta_dist': delta_dist, |
| 'delta_vert': delta_vert, |
| 'pred_d_orig': pred_d_orig, |
| 'pred_d_swap': pred_d_swap, |
| 'pred_v_orig': pred_v_orig, |
| 'pred_v_swap': pred_v_swap, |
| 'is_correct_d_orig': check_answer(pred_d_orig, quad['dist_original_answer']), |
| 'is_correct_d_swap': check_answer(pred_d_swap, quad['dist_swapped_answer']), |
| 'is_correct_v_orig': check_answer(pred_v_orig, quad['vert_original_answer']), |
| 'is_correct_v_swap': check_answer(pred_v_swap, quad['vert_swapped_answer']), |
| 'data_source': quad['data_source'], |
| } |
| records.append(record) |
|
|
| tqdm.write(f" #{quad['index']:<6} dist=[{pred_d_orig[:20]}/{pred_d_swap[:20]}] " |
| f"vert=[{pred_v_orig[:20]}/{pred_v_swap[:20]}]") |
|
|
| except Exception as e: |
| logger.warning(f"Error on cross-group index {quad['index']}: {e}") |
| continue |
|
|
| logger.info(f"Extracted {len(records)} cross-group quad records") |
| return records |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Analysis |
| # --------------------------------------------------------------------------- |
|
|
| def compute_delta_consistency(records: List[dict], target_layers: List[int]) -> dict: |
| """ |
| For each group × layer, compute average pairwise cosine similarity among Δ vectors. |
| High consistency = all swaps point in the same direction = model encodes concept linearly. |
| """ |
| results = {} |
| for group in GROUP_ORDER: |
| group_recs = [r for r in records if r['group'] == group] |
| for layer in target_layers: |
| deltas = [r['delta'][layer] for r in group_recs if layer in r['delta']] |
| if len(deltas) < 2: |
| continue |
| deltas_arr = np.array(deltas) |
| sim = cosine_similarity(deltas_arr) |
| upper_tri = sim[np.triu_indices(len(deltas), k=1)] |
| results[(group, layer)] = { |
| 'mean': float(np.mean(upper_tri)), |
| 'std': float(np.std(upper_tri)), |
| 'n': len(deltas), |
| } |
| return results |
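|
|
| # Toy illustration of the score on synthetic Δ vectors (not model features, never |
| # called by the pipeline): three near-parallel vectors give a mean pairwise cosine |
| # close to +1, i.e. all "swaps" move the representation the same way. |
| def _demo_delta_consistency(): |
|     deltas = np.array([[1.0, 0.05], [0.95, -0.05], [1.1, 0.0]]) |
|     sim = cosine_similarity(deltas) |
|     mean_cos = sim[np.triu_indices(len(deltas), k=1)].mean() |
|     assert mean_cos > 0.99 |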
|
|
|
|
| def compute_cross_group_alignment(quad_records: List[dict], target_layers: List[int]) -> dict: |
| """ |
| For each layer, compute cos(Δ_vert, Δ_dist) per sample, then average. |
| High alignment = perspective bias (vertical ≈ distance in representation). |
| Also compute alignment using mean Δ vectors. |
| """ |
| results = {} |
| for layer in target_layers: |
| per_sample = [] |
| delta_verts, delta_dists = [], [] |
|
|
| for rec in quad_records: |
| if layer in rec['delta_vert'] and layer in rec['delta_dist']: |
| dv = rec['delta_vert'][layer] |
| dd = rec['delta_dist'][layer] |
| norm_v = np.linalg.norm(dv) |
| norm_d = np.linalg.norm(dd) |
| if norm_v > 1e-10 and norm_d > 1e-10: |
| cos = np.dot(dv, dd) / (norm_v * norm_d) |
| per_sample.append(float(cos)) |
| delta_verts.append(dv) |
| delta_dists.append(dd) |
|
|
| if not per_sample: |
| continue |
|
|
| # Group-level alignment: cosine between the mean Δ vectors |
| mean_dv = np.mean(delta_verts, axis=0) |
| mean_dd = np.mean(delta_dists, axis=0) |
| norm_mv = np.linalg.norm(mean_dv) |
| norm_md = np.linalg.norm(mean_dd) |
| mean_alignment = float(np.dot(mean_dv, mean_dd) / (norm_mv * norm_md + 1e-10)) |
|
|
| # Permutation control: shuffle the pairing to estimate chance-level alignment |
| rng = np.random.RandomState(42) |
| n_perm = 100 |
| perm_alignments = [] |
| for _ in range(n_perm): |
| shuffled_dd = [delta_dists[i] for i in rng.permutation(len(delta_dists))] |
| perm_cos = [] |
| for dv, dd in zip(delta_verts, shuffled_dd): |
| norm_v = np.linalg.norm(dv) |
| norm_d = np.linalg.norm(dd) |
| if norm_v > 1e-10 and norm_d > 1e-10: |
| perm_cos.append(np.dot(dv, dd) / (norm_v * norm_d)) |
| perm_alignments.append(np.mean(perm_cos)) |
|
|
| results[layer] = { |
| 'per_sample_mean': float(np.mean(per_sample)), |
| 'per_sample_std': float(np.std(per_sample)), |
| 'mean_delta_alignment': mean_alignment, |
| 'permutation_mean': float(np.mean(perm_alignments)), |
| 'permutation_std': float(np.std(perm_alignments)), |
| 'n_samples': len(per_sample), |
| } |
| return results |
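|
|
| # Minimal sketch of why the permutation control works (toy data, fixed seed, never |
| # called by the pipeline): shuffling which Δ_dist goes with which Δ_vert destroys |
| # per-image correspondence, so only sample-level coupling survives in 'paired'. |
| def _demo_permutation_control(): |
|     rng = np.random.RandomState(0) |
|     dv = rng.randn(50, 8) |
|     dd = dv + 0.1 * rng.randn(50, 8)  # strongly coupled to dv, sample by sample |
|     def cos(a, b): |
|         return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))) |
|     paired = np.mean([cos(a, b) for a, b in zip(dv, dd)]) |
|     permuted = np.mean([cos(a, b) for a, b in zip(dv, dd[rng.permutation(50)])]) |
|     assert paired > 0.9 and abs(permuted) < 0.3 |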
|
|
|
|
| def compute_prediction_stats(records: List[dict], scale: str) -> dict: |
| """Compute accuracy stats from swap pair records.""" |
| stats = {'scale': scale} |
| total_correct_orig, total_correct_swap, total_both, total_n = 0, 0, 0, 0 |
|
|
| for group in GROUP_ORDER: |
| group_recs = [r for r in records if r['group'] == group] |
| n = len(group_recs) |
| c_orig = sum(1 for r in group_recs if r['is_correct_orig']) |
| c_swap = sum(1 for r in group_recs if r['is_correct_swap']) |
| c_both = sum(1 for r in group_recs if r['is_correct_orig'] and r['is_correct_swap']) |
|
|
| stats[f'{group}_n'] = n |
| stats[f'{group}_acc_orig'] = c_orig / n if n > 0 else 0 |
| stats[f'{group}_acc_swap'] = c_swap / n if n > 0 else 0 |
| stats[f'{group}_acc_both'] = c_both / n if n > 0 else 0 |
|
|
| total_correct_orig += c_orig |
| total_correct_swap += c_swap |
| total_both += c_both |
| total_n += n |
|
|
| stats['overall_acc_orig'] = total_correct_orig / total_n if total_n > 0 else 0 |
| stats['overall_acc_swap'] = total_correct_swap / total_n if total_n > 0 else 0 |
| stats['overall_acc_both'] = total_both / total_n if total_n > 0 else 0 |
| stats['overall_n'] = total_n |
| return stats |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Saving & loading per-scale results |
| # --------------------------------------------------------------------------- |
|
|
| def save_scale_results( |
| scale: str, |
| swap_records: List[dict], |
| quad_records: List[dict], |
| delta_consistency: dict, |
| cross_alignment: dict, |
| pred_stats: dict, |
| target_layers: List[int], |
| output_dir: str, |
| ): |
| """Save all per-scale results to disk.""" |
| # 1) Per-sample predictions |
| pred_rows = [] |
| for r in swap_records: |
| pred_rows.append({ |
| 'index': r['index'], 'group': r['group'], 'category': r['category'], |
| 'pred_orig': r['pred_orig'], 'pred_swap': r['pred_swap'], |
| 'is_correct_orig': r['is_correct_orig'], 'is_correct_swap': r['is_correct_swap'], |
| }) |
| pd.DataFrame(pred_rows).to_csv( |
| os.path.join(output_dir, f'predictions_{scale}.csv'), index=False |
| ) |
|
|
| # 2) Within-group Δ consistency |
| consistency_data = {} |
| for (group, layer), vals in delta_consistency.items(): |
| consistency_data[f'{group}_L{layer}'] = vals |
| with open(os.path.join(output_dir, f'delta_consistency_{scale}.json'), 'w') as f: |
| json.dump(consistency_data, f, indent=2) |
|
|
| # 3) Cross-group alignment |
| alignment_data = {} |
| for layer, vals in cross_alignment.items(): |
| alignment_data[f'L{layer}'] = vals |
| with open(os.path.join(output_dir, f'cross_alignment_{scale}.json'), 'w') as f: |
| json.dump(alignment_data, f, indent=2) |
|
|
| # 4) Prediction accuracy stats |
| with open(os.path.join(output_dir, f'pred_stats_{scale}.json'), 'w') as f: |
| json.dump(pred_stats, f, indent=2) |
|
|
| # 5) Δ vectors at representative layers (inputs for the PCA plots) |
| rep_layers = get_representative_layers(target_layers, n=5) |
| delta_data = {} |
| for layer in rep_layers: |
| groups_list, categories_list, vectors = [], [], [] |
| for r in swap_records: |
| if layer in r['delta']: |
| groups_list.append(r['group']) |
| categories_list.append(r['category']) |
| vectors.append(r['delta'][layer]) |
| if vectors: |
| delta_data[f'delta_L{layer}'] = np.array(vectors) |
| delta_data[f'groups_L{layer}'] = np.array(groups_list) |
| delta_data[f'categories_L{layer}'] = np.array(categories_list) |
|
|
| # ... plus per-sample original/swapped embeddings at the same layers |
| orig_vecs, swap_vecs, labels = [], [], [] |
| for r in swap_records: |
| if layer in r['hs_orig'] and layer in r['hs_swap']: |
| orig_vecs.append(r['hs_orig'][layer]) |
| swap_vecs.append(r['hs_swap'][layer]) |
| labels.append(r['category']) |
| if orig_vecs: |
| delta_data[f'orig_L{layer}'] = np.array(orig_vecs) |
| delta_data[f'swap_L{layer}'] = np.array(swap_vecs) |
| delta_data[f'labels_L{layer}'] = np.array(labels) |
|
|
| np.savez_compressed(os.path.join(output_dir, f'vectors_{scale}.npz'), **delta_data) |
|
|
| # 6) Cross-group Δ vectors |
| if quad_records: |
| cg_data = {} |
| for layer in rep_layers: |
| dverts, ddists = [], [] |
| for rec in quad_records: |
| if layer in rec['delta_vert'] and layer in rec['delta_dist']: |
| dverts.append(rec['delta_vert'][layer]) |
| ddists.append(rec['delta_dist'][layer]) |
| if dverts: |
| cg_data[f'delta_vert_L{layer}'] = np.array(dverts) |
| cg_data[f'delta_dist_L{layer}'] = np.array(ddists) |
| np.savez_compressed(os.path.join(output_dir, f'cross_group_vectors_{scale}.npz'), **cg_data) |
|
|
| logger.info(f"Saved results for scale={scale} to {output_dir}") |
|
|
|
|
| def load_scale_consistency(output_dir: str, scale: str) -> dict: |
| path = os.path.join(output_dir, f'delta_consistency_{scale}.json') |
| if not os.path.exists(path): |
| return {} |
| with open(path) as f: |
| raw = json.load(f) |
| result = {} |
| for key, vals in raw.items(): |
| # Keys look like 'horizontal_L12'; split group name from layer index |
| parts = key.rsplit('_L', 1) |
| if len(parts) == 2: |
| group, layer = parts[0], int(parts[1]) |
| result[(group, layer)] = vals |
| return result |
|
|
|
|
| def load_scale_alignment(output_dir: str, scale: str) -> dict: |
| path = os.path.join(output_dir, f'cross_alignment_{scale}.json') |
| if not os.path.exists(path): |
| return {} |
| with open(path) as f: |
| raw = json.load(f) |
| result = {} |
| for key, vals in raw.items(): |
| layer = int(key.replace('L', '')) |
| result[layer] = vals |
| return result |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Plotting |
| # --------------------------------------------------------------------------- |
|
|
| def get_representative_layers(all_layers: List[int], n: int = 5) -> List[int]: |
| if len(all_layers) <= n: |
| return list(all_layers) |
| indices = np.linspace(0, len(all_layers) - 1, n, dtype=int) |
| return [all_layers[i] for i in indices] |
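|
|
| # e.g. 32 layers down-sampled to 5 evenly spaced picks: |
| # get_representative_layers(list(range(32)), n=5) -> [0, 7, 15, 23, 31] |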
|
|
|
|
| def plot_delta_consistency_trajectory( |
| delta_consistency: dict, |
| scale: str, |
| model_type: str, |
| save_path: str, |
| ): |
| """Plot Δ consistency (mean pairwise cosine of Δ vectors) across layers, per group.""" |
| fig, ax = plt.subplots(figsize=(12, 6)) |
| colors = {'horizontal': '#2ca02c', 'vertical': '#ff7f0e', 'distance': '#9467bd'} |
|
|
| for group in GROUP_ORDER: |
| layers, vals = [], [] |
| for (g, l), v in sorted(delta_consistency.items(), key=lambda x: x[0][1]): |
| if g == group: |
| layers.append(l) |
| vals.append(v['mean']) |
| if layers: |
| ax.plot(layers, vals, '-o', color=colors[group], label=group, linewidth=2, markersize=3) |
|
|
| ax.set_xlabel('Layer Index', fontsize=12) |
| ax.set_ylabel('Δ Consistency (mean pairwise cosine)', fontsize=12) |
| ax.set_title(f'{model_type.upper()} ({scale}) - Within-Group Δ Vector Consistency', fontsize=14, fontweight='bold') |
| ax.legend(fontsize=11) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| def plot_cross_group_alignment_trajectory( |
| cross_alignment: dict, |
| scale: str, |
| model_type: str, |
| save_path: str, |
| ): |
| """Plot cos(Δ_vert, Δ_dist) across layers, with permutation baseline.""" |
| fig, ax = plt.subplots(figsize=(12, 6)) |
|
|
| layers = sorted(cross_alignment.keys()) |
| actual = [cross_alignment[l]['per_sample_mean'] for l in layers] |
| mean_delta = [cross_alignment[l]['mean_delta_alignment'] for l in layers] |
| perm_mean = [cross_alignment[l]['permutation_mean'] for l in layers] |
| perm_std = [cross_alignment[l]['permutation_std'] for l in layers] |
|
|
| ax.plot(layers, actual, '-o', color='#d62728', label='cos(Δ_vert, Δ_dist) per-sample mean', |
| linewidth=2.5, markersize=3) |
| ax.plot(layers, mean_delta, '--s', color='#e377c2', label='cos(mean_Δ_vert, mean_Δ_dist)', |
| linewidth=1.5, markersize=3) |
| ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5) |
| ax.fill_between(layers, |
| [m - 2*s for m, s in zip(perm_mean, perm_std)], |
| [m + 2*s for m, s in zip(perm_mean, perm_std)], |
| alpha=0.2, color='gray') |
|
|
| ax.set_xlabel('Layer Index', fontsize=12) |
| ax.set_ylabel('Cosine Alignment', fontsize=12) |
| ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Δ Alignment (Perspective Bias)', |
| fontsize=14, fontweight='bold') |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| def plot_pca_embeddings( |
| vectors_npz_path: str, |
| scale: str, |
| model_type: str, |
| save_dir: str, |
| ): |
| """PCA visualization for representative layers.""" |
| data = np.load(vectors_npz_path, allow_pickle=True) |
|
|
| # Discover which layers were saved in this npz |
| layer_keys = [k for k in data.files if k.startswith('orig_L')] |
| layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys]) |
|
|
| for layer in layers: |
| orig = data.get(f'orig_L{layer}') |
| swap = data.get(f'swap_L{layer}') |
| labels = data.get(f'labels_L{layer}') |
| deltas = data.get(f'delta_L{layer}') |
| groups = data.get(f'groups_L{layer}') |
| cats = data.get(f'categories_L{layer}') |
|
|
| if orig is None or swap is None: |
| continue |
|
|
| # Three panels: raw embeddings, Δ by group, Δ by category |
| fig, axes = plt.subplots(1, 3, figsize=(24, 7)) |
|
|
| pca = PCA(n_components=2) |
| all_vecs = np.vstack([orig, swap]) |
| all_pca = pca.fit_transform(all_vecs) |
| orig_pca = all_pca[:len(orig)] |
| swap_pca = all_pca[len(orig):] |
|
|
| cat_colors = { |
| 'left': '#1f77b4', 'right': '#aec7e8', |
| 'above': '#ff7f0e', 'under': '#ffbb78', |
| 'far': '#2ca02c', 'close': '#98df8a', |
| } |
|
|
| ax = axes[0] |
| for cat in CATEGORY_ORDER: |
| mask = np.array([str(l) == cat for l in labels]) |
| if mask.any(): |
| ax.scatter(orig_pca[mask, 0], orig_pca[mask, 1], |
| c=cat_colors.get(cat, 'gray'), label=f'{cat} (orig)', |
| alpha=0.5, s=15, marker='o') |
| ax.scatter(swap_pca[mask, 0], swap_pca[mask, 1], |
| c=cat_colors.get(cat, 'gray'), |
| alpha=0.5, s=15, marker='x') |
| ax.set_title('Embeddings by Category\n(o=original, x=swapped)', fontsize=11) |
| ax.legend(fontsize=7, ncol=2, loc='best') |
| ax.grid(True, alpha=0.2) |
|
|
| # Panel 2: Δ vectors by group. Fit the Δ-space PCA once, outside the branch, |
| # so panel 3 can reuse the projection even when group labels are absent. |
| ax = axes[1] |
| delta_pca = None |
| if deltas is not None: |
| pca_d = PCA(n_components=2) |
| delta_pca = pca_d.fit_transform(deltas) |
| if delta_pca is not None and groups is not None: |
| group_colors = {'horizontal': '#2ca02c', 'vertical': '#ff7f0e', 'distance': '#9467bd'} |
| for group in GROUP_ORDER: |
| mask = np.array([str(g) == group for g in groups]) |
| if mask.any(): |
| ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1], |
| c=group_colors.get(group, 'gray'), label=group, |
| alpha=0.5, s=15) |
| ax.set_title('Δ Vectors by Group', fontsize=11) |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.2) |
|
|
| # Panel 3: the same Δ-space projection, colored by fine-grained category |
| ax = axes[2] |
| if delta_pca is not None and cats is not None: |
| for cat in CATEGORY_ORDER: |
| mask = np.array([str(c) == cat for c in cats]) |
| if mask.any(): |
| ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1], |
| c=cat_colors.get(cat, 'gray'), label=cat, |
| alpha=0.5, s=15) |
| ax.set_title('Δ Vectors by Category', fontsize=11) |
| ax.legend(fontsize=8, ncol=2) |
| ax.grid(True, alpha=0.2) |
|
|
| fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - PCA', |
| fontsize=14, fontweight='bold') |
| plt.tight_layout() |
| plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, bbox_inches='tight') |
| plt.close() |
|
|
| logger.info(f"Saved PCA plots to {save_dir}") |
|
|
|
|
| def plot_cross_scale_consistency( |
| all_consistency: Dict[str, dict], |
| model_type: str, |
| save_path: str, |
| ): |
| """Compare Δ consistency across scales for each group.""" |
| fig, axes = plt.subplots(1, 3, figsize=(21, 6)) |
| scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] |
|
|
| for idx, group in enumerate(GROUP_ORDER): |
| ax = axes[idx] |
| for scale in scale_order: |
| if scale not in all_consistency: |
| continue |
| consistency = all_consistency[scale] |
| layers, vals = [], [] |
| for (g, l), v in sorted(consistency.items(), key=lambda x: x[0][1]): |
| if g == group: |
| layers.append(l) |
| vals.append(v['mean']) |
| if layers: |
| color = SCALE_COLORS.get(scale, 'gray') |
| ax.plot(layers, vals, '-', color=color, label=scale, linewidth=2) |
|
|
| ax.set_xlabel('Layer Index', fontsize=11) |
| ax.set_ylabel('Δ Consistency', fontsize=11) |
| ax.set_title(f'{group}', fontsize=13, fontweight='bold') |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.3) |
|
|
| fig.suptitle(f'{model_type.upper()} - Δ Consistency Across Scales', |
| fontsize=15, fontweight='bold', y=1.02) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| def plot_cross_scale_alignment( |
| all_alignment: Dict[str, dict], |
| model_type: str, |
| save_path: str, |
| ): |
| """Compare cross-group alignment across scales.""" |
| fig, ax = plt.subplots(figsize=(12, 6)) |
| scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] |
|
|
| for scale in scale_order: |
| if scale not in all_alignment: |
| continue |
| alignment = all_alignment[scale] |
| layers = sorted(alignment.keys()) |
| vals = [alignment[l]['per_sample_mean'] for l in layers] |
| color = SCALE_COLORS.get(scale, 'gray') |
| ax.plot(layers, vals, '-', color=color, label=scale, linewidth=2) |
|
|
| ax.set_xlabel('Layer Index', fontsize=12) |
| ax.set_ylabel('cos(Δ_vert, Δ_dist)', fontsize=12) |
| ax.set_title(f'{model_type.upper()} - Cross-Group Alignment Across Scales\n' |
| f'(High=entangled, Low=disentangled)', |
| fontsize=14, fontweight='bold') |
| ax.legend(fontsize=10) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| def plot_summary_barplot( |
| all_consistency: Dict[str, dict], |
| all_alignment: Dict[str, dict], |
| model_type: str, |
| save_path: str, |
| ): |
| """Summary bar plot: for the deepest layer, show Δ consistency per group + alignment.""" |
| scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] |
| available_scales = [s for s in scale_order if s in all_consistency] |
|
|
| if not available_scales: |
| return |
|
|
| # Use the deepest layer present in the consistency results |
| sample_cons = all_consistency[available_scales[0]] |
| max_layer = max(l for (_, l) in sample_cons.keys()) |
|
|
| fig, axes = plt.subplots(1, 2, figsize=(16, 6)) |
|
|
| # Left: Δ consistency per group |
| ax = axes[0] |
| x = np.arange(len(GROUP_ORDER)) |
| width = 0.8 / len(available_scales) |
| for i, scale in enumerate(available_scales): |
| cons = all_consistency[scale] |
| vals = [cons.get((g, max_layer), {}).get('mean', 0) for g in GROUP_ORDER] |
| offset = (i - len(available_scales) / 2 + 0.5) * width |
| color = SCALE_COLORS.get(scale, 'gray') |
| ax.bar(x + offset, vals, width, label=scale, color=color) |
| ax.set_xticks(x) |
| ax.set_xticklabels(GROUP_ORDER) |
| ax.set_ylabel('Δ Consistency') |
| ax.set_title(f'Δ Consistency at Layer {max_layer}', fontweight='bold') |
| ax.legend(fontsize=8) |
| ax.grid(True, alpha=0.3, axis='y') |
|
|
| # Right: cross-group alignment |
| ax = axes[1] |
| available_align_scales = [s for s in available_scales if s in all_alignment] |
| if available_align_scales: |
| vals = [] |
| colors_list = [] |
| for scale in available_align_scales: |
| alignment = all_alignment[scale] |
| val = alignment.get(max_layer, {}).get('per_sample_mean', 0) |
| vals.append(val) |
| colors_list.append(SCALE_COLORS.get(scale, 'gray')) |
|
|
| ax.bar(range(len(vals)), vals, color=colors_list) |
| ax.set_xticks(range(len(vals))) |
| ax.set_xticklabels(available_align_scales, fontsize=10) |
| ax.set_ylabel('cos(Δ_vert, Δ_dist)') |
| ax.set_title(f'Cross-Group Alignment at Layer {max_layer}\n(Lower=more disentangled)', |
| fontweight='bold') |
| ax.grid(True, alpha=0.3, axis='y') |
|
|
| fig.suptitle(f'{model_type.upper()} - Summary at Deepest Layer', |
| fontsize=15, fontweight='bold', y=1.02) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| # --------------------------------------------------------------------------- |
| # Per-scale pipeline & merge |
| # --------------------------------------------------------------------------- |
|
|
| def process_scale(args, scale: str, swap_pairs: List[dict], quads: List[dict]): |
| """Process a single scale: extract features, analyze, save.""" |
| model_configs = MODEL_CONFIGS[args.model_type] |
| model_path = model_configs[scale] |
|
|
| logger.info(f"\n{'='*60}") |
| logger.info(f"Processing {args.model_type} - {scale}") |
| logger.info(f"Model path: {model_path}") |
| logger.info(f"{'='*60}") |
|
|
| extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device) |
| target_layers = extractor.target_layers |
|
|
| # Phase A: two forward passes per swap pair |
| logger.info("\n--- Phase A: Extracting swap pair features ---") |
| swap_records = extract_swap_features(extractor, swap_pairs, |
| max_samples_per_category=args.max_samples_per_category) |
|
|
| # Phase B: four forward passes per quad |
| logger.info("\n--- Phase B: Extracting cross-group features ---") |
| quad_records = extract_cross_group_features(extractor, quads) if quads else [] |
|
|
| # Phase C: consistency, alignment, accuracy |
| logger.info("\n--- Phase C: Analysis ---") |
| delta_consistency = compute_delta_consistency(swap_records, target_layers) |
| cross_alignment = compute_cross_group_alignment(quad_records, target_layers) |
| pred_stats = compute_prediction_stats(swap_records, scale) |
|
|
| # Log headline numbers at the deepest layer |
| max_layer = max(target_layers) |
| for group in GROUP_ORDER: |
| key = (group, max_layer) |
| if key in delta_consistency: |
| logger.info(f" Δ consistency [{group}, L{max_layer}]: " |
| f"{delta_consistency[key]['mean']:.4f} ± {delta_consistency[key]['std']:.4f}") |
| if max_layer in cross_alignment: |
| ca = cross_alignment[max_layer] |
| logger.info(f" Cross-group alignment L{max_layer}: " |
| f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})") |
|
|
| logger.info(f" Accuracy orig={pred_stats['overall_acc_orig']:.1%}, " |
| f"swap={pred_stats['overall_acc_swap']:.1%}, " |
| f"both={pred_stats['overall_acc_both']:.1%}") |
|
|
| # Phase D: persist per-scale artifacts |
| logger.info("\n--- Phase D: Saving results ---") |
| output_dir = os.path.join(args.output_dir, args.model_type) |
| os.makedirs(output_dir, exist_ok=True) |
|
|
| save_scale_results( |
| scale, swap_records, quad_records, delta_consistency, |
| cross_alignment, pred_stats, target_layers, output_dir, |
| ) |
|
|
| # Phase E: per-scale plots |
| logger.info("\n--- Phase E: Per-scale plots ---") |
| plots_dir = os.path.join(output_dir, 'plots') |
| os.makedirs(plots_dir, exist_ok=True) |
|
|
| plot_delta_consistency_trajectory( |
| delta_consistency, scale, args.model_type, |
| os.path.join(plots_dir, f'delta_consistency_{scale}.png') |
| ) |
|
|
| if cross_alignment: |
| plot_cross_group_alignment_trajectory( |
| cross_alignment, scale, args.model_type, |
| os.path.join(plots_dir, f'cross_alignment_{scale}.png') |
| ) |
|
|
| npz_path = os.path.join(output_dir, f'vectors_{scale}.npz') |
| if os.path.exists(npz_path): |
| pca_dir = os.path.join(plots_dir, 'pca') |
| os.makedirs(pca_dir, exist_ok=True) |
| plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir) |
|
|
| # Release features and GPU memory before the next scale |
| del swap_records, quad_records |
| extractor.cleanup() |
|
|
| logger.info(f"\n Scale {scale} complete.") |
|
|
|
|
| def run_merge(args): |
| """Merge mode: load per-scale results, generate cross-scale comparisons.""" |
| output_dir = os.path.join(args.output_dir, args.model_type) |
| plots_dir = os.path.join(output_dir, 'plots') |
| os.makedirs(plots_dir, exist_ok=True) |
|
|
| scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] |
| available_scales = [s for s in scale_order if s in args.scales] |
|
|
| all_consistency = {} |
| all_alignment = {} |
|
|
| for scale in available_scales: |
| cons = load_scale_consistency(output_dir, scale) |
| if cons: |
| all_consistency[scale] = cons |
| logger.info(f"Loaded consistency for {scale}: {len(cons)} entries") |
|
|
| align = load_scale_alignment(output_dir, scale) |
| if align: |
| all_alignment[scale] = align |
| logger.info(f"Loaded alignment for {scale}: {len(align)} entries") |
|
|
| # Cross-scale comparison plots need at least two scales |
| if len(all_consistency) > 1: |
| plot_cross_scale_consistency( |
| all_consistency, args.model_type, |
| os.path.join(plots_dir, 'cross_scale_consistency.png') |
| ) |
|
|
| if len(all_alignment) > 1: |
| plot_cross_scale_alignment( |
| all_alignment, args.model_type, |
| os.path.join(plots_dir, 'cross_scale_alignment.png') |
| ) |
|
|
| if all_consistency: |
| plot_summary_barplot( |
| all_consistency, all_alignment, args.model_type, |
| os.path.join(plots_dir, 'summary_barplot.png') |
| ) |
|
|
| # Collect per-scale accuracy stats into one summary table |
| summary_rows = [] |
| for scale in available_scales: |
| pred_path = os.path.join(output_dir, f'pred_stats_{scale}.json') |
| if os.path.exists(pred_path): |
| with open(pred_path) as f: |
| row = json.load(f) |
| # Attach deepest-layer alignment numbers to the row |
| if scale in all_alignment: |
| max_layer = max(all_alignment[scale].keys()) |
| row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean'] |
| row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean'] |
| summary_rows.append(row) |
|
|
| if summary_rows: |
| pd.DataFrame(summary_rows).to_csv( |
| os.path.join(output_dir, 'summary.csv'), index=False |
| ) |
| logger.info(f"Saved summary CSV") |
|
|
| logger.info(f"\n=== Merge Complete ===\nResults in: {output_dir}") |
|
|
|
|
| def main(): |
| parser = argparse.ArgumentParser(description='Exp 2-A Swap Analysis') |
| parser.add_argument('--data_path', type=str, |
| default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv') |
| parser.add_argument('--model_type', type=str, required=True, |
| choices=['molmo', 'nvila', 'qwen']) |
| parser.add_argument('--scales', type=str, nargs='+', |
| default=['vanilla', '80k', '400k', '800k', '2m']) |
| parser.add_argument('--output_dir', type=str, |
| default='/data/shared/Qwen/experiments/exp2a_swap_analysis/results') |
| parser.add_argument('--device', type=str, default='cuda') |
| parser.add_argument('--seed', type=int, default=42) |
| parser.add_argument('--merge', action='store_true', |
| help='Merge mode: read per-scale results and generate cross-scale plots.') |
| parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer') |
| parser.add_argument('--skip-cross-group', action='store_true', |
| help='Skip cross-group analysis (faster, no HF dataset needed).') |
| parser.add_argument('--max-samples-per-category', type=int, default=200, |
| help='Limit samples per category (default=200). Set 0 for no limit.') |
|
|
| args = parser.parse_args() |
|
|
| if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer: |
| args.scales.append('roborefer') |
|
|
| np.random.seed(args.seed) |
| torch.manual_seed(args.seed) |
| random.seed(args.seed) |
|
|
| # Merge mode only reads saved results; no models are loaded |
| if args.merge: |
| logger.info("\n=== MERGE MODE ===") |
| run_merge(args) |
| return |
|
|
| # Build swap pairs once; they are reused for every scale |
| logger.info("\n=== Loading & Creating Swap Pairs ===") |
| swap_pairs = load_swap_pairs(args.data_path, args.seed) |
|
|
| # Cross-group quads need object bboxes from the HF copy of the benchmark |
| quads = [] |
| if not args.skip_cross_group: |
| try: |
| hf_cache = build_hf_bbox_cache() |
| quads = create_cross_group_quads(swap_pairs, hf_cache) |
| except Exception as e: |
| logger.warning(f"Cross-group setup failed: {e}. Skipping cross-group analysis.") |
| quads = [] |
|
|
| model_configs = MODEL_CONFIGS[args.model_type] |
|
|
| for scale in args.scales: |
| if scale not in model_configs: |
| logger.warning(f"Scale {scale} not in config for {args.model_type}, skipping...") |
| continue |
|
|
| model_path = model_configs[scale] |
| if not os.path.exists(model_path) and not model_path.startswith(('Qwen/', 'allenai/')): |
| logger.warning(f"Model path not found: {model_path}, skipping...") |
| continue |
|
|
| try: |
| process_scale(args, scale, swap_pairs, quads) |
| except Exception as e: |
| logger.error(f"Failed {args.model_type} - {scale}: {e}") |
| import traceback |
| traceback.print_exc() |
| continue |
|
|
| logger.info(f"\n{'='*60}") |
| logger.info("=== All scales complete ===") |
| logger.info(f"Results: {os.path.join(args.output_dir, args.model_type)}") |
| logger.info(f"{'='*60}") |
|
|
|
|
| if __name__ == '__main__': |
| main() |