diff --git a/correct_filter/correct_filter_analysis.py b/correct_filter/correct_filter_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..4c4c388339a7562734c428b557557171e05d11be --- /dev/null +++ b/correct_filter/correct_filter_analysis.py @@ -0,0 +1,1583 @@ +#!/usr/bin/env python3 +""" +Correct Filter Analysis: Correctness-Filtered Representation Analysis + +Extends the original experiment by: +- Generating model predictions to determine correctness +- Filtering samples into correct/incorrect groups with balanced sampling +- Running similarity analysis on each group separately +- Recording per-scale, per-category accuracy +- Comparing correct-only vs incorrect-only vs all to check whether + scaling effects on similarity are genuine or just accuracy-driven + +Fixes applied: +- Fix 1: "Answer with only one word." appended to all prompts +- Fix 2: Synonym handling (below/beneath->under, near/nearby->close, distant->far) +- Fix 3: Overlay trajectory plots (correct+all, correct+incorrect, all three) + plus cross-scale versions for correct-only and all-samples +""" + +import os +import sys +import json +import argparse +import base64 +import logging +import random +import re +from io import BytesIO +from collections import defaultdict +from typing import Dict, List, Tuple, Optional, Any +from abc import ABC, abstractmethod + +import torch +import numpy as np +import pandas as pd +from PIL import Image +from tqdm import tqdm +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import seaborn as sns +from sklearn.metrics.pairwise import cosine_similarity + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +# ============================================================================ +# Constants +# ============================================================================ + +CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 
'close'] + +OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'under', 'under': 'above', + 'far': 'close', 'close': 'far', +} + +# Fix 2: Synonyms for answer matching +SYNONYMS = { + 'under': ['below', 'beneath'], + 'close': ['near', 'nearby'], + 'far': ['distant'], +} + +TRAJECTORY_PAIRS = { + 'hypothesis': [ + ('above', 'far', 'above-far', '#d62728'), + ('under', 'close', 'under-close', '#1f77b4'), + ], + 'within_axis': [ + ('left', 'right', 'left-right', '#2ca02c'), + ('above', 'under', 'above-under', '#ff7f0e'), + ('far', 'close', 'far-close', '#9467bd'), + ], + 'counter_hypothesis': [ + ('above', 'close', 'above-close', '#e377c2'), + ('under', 'far', 'under-far', '#17becf'), + ], +} + +# Key pairs for overlay trajectory plots (Fix 3) +KEY_PAIRS = [ + ('above', 'far', 'above-far'), + ('under', 'close', 'under-close'), + ('left', 'right', 'left-right'), + ('above', 'under', 'above-under'), + ('far', 'close', 'far-close'), +] + +SCALE_COLORS = { + 'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c', + '800k': '#d62728', '2m': '#9467bd', 'roborefer': '#8c564b', +} + +MODEL_CONFIGS = { + 'molmo': { + 'vanilla': 'allenai/Molmo-7B-O-0924', + '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared', + '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared', + '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared', + '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared', + }, + 'nvila': { + 'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B', + # '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221', + # '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221', + # '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221', + # '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632', + '80k': 
'/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-1250', + '400k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-6250', + '800k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-12500', + '2m': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-31250', + 'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model', + }, + 'qwen': { + 'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct', + '80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221', + '400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221', + '800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221', + '2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517', + }, +} + + +# ============================================================================ +# Data Loading & Modification +# ============================================================================ + +OBJECT_PATTERNS = [ + re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE), + re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE), + re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), +] + + +def extract_objects(question: str) -> Tuple[str, str]: + for pattern in OBJECT_PATTERNS: + m = pattern.search(question) + if m: + return m.group(1).strip(), m.group(2).strip() + raise ValueError(f"Could not extract objects from: {question}") + + +def modify_pairwise_sample(sample: dict) -> dict: + obj1, obj2 = extract_objects(sample['question']) + category = 
sample['category'] + + # Fix 1: Add "Answer with only one word." + if category in ['left', 'right']: + new_question = f"Is the {obj1} to the left or right of the {obj2}? Answer with only one word." + else: # above, under + new_question = f"Is the {obj1} above or under the {obj2}? Answer with only one word." + + return { + 'index': sample['index'], + 'image_base64': sample['image_base64'], + 'question': new_question, + 'answer': category, + 'category': category, + } + + +def modify_distance_sample(sample: dict, rng: random.Random) -> dict: + category = sample['category'] + answer_key = sample['answer'] + options = sample['options'] + + target_object = options[answer_key] + candidates = [v for k, v in options.items() if k != answer_key] + reference_object = rng.choice(candidates) + + # Fix 1: Add "Answer with only one word." + new_question = f"Compared to {reference_object}, is {target_object} far or close from you? Answer with only one word." + + return { + 'index': sample['index'], + 'image_base64': sample['image_base64'], + 'question': new_question, + 'answer': category, + 'category': category, + } + + +def load_and_modify_data(tsv_path: str, seed: int = 42) -> Dict[str, List[dict]]: + """Load ALL samples (no per-category limit) to maximize data for correct/incorrect filtering.""" + rng = random.Random(seed) + np.random.seed(seed) + + df = pd.read_csv(tsv_path, sep='\t') + + raw_grouped = defaultdict(list) + for _, row in df.iterrows(): + category = row['category'] + sample = { + 'index': row['index'], + 'image_base64': row['image'], + 'question': row['question'], + 'answer': row['answer'], + 'category': category, + 'options': {'A': row['A'], 'B': row['B'], 'C': row['C'], 'D': row['D']} + } + raw_grouped[category].append(sample) + + modified_data = defaultdict(list) + stats = {'total': 0, 'success': 0, 'failed': 0} + + for category in CATEGORY_ORDER: + samples = raw_grouped[category] + for sample in samples: + stats['total'] += 1 + try: + if category in ['left', 
'right', 'above', 'under']: + modified = modify_pairwise_sample(sample) + else: + modified = modify_distance_sample(sample, rng) + assert modified['answer'] == modified['category'] + modified_data[category].append(modified) + stats['success'] += 1 + except Exception as e: + stats['failed'] += 1 + logger.warning(f" Failed to modify sample {sample['index']}: {e}") + + logger.info(f"Data modification: {stats['success']}/{stats['total']} success, {stats['failed']} failed") + for cat in CATEGORY_ORDER: + if cat in modified_data: + logger.info(f" {cat}: {len(modified_data[cat])} samples") + ex = modified_data[cat][0] + logger.info(f" Example Q: {ex['question']}") + logger.info(f" Example A: {ex['answer']}") + + return dict(modified_data) + + +def decode_base64_image(base64_str: str) -> Image.Image: + image_data = base64.b64decode(base64_str) + return Image.open(BytesIO(image_data)).convert('RGB') + + +# ============================================================================ +# Answer Matching (Fix 2: synonym support) +# ============================================================================ + +def find_earliest_position(text: str, word: str) -> int: + """Find earliest position of word or any of its synonyms in text.""" + positions = [] + pos = text.find(word) + if pos != -1: + positions.append(pos) + for syn in SYNONYMS.get(word, []): + pos = text.find(syn) + if pos != -1: + positions.append(pos) + return min(positions) if positions else -1 + + +def check_answer(generated_text: str, expected_category: str) -> bool: + """Check if model's generated text matches the expected category. + + Uses synonym-aware matching: finds which of the two options + (expected vs opposite, including synonyms) appears first. 
+ """ + if not generated_text or not generated_text.strip(): + return False + + text = generated_text.strip().lower() + expected = expected_category.lower() + opposite = OPPOSITE_MAP[expected] + + pos_exp = find_earliest_position(text, expected) + pos_opp = find_earliest_position(text, opposite) + + if pos_exp == -1: + return False + if pos_opp == -1: + return True + return pos_exp < pos_opp + + +# ============================================================================ +# Base Extractor (prefill-only hooks + extract_and_predict) +# ============================================================================ + +class BaseHiddenStateExtractor(ABC): + def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None): + self.model_path = model_path + self.device = device + self.hidden_states = {} + self.hooks = [] + self._load_model() + num_layers = self._get_num_layers() + if target_layers is None: + self.target_layers = list(range(num_layers)) + logger.info(f"Model has {num_layers} layers. Extracting ALL layers (0..{num_layers-1})") + else: + self.target_layers = target_layers + logger.info(f"Model has {num_layers} layers. 
Target layers: {self.target_layers}") + self._register_hooks() + + def _register_hooks(self): + for layer_idx in self.target_layers: + module = self._get_layer_module(layer_idx) + if module is not None: + hook = module.register_forward_hook(self._make_hook(layer_idx)) + self.hooks.append(hook) + + def _make_hook(self, layer_idx: int): + def hook_fn(module, input, output): + if isinstance(output, tuple): + hidden = output[0] + else: + hidden = output + if hidden.shape[1] > 1: # prefill only + last_token = hidden[:, -1, :].detach().cpu().float() + self.hidden_states[layer_idx] = last_token.squeeze(0) + return hook_fn + + @abstractmethod + def _load_model(self): pass + @abstractmethod + def _get_num_layers(self) -> int: pass + @abstractmethod + def _get_layer_module(self, layer_idx: int): pass + @abstractmethod + def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass + + def cleanup(self): + for hook in self.hooks: + hook.remove() + self.hooks = [] + if hasattr(self, 'model'): + del self.model + if hasattr(self, 'processor'): + del self.processor + torch.cuda.empty_cache() + + +# ============================================================================ +# Molmo Extractor +# ============================================================================ + +class MolmoExtractor(BaseHiddenStateExtractor): + def _load_model(self): + config_path = os.path.join(self.model_path, "config.yaml") + checkpoint_path = os.path.join(self.model_path, "model.pt") + if os.path.exists(config_path) and os.path.exists(checkpoint_path): + self._load_native_model() + self.is_native = True + else: + self._load_hf_model() + self.is_native = False + + def _load_native_model(self): + from olmo.config import ModelConfig + from olmo.model import Molmo as NativeMolmoModel + from olmo.data.model_preprocessor import MultiModalPreprocessor + from olmo.data.data_formatter import DataFormatter + + _original_load = torch.load + def 
_unsafe_load_wrapper(*args, **kwargs): + if 'weights_only' not in kwargs: + kwargs['weights_only'] = False + return _original_load(*args, **kwargs) + torch.load = _unsafe_load_wrapper + + cfg = ModelConfig.load( + os.path.join(self.model_path, "config.yaml"), + key="model", validate_paths=False + ) + cfg.init_device = "cpu" + self.model = NativeMolmoModel(cfg) + state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu") + self.model.load_state_dict(state_dict) + self.model = self.model.to(self.device, dtype=torch.bfloat16).eval() + self.tokenizer = cfg.get_tokenizer() + + v_cfg = cfg.vision_backbone + h, w = cfg.llm_patches_per_crop() + image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None) + + class SafeDataFormatter(DataFormatter): + def get_system_prompt(self, style, for_inference, messages, rng=None): + if style is None: + style = "User" + return super().get_system_prompt(style, for_inference, messages, rng) + + self.formatter = SafeDataFormatter( + prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting, + system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space, + default_inference_len=cfg.default_inference_len + ) + self.preprocessor = MultiModalPreprocessor( + tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type), + crop_mode=cfg.crop_mode, max_crops=cfg.max_crops, + overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode, + use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size, + image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h, + image_token_length_w=w, image_token_length_h=h, + image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask, + pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting, + ) + logger.info(f"Loaded native Molmo from {self.model_path}") + + def _load_hf_model(self): + from transformers import 
AutoModelForCausalLM, AutoProcessor + self.model = AutoModelForCausalLM.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16, + trust_remote_code=True, device_map=self.device + ).eval() + self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) + logger.info(f"Loaded HF Molmo from {self.model_path}") + + def _get_num_layers(self) -> int: + if self.is_native: + return len(self.model.transformer.blocks) + if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'): + return len(self.model.model.transformer.blocks) + return 32 + + def _get_layer_module(self, layer_idx: int): + if self.is_native: + return self.model.transformer.blocks[layer_idx] + return self.model.model.transformer.blocks[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + if self.is_native: + example = {"messages": [question], "image": image} + messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random) + batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True) + if 'input_ids' not in batch and 'input_tokens' in batch: + batch['input_ids'] = batch['input_tokens'] + + def to_t(x): + return torch.from_numpy(x) if isinstance(x, np.ndarray) else x + + input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long() + images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16) + image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16) + image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device) + + with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): + gen = self.model.generate( + input_ids=input_ids, images=images_t, + image_masks=image_masks, image_input_idx=image_input_idx, + max_steps=20, beam_size=1, + ) + generated_ids = gen.token_ids[0, 0] + answer = self.tokenizer.decode(generated_ids.tolist()).strip() + for eos in 
['<|endoftext|>', '', '<|end|>']: + answer = answer.replace(eos, '').strip() + else: + from transformers import GenerationConfig + inputs = self.processor.process(images=[image], text=question) + processed = {} + for k, v in inputs.items(): + v = v.to(self.device).unsqueeze(0) + if v.dtype == torch.float32: + v = v.to(dtype=torch.bfloat16) + processed[k] = v + with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16): + output = self.model.generate_from_batch( + processed, + GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"), + tokenizer=self.processor.tokenizer, + ) + input_len = processed['input_ids'].shape[1] + answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip() + + return self.hidden_states.copy(), answer + + +# ============================================================================ +# NVILA Extractor +# ============================================================================ + +class NVILAExtractor(BaseHiddenStateExtractor): + def _load_model(self): + original_sys_path = sys.path.copy() + sys.path = [p for p in sys.path if 'RoboRefer' not in p] + modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] + removed = {m: sys.modules.pop(m) for m in modules_to_remove} + try: + import llava + from llava.media import Image as LLaVAImage + from llava import conversation as clib + except Exception as err: + sys.path = original_sys_path + for m, mod in removed.items(): + sys.modules[m] = mod + raise RuntimeError(f"Failed to import llava: {err}") + sys.path = original_sys_path + self.LLaVAImage = LLaVAImage + self.clib = clib + self.model = llava.load(self.model_path, model_base=None) + self._find_llm_backbone() + logger.info(f"Loaded NVILA from {self.model_path}") + + def _find_llm_backbone(self): + candidates = [] + if hasattr(self.model, 'llm'): + if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'): + 
candidates.append(self.model.llm.model.layers) + if hasattr(self.model.llm, 'layers'): + candidates.append(self.model.llm.layers) + if hasattr(self.model, 'model'): + if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'): + candidates.append(self.model.model.model.layers) + if hasattr(self.model.model, 'layers'): + candidates.append(self.model.model.layers) + for name, module in self.model.named_modules(): + if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0: + candidates.append(module) + if candidates: + self.llm_backbone = candidates[0] + else: + raise ValueError("Could not locate transformer layers in NVILA model") + + def _get_num_layers(self) -> int: + return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24 + + def _get_layer_module(self, layer_idx: int): + return self.llm_backbone[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + import tempfile + with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f: + temp_path = f.name + image.save(temp_path) + try: + prompt = [self.LLaVAImage(temp_path), question] + from transformers import GenerationConfig + response = self.model.generate_content( + prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False) + ) + finally: + os.unlink(temp_path) + answer = str(response[0] if isinstance(response, list) else response).strip() + return self.hidden_states.copy(), answer + + +class RoboReferExtractor(NVILAExtractor): + ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer' + + def _load_model(self): + original_sys_path = sys.path.copy() + if self.ROBOREFER_PATH not in sys.path: + sys.path.insert(0, self.ROBOREFER_PATH) + modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] + removed = {m: sys.modules.pop(m) for m in modules_to_remove} + try: + import llava + from llava.media import Image as LLaVAImage + from llava import conversation as clib + except Exception 
as err: + sys.path = original_sys_path + for m, mod in removed.items(): + sys.modules[m] = mod + raise RuntimeError(f"Failed to import RoboRefer llava: {err}") + sys.path = original_sys_path + self.LLaVAImage = LLaVAImage + self.clib = clib + self.model = llava.load(self.model_path, model_base=None) + self._find_llm_backbone() + logger.info(f"Loaded RoboRefer from {self.model_path}") + + +# ============================================================================ +# Qwen2.5-VL Extractor +# ============================================================================ + +class Qwen25VLExtractor(BaseHiddenStateExtractor): + BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct" + + def _load_model(self): + from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor + try: + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16, device_map=self.device + ) + except ImportError: + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16 + ).to(self.device) + self.model.eval() + if self.model_path.startswith('/'): + self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL) + else: + self.processor = AutoProcessor.from_pretrained(self.model_path) + logger.info(f"Loaded Qwen2.5-VL from {self.model_path}") + + def _get_num_layers(self) -> int: + return len(self.model.model.layers) + + def _get_layer_module(self, layer_idx: int): + return self.model.model.layers[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + messages = [{"role": "user", "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": question} + ]}] + text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + from qwen_vl_utils import process_vision_info + image_inputs, video_inputs = process_vision_info(messages) + inputs = self.processor( + text=[text], images=image_inputs, videos=video_inputs, 
+ padding=True, return_tensors="pt" + ).to(self.device) + with torch.no_grad(): + output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) + input_len = inputs['input_ids'].shape[1] + answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip() + return self.hidden_states.copy(), answer + + +def get_extractor(model_type: str, model_path: str, scale: str = None, **kwargs): + if model_type == 'nvila' and scale == 'roborefer': + return RoboReferExtractor(model_path, **kwargs) + extractors = {'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor} + return extractors[model_type](model_path, **kwargs) + + +# ============================================================================ +# Extraction with Per-Sample Recording +# ============================================================================ + +def extract_all_with_predictions( + extractor: BaseHiddenStateExtractor, + data: Dict[str, List[dict]], +) -> Dict[str, List[dict]]: + """Extract hidden states and predictions for all samples.""" + sample_records = defaultdict(list) + + for category in CATEGORY_ORDER: + if category not in data: + continue + samples = data[category] + logger.info(f"Processing category: {category} ({len(samples)} samples)") + success_count = 0 + + for sample in tqdm(samples, desc=f" {category}"): + try: + image = decode_base64_image(sample['image_base64']) + hidden_states, predicted = extractor.extract_and_predict(image, sample['question']) + + is_correct = check_answer(predicted, category) + mark = "O" if is_correct else "X" + tqdm.write(f" [{mark}] #{sample['index']:<6} expected={category:<8} | predicted=\"{predicted[:80]}\"") + + record = { + 'hidden_states': {}, + 'is_correct': is_correct, + 'predicted': predicted, + 'index': sample['index'], + } + + for layer_idx in extractor.target_layers: + if layer_idx in hidden_states: + state = hidden_states[layer_idx].numpy().flatten() + if state.size > 0: + 
record['hidden_states'][layer_idx] = state + + if record['hidden_states']: + sample_records[category].append(record) + success_count += 1 + else: + logger.warning(f" No hidden states for sample {sample['index']}") + except Exception as e: + logger.warning(f" Error processing sample {sample['index']}: {e}") + continue + + correct_n = sum(1 for r in sample_records[category] if r['is_correct']) + incorrect_n = sum(1 for r in sample_records[category] if not r['is_correct']) + acc = correct_n / (correct_n + incorrect_n) * 100 if (correct_n + incorrect_n) > 0 else 0 + logger.info(f" {category}: {success_count}/{len(samples)} extracted | " + f"correct={correct_n}, incorrect={incorrect_n}, accuracy={acc:.1f}%") + + total_correct = sum(1 for cat in sample_records for r in sample_records[cat] if r['is_correct']) + total_all = sum(len(sample_records[cat]) for cat in sample_records) + overall_acc = total_correct / total_all * 100 if total_all > 0 else 0 + logger.info(f"\n === Category Accuracy Summary ===") + for cat in CATEGORY_ORDER: + if cat in sample_records: + c = sum(1 for r in sample_records[cat] if r['is_correct']) + n = len(sample_records[cat]) + a = c / n * 100 if n > 0 else 0 + logger.info(f" {cat:>6s}: {c:>4d}/{n:<4d} = {a:5.1f}%") + logger.info(f" {'TOTAL':>6s}: {total_correct:>4d}/{total_all:<4d} = {overall_acc:5.1f}%") + logger.info(f" ================================\n") + + return dict(sample_records) + + +# ============================================================================ +# Balanced Sampling +# ============================================================================ + +def compute_balanced_size(sample_records: Dict[str, List[dict]], filter_correct: bool) -> int: + counts = [] + for cat in CATEGORY_ORDER: + if cat not in sample_records: + return 0 + n = sum(1 for s in sample_records[cat] if s['is_correct'] == filter_correct) + counts.append(n) + + min_count = min(counts) + if min_count == 0: + return 0 + + balanced = (min_count // 50) * 50 + if 
balanced == 0: + balanced = min_count + return balanced + + +def balanced_sample_and_average( + sample_records: Dict[str, List[dict]], + filter_correct: bool, + n_samples: int, + target_layers: List[int], + seed: int = 42, +) -> Dict[int, Dict[str, np.ndarray]]: + rng = random.Random(seed) + result = defaultdict(dict) + + for category in CATEGORY_ORDER: + filtered = [s for s in sample_records[category] if s['is_correct'] == filter_correct] + if len(filtered) < n_samples: + logger.warning(f" {category}: only {len(filtered)} samples, need {n_samples}") + continue + sampled = rng.sample(filtered, n_samples) + for layer_idx in target_layers: + vectors = [record['hidden_states'][layer_idx] + for record in sampled if layer_idx in record['hidden_states']] + if vectors: + result[layer_idx][category] = np.mean(vectors, axis=0) + + return dict(result) + + +def compute_all_samples_reps( + sample_records: Dict[str, List[dict]], + target_layers: List[int], +) -> Dict[int, Dict[str, np.ndarray]]: + """Compute average representations using ALL samples (no filtering).""" + result = defaultdict(dict) + for category in CATEGORY_ORDER: + records = sample_records.get(category, []) + if not records: + continue + for layer_idx in target_layers: + vectors = [r['hidden_states'][layer_idx] + for r in records if layer_idx in r['hidden_states']] + if vectors: + result[layer_idx][category] = np.mean(vectors, axis=0) + return dict(result) + + +# ============================================================================ +# Accuracy +# ============================================================================ + +def compute_accuracy_stats(sample_records, scale, model_type): + stats = {'model': model_type, 'scale': scale} + total_correct, total_count = 0, 0 + for cat in CATEGORY_ORDER: + records = sample_records.get(cat, []) + n = len(records) + correct = sum(1 for r in records if r['is_correct']) + stats[f'{cat}_total'] = n + stats[f'{cat}_correct'] = correct + stats[f'{cat}_accuracy'] = 
correct / n if n > 0 else 0.0
        total_correct += correct
        total_count += n
    # Micro-averaged accuracy over all categories combined.
    stats['overall_total'] = total_count
    stats['overall_correct'] = total_correct
    stats['overall_accuracy'] = total_correct / total_count if total_count > 0 else 0.0
    return stats


def save_per_sample_predictions(sample_records, scale, save_path):
    """Write one CSV row per sample: predicted vs. expected category and correctness.

    sample_records: {category -> [record dicts]} — each record carries
    'index', 'predicted' and 'is_correct' (set during extraction).
    """
    rows = []
    for cat in CATEGORY_ORDER:
        for record in sample_records.get(cat, []):
            # 'expected' is the category bucket the sample was drawn from.
            rows.append({
                'index': record['index'], 'category': cat, 'scale': scale,
                'predicted': record['predicted'], 'expected': cat,
                'is_correct': record['is_correct'],
            })
    pd.DataFrame(rows).to_csv(save_path, index=False)
    logger.info(f"Saved {len(rows)} per-sample predictions to {save_path}")


def save_per_sample_norms(sample_records, scale, save_path):
    """Save L2 norm of each sample's hidden state at each layer."""
    rows = []
    for cat in CATEGORY_ORDER:
        for record in sample_records.get(cat, []):
            row = {
                'index': record['index'],
                'category': cat,
                'scale': scale,
                'is_correct': record['is_correct'],
            }
            # One 'norm_L{idx}' column per extracted layer; consumed by norm_analysis.py.
            for layer_idx, state in record['hidden_states'].items():
                row[f'norm_L{layer_idx}'] = float(np.linalg.norm(state))
            rows.append(row)
    pd.DataFrame(rows).to_csv(save_path, index=False)
    logger.info(f"Saved {len(rows)} per-sample norms to {save_path}")


# ============================================================================
# Analysis Functions
# ============================================================================

def compute_similarity_matrix(representations: Dict[str, np.ndarray]) -> pd.DataFrame:
    """Pairwise cosine similarity between category representation vectors.

    Rows/columns follow CATEGORY_ORDER, restricted to categories present.
    """
    available = [c for c in CATEGORY_ORDER if c in representations]
    vectors = np.array([representations[cat] for cat in available])
    sim_matrix = cosine_similarity(vectors)
    return pd.DataFrame(sim_matrix, index=available, columns=available)


def analyze_hypothesis(sim_df, model_name):
    """Pull the three key pair similarities (above-far, under-close, left-right) from sim_df.

    Missing categories yield None so downstream CSVs keep a uniform schema.
    """
    results = {'model': model_name}
    pairs_to_check = {
        'above_far': ('above', 'far'), 'under_close': ('under', 'close'),
        'left_right': ('left', 'right'),
    }
    for pair_name, (cat1, cat2) in pairs_to_check.items():
        if cat1 in sim_df.index and cat2 in sim_df.columns:
            results[f'sim_{pair_name}'] = sim_df.loc[cat1, cat2]
        else:
            results[f'sim_{pair_name}'] = None
    return results


# ============================================================================
# Visualization
# ============================================================================

def plot_similarity_heatmap(sim_df, title, save_path):
    """Render a single-layer category-similarity matrix as an annotated heatmap."""
    plt.figure(figsize=(10, 8))
    available_order = [c for c in CATEGORY_ORDER if c in sim_df.index]
    sim_df_ordered = sim_df.loc[available_order, available_order]
    sns.heatmap(sim_df_ordered, annot=True, fmt='.4f', cmap='RdYlBu_r',
                center=0.5, vmin=0, vmax=1, square=True, linewidths=0.5,
                cbar_kws={'label': 'Cosine Similarity'})
    plt.title(title, fontsize=14, fontweight='bold')
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved heatmap: {save_path}")


def _extract_pair_trajectory(all_layer_sims, cat1, cat2):
    """Return (layers, similarities) for one category pair across layers.

    Layers whose similarity matrix lacks either category are skipped,
    so the two returned lists are always the same length.
    """
    layers = sorted(all_layer_sims.keys())
    valid_layers, values = [], []
    for l in layers:
        df = all_layer_sims[l]
        if cat1 in df.index and cat2 in df.columns:
            valid_layers.append(l)
            values.append(df.loc[cat1, cat2])
    return valid_layers, values


def get_representative_layers(all_layers, n=5):
    """Pick up to n layers evenly spaced over the list (always includes first and last)."""
    if len(all_layers) <= n:
        return list(all_layers)
    indices = np.linspace(0, len(all_layers) - 1, n, dtype=int)
    return [all_layers[i] for i in indices]


def plot_similarity_trajectories(all_layer_sims, title, save_path):
    """Two-panel plot: raw pair similarities per layer, and the same relative to left-right.

    Pair groupings come from the module-level TRAJECTORY_PAIRS mapping
    (defined earlier in the file; keys 'hypothesis', 'within_axis',
    'counter_hypothesis', each a list of (cat1, cat2, label, color)).
    """
    fig, axes = plt.subplots(1, 2, figsize=(20, 7))

    # Left panel: absolute similarity per layer, one line style per pair group.
    ax = axes[0]
    for cat1, cat2, label, color in TRAJECTORY_PAIRS['hypothesis']:
        layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
        ax.plot(layers, vals, '-', color=color, label=label, linewidth=2.5)
    for cat1, cat2, label, color in TRAJECTORY_PAIRS['within_axis']:
        layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
        ax.plot(layers, vals, '--', color=color, label=label, linewidth=1.8)
    for cat1, cat2, label, color in TRAJECTORY_PAIRS['counter_hypothesis']:
        layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
        ax.plot(layers, vals, ':', color=color, label=label, linewidth=1.5, alpha=0.8)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Cosine Similarity')
    ax.set_title(f'{title}\nPairwise Similarity Across Layers')
    ax.legend(fontsize=9, loc='best')
    ax.grid(True, alpha=0.3)

    # Right panel: each pair minus the left-right control at the same layer.
    ax = axes[1]
    lr_layers, lr_vals = _extract_pair_trajectory(all_layer_sims, 'left', 'right')
    lr_dict = dict(zip(lr_layers, lr_vals))
    for cat1, cat2, label, color in TRAJECTORY_PAIRS['hypothesis']:
        layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
        # Missing baseline layers fall back to 0, i.e. the raw similarity is shown.
        diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)]
        ax.plot(layers, diffs, '-', color=color, label=f'{label} - left-right', linewidth=2.5)
    for cat1, cat2, label, color in TRAJECTORY_PAIRS['counter_hypothesis']:
        layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
        diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)]
        ax.plot(layers, diffs, ':', color=color, label=f'{label} - left-right', linewidth=1.5, alpha=0.8)
    for cat1, cat2, label, color in TRAJECTORY_PAIRS['within_axis']:
        # left-right minus itself would be identically zero; skip it.
        if label == 'left-right':
            continue
        layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2)
        diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)]
        ax.plot(layers, diffs, '--', color=color, label=f'{label} - left-right', linewidth=1.5, alpha=0.7)
    ax.axhline(y=0, color='gray', linestyle='-', linewidth=1, alpha=0.5)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Similarity Difference (pair - left-right)')
    ax.set_title(f'{title}\nRelative to Left-Right Baseline')
    ax.legend(fontsize=8, loc='best')
    ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
logger.info(f"Saved trajectory: {save_path}") + + +def plot_cross_scale_trajectories(cross_scale_data, model_type, save_path): + pairs = [ + ('above', 'far', 'above-far (hypothesis)'), + ('under', 'close', 'under-close (hypothesis)'), + ('left', 'right', 'left-right (control)'), + ] + fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6)) + if len(pairs) == 1: + axes = [axes] + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + for scale in ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']: + if scale not in cross_scale_data: + continue + layers, vals = _extract_pair_trajectory(cross_scale_data[scale], cat1, cat2) + ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), label=scale, linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Cosine Similarity') + ax.set_title(label, fontweight='bold') + ax.legend(fontsize=10) + ax.grid(True, alpha=0.3) + fig.suptitle(f'{model_type.upper()} - Similarity Trajectory Across Scales', + fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved cross-scale trajectory: {save_path}") + + +def plot_similarity_evolution_heatmap(cross_scale_data, model_type, save_path): + pairs = [ + ('above', 'far', 'above-far'), ('under', 'close', 'under-close'), + ('left', 'right', 'left-right'), ('above', 'under', 'above-under'), + ('far', 'close', 'far-close'), + ] + scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] + available_scales = [s for s in scale_order if s in cross_scale_data] + first_scale = available_scales[0] + all_layers = sorted(cross_scale_data[first_scale].keys()) + + fig, axes = plt.subplots(len(pairs), 1, figsize=(max(14, len(all_layers) * 0.5), 3 * len(pairs))) + if len(pairs) == 1: + axes = [axes] + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + matrix = np.full((len(available_scales), len(all_layers)), np.nan) + for si, scale in 
enumerate(available_scales): + layer_sims = cross_scale_data[scale] + for li, layer in enumerate(all_layers): + if layer in layer_sims: + df = layer_sims[layer] + if cat1 in df.index and cat2 in df.columns: + matrix[si, li] = df.loc[cat1, cat2] + im = ax.imshow(matrix, aspect='auto', cmap='RdYlBu_r', vmin=0.5, vmax=1.0) + ax.set_yticks(range(len(available_scales))) + ax.set_yticklabels(available_scales, fontsize=10) + step = max(1, len(all_layers) // 15) + ax.set_xticks(range(0, len(all_layers), step)) + ax.set_xticklabels([str(all_layers[i]) for i in range(0, len(all_layers), step)], fontsize=8) + ax.set_title(label, fontweight='bold') + ax.set_xlabel('Layer Index') + fig.colorbar(im, ax=ax, label='Cosine Similarity', shrink=0.8) + fig.suptitle(f'{model_type.upper()} - Similarity Evolution (Layer x Scale)', + fontsize=15, fontweight='bold', y=1.01) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved evolution heatmap: {save_path}") + + +# ============================================================================ +# Fix 3: Overlay Trajectory Plots +# ============================================================================ + +def plot_overlay_trajectories( + datasets: Dict[str, Dict[int, pd.DataFrame]], + styles: Dict[str, Tuple[str, str, float]], + title: str, + save_path: str, +): + """Plot overlay trajectory for multiple datasets (correct, incorrect, all). 

    datasets: {name -> {layer -> sim_df}}
    styles: {name -> (linestyle, color, linewidth)}
    """
    # KEY_PAIRS is a module-level list of (cat1, cat2, label) triples
    # defined earlier in the file (outside this view).
    n_pairs = len(KEY_PAIRS)
    fig, axes = plt.subplots(1, n_pairs, figsize=(5.5 * n_pairs, 5.5))
    if n_pairs == 1:
        axes = [axes]

    for idx, (cat1, cat2, label) in enumerate(KEY_PAIRS):
        ax = axes[idx]
        for name, layer_sims in datasets.items():
            ls, color, lw = styles[name]
            layers, vals = _extract_pair_trajectory(layer_sims, cat1, cat2)
            if layers:
                ax.plot(layers, vals, linestyle=ls, color=color, label=name, linewidth=lw)
        ax.set_xlabel('Layer Index', fontsize=10)
        ax.set_ylabel('Cosine Similarity', fontsize=10)
        ax.set_title(label, fontsize=11, fontweight='bold')
        ax.legend(fontsize=8)
        ax.grid(True, alpha=0.3)

    fig.suptitle(title, fontsize=14, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved overlay trajectory: {save_path}")


def generate_overlay_plots(
    correct_sims, incorrect_sims, all_sims,
    scale, model_type, save_dir,
):
    """Generate all 3 overlay trajectory variants for a single scale.

    Each *_sims argument is {layer -> sim_df} or None/empty; variants are
    emitted only when their required inputs are present.
    """
    prefix = f'{model_type.upper()} ({scale})'

    # 1. correct + all
    if correct_sims and all_sims:
        plot_overlay_trajectories(
            {'correct': correct_sims, 'all': all_sims},
            {'correct': ('-', '#2ca02c', 2.5), 'all': ('--', '#7f7f7f', 1.8)},
            f'{prefix} - Correct vs All Samples',
            os.path.join(save_dir, f'overlay_correct_all_{scale}.png'),
        )

    # 2. correct + incorrect
    if correct_sims and incorrect_sims:
        plot_overlay_trajectories(
            {'correct': correct_sims, 'incorrect': incorrect_sims},
            {'correct': ('-', '#2ca02c', 2.5), 'incorrect': ('-', '#d62728', 2.5)},
            f'{prefix} - Correct vs Incorrect',
            os.path.join(save_dir, f'overlay_correct_incorrect_{scale}.png'),
        )

    # 3. correct + incorrect + all
    if correct_sims and all_sims:
        # incorrect is optional in this variant.
        ds = {'correct': correct_sims, 'all': all_sims}
        st = {'correct': ('-', '#2ca02c', 2.5), 'all': ('--', '#7f7f7f', 1.8)}
        if incorrect_sims:
            ds['incorrect'] = incorrect_sims
            st['incorrect'] = ('-', '#d62728', 2.0)
        plot_overlay_trajectories(
            ds, st,
            f'{prefix} - Correct vs Incorrect vs All',
            os.path.join(save_dir, f'overlay_all_{scale}.png'),
        )


# ============================================================================
# Accuracy & Ablation Visualization
# ============================================================================

def plot_accuracy_chart(accuracy_records, model_type, save_path):
    """Grouped bar chart: per-category accuracy plus overall, one bar group per scale."""
    fig, ax = plt.subplots(figsize=(14, 6))
    scales = [r['scale'] for r in accuracy_records]
    # One x slot per category plus a final 'overall' slot.
    x = np.arange(len(CATEGORY_ORDER) + 1)
    width = 0.8 / len(scales)
    for i, record in enumerate(accuracy_records):
        values = [record.get(f'{cat}_accuracy', 0) for cat in CATEGORY_ORDER]
        values.append(record.get('overall_accuracy', 0))
        # Center the group of bars around each tick.
        offset = (i - len(scales) / 2 + 0.5) * width
        color = SCALE_COLORS.get(record['scale'], 'gray')
        bars = ax.bar(x + offset, values, width, label=record['scale'], color=color)
        for bar, val in zip(bars, values):
            if val > 0:
                ax.annotate(f'{val:.0%}', xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
                            xytext=(0, 2), textcoords='offset points',
                            ha='center', va='bottom', fontsize=6, rotation=90)
    ax.set_ylabel('Accuracy')
    ax.set_title(f'{model_type.upper()} - Per-Category Accuracy Across Scales', fontweight='bold')
    ax.set_xticks(x)
    ax.set_xticklabels(CATEGORY_ORDER + ['overall'])
    ax.legend(fontsize=9)
    ax.set_ylim(0, 1.15)
    # 50% chance-level reference line.
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved accuracy chart: {save_path}")


def plot_ablation_summary(ablation_data, model_type, save_path, include_roborefer=False):
    """Two-panel ablation figure: correct-vs-all similarity per scale, and accuracy bars.

    ablation_data: list of per-scale dicts produced in main()'s Phase E,
    keyed like 'correct_{cat1}_{cat2}' / 'all_{cat1}_{cat2}' plus 'accuracy'.
    """
    pairs = [
        ('above', 'far', 'above-far', '#d62728'),
        ('under', 'close', 'under-close', '#1f77b4'),
        ('left', 'right', 'left-right', '#2ca02c'),
    ]
    if include_roborefer:
        scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
    else:
        scale_order = ['vanilla', '80k', '400k', '800k', '2m']

    fig, axes = plt.subplots(1, 2, figsize=(18, 7))

    # Left panel: mean similarity, correct-only (solid) vs all-samples (dashed).
    ax = axes[0]
    for cat1, cat2, label, color in pairs:
        x_vals, y_correct, y_all = [], [], []
        for i, scale in enumerate(scale_order):
            entry = next((d for d in ablation_data if d['scale'] == scale), None)
            if entry is None:
                continue
            sim_c = entry.get(f'correct_{cat1}_{cat2}')
            sim_a = entry.get(f'all_{cat1}_{cat2}')
            if sim_c is not None:
                x_vals.append(i)
                y_correct.append(sim_c)
                y_all.append(sim_a)
        if x_vals:
            ax.plot(x_vals, y_correct, '-o', color=color, label=f'{label} (correct)', linewidth=2.5)
            ax.plot(x_vals, y_all, '--s', color=color, label=f'{label} (all)', linewidth=1.5, alpha=0.6)
    ax.set_xticks(range(len(scale_order)))
    ax.set_xticklabels(scale_order)
    ax.set_xlabel('Scale')
    ax.set_ylabel('Cosine Similarity')
    ax.set_title('Correct-Only vs All-Samples Similarity', fontweight='bold')
    ax.legend(fontsize=8, loc='best')
    ax.grid(True, alpha=0.3)

    # Right panel: overall accuracy per scale.
    ax2 = axes[1]
    x_vals, acc_vals = [], []
    for i, scale in enumerate(scale_order):
        entry = next((d for d in ablation_data if d['scale'] == scale), None)
        if entry and 'accuracy' in entry:
            x_vals.append(i)
            acc_vals.append(entry['accuracy'])
    ax2.bar(x_vals, acc_vals, color=[SCALE_COLORS.get(scale_order[x], 'gray') for x in x_vals], alpha=0.8)
    for x, acc in zip(x_vals, acc_vals):
        ax2.annotate(f'{acc:.1%}', xy=(x, acc), xytext=(0, 5), textcoords='offset points',
                     ha='center', fontsize=10, fontweight='bold')
    ax2.set_xticks(range(len(scale_order)))
    ax2.set_xticklabels(scale_order)
    ax2.set_xlabel('Scale')
    ax2.set_ylabel('Overall Accuracy')
    ax2.set_title('Model Accuracy by Scale', fontweight='bold')
    ax2.set_ylim(0, 1.15)
    ax2.grid(True, alpha=0.3, axis='y')

    fig.suptitle(f'{model_type.upper()} - Ablation: Is Similarity Change Due to Accuracy?',
                 fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved ablation summary: {save_path}")


# ============================================================================
# Process Subset & CSV I/O
# ============================================================================

def process_subset(
    subset_name, all_layer_reps, target_layers, scale, model_type, output_dir, n_samples,
):
    """Compute similarity matrices and save outputs for one subset.

    all_layer_reps: {layer_idx -> {category -> mean representation}}.
    Returns ({layer_idx -> sim_df}, [per-layer hypothesis result dicts]).
    Side effects: writes per-layer similarity CSVs, representative-layer
    heatmaps and a trajectory plot under output_dir.
    """
    scale_sims = {}
    results_list = []

    for layer_idx in sorted(all_layer_reps.keys()):
        reps = all_layer_reps[layer_idx]
        # Need at least two categories for a meaningful similarity matrix.
        if len(reps) < 2:
            continue
        sim_df = compute_similarity_matrix(reps)
        scale_sims[layer_idx] = sim_df
        results = analyze_hypothesis(sim_df, f"{model_type}_{scale}_{subset_name}")
        results['layer_idx'] = layer_idx
        results['subset'] = subset_name
        results['scale'] = scale
        results['n_samples_per_cat'] = n_samples
        results_list.append(results)
        csv_out = os.path.join(output_dir, 'csv')
        os.makedirs(csv_out, exist_ok=True)
        sim_df.to_csv(os.path.join(csv_out, f'similarity_{scale}_L{layer_idx}.csv'))

    if scale_sims:
        # Heatmaps only for a few evenly spaced layers to limit file count.
        rep_layers = get_representative_layers(sorted(scale_sims.keys()))
        for layer_idx in rep_layers:
            plot_similarity_heatmap(
                scale_sims[layer_idx],
                f'{model_type.upper()} ({scale}) [{subset_name}, n={n_samples}] - Layer {layer_idx}',
                os.path.join(output_dir, f'heatmap_{scale}_L{layer_idx}.png')
            )
        plot_similarity_trajectories(
            scale_sims,
            f'{model_type.upper()} ({scale}) [{subset_name}, n={n_samples}]',
            os.path.join(output_dir, f'trajectory_{scale}.png')
        )

    return scale_sims, results_list


def _load_scale_sims_from_csvs(subset_dir, scale):
    """Rebuild {layer_idx -> sim_df} from similarity_{scale}_L*.csv files (merge mode)."""
    import glob as glob_mod
    pattern = os.path.join(subset_dir, 'csv', f'similarity_{scale}_L*.csv')
    files = glob_mod.glob(pattern)
    layer_sims = {}
    for fpath in files:
        basename = os.path.basename(fpath)
        # Recover the layer index from the filename; skip anything malformed.
        layer_str = basename.replace(f'similarity_{scale}_L', '').replace('.csv', '')
        try:
            layer_idx = int(layer_str)
        except ValueError:
            continue
        layer_sims[layer_idx] = pd.read_csv(fpath, index_col=0)
    return layer_sims


# ============================================================================
# Merge Mode
# ============================================================================

def run_merge(model_type, scales, output_dir,
              correct_dir, incorrect_dir, all_dir, accuracy_dir, comparison_dir,
              write_output_dir=None):
    """Merge mode. Read from *_dir, write to write_output_dir (or same dirs if None)."""
    # Write destinations
    w_comparison = os.path.join(write_output_dir, 'comparison') if write_output_dir else comparison_dir
    w_accuracy = os.path.join(write_output_dir, 'accuracy') if write_output_dir else accuracy_dir
    if write_output_dir:
        os.makedirs(w_comparison, exist_ok=True)
        os.makedirs(w_accuracy, exist_ok=True)

    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
    available_scales = [s for s in scale_order if s in scales]

    # Reload the per-scale similarity matrices previously written as CSVs.
    cross_scale_correct, cross_scale_incorrect, cross_scale_all = {}, {}, {}
    for scale in available_scales:
        c_sims = _load_scale_sims_from_csvs(correct_dir, scale)
        if c_sims:
            cross_scale_correct[scale] = c_sims
            logger.info(f"  Loaded correct-only CSVs for {scale}: {len(c_sims)} layers")
        i_sims = _load_scale_sims_from_csvs(incorrect_dir, scale)
        if i_sims:
            cross_scale_incorrect[scale] = i_sims
            logger.info(f"  Loaded incorrect-only CSVs for {scale}: {len(i_sims)} layers")
        a_sims = _load_scale_sims_from_csvs(all_dir, scale)
        if a_sims:
            cross_scale_all[scale] = a_sims
            logger.info(f"  Loaded all-samples CSVs for {scale}: {len(a_sims)} layers")

    # Cross-scale trajectories + evolution heatmaps
    for name, data, subdir in [
        ('correct-only', cross_scale_correct, 'cross_scale_correct_only'),
        ('incorrect-only', cross_scale_incorrect, 'cross_scale_incorrect_only'),
        ('all-samples', cross_scale_all, 'cross_scale_all_samples'),
    ]:
        # Cross-scale plots only make sense with at least two scales.
        if len(data) > 1:
            logger.info(f"\n--- Cross-scale comparison ({name}) ---")
            plot_cross_scale_trajectories(
                data, model_type,
                os.path.join(w_comparison, f'{subdir}.png')
            )
            plot_similarity_evolution_heatmap(
                data, model_type,
                os.path.join(w_comparison, f'evolution_heatmap_{subdir.replace("cross_scale_", "")}.png')
            )

    # Per-scale overlay plots (Fix 3)
    for scale in available_scales:
        c = cross_scale_correct.get(scale)
        i = cross_scale_incorrect.get(scale)
        a = cross_scale_all.get(scale)
        generate_overlay_plots(c, i, a, scale, model_type, w_comparison)

    # Accuracy chart
    accuracy_records = []
    for scale in available_scales:
        acc_path = os.path.join(accuracy_dir, 'json', f'accuracy_{scale}.json')
        if os.path.exists(acc_path):
            with open(acc_path) as f:
                accuracy_records.append(json.load(f))
    if accuracy_records:
        w_acc_csv = os.path.join(w_accuracy, 'csv')
        os.makedirs(w_acc_csv, exist_ok=True)
        pd.DataFrame(accuracy_records).to_csv(os.path.join(w_acc_csv, 'accuracy_summary.csv'), index=False)
        plot_accuracy_chart(accuracy_records, model_type,
                            os.path.join(w_accuracy, 'accuracy_chart.png'))

    # Ablation summary
    ablation_data = []
    for scale in available_scales:
        abl_path = os.path.join(comparison_dir, 'json', f'ablation_{scale}.json')
        if os.path.exists(abl_path):
            with open(abl_path) as f:
                ablation_data.append(json.load(f))
    if ablation_data:
        w_comp_csv = os.path.join(w_comparison, 'csv')
        os.makedirs(w_comp_csv, exist_ok=True)
        pd.DataFrame(ablation_data).to_csv(os.path.join(w_comp_csv, 'ablation_summary.csv'), index=False)
        # NOTE(review): roborefer is included only for dual-merge runs that
        # set --merge-output-dir — presumably an NVILA-specific convention.
        plot_ablation_summary(ablation_data, model_type,
                              os.path.join(w_comparison, 'ablation_summary.png'),
                              include_roborefer=bool(write_output_dir))

    w_out = write_output_dir or
output_dir
    logger.info(f"\n=== Merge Complete ===\nResults in: {w_out}")


# ============================================================================
# Main
# ============================================================================

def main():
    """CLI entry point: per-scale extraction + correctness-filtered analysis, or merge mode."""
    parser = argparse.ArgumentParser(description='Correct Filter Analysis')
    parser.add_argument('--data_path', type=str,
                        default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv')
    parser.add_argument('--model_type', type=str, required=True, choices=['molmo', 'nvila', 'qwen'])
    parser.add_argument('--scales', type=str, nargs='+',
                        default=['vanilla', '80k', '400k', '800k', '2m'])
    parser.add_argument('--output_dir', type=str,
                        default='/data/shared/Qwen/experiments/correct_filter/results')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--merge', action='store_true')
    parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir',
                        help='Override output dir for merge cross-scale plots (for NVILA dual merge)')
    parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer')

    args = parser.parse_args()

    # NVILA runs also get the roborefer checkpoint unless explicitly disabled.
    if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer:
        args.scales.append('roborefer')

    # Seed all RNGs used downstream for reproducible sampling.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    output_dir = os.path.join(args.output_dir, args.model_type)
    correct_dir = os.path.join(output_dir, 'correct_only')
    incorrect_dir = os.path.join(output_dir, 'incorrect_only')
    all_dir = os.path.join(output_dir, 'all_samples')
    accuracy_dir = os.path.join(output_dir, 'accuracy')
    comparison_dir = os.path.join(output_dir, 'comparison')
    for d in [correct_dir, incorrect_dir, all_dir, accuracy_dir, comparison_dir]:
        os.makedirs(d, exist_ok=True)

    # Merge mode: only re-plot from previously written CSV/JSON artifacts.
    if args.merge:
        logger.info("\n=== MERGE MODE ===")
        run_merge(args.model_type, args.scales, output_dir,
                  correct_dir, incorrect_dir, all_dir, accuracy_dir, comparison_dir,
                  write_output_dir=args.merge_output_dir)
        return

    # Normal mode
    logger.info("\n=== Loading & Modifying EmbSpatialBench Data (ALL samples) ===")
    data = load_and_modify_data(args.data_path, args.seed)

    model_configs = MODEL_CONFIGS[args.model_type]

    all_results = []
    accuracy_records = []
    cross_scale_correct = {}
    cross_scale_incorrect = {}
    cross_scale_all = {}
    ablation_data = []

    for scale in args.scales:
        if scale not in model_configs:
            logger.warning(f"Scale {scale} not available for {args.model_type}, skipping...")
            continue

        model_path = model_configs[scale]
        # Hub IDs (Qwen/, allenai/) are not local paths; don't existence-check them.
        if not os.path.exists(model_path) and not model_path.startswith(('Qwen/', 'allenai/')):
            logger.warning(f"Model path not found: {model_path}, skipping...")
            continue

        logger.info(f"\n{'='*60}")
        logger.info(f"Processing {args.model_type} - {scale}")
        logger.info(f"Model path: {model_path}")
        logger.info(f"{'='*60}")

        try:
            extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device)
            target_layers = extractor.target_layers

            # Phase A: Extract all samples with predictions
            logger.info("\n--- Phase A: Extracting hidden states with predictions ---")
            sample_records = extract_all_with_predictions(extractor, data)

            acc_csv_dir = os.path.join(accuracy_dir, 'csv')
            acc_json_dir = os.path.join(accuracy_dir, 'json')
            os.makedirs(acc_csv_dir, exist_ok=True)
            os.makedirs(acc_json_dir, exist_ok=True)

            save_per_sample_predictions(
                sample_records, scale,
                os.path.join(acc_csv_dir, f'predictions_{scale}.csv')
            )
            save_per_sample_norms(
                sample_records, scale,
                os.path.join(acc_csv_dir, f'norms_{scale}.csv')
            )

            acc_stats = compute_accuracy_stats(sample_records, scale, args.model_type)
            accuracy_records.append(acc_stats)
            logger.info(f"\n  Accuracy for {scale}: {acc_stats['overall_accuracy']:.1%}")
            for cat in CATEGORY_ORDER:
                logger.info(f"    {cat}: {acc_stats[f'{cat}_correct']}/{acc_stats[f'{cat}_total']} "
                            f"= {acc_stats[f'{cat}_accuracy']:.1%}")

            # Phase B: Compute all-samples similarity for ALL layers
            logger.info("\n--- Phase B: All-samples similarity (all layers) ---")
            all_reps = compute_all_samples_reps(sample_records, target_layers)
            all_sims, all_results_sub = process_subset(
                'all', all_reps, target_layers, scale,
                args.model_type, all_dir, sum(len(sample_records.get(c, [])) for c in CATEGORY_ORDER),
            )
            all_results.extend(all_results_sub)
            cross_scale_all[scale] = all_sims

            # Phase C: Balanced sampling
            logger.info("\n--- Phase C: Balanced sampling ---")
            n_correct = compute_balanced_size(sample_records, filter_correct=True)
            n_incorrect = compute_balanced_size(sample_records, filter_correct=False)
            logger.info(f"  Correct group: {n_correct} samples/category")
            logger.info(f"  Incorrect group: {n_incorrect} samples/category")

            # Process correct-only subset
            correct_layer_sims = {}
            if n_correct > 0:
                logger.info(f"\n--- Processing correct-only (n={n_correct}) ---")
                correct_reps = balanced_sample_and_average(
                    sample_records, filter_correct=True, n_samples=n_correct,
                    target_layers=target_layers, seed=args.seed,
                )
                correct_layer_sims, correct_results = process_subset(
                    'correct', correct_reps, target_layers, scale,
                    args.model_type, correct_dir, n_correct,
                )
                all_results.extend(correct_results)
                cross_scale_correct[scale] = correct_layer_sims
            else:
                logger.warning(f"  Skipping correct-only: no correct samples in some category")

            # Process incorrect-only subset
            incorrect_layer_sims = {}
            if n_incorrect > 0:
                logger.info(f"\n--- Processing incorrect-only (n={n_incorrect}) ---")
                incorrect_reps = balanced_sample_and_average(
                    sample_records, filter_correct=False, n_samples=n_incorrect,
                    target_layers=target_layers, seed=args.seed,
                )
                incorrect_layer_sims, incorrect_results = process_subset(
                    'incorrect', incorrect_reps, target_layers, scale,
                    args.model_type, incorrect_dir, n_incorrect,
                )
                all_results.extend(incorrect_results)
                cross_scale_incorrect[scale] = incorrect_layer_sims
            else:
                logger.warning(f"  Skipping incorrect-only: no incorrect samples in some category")

            # Phase D: Overlay plots (Fix 3)
            generate_overlay_plots(
                correct_layer_sims or None,
                incorrect_layer_sims or None,
                all_sims or None,
                scale, args.model_type, comparison_dir,
            )

            # Phase E: Build ablation entry (mean similarity across ALL layers)
            ablation_entry = {
                'scale': scale,
                'accuracy': acc_stats['overall_accuracy'],
                'n_correct_per_cat': n_correct,
                'n_incorrect_per_cat': n_incorrect,
            }

            pairs_list = TRAJECTORY_PAIRS['hypothesis'] + TRAJECTORY_PAIRS['within_axis']

            # All-samples: mean similarity across all layers
            if all_sims:
                for cat1, cat2, _, _ in pairs_list:
                    vals = [float(all_sims[l].loc[cat1, cat2])
                            for l in all_sims
                            if cat1 in all_sims[l].index and cat2 in all_sims[l].columns]
                    if vals:
                        ablation_entry[f'all_{cat1}_{cat2}'] = float(np.mean(vals))

            # Correct-only: mean similarity across all layers
            if correct_layer_sims:
                for cat1, cat2, _, _ in pairs_list:
                    vals = [float(correct_layer_sims[l].loc[cat1, cat2])
                            for l in correct_layer_sims
                            if cat1 in correct_layer_sims[l].index and cat2 in correct_layer_sims[l].columns]
                    if vals:
                        ablation_entry[f'correct_{cat1}_{cat2}'] = float(np.mean(vals))

            # Incorrect-only: mean similarity across all layers
            if incorrect_layer_sims:
                for cat1, cat2, _, _ in pairs_list:
                    vals = [float(incorrect_layer_sims[l].loc[cat1, cat2])
                            for l in incorrect_layer_sims
                            if cat1 in incorrect_layer_sims[l].index and cat2 in incorrect_layer_sims[l].columns]
                    if vals:
                        ablation_entry[f'incorrect_{cat1}_{cat2}'] = float(np.mean(vals))

            ablation_data.append(ablation_entry)

            # Save per-scale JSONs
            comp_json_dir = os.path.join(comparison_dir, 'json')
            os.makedirs(comp_json_dir, exist_ok=True)
            with open(os.path.join(comp_json_dir, f'ablation_{scale}.json'), 'w') as f:
                json.dump(ablation_entry, f, indent=2, default=str)
            with open(os.path.join(acc_json_dir, f'accuracy_{scale}.json'), 'w') as f:
                json.dump(acc_stats, f, indent=2, default=str)

            # Cleanup
            del sample_records
            extractor.cleanup()

        except Exception as e:
            # Best-effort per-scale processing: log and continue with the next scale.
            logger.error(f"Failed to process {args.model_type} - {scale}: {e}")
            import traceback
            traceback.print_exc()
            continue

    # Cross-scale comparisons
    for name, data, subdir in [
        ('correct-only', cross_scale_correct, 'cross_scale_correct_only'),
        ('incorrect-only', cross_scale_incorrect, 'cross_scale_incorrect_only'),
        ('all-samples', cross_scale_all, 'cross_scale_all_samples'),
    ]:
        if len(data) > 1:
            logger.info(f"\n--- Cross-scale comparison ({name}) ---")
            plot_cross_scale_trajectories(
                data, args.model_type,
                os.path.join(comparison_dir, f'{subdir}.png')
            )
            plot_similarity_evolution_heatmap(
                data, args.model_type,
                os.path.join(comparison_dir, f'evolution_heatmap_{subdir.replace("cross_scale_", "")}.png')
            )

    if accuracy_records:
        os.makedirs(os.path.join(accuracy_dir, 'csv'), exist_ok=True)
        pd.DataFrame(accuracy_records).to_csv(os.path.join(accuracy_dir, 'csv', 'accuracy_summary.csv'), index=False)
        # accuracy_chart.png is only written in merge mode (where all scales are present).
        # Writing it here (single-scale run) would overwrite the multi-scale merge chart
        # with a single-scale version whenever any individual scale is re-run.

    if ablation_data:
        os.makedirs(os.path.join(comparison_dir, 'csv'), exist_ok=True)
        pd.DataFrame(ablation_data).to_csv(os.path.join(comparison_dir, 'csv', 'ablation_summary.csv'), index=False)
        plot_ablation_summary(ablation_data, args.model_type,
                              os.path.join(comparison_dir, 'ablation_summary.png'))

    if all_results:
        os.makedirs(os.path.join(output_dir, 'csv'), exist_ok=True)
        pd.DataFrame(all_results).to_csv(os.path.join(output_dir, 'csv', 'results_summary.csv'), index=False)

    logger.info(f"\n{'='*60}")
    logger.info("=== Analysis Complete ===")
    logger.info(f"Results saved to: {output_dir}")
    logger.info(f"{'='*60}")


if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/correct_filter/norm_analysis.py b/correct_filter/norm_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d2663f53ce67ebf6a45872bb4e034ced89566d1
--- /dev/null
+++ b/correct_filter/norm_analysis.py
@@ -0,0 +1,454 @@
#!/usr/bin/env python3
"""
Norm Analysis: Testing the "Neutral Zone Collapse" Hypothesis

Hypothesis: Incorrect samples have higher inter-category cosine similarity NOT
because they carry the opposite category's features, but because their spatial
feature extraction failed — causing hidden states to collapse toward a neutral
(text-bias) region with smaller norms.

Verification: Compare L2 norms of hidden states between correct and incorrect
samples per category and layer. If incorrect samples have systematically lower
norms, it supports the "collapse to neutral zone" explanation.

Reads:  results/{model_type}/accuracy/norms_{scale}.csv
        (produced by correct_filter_analysis.py)
Writes: results/{model_type}/norm_analysis/
"""

import os
import argparse
import glob
import logging

import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Shared plotting/ordering constants (kept in sync with correct_filter_analysis.py).
CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 'close']
SCALE_ORDER = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
SCALE_COLORS = {
    'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c',
    '800k': '#d62728', '2m': '#9467bd', 'roborefer': '#8c564b',
}


def load_norms(results_dir, model_type):
    """Load all norms_{scale}.csv files for a model."""
    csv_dir = os.path.join(results_dir, model_type, 'accuracy', 'csv')
    all_dfs = []
    for path in sorted(glob.glob(os.path.join(csv_dir, 'norms_*.csv'))):
        df = pd.read_csv(path)
        all_dfs.append(df)
        logger.info(f"Loaded {path}: {len(df)} samples")
    if not all_dfs:
        raise FileNotFoundError(f"No norms_*.csv found in {csv_dir}")
    # Each CSV already carries a 'scale' column, so plain concatenation suffices.
    return pd.concat(all_dfs, ignore_index=True)


def get_layer_columns(df):
    """Extract sorted layer columns from dataframe."""
    cols = [c for c in df.columns if c.startswith('norm_L')]
    # Numeric sort: 'norm_L10' must come after 'norm_L2'.
    return sorted(cols, key=lambda c: int(c.replace('norm_L', '')))


def get_layer_index(col):
    """Parse the integer layer index out of a 'norm_L{idx}' column name."""
    return int(col.replace('norm_L', ''))


# ============================================================================
# Analysis 1: Per-layer norm comparison (correct vs incorrect)
# ============================================================================

def compute_norm_stats(df):
    """Compute mean/std/median norm for correct vs incorrect, per category × scale × layer."""
    layer_cols = get_layer_columns(df)
    rows = []
    for scale in df['scale'].unique():
        for
cat in CATEGORY_ORDER:
            subset = df[(df['scale'] == scale) & (df['category'] == cat)]
            for is_correct in [True, False]:
                group = subset[subset['is_correct'] == is_correct]
                if len(group) == 0:
                    continue
                label = 'correct' if is_correct else 'incorrect'
                for col in layer_cols:
                    norms = group[col].dropna().values
                    if len(norms) == 0:
                        continue
                    rows.append({
                        'scale': scale, 'category': cat, 'group': label,
                        'layer': get_layer_index(col), 'n_samples': len(norms),
                        'mean_norm': np.mean(norms), 'std_norm': np.std(norms),
                        'median_norm': np.median(norms),
                    })
    return pd.DataFrame(rows)


def compute_norm_ratios(norm_stats):
    """Compute incorrect/correct norm ratio per category × scale × layer.

    norm_stats is the long-format output of compute_norm_stats; groups with
    only one of the two correctness labels are dropped.
    """
    rows = []
    for (scale, cat, layer), grp in norm_stats.groupby(['scale', 'category', 'layer']):
        correct = grp[grp['group'] == 'correct']
        incorrect = grp[grp['group'] == 'incorrect']
        if len(correct) == 0 or len(incorrect) == 0:
            continue
        c_mean = correct['mean_norm'].values[0]
        i_mean = incorrect['mean_norm'].values[0]
        # Guard against a zero denominator for the ratio.
        if c_mean > 0:
            rows.append({
                'scale': scale, 'category': cat, 'layer': layer,
                'correct_mean': c_mean, 'incorrect_mean': i_mean,
                'ratio': i_mean / c_mean,
                'diff': i_mean - c_mean,
                'n_correct': int(correct['n_samples'].values[0]),
                'n_incorrect': int(incorrect['n_samples'].values[0]),
            })
    return pd.DataFrame(rows)


def stat_test_norms(df, scale, layer_col):
    """Mann-Whitney U test: are incorrect norms significantly different from correct?

    Categories with fewer than 5 samples on either side are skipped, since
    the test is too underpowered to be meaningful there.
    """
    rows = []
    subset = df[df['scale'] == scale]
    for cat in CATEGORY_ORDER:
        cat_data = subset[subset['category'] == cat]
        correct = cat_data[cat_data['is_correct'] == True][layer_col].dropna().values
        incorrect = cat_data[cat_data['is_correct'] == False][layer_col].dropna().values
        if len(correct) < 5 or len(incorrect) < 5:
            continue
        u_stat, p_val = stats.mannwhitneyu(correct, incorrect, alternative='two-sided')
        # Effect size: rank-biserial correlation
        n1, n2 = len(correct), len(incorrect)
        r = 1 - (2 * u_stat) / (n1 * n2)
        rows.append({
            'category': cat, 'n_correct': n1, 'n_incorrect': n2,
            'correct_mean': np.mean(correct), 'incorrect_mean': np.mean(incorrect),
            'U_stat': u_stat, 'p_value': p_val, 'effect_size_r': r,
            'significant': p_val < 0.05,
        })
    return pd.DataFrame(rows)


# ============================================================================
# Plots
# ============================================================================

def plot_norm_trajectory(norm_stats, scale, model_type, save_path):
    """Per-scale: mean norm across layers, correct vs incorrect, per category."""
    data = norm_stats[norm_stats['scale'] == scale]
    if data.empty:
        return

    # 2x3 grid: one panel per spatial category.
    fig, axes = plt.subplots(2, 3, figsize=(18, 10), sharex=True)
    fig.suptitle(f'{model_type} — {scale}: Hidden State L2 Norm by Layer\n'
                 f'(Solid=correct, Dashed=incorrect)', fontsize=14)

    for idx, cat in enumerate(CATEGORY_ORDER):
        ax = axes[idx // 3][idx % 3]
        for group, style in [('correct', '-'), ('incorrect', '--')]:
            subset = data[(data['category'] == cat) & (data['group'] == group)]
            if subset.empty:
                continue
            subset = subset.sort_values('layer')
            ax.plot(subset['layer'], subset['mean_norm'], style,
                    label=f'{group} (n={subset["n_samples"].iloc[0]})', linewidth=1.5)
            # Shaded ±1 std band around the mean.
            ax.fill_between(
                subset['layer'],
                subset['mean_norm'] - subset['std_norm'],
                subset['mean_norm'] + subset['std_norm'],
                alpha=0.15,
            )
        ax.set_title(cat, fontsize=12)
        ax.set_xlabel('Layer')
        ax.set_ylabel('L2 Norm')
        ax.legend(fontsize=8)
        ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(save_path, dpi=200, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_norm_ratio_trajectory(norm_ratios, scale, model_type, save_path):
    """Per-scale: incorrect/correct norm ratio across layers, all 6 categories."""
    data = norm_ratios[norm_ratios['scale'] == scale].sort_values('layer')
    if data.empty:
        return
+ fig, ax = plt.subplots(figsize=(12, 6)) + for cat in CATEGORY_ORDER: + subset = data[data['category'] == cat] + if subset.empty: + continue + ax.plot(subset['layer'], subset['ratio'], label=cat, linewidth=1.5) + + ax.axhline(y=1.0, color='black', linestyle=':', alpha=0.5, label='ratio=1 (equal)') + ax.set_title(f'{model_type} — {scale}: Incorrect/Correct Norm Ratio by Layer', fontsize=13) + ax.set_xlabel('Layer') + ax.set_ylabel('Norm Ratio (incorrect / correct)') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + + plt.tight_layout() + plt.savefig(save_path, dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_cross_scale_norm_ratio(norm_ratios, model_type, save_path): + """Cross-scale: norm ratio at representative layers, all categories averaged.""" + if norm_ratios.empty: + return + + layers = sorted(norm_ratios['layer'].unique()) + n_layers = len(layers) + # Pick 5 representative layers + rep_indices = [0, n_layers // 4, n_layers // 2, 3 * n_layers // 4, n_layers - 1] + rep_layers = sorted(set(layers[i] for i in rep_indices)) + + available_scales = [s for s in SCALE_ORDER if s in norm_ratios['scale'].unique()] + + fig, axes = plt.subplots(1, len(rep_layers), figsize=(4 * len(rep_layers), 5), sharey=True) + if len(rep_layers) == 1: + axes = [axes] + + for ax, layer in zip(axes, rep_layers): + layer_data = norm_ratios[norm_ratios['layer'] == layer] + # Grouped bar: x=category, color=scale + x = np.arange(len(CATEGORY_ORDER)) + width = 0.8 / max(len(available_scales), 1) + for si, scale in enumerate(available_scales): + vals = [] + for cat in CATEGORY_ORDER: + row = layer_data[(layer_data['scale'] == scale) & (layer_data['category'] == cat)] + vals.append(row['ratio'].values[0] if len(row) > 0 else np.nan) + ax.bar(x + si * width, vals, width, label=scale, + color=SCALE_COLORS.get(scale, '#999999'), alpha=0.8) + + ax.axhline(y=1.0, color='black', linestyle=':', alpha=0.5) + ax.set_title(f'Layer {layer}', fontsize=11) + 
ax.set_xticks(x + width * (len(available_scales) - 1) / 2) + ax.set_xticklabels(CATEGORY_ORDER, rotation=45, fontsize=9) + ax.set_ylabel('Norm Ratio (incorr / corr)' if ax == axes[0] else '') + ax.grid(True, alpha=0.2, axis='y') + + axes[-1].legend(fontsize=8, bbox_to_anchor=(1.02, 1), loc='upper left') + fig.suptitle(f'{model_type}: Incorrect/Correct Norm Ratio Across Scales', fontsize=13, y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_overall_norm_comparison(norm_stats, model_type, save_path): + """Aggregate across categories: mean norm trajectory for correct vs incorrect, per scale.""" + available_scales = [s for s in SCALE_ORDER if s in norm_stats['scale'].unique()] + if not available_scales: + return + + fig, ax = plt.subplots(figsize=(12, 6)) + + for scale in available_scales: + color = SCALE_COLORS.get(scale, '#999999') + for group, style, alpha in [('correct', '-', 1.0), ('incorrect', '--', 0.7)]: + subset = norm_stats[(norm_stats['scale'] == scale) & (norm_stats['group'] == group)] + if subset.empty: + continue + agg = subset.groupby('layer')['mean_norm'].mean().reset_index() + agg = agg.sort_values('layer') + ax.plot(agg['layer'], agg['mean_norm'], style, + color=color, alpha=alpha, linewidth=1.5, + label=f'{scale} ({group})') + + ax.set_title(f'{model_type}: Mean Norm (averaged across categories)\n' + f'Solid=correct, Dashed=incorrect', fontsize=13) + ax.set_xlabel('Layer') + ax.set_ylabel('Mean L2 Norm') + ax.legend(fontsize=8, ncol=2, bbox_to_anchor=(1.02, 1), loc='upper left') + ax.grid(True, alpha=0.3) + + plt.tight_layout() + plt.savefig(save_path, dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_stat_test_heatmap(df_raw, model_type, out_dir): + """For each scale, run stat tests at representative layers and plot a summary heatmap.""" + layer_cols = get_layer_columns(df_raw) + layers = [get_layer_index(c) 
for c in layer_cols] + n_layers = len(layers) + rep_indices = [0, n_layers // 4, n_layers // 2, 3 * n_layers // 4, n_layers - 1] + rep_layers = sorted(set(layers[i] for i in rep_indices)) + + available_scales = [s for s in SCALE_ORDER if s in df_raw['scale'].unique()] + + for scale in available_scales: + all_test_rows = [] + for layer in rep_layers: + col = f'norm_L{layer}' + if col not in df_raw.columns: + continue + test_df = stat_test_norms(df_raw, scale, col) + if test_df.empty: + continue + test_df['layer'] = layer + all_test_rows.append(test_df) + + if not all_test_rows: + continue + test_results = pd.concat(all_test_rows, ignore_index=True) + test_results.to_csv(os.path.join(out_dir, f'stat_tests_{scale}.csv'), index=False) + + # Heatmap of effect sizes + pivot = test_results.pivot_table( + index='category', columns='layer', values='effect_size_r', + ) + pivot = pivot.reindex(index=CATEGORY_ORDER) + + fig, ax = plt.subplots(figsize=(max(6, len(rep_layers) * 1.5), 5)) + sns.heatmap(pivot, annot=True, fmt='.2f', center=0, cmap='RdBu_r', + vmin=-1, vmax=1, ax=ax, linewidths=0.5) + + # Mark significant cells + for i, cat in enumerate(pivot.index): + for j, layer in enumerate(pivot.columns): + row = test_results[(test_results['category'] == cat) & (test_results['layer'] == layer)] + if len(row) > 0 and row.iloc[0]['significant']: + ax.text(j + 0.5, i + 0.85, '*', ha='center', va='center', + fontsize=14, fontweight='bold', color='black') + + ax.set_title(f'{model_type} — {scale}: Norm Effect Size (rank-biserial r)\n' + f'Positive r = correct > incorrect. 
* = p<0.05', fontsize=11) + plt.tight_layout() + plt.savefig(os.path.join(out_dir, f'effect_size_heatmap_{scale}.png'), + dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved effect size heatmap: {scale}") + + +# ============================================================================ +# Summary +# ============================================================================ + +def generate_summary(norm_ratios, df_raw, model_type, out_dir): + """Generate a text summary of findings.""" + layer_cols = get_layer_columns(df_raw) + layers = [get_layer_index(c) for c in layer_cols] + # Use last-quarter layer as representative + rep_layer = layers[3 * len(layers) // 4] + + lines = [f"=== Norm Analysis Summary: {model_type} ===", ""] + lines.append("Hypothesis: Incorrect samples collapse to a neutral zone (lower norms)") + lines.append(f"Representative layer: L{rep_layer}") + lines.append("") + + available_scales = [s for s in SCALE_ORDER if s in norm_ratios['scale'].unique()] + for scale in available_scales: + data = norm_ratios[(norm_ratios['scale'] == scale) & (norm_ratios['layer'] == rep_layer)] + if data.empty: + continue + lines.append(f"--- {scale} (L{rep_layer}) ---") + n_lower = 0 + for _, row in data.iterrows(): + direction = "LOWER" if row['ratio'] < 1.0 else "higher" + if row['ratio'] < 1.0: + n_lower += 1 + lines.append( + f" {row['category']:>6s}: ratio={row['ratio']:.3f} " + f"(correct={row['correct_mean']:.1f}, incorrect={row['incorrect_mean']:.1f}) " + f"-> incorrect is {direction}" + ) + lines.append(f" => {n_lower}/{len(data)} categories have lower incorrect norms") + lines.append("") + + # Stat test at rep layer + col = f'norm_L{rep_layer}' + if col in df_raw.columns: + lines.append(f"--- Statistical Tests (Mann-Whitney U) at L{rep_layer} ---") + for scale in available_scales: + test_df = stat_test_norms(df_raw, scale, col) + if test_df.empty: + continue + n_sig = test_df['significant'].sum() + lines.append(f" {scale}: 
{n_sig}/{len(test_df)} categories significant (p<0.05)") + for _, row in test_df.iterrows(): + sig = "*" if row['significant'] else " " + lines.append( + f" {sig} {row['category']:>6s}: p={row['p_value']:.4f}, " + f"r={row['effect_size_r']:+.3f} " + f"(corr={row['correct_mean']:.1f}, incorr={row['incorrect_mean']:.1f})" + ) + lines.append("") + + summary_text = "\n".join(lines) + summary_path = os.path.join(out_dir, 'summary.txt') + with open(summary_path, 'w') as f: + f.write(summary_text) + logger.info(f"Saved summary: {summary_path}") + print(summary_text) + + +# ============================================================================ +# Main +# ============================================================================ + +def main(): + parser = argparse.ArgumentParser(description='Norm Analysis: Neutral Zone Collapse Hypothesis') + parser.add_argument('--model_type', type=str, required=True, choices=['molmo', 'nvila', 'qwen']) + parser.add_argument('--results_dir', type=str, + default='/data/shared/Qwen/experiments/correct_filter/results') + args = parser.parse_args() + + out_dir = os.path.join(args.results_dir, args.model_type, 'norm_analysis') + os.makedirs(out_dir, exist_ok=True) + + # Load data + logger.info(f"Loading norms for {args.model_type}...") + df = load_norms(args.results_dir, args.model_type) + logger.info(f"Total samples: {len(df)}") + logger.info(f"Scales: {sorted(df['scale'].unique())}") + logger.info(f"Correct: {df['is_correct'].sum()}, Incorrect: {(~df['is_correct']).sum()}") + + # Compute stats + logger.info("\nComputing norm statistics...") + norm_stats = compute_norm_stats(df) + norm_stats.to_csv(os.path.join(out_dir, 'norm_stats.csv'), index=False) + + norm_ratios = compute_norm_ratios(norm_stats) + norm_ratios.to_csv(os.path.join(out_dir, 'norm_ratios.csv'), index=False) + + # Per-scale plots + available_scales = [s for s in SCALE_ORDER if s in df['scale'].unique()] + for scale in available_scales: + plot_norm_trajectory(norm_stats, 
scale, args.model_type, + os.path.join(out_dir, f'norm_trajectory_{scale}.png')) + plot_norm_ratio_trajectory(norm_ratios, scale, args.model_type, + os.path.join(out_dir, f'norm_ratio_{scale}.png')) + + # Cross-scale plots + plot_cross_scale_norm_ratio(norm_ratios, args.model_type, + os.path.join(out_dir, 'cross_scale_norm_ratio.png')) + plot_overall_norm_comparison(norm_stats, args.model_type, + os.path.join(out_dir, 'overall_norm_comparison.png')) + + # Statistical tests + effect size heatmaps + plot_stat_test_heatmap(df, args.model_type, out_dir) + + # Summary + generate_summary(norm_ratios, df, args.model_type, out_dir) + + logger.info(f"\n=== Norm Analysis Complete ===\nResults in: {out_dir}") + + +if __name__ == '__main__': + main() diff --git a/correct_filter/run_molmo.sh b/correct_filter/run_molmo.sh new file mode 100644 index 0000000000000000000000000000000000000000..154b5652302b510e21d1734a87dcca923f2a46b5 --- /dev/null +++ b/correct_filter/run_molmo.sh @@ -0,0 +1,59 @@ +#!/bin/bash +set -e + +SCRIPT="/data/shared/Qwen/experiments/correct_filter/correct_filter_analysis.py" +PYTHON="conda run --no-capture-output -n molmo python" +MODEL="molmo" +LOG_DIR="/data/shared/Qwen/experiments/correct_filter/logs/${MODEL}" +mkdir -p "$LOG_DIR" + +# GPU plan: all 6 scripts run simultaneously +# Molmo(25GB) shares GPU 0-4 with NVILA(8GB) = ~33GB each +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(0 1 2 3 4) + +echo "=========================================" +echo " Molmo Correct Filter: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${LOG_DIR}/${scale}.log" + + echo "[GPU $gpu] $scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." 
+FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. Check logs in $LOG_DIR" +fi + +echo "=========================================" +echo " Molmo Correct Filter: Running merge" +echo "=========================================" +$PYTHON $SCRIPT --model_type $MODEL --merge --scales vanilla 80k 400k 800k 2m \ + 2>&1 | tee "${LOG_DIR}/merge.log" + +echo "ALL DONE: $MODEL" \ No newline at end of file diff --git a/correct_filter/run_nvila.sh b/correct_filter/run_nvila.sh new file mode 100644 index 0000000000000000000000000000000000000000..fba14a350fb7ec818a02830d63013383cf51002c --- /dev/null +++ b/correct_filter/run_nvila.sh @@ -0,0 +1,70 @@ +#!/bin/bash +set -e + +SCRIPT="/data/shared/Qwen/experiments/correct_filter/correct_filter_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +MODEL="nvila" +RESULTS_BASE="/data/shared/Qwen/experiments/correct_filter/results" +LOG_DIR="/data/shared/Qwen/experiments/correct_filter/logs/${MODEL}" +mkdir -p "$LOG_DIR" + +# GPU plan: NVILA(8GB) shares GPU 0-4 with Molmo(25GB), GPU 5 with Qwen vanilla(10GB) +SCALES=("vanilla" "80k" "400k" "800k" "2m" "roborefer") +GPUS=(0 1 2 3 4 5) + +echo "=========================================" +echo " NVILA Correct Filter: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${LOG_DIR}/${scale}.log" + + echo "[GPU $gpu] $scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." 
+FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. Check logs in $LOG_DIR" +fi + +echo "=========================================" +echo " NVILA Correct Filter: Merge 1/2 (without roborefer)" +echo "=========================================" +$PYTHON $SCRIPT --model_type $MODEL --merge \ + --scales vanilla 80k 400k 800k 2m \ + 2>&1 | tee "${LOG_DIR}/merge.log" + +echo "=========================================" +echo " NVILA Correct Filter: Merge 2/2 (with roborefer)" +echo "=========================================" +$PYTHON $SCRIPT --model_type $MODEL --merge \ + --scales vanilla 80k 400k 800k 2m roborefer \ + --merge-output-dir "${RESULTS_BASE}/nvila_with_roborefer" \ + 2>&1 | tee "${LOG_DIR}/merge_with_roborefer.log" + +echo "ALL DONE: $MODEL" +echo "Results (no roborefer): ${RESULTS_BASE}/nvila/" +echo "Results (with roborefer): ${RESULTS_BASE}/nvila_with_roborefer/" \ No newline at end of file diff --git a/correct_filter/run_qwen.sh b/correct_filter/run_qwen.sh new file mode 100644 index 0000000000000000000000000000000000000000..db54b597337492533567aa32683083b67763b638 --- /dev/null +++ b/correct_filter/run_qwen.sh @@ -0,0 +1,59 @@ +#!/bin/bash +set -e + +SCRIPT="/data/shared/Qwen/experiments/correct_filter/correct_filter_analysis.py" +PYTHON="/usr/bin/python3" +MODEL="qwen" +LOG_DIR="/data/shared/Qwen/experiments/correct_filter/logs/${MODEL}" +mkdir -p "$LOG_DIR" + +# GPU plan: Qwen(10GB) on GPU 5-7, sharing with NVILA roborefer on GPU 5 +# GPU 6,7 each host 2 Qwen scales (20GB each, well within 80GB) +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(5 6 6 7 7) + +echo "=========================================" +echo " Qwen Correct Filter: Launching ${#SCALES[@]} scales in parallel" +echo 
"=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${LOG_DIR}/${scale}.log" + + echo "[GPU $gpu] $scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. Check logs in $LOG_DIR" +fi + +echo "=========================================" +echo " Qwen Correct Filter: Running merge" +echo "=========================================" +$PYTHON $SCRIPT --model_type $MODEL --merge --scales vanilla 80k 400k 800k 2m \ + 2>&1 | tee "${LOG_DIR}/merge.log" + +echo "ALL DONE: $MODEL" \ No newline at end of file diff --git a/exp2a_correct_filter/exp2a_correct_filter_analysis.py b/exp2a_correct_filter/exp2a_correct_filter_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..6df0c18758f1d0968c34df7c79085e1aea064d74 --- /dev/null +++ b/exp2a_correct_filter/exp2a_correct_filter_analysis.py @@ -0,0 +1,1825 @@ +""" +Experiment 2-A (Correct Filter): Correctness-Filtered Representation Analysis + +Extends exp2a_modified by: +- Generating model predictions to determine correctness +- Filtering samples into correct/incorrect groups with balanced sampling +- Running similarity analysis on each group separately +- Recording per-scale, per-category accuracy +- Comparing correct-only vs incorrect-only vs all to check whether + scaling effects on similarity are genuine or just accuracy-driven + +Balanced sampling: within each group (correct/incorrect), all 6 categories +have the same number of 
samples, rounded down to the nearest multiple of 50. +""" + +import os +import sys +import json +import argparse +import base64 +import logging +import random +import re +from io import BytesIO +from collections import defaultdict +from typing import Dict, List, Tuple, Optional, Any +from abc import ABC, abstractmethod + +import torch +import numpy as np +import pandas as pd +from PIL import Image +from tqdm import tqdm +import matplotlib.pyplot as plt +import seaborn as sns +from sklearn.metrics.pairwise import cosine_similarity + +# Setup logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +# Category order for output +CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 'close'] + +# Opposite map for answer matching +OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'under', 'under': 'above', + 'far': 'close', 'close': 'far', +} + +# Pair definitions for trajectory analysis +TRAJECTORY_PAIRS = { + 'hypothesis': [ + ('above', 'far', 'above-far', '#d62728'), # red + ('under', 'close', 'under-close', '#1f77b4'), # blue + ], + 'within_axis': [ + ('left', 'right', 'left-right', '#2ca02c'), # green + ('above', 'under', 'above-under', '#ff7f0e'), # orange + ('far', 'close', 'far-close', '#9467bd'), # purple + ], + 'counter_hypothesis': [ + ('above', 'close', 'above-close', '#e377c2'), # pink + ('under', 'far', 'under-far', '#17becf'), # cyan + ], +} + +# Scale colors for cross-scale plots +SCALE_COLORS = { + 'vanilla': '#1f77b4', + '80k': '#ff7f0e', + '400k': '#2ca02c', + '800k': '#d62728', + '2m': '#9467bd', + 'roborefer': '#8c564b', +} + + +# ============================================================================ +# Data Loading & Modification (same as exp2a_modified) +# ============================================================================ + +OBJECT_PATTERNS = [ + re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + 
# ============================================================================
# Data Loading & Modification (same as exp2a_modified)
# ============================================================================

# Question templates that name two objects; tried in order by extract_objects.
OBJECT_PATTERNS = [
    re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE),
    re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE),
    re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
]


def extract_objects(question: str) -> Tuple[str, str]:
    """Return the two object phrases mentioned in *question*.

    Each known template is tried in order; raises ValueError when none match.
    """
    for candidate in OBJECT_PATTERNS:
        hit = candidate.search(question)
        if hit is None:
            continue
        return hit.group(1).strip(), hit.group(2).strip()
    raise ValueError(f"Could not extract objects from: {question}")


def modify_pairwise_sample(sample: dict) -> dict:
    """Rewrite a pairwise-relation sample into a binary either/or question.

    The correct answer becomes the sample's own category label.
    """
    first, second = extract_objects(sample['question'])
    category = sample['category']

    if category in ('left', 'right'):
        question = f"Is the {first} to the left or right of the {second}?"
    else:  # above, under
        question = f"Is the {first} above or under the {second}?"

    return {
        'index': sample['index'],
        'image_base64': sample['image_base64'],
        'question': question,
        'answer': category,
        'category': category,
    }


def modify_distance_sample(sample: dict, rng: random.Random) -> dict:
    """Rewrite a 4-option distance sample into a binary far/close question.

    A random non-answer option is drawn as the reference object.
    """
    category = sample['category']
    answer_key = sample['answer']
    options = sample['options']

    target_object = options[answer_key]
    distractors = [text for key, text in options.items() if key != answer_key]
    reference_object = rng.choice(distractors)

    return {
        'index': sample['index'],
        'image_base64': sample['image_base64'],
        'question': f"Compared to {reference_object}, is {target_object} far or close from you?",
        'answer': category,
        'category': category,
    }


def load_and_modify_data(
    tsv_path: str,
    seed: int = 42
) -> Dict[str, List[dict]]:
    """Load ALL samples (no per-category limit) to maximize data for correct/incorrect filtering."""
    rng = random.Random(seed)
    np.random.seed(seed)  # keep the global numpy RNG reproducible as well

    frame = pd.read_csv(tsv_path, sep='\t')

    # Bucket raw rows by category, keeping all four answer options around.
    raw_grouped = defaultdict(list)
    for _, row in frame.iterrows():
        raw_grouped[row['category']].append({
            'index': row['index'],
            'image_base64': row['image'],
            'question': row['question'],
            'answer': row['answer'],
            'category': row['category'],
            'options': {key: row[key] for key in ('A', 'B', 'C', 'D')},
        })

    modified_data = defaultdict(list)
    stats = {'total': 0, 'success': 0, 'failed': 0}

    for category in CATEGORY_ORDER:
        for sample in raw_grouped[category]:
            stats['total'] += 1
            try:
                if category in ('left', 'right', 'above', 'under'):
                    modified = modify_pairwise_sample(sample)
                else:
                    modified = modify_distance_sample(sample, rng)

                assert modified['answer'] == modified['category']
                modified_data[category].append(modified)
                stats['success'] += 1
            except Exception as e:
                stats['failed'] += 1
                logger.warning(f"  Failed to modify sample {sample['index']}: {e}")

    logger.info(f"Data modification: {stats['success']}/{stats['total']} success, {stats['failed']} failed")
    for cat in CATEGORY_ORDER:
        if cat in modified_data:
            logger.info(f"  {cat}: {len(modified_data[cat])} samples")
            ex = modified_data[cat][0]
            logger.info(f"    Example Q: {ex['question']}")
            logger.info(f"    Example A: {ex['answer']}")

    return dict(modified_data)


def decode_base64_image(base64_str: str) -> "Image.Image":
    """Decode a base64-encoded image payload into an RGB PIL image."""
    raw = base64.b64decode(base64_str)
    return Image.open(BytesIO(raw)).convert('RGB')
# ============================================================================
# Answer Matching
# ============================================================================

def check_answer(generated_text: str, expected_category: str) -> bool:
    """Check if model's generated text matches the expected category.

    Finds which of the two options (expected vs opposite) appears first.
    """
    # Empty / whitespace-only generations count as wrong.
    if not generated_text or not generated_text.strip():
        return False

    text = generated_text.strip().lower()
    expected = expected_category.lower()
    opposite = OPPOSITE_MAP[expected]

    # Substring positions; -1 means the word is absent.
    pos_exp = text.find(expected)
    pos_opp = text.find(opposite)

    if pos_exp == -1:
        return False
    if pos_opp == -1:
        return True
    # Both words present: whichever the model produced first wins.
    return pos_exp < pos_opp


# ============================================================================
# Base Extractor (modified: prefill-only hooks + extract_and_predict)
# ============================================================================

class BaseHiddenStateExtractor(ABC):
    """Base class for extracting hidden states from VLMs.

    Subclasses load a concrete model and implement extract_and_predict(),
    which runs one image+question through the model and returns the hooked
    per-layer hidden states of the last prompt token together with the
    generated answer text.
    """

    def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None):
        # model_path: checkpoint directory; device: torch device string;
        # target_layers: decoder-layer indices to hook (None = all layers).
        self.model_path = model_path
        self.device = device
        self.hidden_states = {}  # layer index -> last-token hidden state (CPU float tensor)
        self.hooks = []          # live forward-hook handles, removed in cleanup()

        self._load_model()

        num_layers = self._get_num_layers()
        if target_layers is None:
            self.target_layers = list(range(num_layers))
            logger.info(f"Model has {num_layers} layers. Extracting ALL layers (0..{num_layers-1})")
        else:
            self.target_layers = target_layers
            logger.info(f"Model has {num_layers} layers. Target layers: {self.target_layers}")

        self._register_hooks()

    def _register_hooks(self):
        # Attach a forward hook to every requested decoder layer; layers the
        # subclass cannot locate (module is None) are silently skipped.
        for layer_idx in self.target_layers:
            module = self._get_layer_module(layer_idx)
            if module is not None:
                hook = module.register_forward_hook(self._make_hook(layer_idx))
                self.hooks.append(hook)
                logger.info(f"  Registered hook on layer {layer_idx}")

    def _make_hook(self, layer_idx: int):
        """Create a hook that only captures during prefill (seq_len > 1)."""
        def hook_fn(module, input, output):
            # Decoder layers may return (hidden, ...) tuples.
            if isinstance(output, tuple):
                hidden = output[0]
            else:
                hidden = output

            # Only capture during prefill pass (seq_len > 1).
            # During autoregressive generation, each step has seq_len = 1.
            # NOTE(review): assumes hidden is (batch, seq, dim) — confirm for
            # every backbone this class is used with.
            if hidden.shape[1] > 1:
                last_token = hidden[:, -1, :].detach().cpu().float()
                self.hidden_states[layer_idx] = last_token.squeeze(0)

        return hook_fn

    @abstractmethod
    def _load_model(self):
        # Load model + tokenizer/processor onto self.device.
        pass

    @abstractmethod
    def _get_num_layers(self) -> int:
        # Number of decoder layers in the loaded backbone.
        pass

    @abstractmethod
    def _get_layer_module(self, layer_idx: int):
        # Return the nn.Module for the given decoder layer (or None).
        pass

    @abstractmethod
    def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]:
        """Extract hidden states AND generate predicted answer in one pass.

        Returns:
            (hidden_states, predicted_answer_text)
        """
        pass

    def cleanup(self):
        # Remove hooks and drop model references so CUDA memory can be freed.
        for hook in self.hooks:
            hook.remove()
        self.hooks = []
        if hasattr(self, 'model'):
            del self.model
        if hasattr(self, 'processor'):
            del self.processor
        torch.cuda.empty_cache()


# ============================================================================
# Molmo Extractor
# ============================================================================

class MolmoExtractor(BaseHiddenStateExtractor):
    # Supports two checkpoint layouts: a native olmo checkpoint
    # (config.yaml + model.pt) or a HuggingFace-format directory.

    def _load_model(self):
        config_path = os.path.join(self.model_path, "config.yaml")
        checkpoint_path = os.path.join(self.model_path, "model.pt")

        # Native layout is detected by the presence of both files.
        if os.path.exists(config_path) and os.path.exists(checkpoint_path):
            self._load_native_model()
            self.is_native = True
        else:
            self._load_hf_model()
            self.is_native = False

    def _load_native_model(self):
        from olmo.config import ModelConfig
        from olmo.model import Molmo as NativeMolmoModel
        from olmo.data.model_preprocessor import MultiModalPreprocessor
        from olmo.data.data_formatter import DataFormatter

        # NOTE(review): torch.load is monkey-patched process-wide so the olmo
        # checkpoint loader defaults to weights_only=False, and the patch is
        # never restored afterwards — confirm this is intentional; it only
        # makes sense for trusted local checkpoints.
        _original_load = torch.load
        def _unsafe_load_wrapper(*args, **kwargs):
            if 'weights_only' not in kwargs:
                kwargs['weights_only'] = False
            return _original_load(*args, **kwargs)
        torch.load = _unsafe_load_wrapper

        config_path = os.path.join(self.model_path, "config.yaml")
        checkpoint_path = os.path.join(self.model_path, "model.pt")

        cfg = ModelConfig.load(config_path, key="model", validate_paths=False)
        cfg.init_device = "cpu"  # build on CPU, then move to the target device

        self.model = NativeMolmoModel(cfg)
        state_dict = torch.load(checkpoint_path, map_location="cpu")
        self.model.load_state_dict(state_dict)
        self.model = self.model.to(self.device, dtype=torch.bfloat16).eval()

        self.tokenizer = cfg.get_tokenizer()
        v_cfg = cfg.vision_backbone
        h, w = cfg.llm_patches_per_crop()
        # Padding-mask mode encoded as an int flag understood by the preprocessor.
        image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None)

        class SafeDataFormatter(DataFormatter):
            # Fall back to the "User" prompt style when none is configured.
            def get_system_prompt(self, style, for_inference, messages, rng=None):
                if style is None:
                    style = "User"
                return super().get_system_prompt(style, for_inference, messages, rng)

        self.formatter = SafeDataFormatter(
            prompt_templates=cfg.prompt_type,
            message_format=cfg.message_formatting,
            system_prompt=cfg.system_prompt_kind,
            always_start_with_space=cfg.always_start_with_space,
            default_inference_len=cfg.default_inference_len
        )

        self.preprocessor = MultiModalPreprocessor(
            tokenizer=self.tokenizer,
            normalize=str(v_cfg.image_model_type),
            crop_mode=cfg.crop_mode,
            max_crops=cfg.max_crops,
            overlap_margins=cfg.overlap_margins,
            resize=v_cfg.resize_mode,
            use_col_tokens=cfg.use_col_tokens,
            base_image_input_size=v_cfg.image_default_input_size,
            image_pooling_w=cfg.image_pooling_w,
            image_pooling_h=cfg.image_pooling_h,
            image_token_length_w=w,
            image_token_length_h=h,
            image_patch_size=v_cfg.image_patch_size,
            image_padding_mask=image_padding_mask,
            pad_value=cfg.pad_value,
            loss_token_weighting=cfg.multi_annotation_weighting,
        )

        logger.info(f"Loaded native Molmo model from {self.model_path}")

    def _load_hf_model(self):
        from transformers import AutoModelForCausalLM, AutoProcessor

        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            device_map=self.device
        )
        self.model.eval()

        self.processor = AutoProcessor.from_pretrained(
            self.model_path,
            trust_remote_code=True
        )
        logger.info(f"Loaded HuggingFace Molmo model from {self.model_path}")

    def _get_num_layers(self) -> int:
        if self.is_native:
            return len(self.model.transformer.blocks)
        else:
            if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
                return len(self.model.model.transformer.blocks)
            # Fallback when the HF wrapper hides the backbone.
            # NOTE(review): hard-coded 32 matches Molmo-7B — verify for other sizes.
            return 32

    def _get_layer_module(self, layer_idx: int):
        if self.is_native:
            return self.model.transformer.blocks[layer_idx]
        else:
            return self.model.model.transformer.blocks[layer_idx]

    def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]:
        # Reset the hook buffer; the prefill pass of generate() refills it.
        self.hidden_states = {}

        if self.is_native:
            # Native path: format + preprocess manually, then call the olmo
            # generate() API.
            example = {"messages": [question], "image": image}
            messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
            image_np = np.array(image)
            batch = self.preprocessor(image_np, messages, is_training=False, require_image_features=True)

            # Some preprocessor versions emit 'input_tokens' instead of 'input_ids'.
            if 'input_ids' not in batch and 'input_tokens' in batch:
                batch['input_ids'] = batch['input_tokens']

            def to_tensor(x):
                if isinstance(x, np.ndarray):
                    return torch.from_numpy(x)
                return x

            input_ids = to_tensor(batch['input_ids']).unsqueeze(0).to(self.device)
            if input_ids.dtype not in [torch.long, torch.int64]:
                input_ids = input_ids.long()

            images_tensor = to_tensor(batch['images']).unsqueeze(0).to(self.device).to(dtype=torch.bfloat16)
            image_masks = to_tensor(batch['image_masks']).unsqueeze(0).to(self.device).to(dtype=torch.bfloat16)
            image_input_idx = to_tensor(batch['image_input_idx']).unsqueeze(0).to(self.device)

            with torch.inference_mode():
                with torch.autocast(device_type="cuda", enabled=True, dtype=torch.bfloat16):
                    gen_output = self.model.generate(
                        input_ids=input_ids,
                        images=images_tensor,
                        image_masks=image_masks,
                        image_input_idx=image_input_idx,
                        max_steps=20,  # answers are single words; 20 tokens is plenty
                        beam_size=1,
                    )

            # gen_output.token_ids shape: (batch, beam, max_steps)
            generated_ids = gen_output.token_ids[0, 0]  # first batch, first beam
            answer = self.tokenizer.decode(generated_ids.tolist()).strip()
            # Remove EOS tokens
            # NOTE(review): the '' entry is a no-op for str.replace — presumably
            # a placeholder for a third EOS marker; confirm.
            for eos in ['<|endoftext|>', '', '<|end|>']:
                answer = answer.replace(eos, '').strip()

        else:
            # HF path: the processor builds the batch; generate_from_batch
            # triggers the hooked prefill pass and the decode loop.
            from transformers import GenerationConfig

            inputs = self.processor.process(images=[image], text=question)
            processed_inputs = {}
            for k, v in inputs.items():
                v = v.to(self.device).unsqueeze(0)
                if v.dtype == torch.float32:
                    v = v.to(dtype=torch.bfloat16)
                processed_inputs[k] = v

            with torch.no_grad():
                with torch.autocast(device_type="cuda", enabled=True, dtype=torch.bfloat16):
                    output = self.model.generate_from_batch(
                        processed_inputs,
                        GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"),
                        tokenizer=self.processor.tokenizer,
                    )

            # Strip the prompt: only tokens after the input length are the answer.
            input_len = processed_inputs['input_ids'].shape[1]
            generated_tokens = output[0, input_len:]
            answer = self.processor.tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

        return self.hidden_states.copy(), answer


# ============================================================================
# NVILA Extractor
# ============================================================================
# NOTE(review): this class continues past the visible chunk boundary; the text
# below is intentionally left token-continuous with the following chunk.

class NVILAExtractor(BaseHiddenStateExtractor):

    def _load_model(self):
        # Import llava with RoboRefer paths/modules temporarily removed so the
        # correct llava package is resolved; restore sys.path/sys.modules after.
        original_sys_path = sys.path.copy()
        sys.path = [p for p in sys.path if 'RoboRefer' not in p]

        modules_to_remove = [key for key in list(sys.modules.keys()) if 'llava' in key.lower()]
        removed_modules = {}
        for mod in modules_to_remove:
            removed_modules[mod] = sys.modules.pop(mod)

        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            # Restore the environment before surfacing the failure.
            sys.path = original_sys_path
            for mod, module in removed_modules.items():
                sys.modules[mod] = module
            raise RuntimeError(f"Failed to import llava: {err}")

        sys.path = original_sys_path

        self.LLaVAImage = LLaVAImage
        self.clib = clib

        self.model = llava.load(self.model_path, model_base=None)

        self._find_llm_backbone()

        logger.info(f"Loaded NVILA model from {self.model_path}")

    def _find_llm_backbone(self):
        # Probe the known attribute layouts for the decoder-layer list.
        candidates = []

        if hasattr(self.model, 'llm'):
            if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'):
                candidates.append(('model.llm.model.layers', self.model.llm.model.layers))
            if hasattr(self.model.llm, 'layers'):
                candidates.append(('model.llm.layers',
self.model.llm.layers)) + + if hasattr(self.model, 'model'): + if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'): + candidates.append(('model.model.model.layers', self.model.model.model.layers)) + if hasattr(self.model.model, 'layers'): + candidates.append(('model.model.layers', self.model.model.layers)) + + for name, module in self.model.named_modules(): + if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0: + candidates.append((name, module)) + + if candidates: + path, layers = candidates[0] + logger.info(f"Found LLM layers at: {path} (num_layers={len(layers)})") + self.llm_backbone = layers + self.layers_path = path + else: + logger.error("Could not find transformer layers in model!") + for name, _ in list(self.model.named_modules())[:20]: + logger.info(f" {name}") + raise ValueError("Could not locate transformer layers in NVILA model") + + def _get_num_layers(self) -> int: + if hasattr(self, 'llm_backbone') and hasattr(self.llm_backbone, '__len__'): + return len(self.llm_backbone) + return 24 + + def _get_layer_module(self, layer_idx: int): + if hasattr(self, 'llm_backbone') and hasattr(self.llm_backbone, '__getitem__'): + module = self.llm_backbone[layer_idx] + logger.info(f" Accessing layer {layer_idx}: {type(module).__name__}") + return module + logger.error(f"Cannot access layer {layer_idx} - llm_backbone not properly initialized") + return None + + def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: + self.hidden_states = {} + + import tempfile + with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f: + temp_path = f.name + image.save(temp_path) + + try: + prompt = [self.LLaVAImage(temp_path), question] + + from transformers import GenerationConfig + gen_config = GenerationConfig(max_new_tokens=20, do_sample=False) + response = self.model.generate_content(prompt, generation_config=gen_config) + finally: + os.unlink(temp_path) + + if 
isinstance(response, list):
            response = response[0]  # generate_content may return a list; take the first element
        answer = str(response).strip()

        return self.hidden_states.copy(), answer


# ============================================================================
# RoboRefer Extractor (NVILA-based)
# ============================================================================

class RoboReferExtractor(NVILAExtractor):
    """NVILA-style extractor that loads the RoboRefer fork of `llava`.

    Inherits layer discovery and generation from NVILAExtractor; only model
    loading differs, because RoboRefer ships its own `llava` package that
    must shadow any previously imported llava.
    """

    # Checkout of the RoboRefer repo whose `llava` package is imported below.
    ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer'

    def _load_model(self):
        # Make the import below resolve against ROBOREFER_PATH: temporarily
        # prepend the repo path and evict any cached llava modules.
        original_sys_path = sys.path.copy()

        if self.ROBOREFER_PATH not in sys.path:
            sys.path.insert(0, self.ROBOREFER_PATH)

        # Evict cached llava-related modules so `import llava` re-imports
        # from ROBOREFER_PATH instead of reusing the cached package.
        modules_to_remove = [key for key in list(sys.modules.keys()) if 'llava' in key.lower()]
        removed_modules = {}
        for mod in modules_to_remove:
            removed_modules[mod] = sys.modules.pop(mod)

        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            # Roll back both sys.path and the evicted module cache on failure.
            sys.path = original_sys_path
            for mod, module in removed_modules.items():
                sys.modules[mod] = module
            raise RuntimeError(f"Failed to import RoboRefer llava: {err}")

        # NOTE(review): on success the evicted modules are deliberately NOT
        # restored — RoboRefer's llava stays active for this process.
        sys.path = original_sys_path

        self.LLaVAImage = LLaVAImage
        self.clib = clib

        self.model = llava.load(self.model_path, model_base=None)

        self._find_llm_backbone()

        logger.info(f"Loaded RoboRefer model from {self.model_path}")


# ============================================================================
# Qwen2.5-VL Extractor
# ============================================================================

class Qwen25VLExtractor(BaseHiddenStateExtractor):
    """Hidden-state extractor for Qwen2.5-VL instruction-tuned models."""

    # Processor fallback used for local fine-tuned checkpoints (see _load_model).
    BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct"

    def _load_model(self):
        from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

        try:
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
                device_map=self.device
            )
        except ImportError:
            # device_map requires `accelerate`; fall back to manual placement.
            logger.info("accelerate not available, loading model without device_map...")
            self.model = 
Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, + torch_dtype=torch.bfloat16, + ) + self.model = self.model.to(self.device) + + self.model.eval() + + if self.model_path.startswith('/'): + logger.info(f"Fine-tuned model detected, loading processor from base model: {self.BASE_MODEL}") + self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL) + else: + self.processor = AutoProcessor.from_pretrained(self.model_path) + logger.info(f"Loaded Qwen2.5-VL model from {self.model_path}") + + def _get_num_layers(self) -> int: + return len(self.model.model.layers) + + def _get_layer_module(self, layer_idx: int): + return self.model.model.layers[layer_idx] + + def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: + self.hidden_states = {} + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": question} + ] + } + ] + + text = self.processor.apply_chat_template( + messages, tokenize=False, add_generation_prompt=True + ) + + from qwen_vl_utils import process_vision_info + image_inputs, video_inputs = process_vision_info(messages) + + inputs = self.processor( + text=[text], + images=image_inputs, + videos=video_inputs, + padding=True, + return_tensors="pt" + ) + inputs = inputs.to(self.device) + + with torch.no_grad(): + output_ids = self.model.generate( + **inputs, + max_new_tokens=20, + do_sample=False, + ) + + input_len = inputs['input_ids'].shape[1] + generated_ids = output_ids[0, input_len:] + answer = self.processor.tokenizer.decode(generated_ids, skip_special_tokens=True).strip() + + return self.hidden_states.copy(), answer + + +# ============================================================================ +# Factory Function +# ============================================================================ + +def get_extractor(model_type: str, model_path: str, scale: str = None, **kwargs) -> BaseHiddenStateExtractor: + if model_type == 
'nvila' and scale == 'roborefer': + return RoboReferExtractor(model_path, **kwargs) + + extractors = { + 'molmo': MolmoExtractor, + 'nvila': NVILAExtractor, + 'qwen': Qwen25VLExtractor, + } + if model_type not in extractors: + raise ValueError(f"Unknown model type: {model_type}. Available: {list(extractors.keys())}") + return extractors[model_type](model_path, **kwargs) + + +# ============================================================================ +# Extraction with Per-Sample Recording +# ============================================================================ + +def extract_all_with_predictions( + extractor: BaseHiddenStateExtractor, + data: Dict[str, List[dict]], +) -> Dict[str, List[dict]]: + """Extract hidden states and predictions for all samples. + + Returns: + sample_records: {category -> [{hidden_states: {layer: vec}, is_correct: bool, predicted: str, index: int}]} + """ + sample_records = defaultdict(list) + + for category in CATEGORY_ORDER: + if category not in data: + continue + samples = data[category] + logger.info(f"Processing category: {category} ({len(samples)} samples)") + success_count = 0 + + for sample in tqdm(samples, desc=f" {category}"): + try: + image = decode_base64_image(sample['image_base64']) + hidden_states, predicted = extractor.extract_and_predict(image, sample['question']) + + is_correct = check_answer(predicted, category) + mark = "O" if is_correct else "X" + tqdm.write(f" [{mark}] #{sample['index']:<6} expected={category:<8} | predicted=\"{predicted[:80]}\"") + + record = { + 'hidden_states': {}, + 'is_correct': is_correct, + 'predicted': predicted, + 'index': sample['index'], + } + + for layer_idx in extractor.target_layers: + if layer_idx in hidden_states: + state = hidden_states[layer_idx].numpy().flatten() + if state.size > 0: + record['hidden_states'][layer_idx] = state + + if record['hidden_states']: + sample_records[category].append(record) + success_count += 1 + else: + logger.warning(f" No hidden states for 
def compute_balanced_size(
    sample_records: Dict[str, List[dict]],
    filter_correct: bool,
    categories: Optional[List[str]] = None,
) -> int:
    """Find a balanced per-category sample size for one correctness subset.

    Counts, for every category, how many records have
    ``is_correct == filter_correct`` and returns a common size usable for
    all categories:

    - 0 if any category is missing or has no matching records
      (balancing impossible);
    - otherwise the minimum count rounded down to the nearest multiple of
      50, falling back to the raw minimum when it is below 50 so that
      small-but-nonempty groups are not skipped.

    Args:
        sample_records: {category -> [record]}; each record carries an
            ``is_correct`` bool.
        filter_correct: select the correct (True) or incorrect (False) group.
        categories: categories that must all be balanced; defaults to
            CATEGORY_ORDER (backward compatible).

    Returns:
        Balanced per-category sample count, 0 when balancing is impossible.
    """
    cats = CATEGORY_ORDER if categories is None else categories

    counts = []
    for cat in cats:
        if cat not in sample_records:
            # A missing category makes balanced sampling impossible.
            return 0
        counts.append(sum(1 for s in sample_records[cat] if s['is_correct'] == filter_correct))

    min_count = min(counts) if counts else 0
    if min_count == 0:
        return 0

    balanced = (min_count // 50) * 50
    # Less than 50 available but still > 0 — use the raw min, don't skip.
    return balanced if balanced > 0 else min_count


def balanced_sample_and_average(
    sample_records: Dict[str, List[dict]],
    filter_correct: bool,
    n_samples: int,
    target_layers: List[int],
    seed: int = 42,
    categories: Optional[List[str]] = None,
) -> Dict[int, Dict[str, np.ndarray]]:
    """Sample n_samples per category from the filtered group and average.

    For every category, deterministically (via ``seed``) draws ``n_samples``
    records whose ``is_correct`` matches ``filter_correct`` and averages
    their per-layer hidden-state vectors. Categories with too few matching
    records are skipped with a warning.

    Robustness fix: the original indexed ``sample_records[category]``
    directly and raised KeyError for a category absent from the dict,
    whereas compute_balanced_size tolerates missing categories; missing
    categories are now skipped via ``.get``.

    Args:
        sample_records: {category -> [record]}; each record has
            ``is_correct`` and ``hidden_states`` ({layer -> vector}).
        filter_correct: select correct (True) or incorrect (False) records.
        n_samples: number of records to draw per category.
        target_layers: layer indices to average over.
        seed: RNG seed for reproducible sampling.
        categories: categories to process; defaults to CATEGORY_ORDER.

    Returns:
        {layer_idx -> {category -> averaged_vector}}
    """
    cats = CATEGORY_ORDER if categories is None else categories
    rng = random.Random(seed)

    result = defaultdict(dict)

    for category in cats:
        # .get: tolerate categories missing from sample_records (see docstring).
        filtered = [s for s in sample_records.get(category, []) if s['is_correct'] == filter_correct]

        if len(filtered) < n_samples:
            logger.warning(f"  {category}: only {len(filtered)} samples, need {n_samples}")
            continue

        sampled = rng.sample(filtered, n_samples)

        for layer_idx in target_layers:
            vectors = [r['hidden_states'][layer_idx] for r in sampled if layer_idx in r['hidden_states']]
            if vectors:
                result[layer_idx][category] = np.mean(vectors, axis=0)

    return dict(result)
def save_per_sample_predictions(
    sample_records: Dict[str, List[dict]],
    scale: str,
    save_path: str,
):
    """Save per-sample prediction details to CSV.

    One row per extracted sample: dataset index, category (which is also
    the expected answer), scale tag, raw prediction, correctness flag.
    """
    rows = []
    for cat in CATEGORY_ORDER:
        for record in sample_records.get(cat, []):
            rows.append({
                'index': record['index'],
                'category': cat,
                'scale': scale,
                'predicted': record['predicted'],
                'expected': cat,  # expected answer is the category label itself
                'is_correct': record['is_correct'],
            })

    df = pd.DataFrame(rows)
    df.to_csv(save_path, index=False)
    logger.info(f"Saved {len(rows)} per-sample predictions to {save_path}")


# ============================================================================
# Analysis Functions
# ============================================================================

def compute_similarity_matrix(
    representations: Dict[str, np.ndarray]
) -> pd.DataFrame:
    """Pairwise cosine similarity between category representation vectors.

    Rows/columns follow CATEGORY_ORDER, restricted to categories present.
    """
    available = [c for c in CATEGORY_ORDER if c in representations]
    vectors = np.array([representations[cat] for cat in available])
    sim_matrix = cosine_similarity(vectors)
    return pd.DataFrame(sim_matrix, index=available, columns=available)


def analyze_hypothesis(sim_df: pd.DataFrame, model_name: str) -> dict:
    """Extract hypothesis-relevant similarity pairs from one matrix.

    Records sim(above, far), sim(under, close) and the sim(left, right)
    control, plus the difference of each hypothesis pair against the
    control. A pair whose categories are missing yields None.

    Fix: the diff computations previously used truthiness
    (``if results.get('sim_above_far') and ...``), which silently skipped
    the diff whenever a similarity was exactly 0.0. Cosine similarity can
    legitimately be zero or negative, so missing-ness is now tested with
    ``is not None``.
    """
    results = {'model': model_name}

    pairs_to_check = {
        'above_far': ('above', 'far'),
        'under_close': ('under', 'close'),
        'left_right': ('left', 'right'),
    }

    for pair_name, (cat1, cat2) in pairs_to_check.items():
        if cat1 in sim_df.index and cat2 in sim_df.columns:
            results[f'sim_{pair_name}'] = sim_df.loc[cat1, cat2]
        else:
            results[f'sim_{pair_name}'] = None

    # Only None means "missing"; 0.0 is a valid similarity value.
    if results.get('sim_above_far') is not None and results.get('sim_left_right') is not None:
        results['diff_above_far_vs_left_right'] = results['sim_above_far'] - results['sim_left_right']
    if results.get('sim_under_close') is not None and results.get('sim_left_right') is not None:
        results['diff_under_close_vs_left_right'] = results['sim_under_close'] - results['sim_left_right']

    return results


# ============================================================================
# Visualization
# ============================================================================

def plot_similarity_heatmap(sim_df: pd.DataFrame, title: str, save_path: str):
    """Render one similarity matrix as an annotated heatmap PNG."""
    plt.figure(figsize=(10, 8))
    available_order = [c for c in CATEGORY_ORDER if c in sim_df.index]
    sim_df_ordered = sim_df.loc[available_order, available_order]

    sns.heatmap(
        sim_df_ordered, annot=True, fmt='.4f', cmap='RdYlBu_r',
        center=0.5, vmin=0, vmax=1, square=True, linewidths=0.5,
        cbar_kws={'label': 'Cosine Similarity'}
    )
    plt.title(title, fontsize=14, fontweight='bold')
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved heatmap: {save_path}")


def _extract_pair_trajectory(
    all_layer_sims: Dict[int, pd.DataFrame],
    cat1: str, cat2: str,
) -> Tuple[List[int], List[float]]:
    """Collect (layers, similarity values) for one category pair across
    layers, skipping layers whose matrix lacks either category."""
    layers = sorted(all_layer_sims.keys())
    valid_layers = []
    values = []
    for l in layers:
        df = all_layer_sims[l]
        if cat1 in df.index and cat2 in df.columns:
            valid_layers.append(l)
            values.append(df.loc[cat1, cat2])
    return valid_layers, values


def get_representative_layers(all_layers: List[int], n: int = 5) -> List[int]:
    """Pick up to ``n`` roughly evenly spaced layers (first and last included)."""
    if len(all_layers) <= n:
        return list(all_layers)
    indices = np.linspace(0, len(all_layers) - 1, n, dtype=int)
    return [all_layers[i] for i in indices]
_extract_pair_trajectory(all_layer_sims, cat1, cat2) + ax.plot(layers, vals, '-', color=color, label=label, linewidth=2.5, markersize=0) + for cat1, cat2, label, color in TRAJECTORY_PAIRS['within_axis']: + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + ax.plot(layers, vals, '--', color=color, label=label, linewidth=1.8, markersize=0) + for cat1, cat2, label, color in TRAJECTORY_PAIRS['counter_hypothesis']: + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + ax.plot(layers, vals, ':', color=color, label=label, linewidth=1.5, alpha=0.8) + + ax.set_xlabel('Layer Index', fontsize=12) + ax.set_ylabel('Cosine Similarity', fontsize=12) + ax.set_title(f'{title}\nPairwise Similarity Across Layers', fontsize=13) + ax.legend(fontsize=9, loc='best') + ax.grid(True, alpha=0.3) + + ax = axes[1] + lr_layers, lr_vals = _extract_pair_trajectory(all_layer_sims, 'left', 'right') + lr_dict = dict(zip(lr_layers, lr_vals)) + + for cat1, cat2, label, color in TRAJECTORY_PAIRS['hypothesis']: + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)] + ax.plot(layers, diffs, '-', color=color, label=f'{label} - left-right', + linewidth=2.5, markersize=0) + + for cat1, cat2, label, color in TRAJECTORY_PAIRS['counter_hypothesis']: + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)] + ax.plot(layers, diffs, ':', color=color, label=f'{label} - left-right', + linewidth=1.5, alpha=0.8) + + for cat1, cat2, label, color in TRAJECTORY_PAIRS['within_axis']: + if label == 'left-right': + continue + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)] + ax.plot(layers, diffs, '--', color=color, label=f'{label} - left-right', + linewidth=1.5, alpha=0.7) + + ax.axhline(y=0, color='gray', linestyle='-', linewidth=1, alpha=0.5) + 
ax.set_xlabel('Layer Index', fontsize=12)
    ax.set_ylabel('Similarity Difference (pair - left-right)', fontsize=12)
    ax.set_title(f'{title}\nRelative to Left-Right Baseline', fontsize=13)
    ax.legend(fontsize=8, loc='best')
    ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved trajectory plot: {save_path}")


def plot_cross_scale_trajectories(
    cross_scale_data: Dict[str, Dict[int, pd.DataFrame]],
    model_type: str,
    save_path: str,
):
    """Plot, for three key category pairs, the layerwise cosine-similarity
    trajectory of every available scale (one panel per pair).

    Args:
        cross_scale_data: {scale -> {layer_idx -> similarity DataFrame}}.
        model_type: model family name, used only in the figure title.
        save_path: output PNG path.
    """
    # Hypothesis pairs contrasted with the left-right control pair.
    pairs = [
        ('above', 'far', 'above-far (hypothesis)'),
        ('under', 'close', 'under-close (hypothesis)'),
        ('left', 'right', 'left-right (control)'),
    ]

    fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6))
    if len(pairs) == 1:
        axes = [axes]  # plt.subplots returns a bare Axes when ncols == 1

    for idx, (cat1, cat2, label) in enumerate(pairs):
        ax = axes[idx]
        # Fixed scale order keeps colors and legend order stable across figures.
        for scale in ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']:
            if scale not in cross_scale_data:
                continue
            layer_sims = cross_scale_data[scale]
            layers, vals = _extract_pair_trajectory(layer_sims, cat1, cat2)
            color = SCALE_COLORS.get(scale, 'gray')
            ax.plot(layers, vals, '-', color=color, label=scale, linewidth=2, markersize=0)

        ax.set_xlabel('Layer Index', fontsize=12)
        ax.set_ylabel('Cosine Similarity', fontsize=12)
        ax.set_title(label, fontsize=13, fontweight='bold')
        ax.legend(fontsize=10)
        ax.grid(True, alpha=0.3)

    fig.suptitle(
        f'{model_type.upper()} - Similarity Trajectory Across Scales',
        fontsize=15, fontweight='bold', y=1.02
    )
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved cross-scale trajectory: {save_path}")


def plot_similarity_evolution_heatmap(
    cross_scale_data: Dict[str, Dict[int, pd.DataFrame]],
    model_type: str,
    save_path: str,
):
    """Render each category pair as a (scale x layer) similarity heatmap."""
    # Pairs shown as one heatmap row-block each.
    pairs = [
        ('above', 'far', 'above-far'),
        ('under', 'close', 'under-close'),
        ('left', 'right', 'left-right'),
        ('above', 'under', 'above-under'),
('far', 'close', 'far-close'), + ] + scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] + available_scales = [s for s in scale_order if s in cross_scale_data] + + first_scale = available_scales[0] + all_layers = sorted(cross_scale_data[first_scale].keys()) + + fig, axes = plt.subplots(len(pairs), 1, figsize=(max(14, len(all_layers) * 0.5), 3 * len(pairs))) + if len(pairs) == 1: + axes = [axes] + + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + matrix = np.full((len(available_scales), len(all_layers)), np.nan) + for si, scale in enumerate(available_scales): + layer_sims = cross_scale_data[scale] + for li, layer in enumerate(all_layers): + if layer in layer_sims: + df = layer_sims[layer] + if cat1 in df.index and cat2 in df.columns: + matrix[si, li] = df.loc[cat1, cat2] + + im = ax.imshow(matrix, aspect='auto', cmap='RdYlBu_r', vmin=0.5, vmax=1.0) + ax.set_yticks(range(len(available_scales))) + ax.set_yticklabels(available_scales, fontsize=10) + + step = max(1, len(all_layers) // 15) + ax.set_xticks(range(0, len(all_layers), step)) + ax.set_xticklabels([str(all_layers[i]) for i in range(0, len(all_layers), step)], fontsize=8) + + ax.set_title(label, fontsize=12, fontweight='bold') + ax.set_xlabel('Layer Index', fontsize=10) + fig.colorbar(im, ax=ax, label='Cosine Similarity', shrink=0.8) + + fig.suptitle( + f'{model_type.upper()} - Similarity Evolution (Layer x Scale)', + fontsize=15, fontweight='bold', y=1.01 + ) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved evolution heatmap: {save_path}") + + +# ============================================================================ +# Comparison Visualizations (new for this experiment) +# ============================================================================ + +def plot_accuracy_chart( + accuracy_records: List[dict], + model_type: str, + save_path: str, +): + """Bar chart of per-category accuracy across scales.""" + 
fig, ax = plt.subplots(figsize=(14, 6)) + + scales = [r['scale'] for r in accuracy_records] + x = np.arange(len(CATEGORY_ORDER) + 1) # +1 for overall + width = 0.8 / len(scales) + + for i, record in enumerate(accuracy_records): + values = [record.get(f'{cat}_accuracy', 0) for cat in CATEGORY_ORDER] + values.append(record.get('overall_accuracy', 0)) + offset = (i - len(scales) / 2 + 0.5) * width + color = SCALE_COLORS.get(record['scale'], 'gray') + bars = ax.bar(x + offset, values, width, label=record['scale'], color=color) + + for bar, val in zip(bars, values): + if val > 0: + ax.annotate( + f'{val:.0%}', + xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()), + xytext=(0, 2), textcoords='offset points', + ha='center', va='bottom', fontsize=6, rotation=90, + ) + + ax.set_ylabel('Accuracy') + ax.set_title(f'{model_type.upper()} - Per-Category Accuracy Across Scales', fontsize=14, fontweight='bold') + ax.set_xticks(x) + ax.set_xticklabels(CATEGORY_ORDER + ['overall']) + ax.legend(fontsize=9) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, label='chance') + + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved accuracy chart: {save_path}") + + +def plot_correct_vs_incorrect_overlay( + correct_sims: Dict[int, pd.DataFrame], + incorrect_sims: Optional[Dict[int, pd.DataFrame]], + scale: str, + model_type: str, + save_path: str, +): + """Overlay correct vs incorrect similarity trajectories for key pairs.""" + pairs = [ + ('above', 'far', 'above-far'), + ('under', 'close', 'under-close'), + ('left', 'right', 'left-right'), + ] + + fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6)) + if len(pairs) == 1: + axes = [axes] + + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + + layers_c, vals_c = _extract_pair_trajectory(correct_sims, cat1, cat2) + ax.plot(layers_c, vals_c, '-', color='#2ca02c', label='correct', linewidth=2) + + if 
incorrect_sims: + layers_i, vals_i = _extract_pair_trajectory(incorrect_sims, cat1, cat2) + ax.plot(layers_i, vals_i, '-', color='#d62728', label='incorrect', linewidth=2) + + ax.set_xlabel('Layer Index', fontsize=12) + ax.set_ylabel('Cosine Similarity', fontsize=12) + ax.set_title(f'{label}', fontsize=13, fontweight='bold') + ax.legend(fontsize=10) + ax.grid(True, alpha=0.3) + + fig.suptitle( + f'{model_type.upper()} ({scale}) - Correct vs Incorrect', + fontsize=15, fontweight='bold', y=1.02 + ) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved correct vs incorrect overlay: {save_path}") + + +def plot_ablation_summary( + ablation_data: List[dict], + model_type: str, + save_path: str, +): + """Key ablation plot: correct-only vs all-samples similarity across scales. + + x-axis = scales, two lines per pair: + - solid: correct-only similarity + - dashed: all-samples similarity (from the same data, no balanced sampling) + """ + pairs = [ + ('above', 'far', 'above-far', '#d62728'), + ('under', 'close', 'under-close', '#1f77b4'), + ('left', 'right', 'left-right', '#2ca02c'), + ] + + scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] + + fig, axes = plt.subplots(1, 2, figsize=(18, 7)) + + # Left panel: absolute similarities + ax = axes[0] + for cat1, cat2, label, color in pairs: + # correct-only line + x_vals, y_correct, y_all = [], [], [] + for i, scale in enumerate(scale_order): + entry = next((d for d in ablation_data if d['scale'] == scale), None) + if entry is None: + continue + sim_c = entry.get(f'correct_{cat1}_{cat2}') + sim_a = entry.get(f'all_{cat1}_{cat2}') + if sim_c is not None: + x_vals.append(i) + y_correct.append(sim_c) + y_all.append(sim_a) + + if x_vals: + ax.plot(x_vals, y_correct, '-o', color=color, label=f'{label} (correct)', linewidth=2.5) + ax.plot(x_vals, y_all, '--s', color=color, label=f'{label} (all)', linewidth=1.5, alpha=0.6) + + 
ax.set_xticks(range(len(scale_order))) + ax.set_xticklabels(scale_order, fontsize=10) + ax.set_xlabel('Scale', fontsize=12) + ax.set_ylabel('Cosine Similarity', fontsize=12) + ax.set_title('Correct-Only vs All-Samples Similarity', fontsize=13, fontweight='bold') + ax.legend(fontsize=8, loc='best') + ax.grid(True, alpha=0.3) + + # Right panel: accuracy overlay + ax2 = axes[1] + x_vals, acc_vals = [], [] + for i, scale in enumerate(scale_order): + entry = next((d for d in ablation_data if d['scale'] == scale), None) + if entry and 'accuracy' in entry: + x_vals.append(i) + acc_vals.append(entry['accuracy']) + + ax2.bar(x_vals, acc_vals, color=[SCALE_COLORS.get(scale_order[x], 'gray') for x in x_vals], alpha=0.8) + for x, acc in zip(x_vals, acc_vals): + ax2.annotate(f'{acc:.1%}', xy=(x, acc), xytext=(0, 5), textcoords='offset points', + ha='center', fontsize=10, fontweight='bold') + + ax2.set_xticks(range(len(scale_order))) + ax2.set_xticklabels(scale_order, fontsize=10) + ax2.set_xlabel('Scale', fontsize=12) + ax2.set_ylabel('Overall Accuracy', fontsize=12) + ax2.set_title('Model Accuracy by Scale', fontsize=13, fontweight='bold') + ax2.set_ylim(0, 1.15) + ax2.grid(True, alpha=0.3, axis='y') + + fig.suptitle( + f'{model_type.upper()} - Ablation: Is Similarity Change Due to Accuracy?', + fontsize=15, fontweight='bold', y=1.02 + ) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved ablation summary: {save_path}") + + +# ============================================================================ +# Model Configurations +# ============================================================================ + +MODEL_CONFIGS = { + 'molmo': { + 'vanilla': 'allenai/Molmo-7B-O-0924', + '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared', + '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared', + '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared', + '2m': 
'/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared', + }, + 'nvila': { + 'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B', + '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221', + '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221', + '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221', + '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632', + 'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model', + }, + 'qwen': { + 'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct', + '80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221', + '400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221', + '800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221', + '2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517', + }, +} + + +# ============================================================================ +# Main +# ============================================================================ + +def process_subset( + subset_name: str, + all_layer_reps: Dict[int, Dict[str, np.ndarray]], + target_layers: List[int], + scale: str, + model_type: str, + output_dir: str, + n_samples: int, +) -> Tuple[Dict[int, pd.DataFrame], List[dict]]: + """Compute similarity matrices and save outputs for one subset (correct/incorrect).""" + num_layers = len(target_layers) + scale_sims = {} + results_list = [] + + for layer_idx in sorted(all_layer_reps.keys()): + reps = all_layer_reps[layer_idx] + if len(reps) < 2: + continue + + sim_df = compute_similarity_matrix(reps) + scale_sims[layer_idx] = sim_df + + model_name = f"{model_type}_{scale}_{subset_name}" + results = analyze_hypothesis(sim_df, model_name) + results['layer_idx'] = layer_idx 
def _load_scale_sims_from_csvs(subset_dir: str, scale: str) -> Dict[int, pd.DataFrame]:
    """Reload per-layer similarity CSVs for one scale from disk.

    Scans ``subset_dir`` for files named ``similarity_{scale}_L{idx}.csv``
    (as written by process_subset) and returns {layer_idx -> DataFrame}.
    Files whose layer suffix is not an integer are ignored.
    """
    import glob as glob_mod
    pattern = os.path.join(subset_dir, f'similarity_{scale}_L*.csv')
    files = glob_mod.glob(pattern)
    layer_sims = {}
    for fpath in files:
        basename = os.path.basename(fpath)
        # similarity_{scale}_L{idx}.csv
        layer_str = basename.replace(f'similarity_{scale}_L', '').replace('.csv', '')
        try:
            layer_idx = int(layer_str)
        except ValueError:
            # Not a per-layer file (unexpected suffix) — skip it.
            continue
        df = pd.read_csv(fpath, index_col=0)
        layer_sims[layer_idx] = df
    return layer_sims


def run_merge(
    model_type: str,
    scales: List[str],
    output_dir: str,
    correct_dir: str,
    incorrect_dir: str,
    accuracy_dir: str,
    comparison_dir: str,
):
    """Merge mode: read per-scale artifacts and generate cross-scale plots.

    Consumes the outputs of earlier per-scale runs (similarity CSVs,
    accuracy_{scale}.json, ablation_{scale}.json) and produces cross-scale
    trajectory plots, evolution heatmaps, the accuracy chart, and the
    ablation summary. No extraction or model loading happens here.

    Args:
        model_type: model family key ('molmo' | 'nvila' | 'qwen').
        scales: scales requested on the command line.
        output_dir: root results directory (used only for final logging).
        correct_dir / incorrect_dir: dirs with per-subset similarity CSVs.
        accuracy_dir: dir with per-scale accuracy JSONs; receives the chart.
        comparison_dir: dir with per-scale ablation JSONs; receives plots.
    """
    # Canonical ordering so legends/plots are consistent across runs.
    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
    available_scales = [s for s in scale_order if s in scales]

    # 1. Rebuild cross-scale similarity dicts from CSVs
    cross_scale_correct = {}
    cross_scale_incorrect = {}
    for scale in available_scales:
        c_sims = _load_scale_sims_from_csvs(correct_dir, scale)
        if c_sims:
            cross_scale_correct[scale] = c_sims
            logger.info(f"  Loaded correct-only CSVs for {scale}: {len(c_sims)} layers")

        i_sims = _load_scale_sims_from_csvs(incorrect_dir, scale)
        if i_sims:
            cross_scale_incorrect[scale] = i_sims
            logger.info(f"  Loaded incorrect-only CSVs for {scale}: {len(i_sims)} layers")

    # 2. Cross-scale trajectory and evolution heatmap (need >= 2 scales)
    if len(cross_scale_correct) > 1:
        logger.info("\n--- Cross-scale comparison (correct-only) ---")
        plot_cross_scale_trajectories(
            cross_scale_correct, model_type,
            os.path.join(comparison_dir, 'cross_scale_correct_only.png')
        )
        plot_similarity_evolution_heatmap(
            cross_scale_correct, model_type,
            os.path.join(comparison_dir, 'evolution_heatmap_correct.png')
        )

    if len(cross_scale_incorrect) > 1:
        logger.info("\n--- Cross-scale comparison (incorrect-only) ---")
        plot_cross_scale_trajectories(
            cross_scale_incorrect, model_type,
            os.path.join(comparison_dir, 'cross_scale_incorrect_only.png')
        )
        plot_similarity_evolution_heatmap(
            cross_scale_incorrect, model_type,
            os.path.join(comparison_dir, 'evolution_heatmap_incorrect.png')
        )

    # 3. Accuracy chart from per-scale JSONs
    accuracy_records = []
    for scale in available_scales:
        acc_path = os.path.join(accuracy_dir, f'accuracy_{scale}.json')
        if os.path.exists(acc_path):
            with open(acc_path) as f:
                accuracy_records.append(json.load(f))

    if accuracy_records:
        acc_df = pd.DataFrame(accuracy_records)
        acc_df.to_csv(os.path.join(accuracy_dir, 'accuracy_summary.csv'), index=False)
        plot_accuracy_chart(accuracy_records, model_type,
                            os.path.join(accuracy_dir, 'accuracy_chart.png'))
        logger.info(f"  Saved merged accuracy summary ({len(accuracy_records)} scales)")

    # 4. Ablation summary from per-scale JSONs
    ablation_data = []
    for scale in available_scales:
        abl_path = os.path.join(comparison_dir, f'ablation_{scale}.json')
        if os.path.exists(abl_path):
            with open(abl_path) as f:
                ablation_data.append(json.load(f))

    if ablation_data:
        ablation_df = pd.DataFrame(ablation_data)
        ablation_df.to_csv(os.path.join(comparison_dir, 'ablation_summary.csv'), index=False)
        plot_ablation_summary(ablation_data, model_type,
                              os.path.join(comparison_dir, 'ablation_summary.png'))
        logger.info(f"  Saved merged ablation summary ({len(ablation_data)} scales)")

    # 5. NOTE(review): the original code here collected
    # (subset_dir, scale, subset_name) tuples for per-scale results_summary
    # CSVs into a local that was never used — the merge of those CSVs was
    # never implemented. The dead scan has been removed; implement the
    # actual merge here if it is ever needed.

    logger.info(f"\n=== Merge Complete ===")
    logger.info(f"Results in: {output_dir}")
parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer', + help='Do not auto-add roborefer for nvila (use for parallel mode).') + + args = parser.parse_args() + + if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer: + args.scales.append('roborefer') + + np.random.seed(args.seed) + torch.manual_seed(args.seed) + random.seed(args.seed) + + output_dir = os.path.join(args.output_dir, args.model_type) + correct_dir = os.path.join(output_dir, 'correct_only') + incorrect_dir = os.path.join(output_dir, 'incorrect_only') + accuracy_dir = os.path.join(output_dir, 'accuracy') + comparison_dir = os.path.join(output_dir, 'comparison') + for d in [correct_dir, incorrect_dir, accuracy_dir, comparison_dir]: + os.makedirs(d, exist_ok=True) + + # ------------------------------------------------------------------ + # Merge mode: read existing per-scale outputs and generate plots + # ------------------------------------------------------------------ + if args.merge: + logger.info("\n=== MERGE MODE: Reading existing per-scale results ===") + run_merge(args.model_type, args.scales, output_dir, + correct_dir, incorrect_dir, accuracy_dir, comparison_dir) + return + + # ------------------------------------------------------------------ + # Normal mode: extract + analyze + # ------------------------------------------------------------------ + logger.info("\n=== Loading & Modifying EmbSpatialBench Data (ALL samples) ===") + data = load_and_modify_data(args.data_path, args.seed) + + model_configs = MODEL_CONFIGS[args.model_type] + + all_results = [] + accuracy_records = [] + cross_scale_correct = {} + cross_scale_incorrect = {} + ablation_data = [] + + for scale in args.scales: + if scale not in model_configs: + logger.warning(f"Scale {scale} not available for {args.model_type}, skipping...") + continue + + model_path = model_configs[scale] + if not os.path.exists(model_path) and not model_path.startswith('Qwen/') 
and not model_path.startswith('allenai/'): + logger.warning(f"Model path not found: {model_path}, skipping...") + continue + + logger.info(f"\n{'='*60}") + logger.info(f"Processing {args.model_type} - {scale}") + logger.info(f"Model path: {model_path}") + logger.info(f"{'='*60}") + + try: + extractor = get_extractor( + args.model_type, model_path, scale=scale, device=args.device, + ) + target_layers = extractor.target_layers + + # Phase A: Extract all samples with predictions + logger.info("\n--- Phase A: Extracting hidden states with predictions ---") + sample_records = extract_all_with_predictions(extractor, data) + + # Save per-sample predictions + save_per_sample_predictions( + sample_records, scale, + os.path.join(accuracy_dir, f'predictions_{scale}.csv') + ) + + # Compute and save accuracy + acc_stats = compute_accuracy_stats(sample_records, scale, args.model_type) + accuracy_records.append(acc_stats) + logger.info(f"\n Accuracy for {scale}: {acc_stats['overall_accuracy']:.1%}") + for cat in CATEGORY_ORDER: + logger.info(f" {cat}: {acc_stats[f'{cat}_correct']}/{acc_stats[f'{cat}_total']} " + f"= {acc_stats[f'{cat}_accuracy']:.1%}") + + # Phase B: Balanced sampling + logger.info("\n--- Phase B: Balanced sampling ---") + + n_correct = compute_balanced_size(sample_records, filter_correct=True) + n_incorrect = compute_balanced_size(sample_records, filter_correct=False) + logger.info(f" Correct group: {n_correct} samples/category") + logger.info(f" Incorrect group: {n_incorrect} samples/category") + + # Also compute "all" (no filter) for ablation comparison using ALL samples + logger.info("\n--- Computing all-samples similarity (unfiltered) ---") + all_reps = {} + for layer_idx in target_layers: + cat_avgs = {} + for cat in CATEGORY_ORDER: + vectors = [r['hidden_states'][layer_idx] + for r in sample_records.get(cat, []) + if layer_idx in r['hidden_states']] + if vectors: + cat_avgs[cat] = np.mean(vectors, axis=0) + if cat_avgs: + all_reps[layer_idx] = cat_avgs + + 
# Get "all" similarity at a representative deep layer for ablation + all_sims_for_ablation = {} + if all_reps: + rep_layer = get_representative_layers(sorted(all_reps.keys()), n=1)[0] + rep_sim_all = compute_similarity_matrix(all_reps[rep_layer]) + for cat1, cat2, _, _ in (TRAJECTORY_PAIRS['hypothesis'] + + TRAJECTORY_PAIRS['within_axis']): + if cat1 in rep_sim_all.index and cat2 in rep_sim_all.columns: + all_sims_for_ablation[f'all_{cat1}_{cat2}'] = rep_sim_all.loc[cat1, cat2] + + # Phase C: Process correct-only subset + correct_layer_sims = {} + if n_correct > 0: + logger.info(f"\n--- Phase C: Processing correct-only (n={n_correct}) ---") + correct_reps = balanced_sample_and_average( + sample_records, filter_correct=True, n_samples=n_correct, + target_layers=target_layers, seed=args.seed, + ) + + correct_layer_sims, correct_results = process_subset( + 'correct', correct_reps, target_layers, scale, + args.model_type, correct_dir, n_correct, + ) + all_results.extend(correct_results) + cross_scale_correct[scale] = correct_layer_sims + else: + logger.warning(f" Skipping correct-only: no correct samples in some category") + + # Process incorrect-only subset + incorrect_layer_sims = {} + if n_incorrect > 0: + logger.info(f"\n--- Phase C: Processing incorrect-only (n={n_incorrect}) ---") + incorrect_reps = balanced_sample_and_average( + sample_records, filter_correct=False, n_samples=n_incorrect, + target_layers=target_layers, seed=args.seed, + ) + + incorrect_layer_sims, incorrect_results = process_subset( + 'incorrect', incorrect_reps, target_layers, scale, + args.model_type, incorrect_dir, n_incorrect, + ) + all_results.extend(incorrect_results) + cross_scale_incorrect[scale] = incorrect_layer_sims + else: + logger.warning(f" Skipping incorrect-only: no incorrect samples in some category") + + # Correct vs incorrect overlay + if correct_layer_sims: + plot_correct_vs_incorrect_overlay( + correct_layer_sims, + incorrect_layer_sims if incorrect_layer_sims else None, + 
scale, args.model_type, + os.path.join(comparison_dir, f'correct_vs_incorrect_{scale}.png') + ) + + # Build ablation entry + ablation_entry = { + 'scale': scale, + 'accuracy': acc_stats['overall_accuracy'], + 'n_correct_per_cat': n_correct, + 'n_incorrect_per_cat': n_incorrect, + } + ablation_entry.update(all_sims_for_ablation) + + # Get correct-only similarity at the same representative layer + if correct_layer_sims and rep_layer in correct_layer_sims: + rep_sim_c = correct_layer_sims[rep_layer] + for cat1, cat2, _, _ in (TRAJECTORY_PAIRS['hypothesis'] + + TRAJECTORY_PAIRS['within_axis']): + if cat1 in rep_sim_c.index and cat2 in rep_sim_c.columns: + ablation_entry[f'correct_{cat1}_{cat2}'] = rep_sim_c.loc[cat1, cat2] + + # Get incorrect-only similarity + if incorrect_layer_sims and rep_layer in incorrect_layer_sims: + rep_sim_i = incorrect_layer_sims[rep_layer] + for cat1, cat2, _, _ in (TRAJECTORY_PAIRS['hypothesis'] + + TRAJECTORY_PAIRS['within_axis']): + if cat1 in rep_sim_i.index and cat2 in rep_sim_i.columns: + ablation_entry[f'incorrect_{cat1}_{cat2}'] = rep_sim_i.loc[cat1, cat2] + + ablation_data.append(ablation_entry) + + # Save per-scale ablation JSON (for merge mode) + ablation_path = os.path.join(comparison_dir, f'ablation_{scale}.json') + with open(ablation_path, 'w') as f: + json.dump(ablation_entry, f, indent=2, default=str) + + # Save per-scale accuracy JSON (for merge mode) + acc_path = os.path.join(accuracy_dir, f'accuracy_{scale}.json') + with open(acc_path, 'w') as f: + json.dump(acc_stats, f, indent=2, default=str) + + # Cleanup + del sample_records + extractor.cleanup() + + except Exception as e: + logger.error(f"Failed to process {args.model_type} - {scale}: {e}") + import traceback + traceback.print_exc() + continue + + # ======================== + # Cross-scale comparisons + # ======================== + + if len(cross_scale_correct) > 1: + logger.info("\n--- Cross-scale comparison (correct-only) ---") + plot_cross_scale_trajectories( + 
cross_scale_correct, args.model_type, + os.path.join(comparison_dir, 'cross_scale_correct_only.png') + ) + plot_similarity_evolution_heatmap( + cross_scale_correct, args.model_type, + os.path.join(comparison_dir, 'evolution_heatmap_correct.png') + ) + + if len(cross_scale_incorrect) > 1: + logger.info("\n--- Cross-scale comparison (incorrect-only) ---") + plot_cross_scale_trajectories( + cross_scale_incorrect, args.model_type, + os.path.join(comparison_dir, 'cross_scale_incorrect_only.png') + ) + plot_similarity_evolution_heatmap( + cross_scale_incorrect, args.model_type, + os.path.join(comparison_dir, 'evolution_heatmap_incorrect.png') + ) + + # Accuracy chart + if accuracy_records: + acc_df = pd.DataFrame(accuracy_records) + acc_df.to_csv(os.path.join(accuracy_dir, 'accuracy_summary.csv'), index=False) + plot_accuracy_chart(accuracy_records, args.model_type, + os.path.join(accuracy_dir, 'accuracy_chart.png')) + + # Ablation summary + if ablation_data: + ablation_df = pd.DataFrame(ablation_data) + ablation_df.to_csv(os.path.join(comparison_dir, 'ablation_summary.csv'), index=False) + plot_ablation_summary(ablation_data, args.model_type, + os.path.join(comparison_dir, 'ablation_summary.png')) + + # Save all results + if all_results: + results_df = pd.DataFrame(all_results) + results_df.to_csv(os.path.join(output_dir, 'results_summary.csv'), index=False) + + logger.info(f"\n{'='*60}") + logger.info("=== Analysis Complete ===") + logger.info(f"Results saved to: {output_dir}") + logger.info(f" Accuracy: {accuracy_dir}") + logger.info(f" Correct-only: {correct_dir}") + logger.info(f" Incorrect-only: {incorrect_dir}") + logger.info(f" Comparison: {comparison_dir}") + logger.info(f"{'='*60}") + + +if __name__ == '__main__': + main() diff --git a/exp2a_correct_filter/run_molmo.sh b/exp2a_correct_filter/run_molmo.sh new file mode 100644 index 0000000000000000000000000000000000000000..62c5c815c06b8613be9cf5e30ed72bc1b3447aab --- /dev/null +++ 
b/exp2a_correct_filter/run_molmo.sh @@ -0,0 +1,62 @@ +#!/bin/bash +set -e + +SCRIPT="/data/shared/Qwen/experiments/exp2a_correct_filter/exp2a_correct_filter_analysis.py" +PYTHON="conda run --no-capture-output -n molmo python" +MODEL="molmo" +LOG_DIR="/data/shared/Qwen/experiments/exp2a_correct_filter/logs/${MODEL}" +mkdir -p "$LOG_DIR" + +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(0 1 2 3 4) + +echo "=========================================" +echo " Molmo: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${LOG_DIR}/${scale}.log" + + echo "[GPU $gpu] $scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +echo "PIDs: ${PIDS[*]}" +echo "" + +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +echo "" +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. 
Check logs in $LOG_DIR" +fi + +echo "=========================================" +echo " Molmo: Running merge" +echo "=========================================" +$PYTHON $SCRIPT --model_type $MODEL --merge 2>&1 | tee "${LOG_DIR}/merge.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: /data/shared/Qwen/experiments/exp2a_correct_filter/results/${MODEL}/" diff --git a/exp2a_correct_filter/run_nvila.sh b/exp2a_correct_filter/run_nvila.sh new file mode 100644 index 0000000000000000000000000000000000000000..d4d2a2cd97331b2a47b6aa93573b1d54387dd641 --- /dev/null +++ b/exp2a_correct_filter/run_nvila.sh @@ -0,0 +1,63 @@ +#!/bin/bash +set -e + +SCRIPT="/data/shared/Qwen/experiments/exp2a_correct_filter/exp2a_correct_filter_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +MODEL="nvila" +LOG_DIR="/data/shared/Qwen/experiments/exp2a_correct_filter/logs/${MODEL}" +mkdir -p "$LOG_DIR" + +# NVILA has 6 scales (including roborefer) +SCALES=("vanilla" "80k" "400k" "800k" "2m" "roborefer") +GPUS=(0 1 2 3 4 5) + +echo "=========================================" +echo " NVILA: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${LOG_DIR}/${scale}.log" + + echo "[GPU $gpu] $scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +echo "PIDs: ${PIDS[*]}" +echo "" + +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +echo "" +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. 
Check logs in $LOG_DIR" +fi + +echo "=========================================" +echo " NVILA: Running merge" +echo "=========================================" +$PYTHON $SCRIPT --model_type $MODEL --merge 2>&1 | tee "${LOG_DIR}/merge.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: /data/shared/Qwen/experiments/exp2a_correct_filter/results/${MODEL}/" diff --git a/exp2a_correct_filter/run_qwen.sh b/exp2a_correct_filter/run_qwen.sh new file mode 100644 index 0000000000000000000000000000000000000000..046839ad59e5a65b2c419e77adebb70efbf55e7e --- /dev/null +++ b/exp2a_correct_filter/run_qwen.sh @@ -0,0 +1,62 @@ +#!/bin/bash +set -e + +SCRIPT="/data/shared/Qwen/experiments/exp2a_correct_filter/exp2a_correct_filter_analysis.py" +PYTHON="/usr/bin/python3" +MODEL="qwen" +LOG_DIR="/data/shared/Qwen/experiments/exp2a_correct_filter/logs/${MODEL}" +mkdir -p "$LOG_DIR" + +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(0 1 2 3 4) + +echo "=========================================" +echo " Qwen: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${LOG_DIR}/${scale}.log" + + echo "[GPU $gpu] $scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +echo "PIDs: ${PIDS[*]}" +echo "" + +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +echo "" +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. 
Check logs in $LOG_DIR" +fi + +echo "=========================================" +echo " Qwen: Running merge" +echo "=========================================" +$PYTHON $SCRIPT --model_type $MODEL --merge 2>&1 | tee "${LOG_DIR}/merge.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: /data/shared/Qwen/experiments/exp2a_correct_filter/results/${MODEL}/" diff --git a/exp2a_modified/exp2a_modified_embedding_analysis.py b/exp2a_modified/exp2a_modified_embedding_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..09bc757b67b74f1659cdde696e339eeef477f9c1 --- /dev/null +++ b/exp2a_modified/exp2a_modified_embedding_analysis.py @@ -0,0 +1,1228 @@ +""" +Experiment 2-A (Modified): Image-conditioned Representation Analysis + +Modification from original: +- Remove task format confound by unifying answer format +- All answers are pure spatial concepts: left, right, above, under, far, close +- Pairwise: "Is the {obj1} to the left or right of the {obj2}?" -> "left" +- Distance: "Compared to {ref}, is {target} far or close from you?" -> "far" +- 200 samples per category (up from 50) + +Goal: Verify Hypothesis 4 - that above/far and under/close are mapped to similar +positions in embedding space, while left/right are well-separated. 
+""" + +import os +import sys +import json +import argparse +import base64 +import logging +import random +import re +from io import BytesIO +from collections import defaultdict +from typing import Dict, List, Tuple, Optional, Any +from abc import ABC, abstractmethod + +import torch +import numpy as np +import pandas as pd +from PIL import Image +from tqdm import tqdm +import matplotlib.pyplot as plt +import seaborn as sns +from sklearn.metrics.pairwise import cosine_similarity + +# Setup logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +# Category order for output +CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 'close'] + +# Pair definitions for trajectory analysis +TRAJECTORY_PAIRS = { + 'hypothesis': [ + ('above', 'far', 'above-far', '#d62728'), # red + ('under', 'close', 'under-close', '#1f77b4'), # blue + ], + 'within_axis': [ + ('left', 'right', 'left-right', '#2ca02c'), # green + ('above', 'under', 'above-under', '#ff7f0e'), # orange + ('far', 'close', 'far-close', '#9467bd'), # purple + ], + 'counter_hypothesis': [ + ('above', 'close', 'above-close', '#e377c2'), # pink + ('under', 'far', 'under-far', '#17becf'), # cyan + ], +} + +# Scale colors for cross-scale plots +SCALE_COLORS = { + 'vanilla': '#1f77b4', + '80k': '#ff7f0e', + '400k': '#2ca02c', + '800k': '#d62728', + '2m': '#9467bd', + 'roborefer': '#8c564b', +} + + +# ============================================================================ +# Data Loading & Modification +# ============================================================================ + +# Regex patterns for extracting objects from pairwise questions +OBJECT_PATTERNS = [ + re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE), + 
re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE), + re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), +] + + +def extract_objects(question: str) -> Tuple[str, str]: + """Extract two objects from a pairwise relation question.""" + for pattern in OBJECT_PATTERNS: + m = pattern.search(question) + if m: + return m.group(1).strip(), m.group(2).strip() + raise ValueError(f"Could not extract objects from: {question}") + + +def modify_pairwise_sample(sample: dict) -> dict: + """Modify a pairwise relation sample (left/right/above/under).""" + obj1, obj2 = extract_objects(sample['question']) + category = sample['category'] + + if category in ['left', 'right']: + new_question = f"Is the {obj1} to the left or right of the {obj2}?" + else: # above, under + new_question = f"Is the {obj1} above or under the {obj2}?" + + return { + 'index': sample['index'], + 'image_base64': sample['image_base64'], + 'question': new_question, + 'answer': category, + 'category': category, + } + + +def modify_distance_sample(sample: dict, rng: random.Random) -> dict: + """Modify a distance relation sample (far/close).""" + category = sample['category'] + answer_key = sample['answer'] # e.g. "C" + options = sample['options'] # {'A': 'table', 'B': 'towel', ...} + + target_object = options[answer_key] + candidates = [v for k, v in options.items() if k != answer_key] + reference_object = rng.choice(candidates) + + new_question = f"Compared to {reference_object}, is {target_object} far or close from you?" + + return { + 'index': sample['index'], + 'image_base64': sample['image_base64'], + 'question': new_question, + 'answer': category, + 'category': category, + } + + +def load_and_modify_data( + tsv_path: str, + samples_per_category: int = 200, + seed: int = 42 +) -> Dict[str, List[dict]]: + """ + Load EmbSpatialBench data, modify questions to remove format confound. 
+ """ + rng = random.Random(seed) + np.random.seed(seed) + + df = pd.read_csv(tsv_path, sep='\t') + + # Group by category + raw_grouped = defaultdict(list) + for _, row in df.iterrows(): + category = row['category'] + sample = { + 'index': row['index'], + 'image_base64': row['image'], + 'question': row['question'], + 'answer': row['answer'], + 'category': category, + 'options': { + 'A': row['A'], + 'B': row['B'], + 'C': row['C'], + 'D': row['D'] + } + } + raw_grouped[category].append(sample) + + # Sample and modify + modified_data = defaultdict(list) + stats = {'total': 0, 'success': 0, 'failed': 0} + + for category in CATEGORY_ORDER: + samples = raw_grouped[category] + + # Sample up to samples_per_category + if len(samples) > samples_per_category: + indices = np.random.choice(len(samples), samples_per_category, replace=False) + samples = [samples[i] for i in indices] + + for sample in samples: + stats['total'] += 1 + try: + if category in ['left', 'right', 'above', 'under']: + modified = modify_pairwise_sample(sample) + else: # far, close + modified = modify_distance_sample(sample, rng) + + # Validate + assert modified['answer'] == modified['category'] + modified_data[category].append(modified) + stats['success'] += 1 + except Exception as e: + stats['failed'] += 1 + logger.warning(f" Failed to modify sample {sample['index']}: {e}") + + logger.info(f"Data modification: {stats['success']}/{stats['total']} success, {stats['failed']} failed") + for cat in CATEGORY_ORDER: + if cat in modified_data: + logger.info(f" {cat}: {len(modified_data[cat])} samples") + # Show first example + ex = modified_data[cat][0] + logger.info(f" Example Q: {ex['question']}") + logger.info(f" Example A: {ex['answer']}") + + return dict(modified_data) + + +def decode_base64_image(base64_str: str) -> Image.Image: + """Decode base64 string to PIL Image.""" + image_data = base64.b64decode(base64_str) + return Image.open(BytesIO(image_data)).convert('RGB') + + +# 
============================================================================ +# Base Extractor +# ============================================================================ + +class BaseHiddenStateExtractor(ABC): + """Base class for extracting hidden states from VLMs.""" + + def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None): + self.model_path = model_path + self.device = device + self.hidden_states = {} + self.hooks = [] + + self._load_model() + + num_layers = self._get_num_layers() + if target_layers is None: + self.target_layers = list(range(num_layers)) + logger.info(f"Model has {num_layers} layers. Extracting ALL layers (0..{num_layers-1})") + else: + self.target_layers = target_layers + logger.info(f"Model has {num_layers} layers. Target layers: {self.target_layers}") + + self._register_hooks() + + def _register_hooks(self): + """Register forward hooks on target layers.""" + for layer_idx in self.target_layers: + module = self._get_layer_module(layer_idx) + if module is not None: + hook = module.register_forward_hook(self._make_hook(layer_idx)) + self.hooks.append(hook) + logger.info(f" Registered hook on layer {layer_idx}") + + def _make_hook(self, layer_idx: int): + """Create a hook function for a specific layer.""" + def hook_fn(module, input, output): + if isinstance(output, tuple): + hidden = output[0] + else: + hidden = output + + # Last token pooling + last_token = hidden[:, -1, :].detach().cpu().float() + self.hidden_states[layer_idx] = last_token.squeeze(0) + + return hook_fn + + @abstractmethod + def _load_model(self): + pass + + @abstractmethod + def _get_num_layers(self) -> int: + pass + + @abstractmethod + def _get_layer_module(self, layer_idx: int): + pass + + @abstractmethod + def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]: + pass + + def cleanup(self): + """Remove hooks and free memory.""" + for hook in self.hooks: + hook.remove() + self.hooks = [] + if hasattr(self, 
'model'):
            del self.model
        if hasattr(self, 'processor'):
            del self.processor
        # Return cached CUDA blocks to the allocator so the next model fits.
        torch.cuda.empty_cache()


# ============================================================================
# Molmo Extractor
# ============================================================================

class MolmoExtractor(BaseHiddenStateExtractor):
    """Hidden state extractor for Molmo models (native olmo format)."""

    def _load_model(self):
        # A native olmo checkpoint is identified by a config.yaml/model.pt
        # pair inside the model directory; anything else falls back to a
        # HuggingFace-format load.
        config_path = os.path.join(self.model_path, "config.yaml")
        checkpoint_path = os.path.join(self.model_path, "model.pt")

        if os.path.exists(config_path) and os.path.exists(checkpoint_path):
            self._load_native_model()
            self.is_native = True
        else:
            self._load_hf_model()
            self.is_native = False

    def _load_native_model(self):
        """Load a native olmo-format Molmo checkpoint plus its preprocessing stack."""
        from olmo.config import ModelConfig
        from olmo.model import Molmo as NativeMolmoModel
        from olmo.data.model_preprocessor import MultiModalPreprocessor
        from olmo.data.data_formatter import DataFormatter

        # Force weights_only=False so fully pickled checkpoints still load.
        # NOTE(review): torch.load stays monkey-patched after this method
        # returns — restore _original_load if other code in the process
        # depends on the default torch.load behaviour.
        _original_load = torch.load
        def _unsafe_load_wrapper(*args, **kwargs):
            if 'weights_only' not in kwargs:
                kwargs['weights_only'] = False
            return _original_load(*args, **kwargs)
        torch.load = _unsafe_load_wrapper

        config_path = os.path.join(self.model_path, "config.yaml")
        checkpoint_path = os.path.join(self.model_path, "model.pt")

        cfg = ModelConfig.load(config_path, key="model", validate_paths=False)
        # Materialize weights on CPU first; moved to the target device below.
        cfg.init_device = "cpu"

        self.model = NativeMolmoModel(cfg)
        state_dict = torch.load(checkpoint_path, map_location="cpu")
        self.model.load_state_dict(state_dict)
        self.model = self.model.to(self.device, dtype=torch.bfloat16).eval()

        self.tokenizer = cfg.get_tokenizer()
        v_cfg = cfg.vision_backbone
        h, w = cfg.llm_patches_per_crop()
        image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None)

        class SafeDataFormatter(DataFormatter):
            # Some configs leave the prompt style unset; fall back to "User"
            # instead of letting the base formatter fail on None.
            def get_system_prompt(self, style, for_inference, messages, rng=None):
                if style is None:
                    style = "User"
                return super().get_system_prompt(style, for_inference, messages, rng)

        self.formatter = SafeDataFormatter(
            prompt_templates=cfg.prompt_type,
            message_format=cfg.message_formatting,
            system_prompt=cfg.system_prompt_kind,
            always_start_with_space=cfg.always_start_with_space,
            default_inference_len=cfg.default_inference_len
        )

        # Mirrors the model's own image pipeline so token/patch layout of the
        # extracted hidden states matches training-time preprocessing.
        self.preprocessor = MultiModalPreprocessor(
            tokenizer=self.tokenizer,
            normalize=str(v_cfg.image_model_type),
            crop_mode=cfg.crop_mode,
            max_crops=cfg.max_crops,
            overlap_margins=cfg.overlap_margins,
            resize=v_cfg.resize_mode,
            use_col_tokens=cfg.use_col_tokens,
            base_image_input_size=v_cfg.image_default_input_size,
            image_pooling_w=cfg.image_pooling_w,
            image_pooling_h=cfg.image_pooling_h,
            image_token_length_w=w,
            image_token_length_h=h,
            image_patch_size=v_cfg.image_patch_size,
            image_padding_mask=image_padding_mask,
            pad_value=cfg.pad_value,
            loss_token_weighting=cfg.multi_annotation_weighting,
        )

        logger.info(f"Loaded native Molmo model from {self.model_path}")

    def _load_hf_model(self):
        """Load a HuggingFace-format Molmo checkpoint and its processor."""
        from transformers import AutoModelForCausalLM, AutoProcessor

        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            device_map=self.device
        )
        self.model.eval()

        self.processor = AutoProcessor.from_pretrained(
            self.model_path,
            trust_remote_code=True
        )
        logger.info(f"Loaded HuggingFace Molmo model from {self.model_path}")

    def _get_num_layers(self) -> int:
        if self.is_native:
            return len(self.model.transformer.blocks)
        else:
            if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
                return len(self.model.model.transformer.blocks)
            # NOTE(review): hard-coded fallback; assumes a 32-block LLM when
            # the HF wrapper layout is not recognized — confirm per checkpoint.
            return 32

    def _get_layer_module(self, layer_idx: int):
        # Native and HF checkpoints nest the transformer blocks differently.
        if self.is_native:
            return self.model.transformer.blocks[layer_idx]
        else:
            return self.model.model.transformer.blocks[layer_idx]

    def extract(self, image: Image.Image, question: str) -> Dict[int,
torch.Tensor]:
        """Run one forward pass and return {layer_idx: hidden state} captured by hooks."""
        self.hidden_states = {}

        if self.is_native:
            # Native path: format + preprocess exactly as olmo training code does.
            example = {"messages": [question], "image": image}
            messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
            image_np = np.array(image)
            batch = self.preprocessor(image_np, messages, is_training=False, require_image_features=True)

            # Older preprocessor versions emit 'input_tokens' instead of 'input_ids'.
            if 'input_ids' not in batch and 'input_tokens' in batch:
                batch['input_ids'] = batch['input_tokens']

            def to_tensor(x):
                if isinstance(x, np.ndarray):
                    return torch.from_numpy(x)
                return x

            input_ids = to_tensor(batch['input_ids']).unsqueeze(0).to(self.device)
            if input_ids.dtype not in [torch.long, torch.int64]:
                input_ids = input_ids.long()

            images_tensor = to_tensor(batch['images']).unsqueeze(0).to(self.device).to(dtype=torch.bfloat16)
            image_masks = to_tensor(batch['image_masks']).unsqueeze(0).to(self.device).to(dtype=torch.bfloat16)
            image_input_idx = to_tensor(batch['image_input_idx']).unsqueeze(0).to(self.device)

            # Forward only — hooks registered on the blocks record activations.
            with torch.inference_mode():
                with torch.autocast(device_type="cuda", enabled=True, dtype=torch.bfloat16):
                    _ = self.model(
                        input_ids=input_ids,
                        images=images_tensor,
                        image_masks=image_masks,
                        image_input_idx=image_input_idx,
                    )
        else:
            # HF path: processor builds the batch; add the batch dim ourselves.
            inputs = self.processor.process(images=[image], text=question)
            processed_inputs = {}
            for k, v in inputs.items():
                v = v.to(self.device).unsqueeze(0)
                if v.dtype == torch.float32:
                    v = v.to(dtype=torch.bfloat16)
                processed_inputs[k] = v

            with torch.no_grad():
                _ = self.model(**processed_inputs)

        return self.hidden_states.copy()


# ============================================================================
# NVILA Extractor
# ============================================================================

class NVILAExtractor(BaseHiddenStateExtractor):
    """Hidden state extractor for NVILA models."""

    def _load_model(self):
        # Keep the RoboRefer copy of llava off sys.path so the stock NVILA
        # llava package is the one that gets imported.
        original_sys_path = sys.path.copy()
        sys.path = [p for p in sys.path if 'RoboRefer' not in p]

        # Evict any previously imported llava modules so the import below
        # resolves against the path set up above, not a stale package.
        modules_to_remove = [key for key in list(sys.modules.keys()) if 'llava' in key.lower()]
        removed_modules = {}
        for mod in modules_to_remove:
            removed_modules[mod] = sys.modules.pop(mod)

        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            # Restore the interpreter state before surfacing the failure.
            sys.path = original_sys_path
            for mod, module in removed_modules.items():
                sys.modules[mod] = module
            raise RuntimeError(f"Failed to import llava: {err}")

        sys.path = original_sys_path

        self.LLaVAImage = LLaVAImage
        self.clib = clib

        self.model = llava.load(self.model_path, model_base=None)

        self._find_llm_backbone()

        logger.info(f"Loaded NVILA model from {self.model_path}")

    def _find_llm_backbone(self):
        """Find the LLM backbone module (the stack of transformer layers) for hook registration.

        Known attribute layouts are tried first, then a generic scan of
        named_modules; the first candidate found wins.
        """
        candidates = []

        if hasattr(self.model, 'llm'):
            if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'):
                candidates.append(('model.llm.model.layers', self.model.llm.model.layers))
            if hasattr(self.model.llm, 'layers'):
                candidates.append(('model.llm.layers', self.model.llm.layers))

        if hasattr(self.model, 'model'):
            if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'):
                candidates.append(('model.model.model.layers', self.model.model.model.layers))
            if hasattr(self.model.model, 'layers'):
                candidates.append(('model.model.layers', self.model.model.layers))

        # Generic fallback: any non-empty module container named '*.layers'.
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0:
                candidates.append((name, module))

        if candidates:
            path, layers = candidates[0]
            logger.info(f"Found LLM layers at: {path} (num_layers={len(layers)})")
            self.llm_backbone = layers
            self.layers_path = path
        else:
            # Dump the first module names to help diagnose the layout.
            logger.error("Could not find transformer layers in model!")
            for name, _ in list(self.model.named_modules())[:20]:
                logger.info(f"  {name}")
            raise ValueError("Could not locate transformer layers in NVILA model")

    def _get_num_layers(self) -> int:
        if hasattr(self, 'llm_backbone') and hasattr(self.llm_backbone, '__len__'):
            return len(self.llm_backbone)
        # NOTE(review): hard-coded fallback layer count — confirm per model.
        return 24

    def _get_layer_module(self, layer_idx: int):
        if hasattr(self, 'llm_backbone') and hasattr(self.llm_backbone, '__getitem__'):
            module = self.llm_backbone[layer_idx]
            logger.info(f"  Accessing layer {layer_idx}: {type(module).__name__}")
            return module
        logger.error(f"Cannot access layer {layer_idx} - llm_backbone not properly initialized")
        return None

    def extract(self, image: Image.Image, question: str) -> Dict[int, torch.Tensor]:
        """Run one 1-token generation and return hook-captured hidden states."""
        self.hidden_states = {}

        # llava's generate_content takes a file path, so round-trip the image
        # through a temp file (delete=False: removed explicitly in finally).
        import tempfile
        with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
            temp_path = f.name
            image.save(temp_path)

        try:
            prompt = [self.LLaVAImage(temp_path), question]

            # One greedy token is enough to trigger a full prefill forward pass.
            from transformers import GenerationConfig
            gen_config = GenerationConfig(max_new_tokens=1, do_sample=False)
            _ = self.model.generate_content(prompt, generation_config=gen_config)
        finally:
            os.unlink(temp_path)

        return self.hidden_states.copy()


# ============================================================================
# RoboRefer Extractor (NVILA-based)
# ============================================================================

class RoboReferExtractor(NVILAExtractor):
    """Hidden state extractor for RoboRefer models (NVILA-based, different llava path)."""

    ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer'

    def _load_model(self):
        original_sys_path = sys.path.copy()

        # Add RoboRefer path (opposite of NVILA which removes it)
        if self.ROBOREFER_PATH not in sys.path:
            sys.path.insert(0, self.ROBOREFER_PATH)

        # Clear any existing llava modules to avoid conflicts
        modules_to_remove = [key for key in list(sys.modules.keys()) if 'llava' in key.lower()]
        removed_modules = {}
        for mod in modules_to_remove:
            removed_modules[mod] = sys.modules.pop(mod)

        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            # Roll back path/module changes before surfacing the failure.
            sys.path = original_sys_path
            for mod, module in removed_modules.items():
                sys.modules[mod] = module
            raise RuntimeError(f"Failed to import RoboRefer llava: {err}")

        sys.path = original_sys_path

        self.LLaVAImage = LLaVAImage
        self.clib = clib

        self.model = llava.load(self.model_path, model_base=None)

        self._find_llm_backbone()

        logger.info(f"Loaded RoboRefer model from {self.model_path}")


# ============================================================================
# Qwen2.5-VL Extractor
# ============================================================================

class Qwen25VLExtractor(BaseHiddenStateExtractor):
    """Hidden state extractor for Qwen2.5-VL models."""

    # Processor source for fine-tuned checkpoints that lack their own.
    BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct"

    def _load_model(self):
        from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

        try:
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
                device_map=self.device
            )
        except ImportError:
            # device_map requires accelerate; fall back to a manual .to(device).
            logger.info("accelerate not available, loading model without device_map...")
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path,
                torch_dtype=torch.bfloat16,
            )
            self.model = self.model.to(self.device)

        self.model.eval()

        # Local (absolute-path) checkpoints are assumed to be fine-tunes that
        # ship without a processor; hub IDs carry their own.
        if self.model_path.startswith('/'):
            logger.info(f"Fine-tuned model detected, loading processor from base model: {self.BASE_MODEL}")
            self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL)
        else:
            self.processor = AutoProcessor.from_pretrained(self.model_path)
        logger.info(f"Loaded Qwen2.5-VL model from {self.model_path}")

    def _get_num_layers(self) -> int:
        return len(self.model.model.layers)

    def _get_layer_module(self, layer_idx: int):
        return self.model.model.layers[layer_idx]

    def extract(self, image: Image.Image, question: str) -> Dict[int,
torch.Tensor]: + self.hidden_states = {} + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": question} + ] + } + ] + + text = self.processor.apply_chat_template( + messages, tokenize=False, add_generation_prompt=True + ) + + from qwen_vl_utils import process_vision_info + image_inputs, video_inputs = process_vision_info(messages) + + inputs = self.processor( + text=[text], + images=image_inputs, + videos=video_inputs, + padding=True, + return_tensors="pt" + ) + inputs = inputs.to(self.device) + + with torch.no_grad(): + _ = self.model(**inputs) + + return self.hidden_states.copy() + + +# ============================================================================ +# Factory Function +# ============================================================================ + +def get_extractor(model_type: str, model_path: str, scale: str = None, **kwargs) -> BaseHiddenStateExtractor: + # RoboRefer uses NVILA architecture but needs different llava import path + if model_type == 'nvila' and scale == 'roborefer': + return RoboReferExtractor(model_path, **kwargs) + + extractors = { + 'molmo': MolmoExtractor, + 'nvila': NVILAExtractor, + 'qwen': Qwen25VLExtractor, + } + if model_type not in extractors: + raise ValueError(f"Unknown model type: {model_type}. Available: {list(extractors.keys())}") + return extractors[model_type](model_path, **kwargs) + + +# ============================================================================ +# Analysis Functions +# ============================================================================ + +def extract_all_layer_representations( + extractor: BaseHiddenStateExtractor, + data: Dict[str, List[dict]], +) -> Dict[int, Dict[str, np.ndarray]]: + """Extract average hidden state representations for ALL target layers at once. 
+ + Returns: + Dict mapping layer_idx -> {category -> avg_vector} + """ + # category_states[layer_idx][category] = list of vectors + category_states = defaultdict(lambda: defaultdict(list)) + + for category in CATEGORY_ORDER: + if category not in data: + continue + samples = data[category] + logger.info(f"Processing category: {category}") + success_count = 0 + for sample in tqdm(samples, desc=f" {category}"): + try: + image = decode_base64_image(sample['image_base64']) + hidden_states = extractor.extract(image, sample['question']) + + for layer_idx in extractor.target_layers: + if layer_idx in hidden_states: + state = hidden_states[layer_idx].numpy().flatten() + if state.size > 0: + category_states[layer_idx][category].append(state) + + if any(l in hidden_states for l in extractor.target_layers): + success_count += 1 + else: + logger.warning(f" No target layers found. Available: {list(hidden_states.keys())}") + except Exception as e: + logger.warning(f" Error processing sample {sample['index']}: {e}") + continue + + logger.info(f" {category}: Successfully extracted {success_count}/{len(samples)} samples") + + # Average per category per layer + result = {} + for layer_idx in extractor.target_layers: + category_avg = {} + for category, states in category_states[layer_idx].items(): + if states: + category_avg[category] = np.mean(states, axis=0) + if category_avg: + result[layer_idx] = category_avg + logger.info(f" Layer {layer_idx}: {len(category_avg)} categories collected") + else: + logger.error(f" Layer {layer_idx}: No states collected!") + + if not result: + raise ValueError("No representations were extracted!") + + return result + + +def compute_similarity_matrix( + representations: Dict[str, np.ndarray] +) -> pd.DataFrame: + """Compute pairwise cosine similarity with fixed category order.""" + available = [c for c in CATEGORY_ORDER if c in representations] + vectors = np.array([representations[cat] for cat in available]) + sim_matrix = cosine_similarity(vectors) 
+ return pd.DataFrame(sim_matrix, index=available, columns=available) + + +def analyze_hypothesis(sim_df: pd.DataFrame, model_name: str) -> dict: + """Analyze the similarity matrix to test Hypothesis 4.""" + results = {'model': model_name} + + pairs_to_check = { + 'above_far': ('above', 'far'), + 'under_close': ('under', 'close'), + 'left_right': ('left', 'right'), + } + + for pair_name, (cat1, cat2) in pairs_to_check.items(): + if cat1 in sim_df.index and cat2 in sim_df.columns: + sim = sim_df.loc[cat1, cat2] + results[f'sim_{pair_name}'] = sim + logger.info(f" {pair_name}: sim({cat1}, {cat2}) = {sim:.4f}") + else: + results[f'sim_{pair_name}'] = None + + if results.get('sim_above_far') and results.get('sim_left_right'): + results['diff_above_far_vs_left_right'] = results['sim_above_far'] - results['sim_left_right'] + if results.get('sim_under_close') and results.get('sim_left_right'): + results['diff_under_close_vs_left_right'] = results['sim_under_close'] - results['sim_left_right'] + + return results + + +# ============================================================================ +# Visualization +# ============================================================================ + +def plot_similarity_heatmap(sim_df: pd.DataFrame, title: str, save_path: str): + """Plot and save similarity heatmap with fixed category order.""" + plt.figure(figsize=(10, 8)) + + available_order = [c for c in CATEGORY_ORDER if c in sim_df.index] + sim_df_ordered = sim_df.loc[available_order, available_order] + + sns.heatmap( + sim_df_ordered, + annot=True, + fmt='.4f', + cmap='RdYlBu_r', + center=0.5, + vmin=0, + vmax=1, + square=True, + linewidths=0.5, + cbar_kws={'label': 'Cosine Similarity'} + ) + + plt.title(title, fontsize=14, fontweight='bold') + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved heatmap: {save_path}") + + +def plot_comparison(results_list: List[dict], save_path: str): + """Plot comparison of 
similarity pairs across models.""" + pairs = ['sim_above_far', 'sim_under_close', 'sim_left_right'] + pair_labels = ['above-far', 'under-close', 'left-right'] + + fig, ax = plt.subplots(figsize=(12, 6)) + + x = np.arange(len(pairs)) + width = 0.8 / len(results_list) + + for i, result in enumerate(results_list): + model = result['model'] + values = [result.get(p, 0) or 0 for p in pairs] + offset = (i - len(results_list) / 2 + 0.5) * width + bars = ax.bar(x + offset, values, width, label=model) + + for bar, val in zip(bars, values): + if val: + ax.annotate( + f'{val:.3f}', + xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()), + xytext=(0, 3), + textcoords='offset points', + ha='center', + va='bottom', + fontsize=8 + ) + + ax.set_ylabel('Cosine Similarity') + ax.set_title('Spatial Concept Similarity Comparison (Modified Format)\n(Hypothesis 4: above-far & under-close should be > left-right for vanilla)') + ax.set_xticks(x) + ax.set_xticklabels(pair_labels) + ax.legend(loc='upper right', fontsize=8) + ax.set_ylim(0, 1) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) + + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved comparison plot: {save_path}") + + +def _extract_pair_trajectory( + all_layer_sims: Dict[int, pd.DataFrame], + cat1: str, cat2: str, +) -> Tuple[List[int], List[float]]: + """Extract similarity values for a pair across all layers.""" + layers = sorted(all_layer_sims.keys()) + valid_layers = [] + values = [] + for l in layers: + df = all_layer_sims[l] + if cat1 in df.index and cat2 in df.columns: + valid_layers.append(l) + values.append(df.loc[cat1, cat2]) + return valid_layers, values + + +def get_representative_layers(all_layers: List[int], n: int = 5) -> List[int]: + """Pick n representative layers (evenly spaced) for heatmap output.""" + if len(all_layers) <= n: + return list(all_layers) + indices = np.linspace(0, len(all_layers) - 1, n, dtype=int) + return [all_layers[i] 
for i in indices] + + +def plot_similarity_trajectories( + all_layer_sims: Dict[int, pd.DataFrame], + title: str, + save_path: str, +): + """Plot similarity of key category pairs across all layers. + + Left panel: absolute cosine similarity per pair across layers. + Right panel: difference from left-right baseline (positive = more similar than L-R). + """ + fig, axes = plt.subplots(1, 2, figsize=(20, 7)) + + # --- Left panel: absolute similarity --- + ax = axes[0] + for cat1, cat2, label, color in TRAJECTORY_PAIRS['hypothesis']: + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + ax.plot(layers, vals, '-', color=color, label=label, linewidth=2.5, markersize=0) + for cat1, cat2, label, color in TRAJECTORY_PAIRS['within_axis']: + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + ax.plot(layers, vals, '--', color=color, label=label, linewidth=1.8, markersize=0) + for cat1, cat2, label, color in TRAJECTORY_PAIRS['counter_hypothesis']: + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + ax.plot(layers, vals, ':', color=color, label=label, linewidth=1.5, alpha=0.8) + + ax.set_xlabel('Layer Index', fontsize=12) + ax.set_ylabel('Cosine Similarity', fontsize=12) + ax.set_title(f'{title}\nPairwise Similarity Across Layers', fontsize=13) + ax.legend(fontsize=9, loc='best') + ax.grid(True, alpha=0.3) + + # --- Right panel: difference from left-right --- + ax = axes[1] + lr_layers, lr_vals = _extract_pair_trajectory(all_layer_sims, 'left', 'right') + lr_dict = dict(zip(lr_layers, lr_vals)) + + for cat1, cat2, label, color in TRAJECTORY_PAIRS['hypothesis']: + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)] + ax.plot(layers, diffs, '-', color=color, label=f'{label} - left-right', + linewidth=2.5, markersize=0) + + for cat1, cat2, label, color in TRAJECTORY_PAIRS['counter_hypothesis']: + layers, vals = 
_extract_pair_trajectory(all_layer_sims, cat1, cat2) + diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)] + ax.plot(layers, diffs, ':', color=color, label=f'{label} - left-right', + linewidth=1.5, alpha=0.8) + + # Also show above-under and far-close as references + for cat1, cat2, label, color in TRAJECTORY_PAIRS['within_axis']: + if label == 'left-right': + continue + layers, vals = _extract_pair_trajectory(all_layer_sims, cat1, cat2) + diffs = [v - lr_dict.get(l, 0) for l, v in zip(layers, vals)] + ax.plot(layers, diffs, '--', color=color, label=f'{label} - left-right', + linewidth=1.5, alpha=0.7) + + ax.axhline(y=0, color='gray', linestyle='-', linewidth=1, alpha=0.5) + ax.set_xlabel('Layer Index', fontsize=12) + ax.set_ylabel('Similarity Difference (pair - left-right)', fontsize=12) + ax.set_title(f'{title}\nRelative to Left-Right Baseline', fontsize=13) + ax.legend(fontsize=8, loc='best') + ax.grid(True, alpha=0.3) + + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved trajectory plot: {save_path}") + + +def plot_cross_scale_trajectories( + cross_scale_data: Dict[str, Dict[int, pd.DataFrame]], + model_type: str, + save_path: str, +): + """Compare layer-wise trajectories across training scales. + + 3 columns: above-far, under-close, left-right (control). + Each subplot shows one line per scale. 
+ """ + pairs = [ + ('above', 'far', 'above-far (hypothesis)'), + ('under', 'close', 'under-close (hypothesis)'), + ('left', 'right', 'left-right (control)'), + ] + + fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6)) + if len(pairs) == 1: + axes = [axes] + + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + for scale in ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']: + if scale not in cross_scale_data: + continue + layer_sims = cross_scale_data[scale] + layers, vals = _extract_pair_trajectory(layer_sims, cat1, cat2) + color = SCALE_COLORS.get(scale, 'gray') + ax.plot(layers, vals, '-', color=color, label=scale, linewidth=2, markersize=0) + + ax.set_xlabel('Layer Index', fontsize=12) + ax.set_ylabel('Cosine Similarity', fontsize=12) + ax.set_title(label, fontsize=13, fontweight='bold') + ax.legend(fontsize=10) + ax.grid(True, alpha=0.3) + + fig.suptitle( + f'{model_type.upper()} - Similarity Trajectory Across Scales', + fontsize=15, fontweight='bold', y=1.02 + ) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved cross-scale trajectory: {save_path}") + + +def plot_similarity_evolution_heatmap( + cross_scale_data: Dict[str, Dict[int, pd.DataFrame]], + model_type: str, + save_path: str, +): + """2D heatmap: x=layer, y=scale, color=similarity for each hypothesis pair. + + Gives a bird's-eye view of how both network depth and training data scale + affect the similarity between hypothesis-relevant category pairs. 
+ """ + pairs = [ + ('above', 'far', 'above-far'), + ('under', 'close', 'under-close'), + ('left', 'right', 'left-right'), + ('above', 'under', 'above-under'), + ('far', 'close', 'far-close'), + ] + scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] + available_scales = [s for s in scale_order if s in cross_scale_data] + + # Determine layer range from first available scale + first_scale = available_scales[0] + all_layers = sorted(cross_scale_data[first_scale].keys()) + + fig, axes = plt.subplots(len(pairs), 1, figsize=(max(14, len(all_layers) * 0.5), 3 * len(pairs))) + if len(pairs) == 1: + axes = [axes] + + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + # Build matrix: rows=scales, cols=layers + matrix = np.full((len(available_scales), len(all_layers)), np.nan) + for si, scale in enumerate(available_scales): + layer_sims = cross_scale_data[scale] + for li, layer in enumerate(all_layers): + if layer in layer_sims: + df = layer_sims[layer] + if cat1 in df.index and cat2 in df.columns: + matrix[si, li] = df.loc[cat1, cat2] + + im = ax.imshow(matrix, aspect='auto', cmap='RdYlBu_r', vmin=0.5, vmax=1.0) + ax.set_yticks(range(len(available_scales))) + ax.set_yticklabels(available_scales, fontsize=10) + + # X-axis: show every Nth layer label to avoid crowding + step = max(1, len(all_layers) // 15) + ax.set_xticks(range(0, len(all_layers), step)) + ax.set_xticklabels([str(all_layers[i]) for i in range(0, len(all_layers), step)], fontsize=8) + + ax.set_title(label, fontsize=12, fontweight='bold') + ax.set_xlabel('Layer Index', fontsize=10) + fig.colorbar(im, ax=ax, label='Cosine Similarity', shrink=0.8) + + fig.suptitle( + f'{model_type.upper()} - Similarity Evolution (Layer x Scale)', + fontsize=15, fontweight='bold', y=1.01 + ) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved evolution heatmap: {save_path}") + + +# 
============================================================================
# Model Configurations
# ============================================================================

# Per-family mapping: training-data scale tag -> checkpoint path or hub ID.
# 'vanilla' is the untuned base model; the rest are fine-tuned at growing
# data scales ('roborefer' is NVILA-only).
MODEL_CONFIGS = {
    'molmo': {
        'vanilla': 'allenai/Molmo-7B-O-0924',
        '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared',
        '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared',
        '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared',
        '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared',
    },
    'nvila': {
        'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B',
        '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221',
        '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221',
        '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221',
        '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632',
        'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model',
    },
    'qwen': {
        'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct',
        '80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221',
        '400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221',
        '800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221',
        '2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517',
    },
}


# ============================================================================
# Main
# ============================================================================

def main():
    """CLI entry point: extract representations per scale, compute per-layer
    similarity matrices, and emit CSVs plus heatmap/trajectory plots."""
    parser = argparse.ArgumentParser(description='Experiment 2-A (Modified): Embedding Space Analysis')
    parser.add_argument('--data_path', type=str,
                        default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv')
    parser.add_argument('--model_type', type=str, required=True,
                        choices=['molmo', 'nvila', 'qwen'])
    parser.add_argument('--scales', type=str, nargs='+',
                        default=['vanilla', '80k', '400k', '800k', '2m'])
    parser.add_argument('--output_dir', type=str,
                        default='/data/shared/Qwen/experiments/exp2a_modified/results_all_layers')
    parser.add_argument('--samples_per_category', type=int, default=200)
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=42)

    args = parser.parse_args()

    # Auto-include roborefer for nvila if not already specified
    if args.model_type == 'nvila' and 'roborefer' not in args.scales:
        args.scales.append('roborefer')

    # Set random seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    # Create output directory
    output_dir = os.path.join(args.output_dir, args.model_type)
    os.makedirs(output_dir, exist_ok=True)

    # Load and modify data
    logger.info("\n=== Loading & Modifying EmbSpatialBench Data ===")
    data = load_and_modify_data(args.data_path, args.samples_per_category, args.seed)

    results_list = []
    cross_scale_data = {}  # scale -> {layer_idx -> sim_df}
    model_configs = MODEL_CONFIGS[args.model_type]

    for scale in args.scales:
        if scale not in model_configs:
            logger.warning(f"Scale {scale} not available for {args.model_type}, skipping...")
            continue

        model_path = model_configs[scale]

        # Hub IDs ('Qwen/...', 'allenai/...') are not local paths; only
        # check existence for filesystem checkpoints.
        if not os.path.exists(model_path) and not model_path.startswith('Qwen/') and not model_path.startswith('allenai/'):
            logger.warning(f"Model path not found: {model_path}, skipping...")
            continue

        logger.info(f"\n=== Processing {args.model_type} - {scale} ===")
        logger.info(f"Model path: {model_path}")

        try:
            extractor = get_extractor(
                args.model_type,
                model_path,
                scale=scale,
                device=args.device,
            )

            # NOTE(review): used as "Layer {idx}/{num_layers-1}" in heatmap
            # titles — assumes target_layers spans every layer; confirm.
            num_layers = len(extractor.target_layers)

            # Extract representations for ALL layers in one pass
            all_layer_reps = extract_all_layer_representations(extractor, data)

            # Compute similarity matrices for all layers
            scale_sims = {}
            model_name = f"{args.model_type}_{scale}"
            for layer_idx in sorted(all_layer_reps.keys()):
                sim_df = compute_similarity_matrix(all_layer_reps[layer_idx])
                scale_sims[layer_idx] = sim_df

                results = analyze_hypothesis(sim_df, model_name)
                results['layer_idx'] = layer_idx
                results_list.append(results)

                # Save CSV for every layer
                sim_df.to_csv(os.path.join(output_dir, f'similarity_{scale}_L{layer_idx}.csv'))

            cross_scale_data[scale] = scale_sims
            logger.info(f"  Computed similarity matrices for {len(scale_sims)} layers")

            # Save heatmaps for representative layers only (to avoid hundreds of files)
            rep_layers = get_representative_layers(sorted(scale_sims.keys()))
            logger.info(f"  Saving heatmaps for representative layers: {rep_layers}")
            for layer_idx in rep_layers:
                sim_df = scale_sims[layer_idx]
                plot_similarity_heatmap(
                    sim_df,
                    f'{args.model_type.upper()} ({scale}) - Layer {layer_idx}/{num_layers-1}',
                    os.path.join(output_dir, f'heatmap_{scale}_L{layer_idx}.png')
                )

            # Per-scale trajectory plot
            plot_similarity_trajectories(
                scale_sims,
                f'{args.model_type.upper()} ({scale})',
                os.path.join(output_dir, f'trajectory_{scale}.png')
            )

            # Free the model before loading the next scale.
            extractor.cleanup()

        except Exception as e:
            # Best-effort: a failed scale is logged and skipped so the
            # remaining scales still run.
            logger.error(f"Failed to process {args.model_type} - {scale}: {e}")
            import traceback
            traceback.print_exc()
            continue

    # Cross-scale comparison plots
    if len(cross_scale_data) > 1:
        plot_cross_scale_trajectories(
            cross_scale_data,
            args.model_type,
            os.path.join(output_dir, 'trajectory_cross_scale.png')
        )
        plot_similarity_evolution_heatmap(
            cross_scale_data,
            args.model_type,
            os.path.join(output_dir, 'evolution_heatmap.png')
        )

    # Save results summary
    if results_list:
        results_df = pd.DataFrame(results_list)
        results_df.to_csv(os.path.join(output_dir, 'results_summary.csv'), index=False)

logger.info("\n=== Analysis Complete ===") + logger.info(f"Results saved to: {output_dir}") + logger.info(f"Total: {len(results_list)} (layer, scale) combinations across {len(cross_scale_data)} scales") + + +if __name__ == '__main__': + main() diff --git a/exp2a_modified/results/molmo/results_summary.csv b/exp2a_modified/results/molmo/results_summary.csv new file mode 100644 index 0000000000000000000000000000000000000000..41d3515ba8cc576e6fcf91e40f91828ddfd04103 --- /dev/null +++ b/exp2a_modified/results/molmo/results_summary.csv @@ -0,0 +1,26 @@ +model,sim_above_far,sim_under_close,sim_left_right,diff_above_far_vs_left_right,diff_under_close_vs_left_right,layer_idx,layer_label +molmo_vanilla,0.93186307,0.9325508,0.9999072,-0.068044126,-0.06735641,6,early +molmo_vanilla,0.9252183,0.925783,0.9996471,-0.0744288,-0.0738641,13,early_mid +molmo_vanilla,0.8514263,0.85130924,0.9945253,-0.14309901,-0.14321607,19,middle +molmo_vanilla,0.7811126,0.7902819,0.9955554,-0.21444279,-0.20527351,26,late_mid +molmo_vanilla,0.82378054,0.8320327,0.9968723,-0.17309177,-0.16483963,31,late +molmo_80k,0.94482744,0.9447468,0.9999342,-0.05510676,-0.055187404,6,early +molmo_80k,0.9501332,0.9501227,0.99982655,-0.049693346,-0.049703836,13,early_mid +molmo_80k,0.8622559,0.86525977,0.9953824,-0.13312656,-0.13012266,19,middle +molmo_80k,0.7678993,0.780402,0.99710876,-0.22920948,-0.21670675,26,late_mid +molmo_80k,0.8963089,0.9020278,0.99889964,-0.10259074,-0.09687185,31,late +molmo_400k,0.94099295,0.9413343,0.9999467,-0.058953762,-0.058612406,6,early +molmo_400k,0.93268144,0.93169504,0.9983739,-0.065692484,-0.06667888,13,early_mid +molmo_400k,0.8004133,0.7915684,0.9835917,-0.18317837,-0.19202328,19,middle +molmo_400k,0.73278224,0.7314169,0.98859596,-0.25581372,-0.25717908,26,late_mid +molmo_400k,0.9089592,0.911077,0.99682474,-0.08786553,-0.08574772,31,late +molmo_800k,0.9501749,0.95063716,0.9999551,-0.04978019,-0.049317956,6,early 
+molmo_800k,0.92944044,0.92717594,0.9990981,-0.06965768,-0.07192218,13,early_mid +molmo_800k,0.7842552,0.7732489,0.9752356,-0.19098037,-0.20198667,19,middle +molmo_800k,0.7602978,0.7757774,0.9868044,-0.22650665,-0.21102703,26,late_mid +molmo_800k,0.9205744,0.9238774,0.99709034,-0.07651591,-0.07321292,31,late +molmo_2m,0.95355743,0.9536563,0.99995154,-0.04639411,-0.046295226,6,early +molmo_2m,0.9074487,0.9029928,0.999149,-0.091700315,-0.09615624,13,early_mid +molmo_2m,0.74899715,0.7498276,0.9528682,-0.20387107,-0.2030406,19,middle +molmo_2m,0.72931236,0.751271,0.9772682,-0.24795586,-0.22599721,26,late_mid +molmo_2m,0.9040614,0.9161786,0.99538875,-0.09132737,-0.07921016,31,late diff --git a/exp2a_modified/results/molmo/similarity_2m_L19_middle.csv b/exp2a_modified/results/molmo/similarity_2m_L19_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..6236b7fe82a33da13d50a852188a773f3f98d206 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_2m_L19_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999994,0.9528682,0.8079404,0.7898549,0.75441873,0.74726176 +right,0.9528682,1.0,0.79594153,0.79201853,0.74864805,0.74139905 +above,0.8079404,0.79594153,1.0,0.86362475,0.74899715,0.72680587 +under,0.7898549,0.79201853,0.86362475,0.9999998,0.73787785,0.7498276 +far,0.75441873,0.74864805,0.74899715,0.73787785,1.0000002,0.99016166 +close,0.74726176,0.74139905,0.72680587,0.7498276,0.99016166,0.99999976 diff --git a/exp2a_modified/results/molmo/similarity_2m_L26_late_mid.csv b/exp2a_modified/results/molmo/similarity_2m_L26_late_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..1ec3e6f9fcb7075682398355891dc2e91ad7fd9a --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_2m_L26_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.9772682,0.82484055,0.81500334,0.7462688,0.73856544 +right,0.9772682,1.0,0.81542575,0.81403667,0.73317695,0.7264032 
+above,0.82484055,0.81542575,1.0,0.915252,0.72931236,0.7135039 +under,0.81500334,0.81403667,0.915252,1.0000001,0.74381506,0.751271 +far,0.7462688,0.73317695,0.72931236,0.74381506,0.9999998,0.9895668 +close,0.73856544,0.7264032,0.7135039,0.751271,0.9895668,1.0000005 diff --git a/exp2a_modified/results/molmo/similarity_2m_L31_late.csv b/exp2a_modified/results/molmo/similarity_2m_L31_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..133281cf39f4c68218c320aad6c9359e4f7b6988 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_2m_L31_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.99538875,0.94726926,0.945658,0.9218228,0.9192073 +right,0.99538875,0.9999996,0.94363815,0.9437132,0.9156522,0.91351354 +above,0.94726926,0.94363815,1.0,0.9741205,0.9040614,0.8990477 +under,0.945658,0.9437132,0.9741205,0.9999998,0.91565245,0.9161786 +far,0.9218228,0.9156522,0.9040614,0.91565245,0.999999,0.9976242 +close,0.9192073,0.91351354,0.8990477,0.9161786,0.9976242,1.0000002 diff --git a/exp2a_modified/results/molmo/similarity_2m_L6_early.csv b/exp2a_modified/results/molmo/similarity_2m_L6_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..95724e83467331fd692d498d13af4fe124c8f8ea --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_2m_L6_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.99995154,0.9897138,0.9891182,0.95365894,0.9537702 +right,0.99995154,1.0000001,0.98972285,0.9891555,0.95393157,0.9540078 +above,0.9897138,0.98972285,0.9999999,0.99978626,0.95355743,0.9535313 +under,0.9891182,0.9891555,0.99978626,1.0000004,0.95373374,0.9536563 +far,0.95365894,0.95393157,0.95355743,0.95373374,0.9999998,0.9998942 +close,0.9537702,0.9540078,0.9535313,0.9536563,0.9998942,0.9999999 diff --git a/exp2a_modified/results/molmo/similarity_400k_L13_early_mid.csv b/exp2a_modified/results/molmo/similarity_400k_L13_early_mid.csv new file mode 100644 index 
0000000000000000000000000000000000000000..d84e01595203a2f85e8fd39f6b32e432c98e8a03 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_400k_L13_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9983739,0.9708368,0.97001463,0.92518365,0.92525065 +right,0.9983739,1.0000001,0.9714834,0.9709337,0.9258892,0.9259387 +above,0.9708368,0.9714834,1.0000001,0.9966369,0.93268144,0.93088496 +under,0.97001463,0.9709337,0.9966369,1.0000001,0.931912,0.93169504 +far,0.92518365,0.9258892,0.93268144,0.931912,1.0,0.9991321 +close,0.92525065,0.9259387,0.93088496,0.93169504,0.9991321,1.0000002 diff --git a/exp2a_modified/results/molmo/similarity_400k_L19_middle.csv b/exp2a_modified/results/molmo/similarity_400k_L19_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..897a42abe4698c87a844d6e06127ce53cd621a5c --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_400k_L19_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.9835917,0.89433604,0.8799748,0.8249269,0.82165074 +right,0.9835917,1.0,0.89446956,0.8852003,0.82732373,0.82341003 +above,0.89433604,0.89446956,1.0,0.9350607,0.8004133,0.78341514 +under,0.8799748,0.8852003,0.9350607,1.0000004,0.7830846,0.7915684 +far,0.8249269,0.82732373,0.8004133,0.7830846,1.0000001,0.9916222 +close,0.82165074,0.82341003,0.78341514,0.7915684,0.9916222,0.9999999 diff --git a/exp2a_modified/results/molmo/similarity_400k_L26_late_mid.csv b/exp2a_modified/results/molmo/similarity_400k_L26_late_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..7ff1ea86dbd671eacafb0730b027a460913fe2bf --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_400k_L26_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.98859596,0.87158585,0.858461,0.78897005,0.7835145 +right,0.98859596,0.99999994,0.8694842,0.8613639,0.78442615,0.779482 +above,0.87158585,0.8694842,1.0000007,0.9409423,0.73278224,0.7150828 
+under,0.858461,0.8613639,0.9409423,0.9999998,0.7253824,0.7314169 +far,0.78897005,0.78442615,0.73278224,0.7253824,0.9999997,0.9895003 +close,0.7835145,0.779482,0.7150828,0.7314169,0.9895003,0.9999997 diff --git a/exp2a_modified/results/molmo/similarity_400k_L31_late.csv b/exp2a_modified/results/molmo/similarity_400k_L31_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..9743251160ed42e91fdc6ba0ad340b4b9695560b --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_400k_L31_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.99682474,0.95443934,0.95085055,0.92124707,0.9183261 +right,0.99682474,1.0000005,0.9529462,0.95104045,0.91831386,0.91579354 +above,0.95443934,0.9529462,0.99999976,0.9797501,0.9089592,0.90330064 +under,0.95085055,0.95104045,0.9797501,1.0000005,0.910488,0.911077 +far,0.92124707,0.91831386,0.9089592,0.910488,1.0000002,0.99741966 +close,0.9183261,0.91579354,0.90330064,0.911077,0.99741966,1.0000001 diff --git a/exp2a_modified/results/molmo/similarity_400k_L6_early.csv b/exp2a_modified/results/molmo/similarity_400k_L6_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..18522eb3246d7ee729c9165fe123ea2224744a8d --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_400k_L6_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.9999467,0.98677695,0.98590326,0.9344799,0.93451536 +right,0.9999467,0.9999999,0.9866656,0.98579997,0.9344424,0.9344614 +above,0.98677695,0.9866656,1.0000002,0.9997301,0.94099295,0.94090044 +under,0.98590326,0.98579997,0.9997301,1.0,0.94144833,0.9413343 +far,0.9344799,0.9344424,0.94099295,0.94144833,0.9999999,0.9999009 +close,0.93451536,0.9344614,0.94090044,0.9413343,0.9999009,1.0000001 diff --git a/exp2a_modified/results/molmo/similarity_800k_L13_early_mid.csv b/exp2a_modified/results/molmo/similarity_800k_L13_early_mid.csv new file mode 100644 index 
0000000000000000000000000000000000000000..3ea451c5e2e7735c744f22bccc0f3e3a6a98be23 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_800k_L13_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9990981,0.970269,0.96832395,0.9112702,0.91077095 +right,0.9990981,1.0000005,0.97084796,0.96887577,0.9112278,0.9107026 +above,0.970269,0.97084796,1.0,0.9983258,0.92944044,0.9281176 +under,0.96832395,0.96887577,0.9983258,1.0000006,0.9282915,0.92717594 +far,0.9112702,0.9112278,0.92944044,0.9282915,0.9999999,0.9996043 +close,0.91077095,0.9107026,0.9281176,0.92717594,0.9996043,0.99999946 diff --git a/exp2a_modified/results/molmo/similarity_800k_L26_late_mid.csv b/exp2a_modified/results/molmo/similarity_800k_L26_late_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..7b243c328c118f8a27ec3ce734132a57c3287294 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_800k_L26_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999996,0.9868044,0.8516903,0.8428743,0.7809909,0.77917296 +right,0.9868044,0.9999997,0.84324884,0.84005576,0.7758011,0.7743711 +above,0.8516903,0.84324884,1.0000004,0.94099367,0.7602978,0.74670935 +under,0.8428743,0.84005576,0.94099367,0.9999995,0.76920235,0.7757774 +far,0.7809909,0.7758011,0.7602978,0.76920235,0.9999997,0.9897728 +close,0.77917296,0.7743711,0.74670935,0.7757774,0.9897728,0.9999995 diff --git a/exp2a_modified/results/molmo/similarity_800k_L31_late.csv b/exp2a_modified/results/molmo/similarity_800k_L31_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..7f952e14467be90ba9b657c9016f5d011c160160 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_800k_L31_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999946,0.99709034,0.95449007,0.95249826,0.9339559,0.9329122 +right,0.99709034,0.9999998,0.9512191,0.95042264,0.9298728,0.9291853 +above,0.95449007,0.9512191,1.0000004,0.9820097,0.9205744,0.9159472 
+under,0.95249826,0.95042264,0.9820097,1.0,0.9233836,0.9238774 +far,0.9339559,0.9298728,0.9205744,0.9233836,1.0,0.9976558 +close,0.9329122,0.9291853,0.9159472,0.9238774,0.9976558,0.99999976 diff --git a/exp2a_modified/results/molmo/similarity_800k_L6_early.csv b/exp2a_modified/results/molmo/similarity_800k_L6_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..d9baf1a48f9246279f44ad6a8157af9eaece1d76 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_800k_L6_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999995,0.9999551,0.98933417,0.9887577,0.9477357,0.9479904 +right,0.9999551,0.9999999,0.9892925,0.98874366,0.9477358,0.94796 +above,0.98933417,0.9892925,0.9999998,0.999767,0.9501749,0.9503241 +under,0.9887577,0.98874366,0.999767,0.99999964,0.95052344,0.95063716 +far,0.9477357,0.9477358,0.9501749,0.95052344,0.9999996,0.9999156 +close,0.9479904,0.94796,0.9503241,0.95063716,0.9999156,1.0000001 diff --git a/exp2a_modified/results/molmo/similarity_80k_L13_early_mid.csv b/exp2a_modified/results/molmo/similarity_80k_L13_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..b0cd852c51dcf7e1319d7235687027c659c2ae4a --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_80k_L13_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.99982655,0.98557615,0.98462695,0.94595236,0.9460132 +right,0.99982655,0.99999964,0.9854151,0.984502,0.94524086,0.9452381 +above,0.98557615,0.9854151,0.99999964,0.9995325,0.9501332,0.9496666 +under,0.98462695,0.984502,0.9995325,1.0,0.950608,0.9501227 +far,0.94595236,0.94524086,0.9501332,0.950608,1.0000001,0.99974734 +close,0.9460132,0.9452381,0.9496666,0.9501227,0.99974734,1.0000001 diff --git a/exp2a_modified/results/molmo/similarity_80k_L19_middle.csv b/exp2a_modified/results/molmo/similarity_80k_L19_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..75144971e3886b69255805df487fbbb6baeeb541 --- 
/dev/null +++ b/exp2a_modified/results/molmo/similarity_80k_L19_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.9953824,0.951213,0.9497772,0.87746465,0.8757486 +right,0.9953824,1.0000002,0.9533331,0.95202667,0.8783575,0.87553334 +above,0.951213,0.9533331,0.9999997,0.9892175,0.8622559,0.8543509 +under,0.9497772,0.95202667,0.9892175,1.0000002,0.86614037,0.86525977 +far,0.87746465,0.8783575,0.8622559,0.86614037,1.0000001,0.9966103 +close,0.8757486,0.87553334,0.8543509,0.86525977,0.9966103,1.0000001 diff --git a/exp2a_modified/results/molmo/similarity_80k_L26_late_mid.csv b/exp2a_modified/results/molmo/similarity_80k_L26_late_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..116af8296b53b37d0c6aecc3c02e9fa770f3c12d --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_80k_L26_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99710876,0.94322985,0.94265085,0.8083289,0.80842566 +right,0.99710876,1.0,0.9450414,0.94532174,0.80541736,0.80532265 +above,0.94322985,0.9450414,1.0000002,0.98973715,0.7678993,0.7628029 +under,0.94265085,0.94532174,0.98973715,1.0000006,0.7791806,0.780402 +far,0.8083289,0.80541736,0.7678993,0.7791806,1.0000001,0.9953803 +close,0.80842566,0.80532265,0.7628029,0.780402,0.9953803,0.9999997 diff --git a/exp2a_modified/results/molmo/similarity_80k_L31_late.csv b/exp2a_modified/results/molmo/similarity_80k_L31_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..82be1451fd358d26849afbe2034221c85e17d090 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_80k_L31_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.99889964,0.9706672,0.97065854,0.91552895,0.9142971 +right,0.99889964,1.0000006,0.97100496,0.97146505,0.9128623,0.91171795 +above,0.9706672,0.97100496,1.0000001,0.99551195,0.8963089,0.89303714 +under,0.97065854,0.97146505,0.99551195,1.0,0.9027907,0.9020278 
+far,0.91552895,0.9128623,0.8963089,0.9027907,1.0,0.99814963 +close,0.9142971,0.91171795,0.89303714,0.9020278,0.99814963,1.0 diff --git a/exp2a_modified/results/molmo/similarity_80k_L6_early.csv b/exp2a_modified/results/molmo/similarity_80k_L6_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..e0c1cacd81b365dcb1a046229dd0db81fdc8553f --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_80k_L6_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9999342,0.9846145,0.9833463,0.9410094,0.9415399 +right,0.9999342,1.0000002,0.9844082,0.9831588,0.9409639,0.9414707 +above,0.9846145,0.9844082,0.9999996,0.99965036,0.94482744,0.9451473 +under,0.9833463,0.9831588,0.99965036,1.0000004,0.94445574,0.9447468 +far,0.9410094,0.9409639,0.94482744,0.94445574,1.0000002,0.9998886 +close,0.9415399,0.9414707,0.9451473,0.9447468,0.9998886,1.0000001 diff --git a/exp2a_modified/results/molmo/similarity_vanilla_L13_early_mid.csv b/exp2a_modified/results/molmo/similarity_vanilla_L13_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..e366605e3178e25d9a558c02563dc2a719bcd9b7 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_vanilla_L13_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.9996471,0.98403525,0.98292845,0.91606426,0.9160556 +right,0.9996471,0.99999994,0.98429143,0.983286,0.9153532,0.9152582 +above,0.98403525,0.98429143,1.0000002,0.9989633,0.9252183,0.9246333 +under,0.98292845,0.983286,0.9989633,1.0,0.9264116,0.925783 +far,0.91606426,0.9153532,0.9252183,0.9264116,0.9999999,0.99945354 +close,0.9160556,0.9152582,0.9246333,0.925783,0.99945354,0.99999976 diff --git a/exp2a_modified/results/molmo/similarity_vanilla_L19_middle.csv b/exp2a_modified/results/molmo/similarity_vanilla_L19_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..8da547883acb15330db756c85773cc1065cdfd78 --- /dev/null +++ 
b/exp2a_modified/results/molmo/similarity_vanilla_L19_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.9945253,0.96206295,0.9596552,0.8591312,0.85741884 +right,0.9945253,0.9999999,0.9645537,0.96271646,0.85745674,0.8543235 +above,0.96206295,0.9645537,1.0,0.9921303,0.8514263,0.8453121 +under,0.9596552,0.96271646,0.9921303,1.0000005,0.8540211,0.85130924 +far,0.8591312,0.85745674,0.8514263,0.8540211,0.99999976,0.9961321 +close,0.85741884,0.8543235,0.8453121,0.85130924,0.9961321,0.99999976 diff --git a/exp2a_modified/results/molmo/similarity_vanilla_L31_late.csv b/exp2a_modified/results/molmo/similarity_vanilla_L31_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..af4228ef987a2804163df2e889db8079be248a93 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_vanilla_L31_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.9968723,0.96832633,0.96827185,0.8455303,0.84230846 +right,0.9968723,0.99999964,0.97106063,0.9713094,0.8435764,0.83977795 +above,0.96832633,0.97106063,0.9999999,0.9944878,0.82378054,0.8183431 +under,0.96827185,0.9713094,0.9944878,1.0000004,0.8355485,0.8320327 +far,0.8455303,0.8435764,0.82378054,0.8355485,1.0000001,0.9970446 +close,0.84230846,0.83977795,0.8183431,0.8320327,0.9970446,0.99999976 diff --git a/exp2a_modified/results/molmo/similarity_vanilla_L6_early.csv b/exp2a_modified/results/molmo/similarity_vanilla_L6_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..899dc723b795b0cd56a078a47a0c7f4598b345f1 --- /dev/null +++ b/exp2a_modified/results/molmo/similarity_vanilla_L6_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9999072,0.9843363,0.98275346,0.9225271,0.92304546 +right,0.9999072,0.99999976,0.98413754,0.9825534,0.9222437,0.9227377 +above,0.9843363,0.98413754,0.99999976,0.99941427,0.93186307,0.9320967 +under,0.98275346,0.9825534,0.99941427,1.0000001,0.932338,0.9325508 
+far,0.9225271,0.9222437,0.93186307,0.932338,1.0000005,0.9998285 +close,0.92304546,0.9227377,0.9320967,0.9325508,0.9998285,1.0000001 diff --git a/exp2a_modified/results/nvila/similarity_2m_L11_early_mid.csv b/exp2a_modified/results/nvila/similarity_2m_L11_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..f7421538f603b3cc0ed62ba08534e0dcbaaadd2d --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_2m_L11_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.99997437,0.99205077,0.9921235,0.9665318,0.96630245 +right,0.99997437,1.0000001,0.9920338,0.99214566,0.966442,0.9661902 +above,0.99205077,0.9920338,1.0000002,0.99985087,0.97557366,0.97540605 +under,0.9921235,0.99214566,0.99985087,0.99999964,0.97505516,0.9748245 +far,0.9665318,0.966442,0.97557366,0.97505516,0.9999999,0.99989897 +close,0.96630245,0.9661902,0.97540605,0.9748245,0.99989897,1.0000004 diff --git a/exp2a_modified/results/nvila/similarity_2m_L6_early.csv b/exp2a_modified/results/nvila/similarity_2m_L6_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..77a0164933d137f02be43017f30c764f10e92041 --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_2m_L6_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9999543,0.99784696,0.9978435,0.98729,0.98708475 +right,0.9999543,0.99999934,0.9977713,0.9978466,0.9870302,0.98678756 +above,0.99784696,0.9977713,1.0000005,0.99984324,0.9882744,0.9881075 +under,0.9978435,0.9978466,0.99984324,0.9999999,0.987956,0.9877119 +far,0.98729,0.9870302,0.9882744,0.987956,0.9999998,0.99992377 +close,0.98708475,0.98678756,0.9881075,0.9877119,0.99992377,0.99999994 diff --git a/exp2a_modified/results/nvila/similarity_400k_L22_late_mid.csv b/exp2a_modified/results/nvila/similarity_400k_L22_late_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..2f588aac8739873ee77ccad448df7f461c2e8963 --- /dev/null +++ 
b/exp2a_modified/results/nvila/similarity_400k_L22_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.98938745,0.94574475,0.9383662,0.84551483,0.8435887 +right,0.98938745,1.0000005,0.944611,0.942079,0.8478445,0.84517944 +above,0.94574475,0.944611,0.9999996,0.98561645,0.8716479,0.86627376 +under,0.9383662,0.942079,0.98561645,0.99999964,0.8625552,0.8639274 +far,0.84551483,0.8478445,0.8716479,0.8625552,0.9999995,0.9961122 +close,0.8435887,0.84517944,0.86627376,0.8639274,0.9961122,1.0 diff --git a/exp2a_modified/results/nvila/similarity_800k_L27_late.csv b/exp2a_modified/results/nvila/similarity_800k_L27_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..8c55a737a3243f391cd5cf0207488aeb0a1664c2 --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_800k_L27_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999998,0.9998052,0.99807984,0.9981629,0.9949574,0.9949126 +right,0.9998052,0.99999976,0.99777746,0.9980407,0.9950078,0.9949836 +above,0.99807984,0.99777746,0.9999996,0.9994222,0.9953831,0.995184 +under,0.9981629,0.9980407,0.9994222,1.0000001,0.99535114,0.9954171 +far,0.9949574,0.9950078,0.9953831,0.99535114,1.0000008,0.9998271 +close,0.9949126,0.9949836,0.995184,0.9954171,0.9998271,0.99999976 diff --git a/exp2a_modified/results/nvila/similarity_80k_L11_early_mid.csv b/exp2a_modified/results/nvila/similarity_80k_L11_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..c16ecf556d61202ee2e9a64e4269ad23a94b84a9 --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_80k_L11_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999964,0.99997884,0.98755705,0.9873893,0.95973134,0.9595373 +right,0.99997884,1.0,0.9876951,0.9875475,0.959848,0.9596424 +above,0.98755705,0.9876951,1.0,0.99975145,0.971831,0.97180074 +under,0.9873893,0.9875475,0.99975145,1.0,0.972558,0.972438 +far,0.95973134,0.959848,0.971831,0.972558,0.9999996,0.9999212 
+close,0.9595373,0.9596424,0.97180074,0.972438,0.9999212,1.0000001 diff --git a/exp2a_modified/results/nvila/similarity_80k_L17_middle.csv b/exp2a_modified/results/nvila/similarity_80k_L17_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..6ce7aeff2ab633e577898d1291577edd265b4c45 --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_80k_L17_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9781369,0.9310158,0.9325685,0.8561556,0.8558693 +right,0.9781369,1.0000004,0.9284675,0.94139105,0.86132264,0.86142904 +above,0.9310158,0.9284675,0.9999999,0.98366034,0.8978678,0.8936629 +under,0.9325685,0.94139105,0.98366034,0.9999999,0.8987944,0.8984088 +far,0.8561556,0.86132264,0.8978678,0.8987944,1.0,0.9990812 +close,0.8558693,0.86142904,0.8936629,0.8984088,0.9990812,0.99999976 diff --git a/exp2a_modified/results/nvila/similarity_80k_L22_late_mid.csv b/exp2a_modified/results/nvila/similarity_80k_L22_late_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..88d244336c29822b8d86920a32a522490158008d --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_80k_L22_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.98724145,0.92874,0.9283841,0.850011,0.8500076 +right,0.98724145,0.9999997,0.9257482,0.9328269,0.84493864,0.8449371 +above,0.92874,0.9257482,0.9999999,0.9870798,0.8770188,0.8740602 +under,0.9283841,0.9328269,0.9870798,1.0,0.8692532,0.8702637 +far,0.850011,0.84493864,0.8770188,0.8692532,1.0,0.99855036 +close,0.8500076,0.8449371,0.8740602,0.8702637,0.99855036,1.0000004 diff --git a/exp2a_modified/results/nvila/similarity_80k_L27_late.csv b/exp2a_modified/results/nvila/similarity_80k_L27_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..b84ea9c33a8136566316358796fcceb81323f9b4 --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_80k_L27_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close 
+left,1.0000004,0.99955857,0.9954845,0.9956236,0.992975,0.9930063 +right,0.99955857,0.99999994,0.9953584,0.99582684,0.9927686,0.9928009 +above,0.9954845,0.9953584,1.0000001,0.99934226,0.9929427,0.99282837 +under,0.9956236,0.99582684,0.99934226,1.0000004,0.99276465,0.99287665 +far,0.992975,0.9927686,0.9929427,0.99276465,1.0,0.9998536 +close,0.9930063,0.9928009,0.99282837,0.99287665,0.9998536,1.0 diff --git a/exp2a_modified/results/nvila/similarity_vanilla_L22_late_mid.csv b/exp2a_modified/results/nvila/similarity_vanilla_L22_late_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..ff6782d2ba7230925a9dc8b1ad6e669af06d0d67 --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_vanilla_L22_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.9937971,0.91717666,0.9179457,0.8878075,0.88723373 +right,0.9937971,1.0000001,0.9206685,0.92282534,0.8912285,0.8903196 +above,0.91717666,0.9206685,0.9999998,0.9956542,0.9163888,0.91370475 +under,0.9179457,0.92282534,0.9956542,0.9999995,0.9187532,0.9182395 +far,0.8878075,0.8912285,0.9163888,0.9187532,1.0000004,0.9987387 +close,0.88723373,0.8903196,0.91370475,0.9182395,0.9987387,1.0000002 diff --git a/exp2a_modified/results/nvila/similarity_vanilla_L6_early.csv b/exp2a_modified/results/nvila/similarity_vanilla_L6_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..b738fdaf6d8b419de1c982eb2b04e37c17c0965d --- /dev/null +++ b/exp2a_modified/results/nvila/similarity_vanilla_L6_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.9997542,0.98725057,0.9877901,0.96586967,0.9646027 +right,0.9997542,0.99999994,0.98683053,0.9876104,0.96541184,0.9639078 +above,0.98725057,0.98683053,1.0000002,0.9994803,0.9768777,0.9762193 +under,0.9877901,0.9876104,0.9994803,1.0,0.97659093,0.9755331 +far,0.96586967,0.96541184,0.9768777,0.97659093,1.0000004,0.99978113 +close,0.9646027,0.9639078,0.9762193,0.9755331,0.99978113,1.0000002 diff --git 
a/exp2a_modified/results/qwen/results_summary.csv b/exp2a_modified/results/qwen/results_summary.csv new file mode 100644 index 0000000000000000000000000000000000000000..1f5704603e2e262067a8995c57c63177f14dd11b --- /dev/null +++ b/exp2a_modified/results/qwen/results_summary.csv @@ -0,0 +1,26 @@ +model,sim_above_far,sim_under_close,sim_left_right,diff_above_far_vs_left_right,diff_under_close_vs_left_right,layer_idx,layer_label +qwen_vanilla,0.9878544,0.9878074,0.9999392,-0.012084782,-0.01213181,7,early +qwen_vanilla,0.98418283,0.98290306,0.9998846,-0.01570177,-0.016981542,14,early_mid +qwen_vanilla,0.9776592,0.9756624,0.99965596,-0.021996737,-0.023993552,22,middle +qwen_vanilla,0.95032614,0.94788146,0.99512017,-0.044794023,-0.047238708,29,late_mid +qwen_vanilla,0.9415084,0.93939054,0.99848515,-0.056976736,-0.059094608,35,late +qwen_80k,0.9885977,0.98850304,0.99993646,-0.01133877,-0.011433423,7,early +qwen_80k,0.9823469,0.98100173,0.99989814,-0.017551243,-0.0188964,14,early_mid +qwen_80k,0.96985906,0.96774256,0.99973243,-0.029873371,-0.031989872,22,middle +qwen_80k,0.94964135,0.94838035,0.99680495,-0.047163606,-0.0484246,29,late_mid +qwen_80k,0.91188186,0.91212624,0.9987229,-0.08684105,-0.08659667,35,late +qwen_400k,0.9894593,0.9892013,0.99994236,-0.010483086,-0.010741055,7,early +qwen_400k,0.9844377,0.98320484,0.99993646,-0.015498757,-0.01673162,14,early_mid +qwen_400k,0.9699773,0.9682413,0.9997704,-0.029793084,-0.03152913,22,middle +qwen_400k,0.9580884,0.9558412,0.9983553,-0.04026693,-0.042514145,29,late_mid +qwen_400k,0.9148766,0.9173591,0.99830496,-0.08342838,-0.08094585,35,late +qwen_800k,0.9899683,0.9896326,0.99994457,-0.009976268,-0.010311961,7,early +qwen_800k,0.9868173,0.98572755,0.99994314,-0.013125837,-0.014215589,14,early_mid +qwen_800k,0.9739447,0.9729233,0.9997934,-0.025848687,-0.026870131,22,middle +qwen_800k,0.95486164,0.95309997,0.9981552,-0.043293536,-0.04505521,29,late_mid +qwen_800k,0.9358968,0.93043613,0.99775326,-0.06185645,-0.06731713,35,late 
+qwen_2m,0.9908798,0.9905167,0.9999402,-0.009060442,-0.009423494,7,early +qwen_2m,0.989565,0.98875475,0.9999511,-0.010386109,-0.011196375,14,early_mid +qwen_2m,0.9692019,0.9686675,0.9997675,-0.03056556,-0.031099975,22,middle +qwen_2m,0.93922085,0.93831193,0.9968462,-0.057625353,-0.058534265,29,late_mid +qwen_2m,0.9208069,0.9072825,0.9965475,-0.075740635,-0.08926505,35,late diff --git a/exp2a_modified/results/qwen/similarity_2m_L14_early_mid.csv b/exp2a_modified/results/qwen/similarity_2m_L14_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..158537daf0cee31c73ea8936af63c30f043f48e6 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_2m_L14_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999998,0.9999511,0.99657696,0.996454,0.98376906,0.9835031 +right,0.9999511,1.0,0.99654865,0.99649066,0.98354805,0.98321724 +above,0.99657696,0.99654865,1.0000001,0.9999223,0.989565,0.9892365 +under,0.996454,0.99649066,0.9999223,1.0000001,0.98912716,0.98875475 +far,0.98376906,0.98354805,0.989565,0.98912716,1.0000005,0.9999435 +close,0.9835031,0.98321724,0.9892365,0.98875475,0.9999435,1.0 diff --git a/exp2a_modified/results/qwen/similarity_2m_L22_middle.csv b/exp2a_modified/results/qwen/similarity_2m_L22_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..e21a66e73816a216a549f8dea57c99fbda7a2ba7 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_2m_L22_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.9997675,0.98960626,0.9895336,0.9607725,0.9608675 +right,0.9997675,1.0,0.9901829,0.99017763,0.96098214,0.96100146 +above,0.98960626,0.9901829,0.9999995,0.99960077,0.9692019,0.9689368 +under,0.9895336,0.99017763,0.99960077,0.9999995,0.96889305,0.9686675 +far,0.9607725,0.96098214,0.9692019,0.96889305,0.9999999,0.999784 +close,0.9608675,0.96100146,0.9689368,0.9686675,0.999784,1.0 diff --git a/exp2a_modified/results/qwen/similarity_2m_L29_late_mid.csv 
b/exp2a_modified/results/qwen/similarity_2m_L29_late_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..51c71c3ecd198af72fe666d509454fdb76ff958d --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_2m_L29_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.9968462,0.96972966,0.9656495,0.9295452,0.9282496 +right,0.9968462,1.0000004,0.9678302,0.96753937,0.9298134,0.92841697 +above,0.96972966,0.9678302,0.99999964,0.99251354,0.93922085,0.93715733 +under,0.9656495,0.96753937,0.99251354,0.9999999,0.9394452,0.93831193 +far,0.9295452,0.9298134,0.93922085,0.9394452,0.9999995,0.9991802 +close,0.9282496,0.92841697,0.93715733,0.93831193,0.9991802,0.9999995 diff --git a/exp2a_modified/results/qwen/similarity_2m_L35_late.csv b/exp2a_modified/results/qwen/similarity_2m_L35_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..a0c6ee0146c874cb1545554574ec3170e138c568 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_2m_L35_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9965475,0.9649773,0.96030045,0.8989403,0.89823353 +right,0.9965475,1.0,0.9598953,0.9629967,0.8915997,0.89078486 +above,0.9649773,0.9598953,1.0000002,0.98364305,0.9208069,0.9149355 +under,0.96030045,0.9629967,0.98364305,1.0,0.9093851,0.9072825 +far,0.8989403,0.8915997,0.9208069,0.9093851,0.99999994,0.996667 +close,0.89823353,0.89078486,0.9149355,0.9072825,0.996667,0.99999976 diff --git a/exp2a_modified/results/qwen/similarity_2m_L7_early.csv b/exp2a_modified/results/qwen/similarity_2m_L7_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..518df4a234e32752518d83cb87de3a3789f98a83 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_2m_L7_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999995,0.9999402,0.9966904,0.9964489,0.98790896,0.9878886 +right,0.9999402,0.9999999,0.996633,0.9964943,0.98773515,0.987622 
+above,0.9966904,0.996633,0.99999964,0.99989176,0.9908798,0.9907012 +under,0.9964489,0.9964943,0.99989176,1.0000005,0.9907862,0.9905167 +far,0.98790896,0.98773515,0.9908798,0.9907862,1.0,0.999933 +close,0.9878886,0.987622,0.9907012,0.9905167,0.999933,1.0000002 diff --git a/exp2a_modified/results/qwen/similarity_400k_L14_early_mid.csv b/exp2a_modified/results/qwen/similarity_400k_L14_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..85c45018e9ad4fa62b131189727622bc06401d26 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_400k_L14_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999994,0.99993646,0.9955939,0.99546295,0.97487485,0.9744284 +right,0.99993646,0.9999994,0.9957069,0.9956356,0.97504747,0.9745439 +above,0.9955939,0.9957069,0.9999995,0.99986076,0.9844377,0.98397595 +under,0.99546295,0.9956356,0.99986076,1.0,0.9837175,0.98320484 +far,0.97487485,0.97504747,0.9844377,0.9837175,0.9999999,0.9999202 +close,0.9744284,0.9745439,0.98397595,0.98320484,0.9999202,1.0 diff --git a/exp2a_modified/results/qwen/similarity_400k_L22_middle.csv b/exp2a_modified/results/qwen/similarity_400k_L22_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..545f8f970a42999a5d15f6fe9ab7e3a86042bf88 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_400k_L22_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9997704,0.9914931,0.9915282,0.9591845,0.958396 +right,0.9997704,1.0000005,0.9918085,0.99199045,0.9597441,0.95890886 +above,0.9914931,0.9918085,1.0000002,0.9995384,0.9699773,0.969287 +under,0.9915282,0.99199045,0.9995384,1.0,0.96905524,0.9682413 +far,0.9591845,0.9597441,0.9699773,0.96905524,0.99999976,0.99974537 +close,0.958396,0.95890886,0.969287,0.9682413,0.99974537,0.99999964 diff --git a/exp2a_modified/results/qwen/similarity_400k_L29_late_mid.csv b/exp2a_modified/results/qwen/similarity_400k_L29_late_mid.csv new file mode 100644 index 
0000000000000000000000000000000000000000..f815b6e30b653795a4b3b5fe033386fcb71ff4ea --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_400k_L29_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9983553,0.9822931,0.9812791,0.9502559,0.9498185 +right,0.9983553,1.0000005,0.9825355,0.9827614,0.9502156,0.9497454 +above,0.9822931,0.9825355,1.0,0.9977466,0.9580884,0.95665336 +under,0.9812791,0.9827614,0.9977466,1.0000001,0.95612884,0.9558412 +far,0.9502559,0.9502156,0.9580884,0.95612884,1.0000002,0.99927115 +close,0.9498185,0.9497454,0.95665336,0.9558412,0.99927115,1.0 diff --git a/exp2a_modified/results/qwen/similarity_400k_L35_late.csv b/exp2a_modified/results/qwen/similarity_400k_L35_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..8124c12e61d681b62b1f2815fa860291b95b175c --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_400k_L35_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99830496,0.9709184,0.97107494,0.9330967,0.93299454 +right,0.99830496,0.9999998,0.97170156,0.97301006,0.92797637,0.9278939 +above,0.9709184,0.97170156,1.0000002,0.99484855,0.9148766,0.9112967 +under,0.97107494,0.97301006,0.99484855,1.0,0.9179872,0.9173591 +far,0.9330967,0.92797637,0.9148766,0.9179872,0.9999998,0.9976874 +close,0.93299454,0.9278939,0.9112967,0.9173591,0.9976874,0.9999999 diff --git a/exp2a_modified/results/qwen/similarity_400k_L7_early.csv b/exp2a_modified/results/qwen/similarity_400k_L7_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..b133ee8e509cde9a84b1861650f9f7889b7d04bc --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_400k_L7_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99994236,0.99604154,0.9956762,0.9851828,0.9851691 +right,0.99994236,0.99999994,0.9960893,0.9958205,0.98517305,0.9850775 +above,0.99604154,0.9960893,0.9999996,0.99986976,0.9894593,0.98925185 
+under,0.9956762,0.9958205,0.99986976,1.0000001,0.9894949,0.9892013 +far,0.9851828,0.98517305,0.9894593,0.9894949,1.0000001,0.99992925 +close,0.9851691,0.9850775,0.98925185,0.9892013,0.99992925,1.0000002 diff --git a/exp2a_modified/results/qwen/similarity_800k_L14_early_mid.csv b/exp2a_modified/results/qwen/similarity_800k_L14_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..9fb13ad2a43791014bdecf3fba6441bee5cf75a7 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_800k_L14_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.99994314,0.99614424,0.9959903,0.97900426,0.97858274 +right,0.99994314,1.0000005,0.9961765,0.9960942,0.97887444,0.9783888 +above,0.99614424,0.9961765,0.9999998,0.9998914,0.9868173,0.9863455 +under,0.9959903,0.9960942,0.9998914,0.9999999,0.9862487,0.98572755 +far,0.97900426,0.97887444,0.9868173,0.9862487,1.0000006,0.999931 +close,0.97858274,0.9783888,0.9863455,0.98572755,0.999931,1.0 diff --git a/exp2a_modified/results/qwen/similarity_800k_L22_middle.csv b/exp2a_modified/results/qwen/similarity_800k_L22_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..5233105833353c44e984d5023db2e70634eee6be --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_800k_L22_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.9997934,0.9918604,0.99187034,0.965433,0.9649955 +right,0.9997934,1.0,0.99216706,0.9923101,0.96557045,0.96507204 +above,0.9918604,0.99216706,1.0,0.9996005,0.9739447,0.9735157 +under,0.99187034,0.9923101,0.9996005,0.9999995,0.97344637,0.9729233 +far,0.965433,0.96557045,0.9739447,0.97344637,0.9999998,0.9997673 +close,0.9649955,0.96507204,0.9735157,0.9729233,0.9997673,1.0000002 diff --git a/exp2a_modified/results/qwen/similarity_800k_L29_late_mid.csv b/exp2a_modified/results/qwen/similarity_800k_L29_late_mid.csv new file mode 100644 index 
0000000000000000000000000000000000000000..ce53787366de1891154e6b2f2e21fe69afb04e8e --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_800k_L29_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9981552,0.9773686,0.9768101,0.94618565,0.94496274 +right,0.9981552,1.0000005,0.9765825,0.9779184,0.9456187,0.9443376 +above,0.9773686,0.9765825,1.0000001,0.99626654,0.95486164,0.95301676 +under,0.9768101,0.9779184,0.99626654,1.0000002,0.95387346,0.95309997 +far,0.94618565,0.9456187,0.95486164,0.95387346,1.0000002,0.99927366 +close,0.94496274,0.9443376,0.95301676,0.95309997,0.99927366,1.0000002 diff --git a/exp2a_modified/results/qwen/similarity_800k_L35_late.csv b/exp2a_modified/results/qwen/similarity_800k_L35_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..f6c7cd87977faae5e707d4488ad2493ceecb7faa --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_800k_L35_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999998,0.99775326,0.9703489,0.96939826,0.93214816,0.931001 +right,0.99775326,1.0,0.9679116,0.96959454,0.9250057,0.9237515 +above,0.9703489,0.9679116,1.0000002,0.9927238,0.9358968,0.93023026 +under,0.96939826,0.96959454,0.9927238,1.0000004,0.93206686,0.93043613 +far,0.93214816,0.9250057,0.9358968,0.93206686,1.0000002,0.9972799 +close,0.931001,0.9237515,0.93023026,0.93043613,0.9972799,1.0 diff --git a/exp2a_modified/results/qwen/similarity_800k_L7_early.csv b/exp2a_modified/results/qwen/similarity_800k_L7_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..09a9fc4ef7997368dd8ffedbdae7d50ff1330944 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_800k_L7_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99994457,0.9962832,0.99595183,0.98650336,0.98646617 +right,0.99994457,0.99999976,0.9962819,0.99604106,0.9864354,0.9863136 +above,0.9962832,0.9962819,1.0000002,0.9998869,0.9899683,0.9897391 
+under,0.99595183,0.99604106,0.9998869,0.9999999,0.98994654,0.9896326 +far,0.98650336,0.9864354,0.9899683,0.98994654,0.99999964,0.99993044 +close,0.98646617,0.9863136,0.9897391,0.9896326,0.99993044,0.9999993 diff --git a/exp2a_modified/results/qwen/similarity_80k_L14_early_mid.csv b/exp2a_modified/results/qwen/similarity_80k_L14_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..504d8543fd97dc8c4fcc84e17f14a3ae2de48856 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_80k_L14_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99989814,0.99553716,0.9954311,0.97197545,0.97172505 +right,0.99989814,1.0000001,0.9958216,0.99575305,0.9727046,0.9724134 +above,0.99553716,0.9958216,1.0000005,0.99979544,0.9823469,0.9821155 +under,0.9954311,0.99575305,0.99979544,0.9999998,0.9813094,0.98100173 +far,0.97197545,0.9727046,0.9823469,0.9813094,0.9999999,0.9998916 +close,0.97172505,0.9724134,0.9821155,0.98100173,0.9998916,0.9999995 diff --git a/exp2a_modified/results/qwen/similarity_80k_L22_middle.csv b/exp2a_modified/results/qwen/similarity_80k_L22_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..7f3273596bc778506172556953e3ac4d4d9a4825 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_80k_L22_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99973243,0.9919374,0.991159,0.9592458,0.9587638 +right,0.99973243,0.9999997,0.992398,0.9917904,0.96054536,0.9600166 +above,0.9919374,0.992398,0.99999976,0.9995494,0.96985906,0.9692464 +under,0.991159,0.9917904,0.9995494,1.0000005,0.96841073,0.96774256 +far,0.9592458,0.96054536,0.96985906,0.96841073,0.9999999,0.99969804 +close,0.9587638,0.9600166,0.9692464,0.96774256,0.99969804,0.99999994 diff --git a/exp2a_modified/results/qwen/similarity_80k_L29_late_mid.csv b/exp2a_modified/results/qwen/similarity_80k_L29_late_mid.csv new file mode 100644 index 
0000000000000000000000000000000000000000..ef8e1e43ea7d4841e72d43382ed11f680eadb8ec --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_80k_L29_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999964,0.99680495,0.9805011,0.97955596,0.94359356,0.94387704 +right,0.99680495,0.9999999,0.9815129,0.9817121,0.9434773,0.94357944 +above,0.9805011,0.9815129,0.9999999,0.99741185,0.94964135,0.94810516 +under,0.97955596,0.9817121,0.99741185,1.0000001,0.9479513,0.94838035 +far,0.94359356,0.9434773,0.94964135,0.9479513,1.0,0.9989624 +close,0.94387704,0.94357944,0.94810516,0.94838035,0.9989624,0.99999964 diff --git a/exp2a_modified/results/qwen/similarity_80k_L35_late.csv b/exp2a_modified/results/qwen/similarity_80k_L35_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..2e5c0a5ace08a6fb8fc2383cd15221af7e888bb6 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_80k_L35_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.9987229,0.9772568,0.97572494,0.92090666,0.9189262 +right,0.9987229,0.9999999,0.9772356,0.9762312,0.9178261,0.91580224 +above,0.9772568,0.9772356,1.0000002,0.99792993,0.91188186,0.9081069 +under,0.97572494,0.9762312,0.99792993,0.9999997,0.91435254,0.91212624 +far,0.92090666,0.9178261,0.91188186,0.91435254,0.9999998,0.998495 +close,0.9189262,0.91580224,0.9081069,0.91212624,0.998495,0.9999997 diff --git a/exp2a_modified/results/qwen/similarity_80k_L7_early.csv b/exp2a_modified/results/qwen/similarity_80k_L7_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..3b8073b0164f2a78a2435456a97dab5bd1a0008e --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_80k_L7_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.99993646,0.99620664,0.99586976,0.9840237,0.9839864 +right,0.99993646,1.0,0.9962809,0.9960503,0.9841163,0.9839929 +above,0.99620664,0.9962809,1.0,0.9998498,0.9885977,0.98840046 
+under,0.99586976,0.9960503,0.9998498,1.0000001,0.98879474,0.98850304 +far,0.9840237,0.9841163,0.9885977,0.98879474,1.0000002,0.99992657 +close,0.9839864,0.9839929,0.98840046,0.98850304,0.99992657,1.0 diff --git a/exp2a_modified/results/qwen/similarity_vanilla_L14_early_mid.csv b/exp2a_modified/results/qwen/similarity_vanilla_L14_early_mid.csv new file mode 100644 index 0000000000000000000000000000000000000000..d5baff46694ce532fbf3bba1d1d50a393cf0f4a1 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_vanilla_L14_early_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9998846,0.99582946,0.9956888,0.97501063,0.9748072 +right,0.9998846,1.0000004,0.9961206,0.9959967,0.9758598,0.97561693 +above,0.99582946,0.9961206,0.99999976,0.9997723,0.98418283,0.9840138 +under,0.9956888,0.9959967,0.9997723,0.9999996,0.9831575,0.98290306 +far,0.97501063,0.9758598,0.98418283,0.9831575,1.0000001,0.9998947 +close,0.9748072,0.97561693,0.9840138,0.98290306,0.9998947,0.9999996 diff --git a/exp2a_modified/results/qwen/similarity_vanilla_L22_middle.csv b/exp2a_modified/results/qwen/similarity_vanilla_L22_middle.csv new file mode 100644 index 0000000000000000000000000000000000000000..9d108f1f8ffc458168cece07ef1a203851556416 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_vanilla_L22_middle.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99965596,0.99035466,0.9894786,0.96180725,0.9615563 +right,0.99965596,1.0000002,0.99138314,0.99066836,0.9639702,0.96368074 +above,0.99035466,0.99138314,0.99999964,0.99955785,0.9776592,0.9770752 +under,0.9894786,0.99066836,0.99955785,0.9999997,0.97625387,0.9756624 +far,0.96180725,0.9639702,0.9776592,0.97625387,1.0000001,0.99976575 +close,0.9615563,0.96368074,0.9770752,0.9756624,0.99976575,1.0000005 diff --git a/exp2a_modified/results/qwen/similarity_vanilla_L29_late_mid.csv b/exp2a_modified/results/qwen/similarity_vanilla_L29_late_mid.csv new file mode 100644 index 
0000000000000000000000000000000000000000..cb73096b5c8fb4be0863158324737c401ab0e738 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_vanilla_L29_late_mid.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.99512017,0.9745686,0.97302496,0.9357954,0.9351796 +right,0.99512017,1.0,0.97529674,0.9750935,0.93320036,0.9321554 +above,0.9745686,0.97529674,0.99999976,0.9973251,0.95032614,0.9476396 +under,0.97302496,0.9750935,0.9973251,0.9999995,0.94890064,0.94788146 +far,0.9357954,0.93320036,0.95032614,0.94890064,0.99999976,0.9989178 +close,0.9351796,0.9321554,0.9476396,0.94788146,0.9989178,1.0000001 diff --git a/exp2a_modified/results/qwen/similarity_vanilla_L35_late.csv b/exp2a_modified/results/qwen/similarity_vanilla_L35_late.csv new file mode 100644 index 0000000000000000000000000000000000000000..4e093bf4a334cd859ebc97a05cf300a8abb6f3c1 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_vanilla_L35_late.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.99848515,0.9831016,0.9811994,0.93792963,0.93604606 +right,0.99848515,1.0,0.9825343,0.9811074,0.93322587,0.9311956 +above,0.9831016,0.9825343,0.99999994,0.9989464,0.9415084,0.938514 +under,0.9811994,0.9811074,0.9989464,0.9999999,0.94198036,0.93939054 +far,0.93792963,0.93322587,0.9415084,0.94198036,1.0000002,0.99916875 +close,0.93604606,0.9311956,0.938514,0.93939054,0.99916875,1.0000002 diff --git a/exp2a_modified/results/qwen/similarity_vanilla_L7_early.csv b/exp2a_modified/results/qwen/similarity_vanilla_L7_early.csv new file mode 100644 index 0000000000000000000000000000000000000000..53230acc4059fd417368e02b863d6408db293326 --- /dev/null +++ b/exp2a_modified/results/qwen/similarity_vanilla_L7_early.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9999392,0.99629605,0.9960357,0.9831143,0.98305386 +right,0.9999392,0.99999976,0.9963436,0.9961799,0.9831868,0.9830458 +above,0.99629605,0.9963436,1.0,0.99985766,0.9878544,0.98767114 
+under,0.9960357,0.9961799,0.99985766,0.9999999,0.988078,0.9878074 +far,0.9831143,0.9831868,0.9878544,0.988078,0.9999998,0.9999287 +close,0.98305386,0.9830458,0.98767114,0.9878074,0.9999287,1.0000004 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L1.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L1.csv new file mode 100644 index 0000000000000000000000000000000000000000..42988792d6c3ecf1ddba9303c9ed847752a348d5 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L1.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999996,0.99996567,0.99694955,0.9969008,0.9866452,0.9868368 +right,0.99996567,1.0,0.9970762,0.99703825,0.98699915,0.9871588 +above,0.99694955,0.9970762,1.0000002,0.9999305,0.98742366,0.98748 +under,0.9969008,0.99703825,0.9999305,0.99999964,0.98731005,0.9873435 +far,0.9866452,0.98699915,0.98742366,0.98731005,1.0000004,0.99995756 +close,0.9868368,0.9871588,0.98748,0.9873435,0.99995756,0.99999994 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L14.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L14.csv new file mode 100644 index 0000000000000000000000000000000000000000..202a8f5ae433627bcbb4d356aa1710e04fe7604a --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L14.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999989,0.9668932,0.9363602,0.9288738,0.8933626,0.8915282 +right,0.9668932,1.0000001,0.93723714,0.92857337,0.8977658,0.8928321 +above,0.9363602,0.93723714,0.99999964,0.9172457,0.8834505,0.8713281 +under,0.9288738,0.92857337,0.9172457,0.99999976,0.8675222,0.8765899 +far,0.8933626,0.8977658,0.8834505,0.8675222,1.0000002,0.9940043 +close,0.8915282,0.8928321,0.8713281,0.8765899,0.9940043,0.99999917 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L16.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L16.csv new file mode 100644 index 
0000000000000000000000000000000000000000..d94618e5929f9d918715d2d9a4d5fea6afff8423 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L16.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.9604546,0.8414299,0.8386873,0.81286216,0.8076469 +right,0.9604546,0.99999976,0.8351713,0.8426815,0.8152706,0.8099931 +above,0.8414299,0.8351713,0.99999994,0.8934198,0.8158267,0.797244 +under,0.8386873,0.8426815,0.8934198,1.0000004,0.80905294,0.819305 +far,0.81286216,0.8152706,0.8158267,0.80905294,1.0,0.99201447 +close,0.8076469,0.8099931,0.797244,0.819305,0.99201447,1.0 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L19.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L19.csv new file mode 100644 index 0000000000000000000000000000000000000000..6236b7fe82a33da13d50a852188a773f3f98d206 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L19.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999994,0.9528682,0.8079404,0.7898549,0.75441873,0.74726176 +right,0.9528682,1.0,0.79594153,0.79201853,0.74864805,0.74139905 +above,0.8079404,0.79594153,1.0,0.86362475,0.74899715,0.72680587 +under,0.7898549,0.79201853,0.86362475,0.9999998,0.73787785,0.7498276 +far,0.75441873,0.74864805,0.74899715,0.73787785,1.0000002,0.99016166 +close,0.74726176,0.74139905,0.72680587,0.7498276,0.99016166,0.99999976 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L22.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L22.csv new file mode 100644 index 0000000000000000000000000000000000000000..42edc0763ceb63e93bffd6fe993a81edac76adda --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L22.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.968021,0.81545186,0.7958045,0.7329019,0.7262979 +right,0.968021,0.9999998,0.80376875,0.79435533,0.7200212,0.713209 +above,0.81545186,0.80376875,1.0000005,0.88850933,0.7169925,0.69870776 
+under,0.7958045,0.79435533,0.88850933,0.9999999,0.7193377,0.7300614 +far,0.7329019,0.7200212,0.7169925,0.7193377,1.0000004,0.9909742 +close,0.7262979,0.713209,0.69870776,0.7300614,0.9909742,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L23.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L23.csv new file mode 100644 index 0000000000000000000000000000000000000000..22bff83bf1e9901b3c969a59790b3fb5965728b9 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L23.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.9722762,0.82004917,0.80911404,0.741459,0.7350105 +right,0.9722762,1.0000001,0.8107565,0.80805796,0.72700655,0.7204243 +above,0.82004917,0.8107565,1.0000002,0.8974714,0.72002435,0.70405394 +under,0.80911404,0.80805796,0.8974714,0.9999996,0.7317405,0.7395531 +far,0.741459,0.72700655,0.72002435,0.7317405,1.0000001,0.9923998 +close,0.7350105,0.7204243,0.70405394,0.7395531,0.9923998,1.0000002 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L27.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L27.csv new file mode 100644 index 0000000000000000000000000000000000000000..37b525a2cb7af8b2c310b52c946a9cef2365b67a --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L27.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.97985363,0.8395502,0.83292717,0.7642052,0.7575661 +right,0.97985363,1.0000005,0.8307585,0.8321127,0.7512693,0.7457788 +above,0.8395502,0.8307585,1.0000004,0.9228009,0.74233305,0.72784907 +under,0.83292717,0.8321127,0.9228009,1.0000002,0.7598206,0.76596385 +far,0.7642052,0.7512693,0.74233305,0.7598206,1.0,0.9897876 +close,0.7575661,0.7457788,0.72784907,0.76596385,0.9897876,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L3.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L3.csv new file mode 100644 index 
0000000000000000000000000000000000000000..4f7070b1f78ae40e877fd36010a7bc563593ee97 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L3.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.9999529,0.99508595,0.99499094,0.96680975,0.9671318 +right,0.9999529,1.0000001,0.9951982,0.99512076,0.9673524,0.967643 +above,0.99508595,0.9951982,1.0000004,0.99987465,0.96995735,0.9701104 +under,0.99499094,0.99512076,0.99987465,1.0,0.96996546,0.9700815 +far,0.96680975,0.9673524,0.96995735,0.96996546,0.9999999,0.99992484 +close,0.9671318,0.967643,0.9701104,0.9700815,0.99992484,0.99999976 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L8.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L8.csv new file mode 100644 index 0000000000000000000000000000000000000000..b5eeabc6d088aae9188f0369499f7ac6dd5e0718 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L8.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.99994385,0.98479706,0.9839156,0.9600158,0.96003926 +right,0.99994385,0.9999996,0.98478305,0.98395336,0.96012515,0.9600971 +above,0.98479706,0.98478305,0.99999964,0.99974245,0.9567363,0.95649827 +under,0.9839156,0.98395336,0.99974245,1.0000001,0.9563007,0.95596004 +far,0.9600158,0.96012515,0.9567363,0.9563007,0.99999976,0.99987334 +close,0.96003926,0.9600971,0.95649827,0.95596004,0.99987334,0.9999998 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_2m_L9.csv b/exp2a_modified/results_all_layers/molmo/similarity_2m_L9.csv new file mode 100644 index 0000000000000000000000000000000000000000..9919476ad069886d0546c1039ff9511c7f32972d --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_2m_L9.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.99993783,0.97894144,0.9779963,0.9359634,0.935759 +right,0.99993783,0.99999964,0.9790031,0.9781102,0.9355576,0.93529886 
+above,0.97894144,0.9790031,1.0000001,0.9996749,0.9340033,0.9334327 +under,0.9779963,0.9781102,0.9996749,0.9999996,0.93427753,0.9336074 +far,0.9359634,0.9355576,0.9340033,0.93427753,1.0000001,0.9998367 +close,0.935759,0.93529886,0.9334327,0.9336074,0.9998367,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L11.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L11.csv new file mode 100644 index 0000000000000000000000000000000000000000..f169f0dc3ae5ca586367606574ee7c78d270b2a8 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L11.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9998104,0.9729609,0.9724998,0.9170009,0.9168552 +right,0.9998104,0.9999998,0.9733326,0.97293776,0.9164552,0.91629785 +above,0.9729609,0.9733326,1.0000001,0.9994147,0.92720956,0.9263004 +under,0.9724998,0.97293776,0.9994147,1.0000004,0.92730814,0.92647916 +far,0.9170009,0.9164552,0.92720956,0.92730814,0.9999999,0.99975073 +close,0.9168552,0.91629785,0.9263004,0.92647916,0.99975073,1.0 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L13.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L13.csv new file mode 100644 index 0000000000000000000000000000000000000000..d84e01595203a2f85e8fd39f6b32e432c98e8a03 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L13.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9983739,0.9708368,0.97001463,0.92518365,0.92525065 +right,0.9983739,1.0000001,0.9714834,0.9709337,0.9258892,0.9259387 +above,0.9708368,0.9714834,1.0000001,0.9966369,0.93268144,0.93088496 +under,0.97001463,0.9709337,0.9966369,1.0000001,0.931912,0.93169504 +far,0.92518365,0.9258892,0.93268144,0.931912,1.0,0.9991321 +close,0.92525065,0.9259387,0.93088496,0.93169504,0.9991321,1.0000002 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L15.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L15.csv new file 
mode 100644 index 0000000000000000000000000000000000000000..43b6a057955de2838a40d9aaec7a369da016e035 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L15.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.98625815,0.931838,0.93246627,0.90053076,0.8999422 +right,0.98625815,1.0000004,0.9285742,0.9321176,0.9019694,0.9012791 +above,0.931838,0.9285742,1.0000004,0.967097,0.88848084,0.88157034 +under,0.93246627,0.9321176,0.967097,1.0000002,0.88585705,0.88790464 +far,0.90053076,0.9019694,0.88848084,0.88585705,0.99999976,0.9974839 +close,0.8999422,0.9012791,0.88157034,0.88790464,0.9974839,0.9999998 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L16.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L16.csv new file mode 100644 index 0000000000000000000000000000000000000000..025531842d0e722d286379caa24563131b330641 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L16.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.9883753,0.92532074,0.9256134,0.8832295,0.88197345 +right,0.9883753,1.0000005,0.92213887,0.92566484,0.8865908,0.88546425 +above,0.92532074,0.92213887,0.99999994,0.9678088,0.86070627,0.85359925 +under,0.9256134,0.92566484,0.9678088,1.0000004,0.85984963,0.8611323 +far,0.8832295,0.8865908,0.86070627,0.85984963,1.0000001,0.9974767 +close,0.88197345,0.88546425,0.85359925,0.8611323,0.9974767,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L18.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L18.csv new file mode 100644 index 0000000000000000000000000000000000000000..82d1dc74d9b0f26b82ccab55ce30712b77e6de12 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L18.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9794933,0.8877649,0.87625694,0.83653045,0.8323091 +right,0.9794933,1.0000002,0.88494354,0.87769973,0.83838356,0.8330399 
+above,0.8877649,0.88494354,0.99999964,0.9323021,0.8070833,0.78904563 +under,0.87625694,0.87769973,0.9323021,1.0000002,0.7966786,0.80220544 +far,0.83653045,0.83838356,0.8070833,0.7966786,1.0000001,0.9921131 +close,0.8323091,0.8330399,0.78904563,0.80220544,0.9921131,1.0000004 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L19.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L19.csv new file mode 100644 index 0000000000000000000000000000000000000000..897a42abe4698c87a844d6e06127ce53cd621a5c --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L19.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.9835917,0.89433604,0.8799748,0.8249269,0.82165074 +right,0.9835917,1.0,0.89446956,0.8852003,0.82732373,0.82341003 +above,0.89433604,0.89446956,1.0,0.9350607,0.8004133,0.78341514 +under,0.8799748,0.8852003,0.9350607,1.0000004,0.7830846,0.7915684 +far,0.8249269,0.82732373,0.8004133,0.7830846,1.0000001,0.9916222 +close,0.82165074,0.82341003,0.78341514,0.7915684,0.9916222,0.9999999 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L24.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L24.csv new file mode 100644 index 0000000000000000000000000000000000000000..d018f38089a2ec40def3c59c4544f208dfea857c --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L24.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9888178,0.88127077,0.86854887,0.7877093,0.7841833 +right,0.9888178,1.0000002,0.88065463,0.87178373,0.78230405,0.77880806 +above,0.88127077,0.88065463,1.0000002,0.94386065,0.7402826,0.7252144 +under,0.86854887,0.87178373,0.94386065,1.0,0.7324298,0.7392968 +far,0.7877093,0.78230405,0.7402826,0.7324298,1.0000002,0.9911046 +close,0.7841833,0.77880806,0.7252144,0.7392968,0.9911046,1.0 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L29.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L29.csv new file 
mode 100644 index 0000000000000000000000000000000000000000..862aef630fbef05ee8fff2b4d794a8a80636f8f2 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L29.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.99379545,0.9166699,0.9115854,0.85213107,0.8461733 +right,0.99379545,0.9999999,0.91515905,0.91266775,0.8467967,0.84122396 +above,0.9166699,0.91515905,1.0000001,0.9620296,0.81820846,0.8068087 +under,0.9115854,0.91266775,0.9620296,1.0000001,0.8218258,0.8227976 +far,0.85213107,0.8467967,0.81820846,0.8218258,0.99999994,0.9944927 +close,0.8461733,0.84122396,0.8068087,0.8227976,0.9944927,1.0000004 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L3.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L3.csv new file mode 100644 index 0000000000000000000000000000000000000000..c4216614d7ec18c1e53c63135d0c5fffb4ac039a --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L3.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000006,0.9999455,0.99331796,0.9933099,0.9621074,0.9623606 +right,0.9999455,1.0000002,0.9934757,0.99346894,0.962541,0.9627754 +above,0.99331796,0.9934757,0.99999976,0.99979967,0.96679616,0.96689725 +under,0.9933099,0.99346894,0.99979967,0.99999964,0.96654034,0.96660626 +far,0.9621074,0.962541,0.96679616,0.96654034,0.99999976,0.9999125 +close,0.9623606,0.9627754,0.96689725,0.96660626,0.9999125,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L30.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L30.csv new file mode 100644 index 0000000000000000000000000000000000000000..38af406eb94bfe4f55eaf7fa29e5f6e072599160 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L30.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999992,0.99583,0.9416358,0.9372427,0.89715046,0.89276665 +right,0.99583,0.99999994,0.9394299,0.93712306,0.89277047,0.8888465 
+above,0.9416358,0.9394299,1.0000005,0.9741942,0.8795686,0.8721995 +under,0.9372427,0.93712306,0.9741942,0.99999994,0.8838877,0.88406736 +far,0.89715046,0.89277047,0.8795686,0.8838877,1.0000001,0.9969183 +close,0.89276665,0.8888465,0.8721995,0.88406736,0.9969183,1.0000004 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L31.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L31.csv new file mode 100644 index 0000000000000000000000000000000000000000..9743251160ed42e91fdc6ba0ad340b4b9695560b --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L31.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.99682474,0.95443934,0.95085055,0.92124707,0.9183261 +right,0.99682474,1.0000005,0.9529462,0.95104045,0.91831386,0.91579354 +above,0.95443934,0.9529462,0.99999976,0.9797501,0.9089592,0.90330064 +under,0.95085055,0.95104045,0.9797501,1.0000005,0.910488,0.911077 +far,0.92124707,0.91831386,0.9089592,0.910488,1.0000002,0.99741966 +close,0.9183261,0.91579354,0.90330064,0.911077,0.99741966,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L4.csv b/exp2a_modified/results_all_layers/molmo/similarity_400k_L4.csv new file mode 100644 index 0000000000000000000000000000000000000000..0467b9810b4c3844e470567c3cfa82553b67c58c --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L4.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999996,0.99994797,0.9926395,0.9924043,0.9549062,0.95504236 +right,0.99994797,0.99999964,0.99274707,0.99252754,0.9551932,0.9553066 +above,0.9926395,0.99274707,1.0000001,0.9998073,0.9593648,0.9593786 +under,0.9924043,0.99252754,0.9998073,1.0000002,0.959176,0.95914954 +far,0.9549062,0.9551932,0.9593648,0.959176,1.0000008,0.9999226 +close,0.95504236,0.9553066,0.9593786,0.95914954,0.9999226,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_400k_L8.csv 
b/exp2a_modified/results_all_layers/molmo/similarity_400k_L8.csv new file mode 100644 index 0000000000000000000000000000000000000000..986422b4573e40ba57937216db6fd6a6b432ed21 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_400k_L8.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.99992937,0.97991574,0.97839576,0.93550354,0.93554133 +right,0.99992937,1.0000005,0.9796684,0.97816914,0.93497646,0.93498343 +above,0.97991574,0.9796684,1.000001,0.999682,0.93930745,0.9389894 +under,0.97839576,0.97816914,0.999682,1.0000001,0.938605,0.9382677 +far,0.93550354,0.93497646,0.93930745,0.938605,1.0000001,0.99987316 +close,0.93554133,0.93498343,0.9389894,0.9382677,0.99987316,1.0 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_800k_L13.csv b/exp2a_modified/results_all_layers/molmo/similarity_800k_L13.csv new file mode 100644 index 0000000000000000000000000000000000000000..3ea451c5e2e7735c744f22bccc0f3e3a6a98be23 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_800k_L13.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9990981,0.970269,0.96832395,0.9112702,0.91077095 +right,0.9990981,1.0000005,0.97084796,0.96887577,0.9112278,0.9107026 +above,0.970269,0.97084796,1.0,0.9983258,0.92944044,0.9281176 +under,0.96832395,0.96887577,0.9983258,1.0000006,0.9282915,0.92717594 +far,0.9112702,0.9112278,0.92944044,0.9282915,0.9999999,0.9996043 +close,0.91077095,0.9107026,0.9281176,0.92717594,0.9996043,0.99999946 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_800k_L17.csv b/exp2a_modified/results_all_layers/molmo/similarity_800k_L17.csv new file mode 100644 index 0000000000000000000000000000000000000000..6bad1d355d621aa21d03b7d2e5cc4438454e7805 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_800k_L17.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.97651416,0.8991644,0.9013486,0.8537958,0.85110706 
+right,0.97651416,0.9999996,0.8929029,0.8954901,0.8546095,0.8500097 +above,0.8991644,0.8929029,1.0000005,0.92934775,0.84376293,0.8280096 +under,0.9013486,0.8954901,0.92934775,0.9999997,0.8475287,0.85349274 +far,0.8537958,0.8546095,0.84376293,0.8475287,1.0000002,0.9957177 +close,0.85110706,0.8500097,0.8280096,0.85349274,0.9957177,1.0000004 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_800k_L2.csv b/exp2a_modified/results_all_layers/molmo/similarity_800k_L2.csv new file mode 100644 index 0000000000000000000000000000000000000000..8bf379b57ca245231dfa287b2368de12f4d2dfa2 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_800k_L2.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999996,0.9999599,0.9963067,0.9961767,0.96559566,0.9659162 +right,0.9999599,0.9999999,0.9963605,0.99623704,0.9658674,0.9661668 +above,0.9963067,0.9963605,1.0,0.9998985,0.96477735,0.96499026 +under,0.9961767,0.99623704,0.9998985,0.99999946,0.9644962,0.96468014 +far,0.96559566,0.9658674,0.96477735,0.9644962,1.0000001,0.9999389 +close,0.9659162,0.9661668,0.96499026,0.96468014,0.9999389,0.99999976 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_800k_L21.csv b/exp2a_modified/results_all_layers/molmo/similarity_800k_L21.csv new file mode 100644 index 0000000000000000000000000000000000000000..d5657e3d59d8f4d7aefa35f05a06ef42611d8bf5 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_800k_L21.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.9810878,0.8625393,0.8459594,0.7820133,0.7802125 +right,0.9810878,1.0000001,0.8568121,0.84812903,0.7811643,0.77853 +above,0.8625393,0.8568121,1.0,0.9303678,0.7698853,0.7555749 +under,0.8459594,0.84812903,0.9303678,1.0,0.7679723,0.7752001 +far,0.7820133,0.7811643,0.7698853,0.7679723,0.99999994,0.99342626 +close,0.7802125,0.77853,0.7555749,0.7752001,0.99342626,0.99999994 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_800k_L22.csv 
b/exp2a_modified/results_all_layers/molmo/similarity_800k_L22.csv new file mode 100644 index 0000000000000000000000000000000000000000..b1ae34304a68d9dfcdc00f165c470d211c1c391f --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_800k_L22.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9832525,0.84858704,0.8340912,0.76876587,0.7669391 +right,0.9832525,1.0,0.84172136,0.83437735,0.76478976,0.7623553 +above,0.84858704,0.84172136,1.0000002,0.9335502,0.7571263,0.7430105 +under,0.8340912,0.83437735,0.9335502,1.0000002,0.75997436,0.7672483 +far,0.76876587,0.76478976,0.7571263,0.75997436,1.0000002,0.9933284 +close,0.7669391,0.7623553,0.7430105,0.7672483,0.9933284,0.99999964 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_800k_L24.csv b/exp2a_modified/results_all_layers/molmo/similarity_800k_L24.csv new file mode 100644 index 0000000000000000000000000000000000000000..e562bb64f0cf4ccf10b1556aed3324ef06560b2c --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_800k_L24.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.9859223,0.8510503,0.84454507,0.77902687,0.77793175 +right,0.9859223,1.0000004,0.843931,0.8433985,0.77323216,0.7721319 +above,0.8510503,0.843931,0.99999994,0.93822294,0.76291394,0.74934536 +under,0.84454507,0.8433985,0.93822294,1.0000004,0.77207065,0.7787418 +far,0.77902687,0.77323216,0.76291394,0.77207065,1.0000004,0.99158245 +close,0.77793175,0.7721319,0.74934536,0.7787418,0.99158245,0.99999994 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_800k_L9.csv b/exp2a_modified/results_all_layers/molmo/similarity_800k_L9.csv new file mode 100644 index 0000000000000000000000000000000000000000..254fef89b0336bd76ffef0b8bdddbaa8568f82e3 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_800k_L9.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9999273,0.9688313,0.966918,0.90678734,0.90639675 
+right,0.9999273,0.9999999,0.96866924,0.9667914,0.90537804,0.90494335 +above,0.9688313,0.96866924,0.99999994,0.99957424,0.9190762,0.918466 +under,0.966918,0.9667914,0.99957424,1.0000002,0.91882795,0.9181914 +far,0.90678734,0.90537804,0.9190762,0.91882795,1.0000001,0.99988115 +close,0.90639675,0.90494335,0.918466,0.9181914,0.99988115,1.0000002 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L11.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L11.csv new file mode 100644 index 0000000000000000000000000000000000000000..0a398e8fbd5d5cb796b5a14eee3ae458d1c9bd9d --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L11.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.9998746,0.9818277,0.9806811,0.93544316,0.9354167 +right,0.9998746,0.9999997,0.9815798,0.98043174,0.9344683,0.9343873 +above,0.9818277,0.9815798,1.0000004,0.99949455,0.93617505,0.9358846 +under,0.9806811,0.98043174,0.99949455,1.0000004,0.9367875,0.9365246 +far,0.93544316,0.9344683,0.93617505,0.9367875,0.9999997,0.9997808 +close,0.9354167,0.9343873,0.9358846,0.9365246,0.9997808,1.0 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L12.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L12.csv new file mode 100644 index 0000000000000000000000000000000000000000..2c6c6b36b5ba26c7b84136dd83562452389a0d3f --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L12.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999995,0.9998461,0.9835828,0.98255426,0.9420724,0.9420879 +right,0.9998461,0.9999999,0.9833106,0.9822882,0.9411314,0.9410758 +above,0.9835828,0.9833106,1.0000002,0.99952763,0.94417953,0.94374335 +under,0.98255426,0.9822882,0.99952763,0.9999996,0.9445885,0.94417286 +far,0.9420724,0.9411314,0.94417953,0.9445885,0.9999995,0.9997688 +close,0.9420879,0.9410758,0.94374335,0.94417286,0.9997688,0.9999999 diff --git 
a/exp2a_modified/results_all_layers/molmo/similarity_80k_L14.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L14.csv new file mode 100644 index 0000000000000000000000000000000000000000..6c50fdd15b5f908f348b42a0371fe3a391b59d20 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L14.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000007,0.9992051,0.9846699,0.98415923,0.9411766,0.94009876 +right,0.9992051,1.0000001,0.9845168,0.9839645,0.9410156,0.9397725 +above,0.9846699,0.9845168,0.9999998,0.99858326,0.9439891,0.94184554 +under,0.98415923,0.9839645,0.99858326,0.9999999,0.94452685,0.94274354 +far,0.9411766,0.9410156,0.9439891,0.94452685,1.0000005,0.9993545 +close,0.94009876,0.9397725,0.94184554,0.94274354,0.9993545,0.99999976 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L15.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L15.csv new file mode 100644 index 0000000000000000000000000000000000000000..1ae71fe6af9947da69938cc8b457443b672fabae --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L15.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999998,0.9985394,0.9757978,0.976221,0.9290044,0.9281907 +right,0.9985394,1.0000001,0.97606635,0.97681665,0.92933494,0.92828196 +above,0.9757978,0.97606635,0.99999964,0.996955,0.92958707,0.92611146 +under,0.976221,0.97681665,0.996955,1.0000005,0.9314498,0.92956555 +far,0.9290044,0.92933494,0.92958707,0.9314498,0.99999976,0.99872535 +close,0.9281907,0.92828196,0.92611146,0.92956555,0.99872535,0.9999998 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L22.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L22.csv new file mode 100644 index 0000000000000000000000000000000000000000..49bbb328bde450c53ae105f3b9474dca99da6a7d --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L22.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close 
+left,0.9999998,0.9961628,0.9459622,0.9441557,0.81353015,0.81464225 +right,0.9961628,1.0000006,0.9481163,0.9469218,0.81279284,0.8130624 +above,0.9459622,0.9481163,1.0000004,0.9887129,0.7860634,0.78064495 +under,0.9441557,0.9469218,0.9887129,0.99999994,0.7948243,0.79665476 +far,0.81353015,0.81279284,0.7860634,0.7948243,1.0000002,0.9961064 +close,0.81464225,0.8130624,0.78064495,0.79665476,0.9961064,1.0000005 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L25.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L25.csv new file mode 100644 index 0000000000000000000000000000000000000000..0fdf09d31565f6292657b92a7381f032d58567c1 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L25.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000007,0.9969774,0.9472751,0.9456428,0.80519253,0.8061406 +right,0.9969774,1.0000002,0.94934076,0.94837534,0.8019907,0.8026036 +above,0.9472751,0.94934076,0.9999999,0.9898292,0.7703127,0.7659391 +under,0.9456428,0.94837534,0.9898292,1.0000004,0.7797778,0.7813867 +far,0.80519253,0.8019907,0.7703127,0.7797778,0.9999998,0.99602765 +close,0.8061406,0.8026036,0.7659391,0.7813867,0.99602765,1.0000006 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L3.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L3.csv new file mode 100644 index 0000000000000000000000000000000000000000..8960dca2aa040abf5fef378212a4f06d48ca3095 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L3.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999997,0.99994695,0.9921401,0.9921442,0.9632536,0.96356046 +right,0.99994695,1.0000002,0.9922247,0.99224305,0.96360916,0.9638944 +above,0.9921401,0.9922247,0.9999991,0.9997709,0.9683188,0.96848416 +under,0.9921442,0.99224305,0.9997709,0.99999976,0.96805084,0.96818906 +far,0.9632536,0.96360916,0.9683188,0.96805084,1.0,0.9999057 +close,0.96356046,0.9638944,0.96848416,0.96818906,0.9999057,1.0000008 diff 
--git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L30.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L30.csv new file mode 100644 index 0000000000000000000000000000000000000000..73b5a283ef5cf423affcf592407a5e8b20ba267a --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L30.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99860156,0.9633992,0.9635804,0.8906492,0.8891324 +right,0.99860156,1.0000001,0.9637042,0.9643675,0.8872438,0.8857729 +above,0.9633992,0.9637042,1.0000005,0.9947204,0.86920613,0.8655477 +under,0.9635804,0.9643675,0.9947204,1.0000002,0.8770182,0.87624425 +far,0.8906492,0.8872438,0.86920613,0.8770182,0.99999946,0.9978271 +close,0.8891324,0.8857729,0.8655477,0.87624425,0.9978271,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L6.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L6.csv new file mode 100644 index 0000000000000000000000000000000000000000..e0c1cacd81b365dcb1a046229dd0db81fdc8553f --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L6.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9999342,0.9846145,0.9833463,0.9410094,0.9415399 +right,0.9999342,1.0000002,0.9844082,0.9831588,0.9409639,0.9414707 +above,0.9846145,0.9844082,0.9999996,0.99965036,0.94482744,0.9451473 +under,0.9833463,0.9831588,0.99965036,1.0000004,0.94445574,0.9447468 +far,0.9410094,0.9409639,0.94482744,0.94445574,1.0000002,0.9998886 +close,0.9415399,0.9414707,0.9451473,0.9447468,0.9998886,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_80k_L8.csv b/exp2a_modified/results_all_layers/molmo/similarity_80k_L8.csv new file mode 100644 index 0000000000000000000000000000000000000000..00f31d44e102090e1d5fdbf6829e3d175eeb4e7a --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_80k_L8.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close 
+left,1.0000006,0.99991876,0.9806238,0.9789433,0.9457356,0.94627696 +right,0.99991876,1.0000007,0.9803283,0.9786759,0.94546795,0.94596493 +above,0.9806238,0.9803283,0.9999998,0.9995875,0.94557315,0.9457743 +under,0.9789433,0.9786759,0.9995875,0.9999995,0.9444971,0.9446461 +far,0.9457356,0.94546795,0.94557315,0.9444971,1.0000005,0.9998704 +close,0.94627696,0.94596493,0.9457743,0.9446461,0.9998704,1.0000001 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L1.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L1.csv new file mode 100644 index 0000000000000000000000000000000000000000..e590f2294663033daf348a7b5425df9094687180 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L1.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000006,0.9999656,0.9967409,0.9967324,0.9855942,0.985875 +right,0.9999656,1.0,0.9967898,0.9967888,0.9857657,0.98602295 +above,0.9967409,0.9967898,0.99999976,0.99989766,0.9863839,0.98652494 +under,0.9967324,0.9967888,0.99989766,1.0000004,0.986194,0.9862979 +far,0.9855942,0.9857657,0.9863839,0.986194,0.9999999,0.99994165 +close,0.985875,0.98602295,0.98652494,0.9862979,0.99994165,0.99999994 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L10.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L10.csv new file mode 100644 index 0000000000000000000000000000000000000000..24484372287f9654f41b4084705f31f8afb9e062 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L10.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999998,0.999853,0.98235714,0.9811125,0.9204409,0.92072546 +right,0.999853,1.0000001,0.9822074,0.980996,0.9190188,0.9192732 +above,0.98235714,0.9822074,1.0,0.99935097,0.9173129,0.9171824 +under,0.9811125,0.980996,0.99935097,0.99999976,0.9183355,0.91820735 +far,0.9204409,0.9190188,0.9173129,0.9183355,0.9999998,0.9997884 +close,0.92072546,0.9192732,0.9171824,0.91820735,0.9997884,0.99999976 
diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L12.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L12.csv new file mode 100644 index 0000000000000000000000000000000000000000..ec52db5c32fd6b269bd738dddc3b2280187d9618 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L12.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9996937,0.9827635,0.9817543,0.91139483,0.91157585 +right,0.9996937,1.0,0.98277396,0.9818351,0.9101802,0.91025144 +above,0.9827635,0.98277396,1.0000002,0.9989954,0.9180365,0.9176543 +under,0.9817543,0.9818351,0.9989954,0.99999976,0.91947675,0.9191305 +far,0.91139483,0.9101802,0.9180365,0.91947675,0.9999999,0.99963146 +close,0.91157585,0.91025144,0.9176543,0.9191305,0.99963146,1.0000002 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L16.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L16.csv new file mode 100644 index 0000000000000000000000000000000000000000..67eb11589d8c49ac206778d1de29cb00764ddaac --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L16.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999996,0.99707025,0.972778,0.97178674,0.8999234,0.89854175 +right,0.99707025,0.9999999,0.9740833,0.972814,0.8959664,0.89391136 +above,0.972778,0.9740833,0.99999994,0.9958273,0.89193356,0.8884378 +under,0.97178674,0.972814,0.9958273,1.0000002,0.8928033,0.88960046 +far,0.8999234,0.8959664,0.89193356,0.8928033,1.0000005,0.9984127 +close,0.89854175,0.89391136,0.8884378,0.88960046,0.9984127,0.9999995 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L25.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L25.csv new file mode 100644 index 0000000000000000000000000000000000000000..f57b32130423f79b231a2bba2fc97fee14f02fca --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L25.csv @@ -0,0 +1,7 @@ 
+,left,right,above,under,far,close +left,1.0000002,0.99561656,0.9626304,0.96095175,0.8273436,0.82454926 +right,0.99561656,1.0000001,0.96632063,0.96511674,0.82288647,0.8188273 +above,0.9626304,0.96632063,1.0000005,0.9926268,0.79999393,0.7933115 +under,0.96095175,0.96511674,0.9926268,0.99999946,0.8099013,0.8064356 +far,0.8273436,0.82288647,0.79999393,0.8099013,0.9999999,0.9958041 +close,0.82454926,0.8188273,0.7933115,0.8064356,0.9958041,1.0000006 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L26.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L26.csv new file mode 100644 index 0000000000000000000000000000000000000000..dd4646342ec9bd9e722278172894eb7409b08538 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L26.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9955554,0.9608875,0.9594548,0.8109677,0.8083561 +right,0.9955554,0.9999999,0.964953,0.9638739,0.8058286,0.80206555 +above,0.9608875,0.964953,1.0000001,0.992614,0.7811126,0.7749344 +under,0.9594548,0.9638739,0.992614,1.0,0.79343164,0.7902819 +far,0.8109677,0.8058286,0.7811126,0.79343164,1.0000005,0.9958171 +close,0.8083561,0.80206555,0.7749344,0.7902819,0.9958171,1.0 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L27.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L27.csv new file mode 100644 index 0000000000000000000000000000000000000000..5ae77608fcb948d5b35861901f330a991512eb0a --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L27.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999964,0.99569494,0.9619821,0.96054596,0.81380177,0.8111917 +right,0.99569494,0.99999964,0.96562964,0.96463555,0.80925936,0.805528 +above,0.9619821,0.96562964,0.99999964,0.99282384,0.7848503,0.77902806 +under,0.96054596,0.96463555,0.99282384,0.9999997,0.79773426,0.79467237 +far,0.81380177,0.80925936,0.7848503,0.79773426,1.0000004,0.99596685 
+close,0.8111917,0.805528,0.77902806,0.79467237,0.99596685,1.0000004 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L3.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L3.csv new file mode 100644 index 0000000000000000000000000000000000000000..c1e1011959cf35a6173aa41a8af23ab3c0b24603 --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L3.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9999284,0.9910807,0.9909821,0.959515,0.9600588 +right,0.9999284,1.0000005,0.9911723,0.9911004,0.9598949,0.960405 +above,0.9910807,0.9911723,1.0000006,0.9997134,0.9656428,0.9658985 +under,0.9909821,0.9911004,0.9997134,0.9999999,0.96554655,0.96577287 +far,0.959515,0.9598949,0.9656428,0.96554655,0.9999995,0.99986976 +close,0.9600588,0.960405,0.9658985,0.96577287,0.99986976,0.9999995 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L4.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L4.csv new file mode 100644 index 0000000000000000000000000000000000000000..0c1bcc99ddf6df18ef0e040a8d84a6a1e0a2a1bb --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L4.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999857,0.99992627,0.9910081,0.99069315,0.9519504,0.95249826 +right,0.99992627,1.0000004,0.9910429,0.99076366,0.9520496,0.9525697 +above,0.9910081,0.9910429,0.99999994,0.9997006,0.95720917,0.9574985 +under,0.99069315,0.99076366,0.9997006,1.0,0.9574559,0.95771855 +far,0.9519504,0.9520496,0.95720917,0.9574559,1.0000004,0.9998938 +close,0.95249826,0.9525697,0.9574985,0.95771855,0.9998938,0.99999976 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L7.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L7.csv new file mode 100644 index 0000000000000000000000000000000000000000..b48d7c38c3c2c601bf3d367d5b007b43b0174eeb --- /dev/null +++ 
b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L7.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.99989855,0.982339,0.9801788,0.9288674,0.9294997 +right,0.99989855,0.99999994,0.98201495,0.9798733,0.92850107,0.92909956 +above,0.982339,0.98201495,1.0000001,0.9994195,0.9344384,0.93445104 +under,0.9801788,0.9798733,0.9994195,0.9999997,0.93389845,0.93388873 +far,0.9288674,0.92850107,0.9344384,0.93389845,1.0000001,0.9998091 +close,0.9294997,0.92909956,0.93445104,0.93388873,0.9998091,1.0000006 diff --git a/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L8.csv b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L8.csv new file mode 100644 index 0000000000000000000000000000000000000000..cbdde0e0a67e7ae2c783a850ec7718ff73cd11bd --- /dev/null +++ b/exp2a_modified/results_all_layers/molmo/similarity_vanilla_L8.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9998911,0.98246425,0.98021257,0.9351027,0.9358711 +right,0.9998911,0.9999999,0.9821566,0.9799254,0.93458635,0.935318 +above,0.98246425,0.9821566,0.99999994,0.9994089,0.94040704,0.9405312 +under,0.98021257,0.9799254,0.9994089,1.0000002,0.93969786,0.939772 +far,0.9351027,0.93458635,0.94040704,0.93969786,1.0,0.9997947 +close,0.9358711,0.935318,0.9405312,0.939772,0.9997947,0.99999976 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L10.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L10.csv new file mode 100644 index 0000000000000000000000000000000000000000..2b5ef616a8737111ae6a5906a06273228dbbde9d --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_2m_L10.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.9999279,0.9969735,0.99672747,0.9849618,0.98463273 +right,0.9999279,0.9999998,0.99702895,0.996873,0.9847228,0.9842775 +above,0.9969735,0.99702895,1.0,0.9999144,0.9902956,0.989804 +under,0.99672747,0.996873,0.9999144,1.0,0.99020207,0.98963207 
+far,0.9849618,0.9847228,0.9902956,0.99020207,0.99999994,0.99990666 +close,0.98463273,0.9842775,0.989804,0.98963207,0.99990666,1.0000002 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L12.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L12.csv new file mode 100644 index 0000000000000000000000000000000000000000..07259bf0cd34f892e603aa2a3ffbc4c077c4c255 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_2m_L12.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999964,0.99995035,0.9973424,0.9971682,0.98827934,0.987991 +right,0.99995035,1.0000002,0.99733007,0.99721956,0.98810345,0.9877443 +above,0.9973424,0.99733007,0.9999999,0.99994093,0.9920075,0.9916463 +under,0.9971682,0.99721956,0.99994093,1.0000002,0.9918548,0.9914459 +far,0.98827934,0.98810345,0.9920075,0.9918548,1.0,0.99994236 +close,0.987991,0.9877443,0.9916463,0.9914459,0.99994236,0.9999999 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L15.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L15.csv new file mode 100644 index 0000000000000000000000000000000000000000..93ef9925de1f69c0804e4b587a0289bf4f269bc4 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_2m_L15.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.99995005,0.9959887,0.9958621,0.98068404,0.9804044 +right,0.99995005,1.0000007,0.9959804,0.9959192,0.98055434,0.9802144 +above,0.9959887,0.9959804,1.0,0.9999193,0.98727787,0.986935 +under,0.9958621,0.9959192,0.9999193,1.0000001,0.98687696,0.9864931 +far,0.98068404,0.98055434,0.98727787,0.98687696,0.9999999,0.99993765 +close,0.9804044,0.9802144,0.986935,0.9864931,0.99993765,0.99999976 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L18.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L18.csv new file mode 100644 index 0000000000000000000000000000000000000000..beddeb68e6dd4416c24915ccf244f2e0f394bd11 --- /dev/null +++ 
b/exp2a_modified/results_all_layers/qwen/similarity_2m_L18.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9999313,0.9902024,0.9896158,0.9626217,0.96290785 +right,0.9999313,1.0000004,0.9903549,0.9898485,0.9627737,0.96300423 +above,0.9902024,0.9903549,1.0000002,0.99978554,0.97417885,0.9741787 +under,0.9896158,0.9898485,0.99978554,1.0000002,0.9740943,0.97403085 +far,0.9626217,0.9627737,0.97417885,0.9740943,1.0000002,0.9998981 +close,0.96290785,0.96300423,0.9741787,0.97403085,0.9998981,0.99999976 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L20.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L20.csv new file mode 100644 index 0000000000000000000000000000000000000000..6ac2c17ae2e80ba31b7f46da5b8dae00cfd09366 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_2m_L20.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.99984246,0.99088097,0.9905366,0.9632629,0.96367776 +right,0.99984246,0.9999995,0.99107337,0.9908248,0.9635223,0.9638858 +above,0.99088097,0.99107337,0.99999976,0.99970245,0.9744312,0.9745373 +under,0.9905366,0.9908248,0.99970245,0.9999994,0.973981,0.97405624 +far,0.9632629,0.9635223,0.9744312,0.973981,1.0,0.9998683 +close,0.96367776,0.9638858,0.9745373,0.97405624,0.9998683,0.9999999 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L29.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L29.csv new file mode 100644 index 0000000000000000000000000000000000000000..51c71c3ecd198af72fe666d509454fdb76ff958d --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_2m_L29.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.9968462,0.96972966,0.9656495,0.9295452,0.9282496 +right,0.9968462,1.0000004,0.9678302,0.96753937,0.9298134,0.92841697 +above,0.96972966,0.9678302,0.99999964,0.99251354,0.93922085,0.93715733 +under,0.9656495,0.96753937,0.99251354,0.9999999,0.9394452,0.93831193 
+far,0.9295452,0.9298134,0.93922085,0.9394452,0.9999995,0.9991802 +close,0.9282496,0.92841697,0.93715733,0.93831193,0.9991802,0.9999995 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L32.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L32.csv new file mode 100644 index 0000000000000000000000000000000000000000..b02c942899112cdb6636ca45a9d4047ec18f3085 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_2m_L32.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9981306,0.9843659,0.9826802,0.94846433,0.94720006 +right,0.9981306,1.0000001,0.9825328,0.98395187,0.947194,0.9457304 +above,0.9843659,0.9825328,0.99999976,0.9934573,0.9549477,0.95287406 +under,0.9826802,0.98395187,0.9934573,0.99999905,0.9553579,0.954003 +far,0.94846433,0.947194,0.9549477,0.9553579,0.99999994,0.9989809 +close,0.94720006,0.9457304,0.95287406,0.954003,0.9989809,1.0000006 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L34.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L34.csv new file mode 100644 index 0000000000000000000000000000000000000000..7f95d3732211316441c593c67624125db7d41c5b --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_2m_L34.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.99862164,0.9881121,0.9859156,0.9610244,0.96063864 +right,0.99862164,1.0000007,0.9861483,0.98696625,0.95899296,0.95854414 +above,0.9881121,0.9861483,0.99999976,0.99387085,0.9664202,0.96441233 +under,0.9859156,0.98696625,0.99387085,1.0,0.963601,0.96305376 +far,0.9610244,0.95899296,0.9664202,0.963601,0.9999999,0.99844897 +close,0.96063864,0.95854414,0.96441233,0.96305376,0.99844897,0.99999976 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L35.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L35.csv new file mode 100644 index 0000000000000000000000000000000000000000..a0c6ee0146c874cb1545554574ec3170e138c568 --- /dev/null +++ 
b/exp2a_modified/results_all_layers/qwen/similarity_2m_L35.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9965475,0.9649773,0.96030045,0.8989403,0.89823353 +right,0.9965475,1.0,0.9598953,0.9629967,0.8915997,0.89078486 +above,0.9649773,0.9598953,1.0000002,0.98364305,0.9208069,0.9149355 +under,0.96030045,0.9629967,0.98364305,1.0,0.9093851,0.9072825 +far,0.8989403,0.8915997,0.9208069,0.9093851,0.99999994,0.996667 +close,0.89823353,0.89078486,0.9149355,0.9072825,0.996667,0.99999976 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_2m_L4.csv b/exp2a_modified/results_all_layers/qwen/similarity_2m_L4.csv new file mode 100644 index 0000000000000000000000000000000000000000..4dac6a7d8307f59b830a967207d33d5ce21529f7 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_2m_L4.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.9999678,0.9964819,0.9962919,0.9905517,0.9906428 +right,0.9999678,1.0,0.9964253,0.9962834,0.9904249,0.9904758 +above,0.9964819,0.9964253,1.0000004,0.9999342,0.9925095,0.9924183 +under,0.9962919,0.9962834,0.9999342,0.99999964,0.9925219,0.9923811 +far,0.9905517,0.9904249,0.9925095,0.9925219,1.0,0.99996006 +close,0.9906428,0.9904758,0.9924183,0.9923811,0.99996006,0.9999998 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_400k_L0.csv b/exp2a_modified/results_all_layers/qwen/similarity_400k_L0.csv new file mode 100644 index 0000000000000000000000000000000000000000..5173079c0938b7d508941eac605c20b25a8d46bc --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_400k_L0.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.9999825,0.9966258,0.9966197,0.99496907,0.99500704 +right,0.9999825,1.0000004,0.9965285,0.99654496,0.994918,0.99493414 +above,0.9966258,0.9965285,1.0000002,0.9999587,0.99436843,0.9944271 +under,0.9966197,0.99654496,0.9999587,1.0000002,0.9944416,0.9944626 +far,0.99496907,0.994918,0.99436843,0.9944416,0.9999999,0.9999715 
+close,0.99500704,0.99493414,0.9944271,0.9944626,0.9999715,1.0000001 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_400k_L10.csv b/exp2a_modified/results_all_layers/qwen/similarity_400k_L10.csv new file mode 100644 index 0000000000000000000000000000000000000000..19d4dabe5d80058b509eb6a83e45cad90d3d6c23 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_400k_L10.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9999062,0.99621683,0.99593097,0.9756885,0.9750582 +right,0.9999062,0.9999995,0.9962953,0.9961039,0.9755514,0.9747947 +above,0.99621683,0.9962953,0.9999999,0.99988145,0.9836272,0.98296696 +under,0.99593097,0.9961039,0.99988145,1.0000004,0.9837301,0.982993 +far,0.9756885,0.9755514,0.9836272,0.9837301,1.0000002,0.99988467 +close,0.9750582,0.9747947,0.98296696,0.982993,0.99988467,0.9999999 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_400k_L12.csv b/exp2a_modified/results_all_layers/qwen/similarity_400k_L12.csv new file mode 100644 index 0000000000000000000000000000000000000000..d60b7fbed35997c39c16918706c305a84905cd48 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_400k_L12.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.999938,0.9967728,0.9965588,0.98182,0.98131776 +right,0.999938,0.99999976,0.99681985,0.99666685,0.9817499,0.9811795 +above,0.9967728,0.99681985,1.0000001,0.9999003,0.98781735,0.98728365 +under,0.9965588,0.99666685,0.9999003,0.99999994,0.9876146,0.9870275 +far,0.98182,0.9817499,0.98781735,0.9876146,1.0000004,0.9999255 +close,0.98131776,0.9811795,0.98728365,0.9870275,0.9999255,1.0000002 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_400k_L14.csv b/exp2a_modified/results_all_layers/qwen/similarity_400k_L14.csv new file mode 100644 index 0000000000000000000000000000000000000000..85c45018e9ad4fa62b131189727622bc06401d26 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_400k_L14.csv @@ -0,0 +1,7 
@@ +,left,right,above,under,far,close +left,0.9999994,0.99993646,0.9955939,0.99546295,0.97487485,0.9744284 +right,0.99993646,0.9999994,0.9957069,0.9956356,0.97504747,0.9745439 +above,0.9955939,0.9957069,0.9999995,0.99986076,0.9844377,0.98397595 +under,0.99546295,0.9956356,0.99986076,1.0,0.9837175,0.98320484 +far,0.97487485,0.97504747,0.9844377,0.9837175,0.9999999,0.9999202 +close,0.9744284,0.9745439,0.98397595,0.98320484,0.9999202,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_400k_L2.csv b/exp2a_modified/results_all_layers/qwen/similarity_400k_L2.csv new file mode 100644 index 0000000000000000000000000000000000000000..9253910776bc9adeb3d9d12ef16eae6557a03918 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_400k_L2.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999998,0.99997324,0.99724066,0.9972278,0.9951744,0.9951922 +right,0.99997324,0.9999999,0.9971627,0.997188,0.9951372,0.9951252 +above,0.99724066,0.9971627,0.99999994,0.9999502,0.99492645,0.994876 +under,0.9972278,0.997188,0.9999502,1.0000002,0.995003,0.9949185 +far,0.9951744,0.9951372,0.99492645,0.995003,0.99999994,0.9999702 +close,0.9951922,0.9951252,0.994876,0.9949185,0.9999702,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_400k_L20.csv b/exp2a_modified/results_all_layers/qwen/similarity_400k_L20.csv new file mode 100644 index 0000000000000000000000000000000000000000..a74577227cfcae20c00145cb28f931b8e796ca5c --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_400k_L20.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999998,0.9998139,0.99177617,0.99181545,0.9602272,0.96027493 +right,0.9998139,0.99999946,0.99191034,0.99205554,0.960712,0.96073496 +above,0.99177617,0.99191034,1.0000001,0.9996311,0.9722074,0.97216636 +under,0.99181545,0.99205554,0.9996311,0.99999976,0.97181207,0.9717153 +far,0.9602272,0.960712,0.9722074,0.97181207,1.0,0.9998067 
+close,0.96027493,0.96073496,0.97216636,0.9717153,0.9998067,1.0000004 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_400k_L22.csv b/exp2a_modified/results_all_layers/qwen/similarity_400k_L22.csv new file mode 100644 index 0000000000000000000000000000000000000000..545f8f970a42999a5d15f6fe9ab7e3a86042bf88 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_400k_L22.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9997704,0.9914931,0.9915282,0.9591845,0.958396 +right,0.9997704,1.0000005,0.9918085,0.99199045,0.9597441,0.95890886 +above,0.9914931,0.9918085,1.0000002,0.9995384,0.9699773,0.969287 +under,0.9915282,0.99199045,0.9995384,1.0,0.96905524,0.9682413 +far,0.9591845,0.9597441,0.9699773,0.96905524,0.99999976,0.99974537 +close,0.958396,0.95890886,0.969287,0.9682413,0.99974537,0.99999964 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_400k_L23.csv b/exp2a_modified/results_all_layers/qwen/similarity_400k_L23.csv new file mode 100644 index 0000000000000000000000000000000000000000..27cb5211808bb7cc304921da08bee19c99f81c64 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_400k_L23.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9996206,0.9909068,0.99122643,0.9605173,0.95973885 +right,0.9996206,0.9999999,0.9912755,0.99183464,0.9613559,0.9605228 +above,0.9909068,0.9912755,1.0,0.9993224,0.9701215,0.96924245 +under,0.99122643,0.99183464,0.9993224,1.0,0.9695488,0.9686081 +far,0.9605173,0.9613559,0.9701215,0.9695488,0.99999976,0.99969226 +close,0.95973885,0.9605228,0.96924245,0.9686081,0.99969226,0.9999999 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L11.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L11.csv new file mode 100644 index 0000000000000000000000000000000000000000..f0f328690b77e854412eb7ac30ad0f7fe72fd0ae --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L11.csv @@ -0,0 +1,7 @@ 
+,left,right,above,under,far,close +left,0.99999976,0.99992317,0.9961459,0.9959073,0.9802227,0.97976327 +right,0.99992317,0.99999976,0.99627405,0.9961121,0.98003256,0.97947514 +above,0.9961459,0.99627405,0.9999998,0.9999038,0.9867655,0.9861976 +under,0.9959073,0.9961121,0.9999038,0.99999976,0.9865621,0.98593044 +far,0.9802227,0.98003256,0.9867655,0.9865621,1.0,0.99991107 +close,0.97976327,0.97947514,0.9861976,0.98593044,0.99991107,0.99999976 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L16.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L16.csv new file mode 100644 index 0000000000000000000000000000000000000000..041db65e66a69e15a90f749957d43653aeb4179a --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L16.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.99994206,0.99464464,0.9944856,0.9717562,0.9715159 +right,0.99994206,0.99999946,0.99474555,0.99464774,0.97197044,0.97167844 +above,0.99464464,0.99474555,0.99999994,0.99985784,0.98180145,0.98150694 +under,0.9944856,0.99464774,0.99985784,0.9999999,0.9814975,0.9811598 +far,0.9717562,0.97197044,0.98180145,0.9814975,0.9999999,0.9999232 +close,0.9715159,0.97167844,0.98150694,0.9811598,0.9999232,1.0000004 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L18.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L18.csv new file mode 100644 index 0000000000000000000000000000000000000000..b9d2a8b3efa539a48ebc52f49998d6841e88669b --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L18.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.9999192,0.9910408,0.99066764,0.96094275,0.9613255 +right,0.9999192,1.0,0.9912627,0.9909726,0.9612793,0.9616088 +above,0.9910408,0.9912627,0.9999998,0.9997663,0.9739356,0.97408307 +under,0.99066764,0.9909726,0.9997663,1.0000001,0.9737946,0.9738607 +far,0.96094275,0.9612793,0.9739356,0.9737946,1.0,0.99987805 
+close,0.9613255,0.9616088,0.97408307,0.9738607,0.99987805,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L19.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L19.csv new file mode 100644 index 0000000000000000000000000000000000000000..c428c56446f748a9ccf350784d84d1452fc9b334 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L19.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.99989754,0.99091136,0.9905985,0.9611153,0.9613677 +right,0.99989754,0.99999976,0.99113727,0.990911,0.961465,0.9616713 +above,0.99091136,0.99113727,0.99999994,0.99972,0.9733992,0.9734692 +under,0.9905985,0.990911,0.99972,1.0,0.97307193,0.9730456 +far,0.9611153,0.961465,0.9733992,0.97307193,0.99999976,0.9998554 +close,0.9613677,0.9616713,0.9734692,0.9730456,0.9998554,0.9999993 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L28.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L28.csv new file mode 100644 index 0000000000000000000000000000000000000000..2ef4b49318d5343667bef80d85a6a5e1555627e0 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L28.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.9983307,0.9812103,0.97986466,0.95222527,0.95106506 +right,0.9983307,0.9999998,0.98115665,0.98150814,0.9523004,0.95115864 +above,0.9812103,0.98115665,1.0000002,0.9970282,0.961478,0.96023726 +under,0.97986466,0.98150814,0.9970282,1.0000002,0.95972025,0.9589932 +far,0.95222527,0.9523004,0.961478,0.95972025,1.0,0.9995632 +close,0.95106506,0.95115864,0.96023726,0.9589932,0.9995632,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L3.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L3.csv new file mode 100644 index 0000000000000000000000000000000000000000..845c32950b90c1fc8a1cdee0d3edb287eae6f457 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L3.csv @@ -0,0 +1,7 @@ 
+,left,right,above,under,far,close +left,1.0000007,0.99997044,0.9966085,0.9964365,0.9916053,0.99161464 +right,0.99997044,1.0,0.99657834,0.99644804,0.9915597,0.9915339 +above,0.9966085,0.99657834,0.99999976,0.99993896,0.9929594,0.99281967 +under,0.9964365,0.99644804,0.99993896,0.99999964,0.9930259,0.99284494 +far,0.9916053,0.9915597,0.9929594,0.9930259,0.99999976,0.9999632 +close,0.99161464,0.9915339,0.99281967,0.99284494,0.9999632,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L34.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L34.csv new file mode 100644 index 0000000000000000000000000000000000000000..aa7b17ce5a7e5fd13cd87d0d6a6a17ab639811df --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L34.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.99917954,0.98940235,0.9887433,0.9714507,0.9707074 +right,0.99917954,1.0000002,0.98841774,0.98896194,0.96930856,0.9686197 +above,0.98940235,0.98841774,1.0000004,0.9972,0.9721248,0.9698465 +under,0.9887433,0.98896194,0.9972,0.9999998,0.97080654,0.97015685 +far,0.9714507,0.96930856,0.9721248,0.97080654,0.9999997,0.9986616 +close,0.9707074,0.9686197,0.9698465,0.97015685,0.9986616,0.9999998 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L4.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L4.csv new file mode 100644 index 0000000000000000000000000000000000000000..83e4c2636ead79dbaa4ded0a00f83e019a1135dd --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L4.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999998,0.9999653,0.99660754,0.9963964,0.99089134,0.9909977 +right,0.9999653,1.0,0.99658066,0.99641883,0.990836,0.9909009 +above,0.99660754,0.99658066,1.0000002,0.9999325,0.99261117,0.9925268 +under,0.9963964,0.99641883,0.9999325,0.9999999,0.99267435,0.992537 +far,0.99089134,0.990836,0.99261117,0.99267435,1.0,0.9999572 
+close,0.9909977,0.9909009,0.9925268,0.992537,0.9999572,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_800k_L8.csv b/exp2a_modified/results_all_layers/qwen/similarity_800k_L8.csv new file mode 100644 index 0000000000000000000000000000000000000000..923705d5df8fcebb1862b7be8405665e530ea3aa --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_800k_L8.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.99992883,0.99572694,0.9953599,0.9809648,0.9805831 +right,0.99992883,1.0,0.99570477,0.9954245,0.9807246,0.9802425 +above,0.99572694,0.99570477,0.99999946,0.9998778,0.9873349,0.98695254 +under,0.9953599,0.9954245,0.9998778,1.0000002,0.98731625,0.98686075 +far,0.9809648,0.9807246,0.9873349,0.98731625,1.0000001,0.9998944 +close,0.9805831,0.9802425,0.98695254,0.98686075,0.9998944,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L1.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L1.csv new file mode 100644 index 0000000000000000000000000000000000000000..d1170ba2a24225257f456b3164bf55b884ddb709 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L1.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000005,0.9999857,0.997537,0.99750614,0.99602103,0.9960292 +right,0.9999857,1.0,0.9974724,0.9974597,0.9959948,0.9959873 +above,0.997537,0.9974724,0.9999995,0.99996614,0.995733,0.9957192 +under,0.99750614,0.9974597,0.99996614,1.0000001,0.99577284,0.99573433 +far,0.99602103,0.9959948,0.995733,0.99577284,1.0,0.99997926 +close,0.9960292,0.9959873,0.9957192,0.99573433,0.99997926,1.0000002 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L18.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L18.csv new file mode 100644 index 0000000000000000000000000000000000000000..742900a6064ff17e2d3c7426e59f347ba9b8dfef --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L18.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close 
+left,1.0000005,0.9998728,0.9922519,0.9919103,0.95968443,0.9600568 +right,0.9998728,0.9999992,0.992624,0.9923522,0.9609567,0.96128404 +above,0.9922519,0.992624,1.0000002,0.99969876,0.9724196,0.97261846 +under,0.9919103,0.9923522,0.99969876,1.0000001,0.9714567,0.971553 +far,0.95968443,0.9609567,0.9724196,0.9714567,0.99999994,0.99983764 +close,0.9600568,0.96128404,0.97261846,0.971553,0.99983764,0.9999998 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L19.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L19.csv new file mode 100644 index 0000000000000000000000000000000000000000..24bcc4f1defbddef7cc584389278923e515379ca --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L19.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.99985087,0.9916113,0.9912901,0.9564438,0.9569487 +right,0.99985087,0.99999994,0.99198925,0.9917387,0.9576603,0.9581179 +above,0.9916113,0.99198925,1.0000002,0.99965644,0.9699724,0.97023576 +under,0.9912901,0.9917387,0.99965644,1.0000001,0.96875155,0.96889967 +far,0.9564438,0.9576603,0.9699724,0.96875155,1.0000001,0.9998205 +close,0.9569487,0.9581179,0.97023576,0.96889967,0.9998205,0.9999997 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L2.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L2.csv new file mode 100644 index 0000000000000000000000000000000000000000..e534c174e86f3724d5a116641231e688c6451d89 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L2.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.99996996,0.9972026,0.9971765,0.99498284,0.99501574 +right,0.99996996,0.99999994,0.9971231,0.99713933,0.9949637,0.99496245 +above,0.9972026,0.9971231,1.0,0.9999435,0.9947851,0.9947563 +under,0.9971765,0.99713933,0.9999435,1.0000001,0.9948796,0.99481106 +far,0.99498284,0.9949637,0.9947851,0.9948796,0.9999998,0.9999674 +close,0.99501574,0.99496245,0.9947563,0.99481106,0.9999674,1.0000002 diff --git 
a/exp2a_modified/results_all_layers/qwen/similarity_80k_L20.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L20.csv new file mode 100644 index 0000000000000000000000000000000000000000..4d0e62417d73f49c21a173827fab4ac938fdf89b --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L20.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000004,0.9997544,0.9924994,0.99214756,0.9599799,0.96029186 +right,0.9997544,1.0000002,0.9926766,0.9924371,0.9608808,0.9611578 +above,0.9924994,0.9926766,0.9999998,0.9995853,0.9710134,0.970981 +under,0.99214756,0.9924371,0.9995853,0.9999997,0.96975076,0.96970254 +far,0.9599799,0.9608808,0.9710134,0.96975076,1.0000002,0.9997475 +close,0.96029186,0.9611578,0.970981,0.96970254,0.9997475,0.9999994 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L21.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L21.csv new file mode 100644 index 0000000000000000000000000000000000000000..a2cdf165cb2226d1f5528153c1aa768ea492e4ae --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L21.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.99976397,0.9925872,0.99202883,0.96131456,0.9612564 +right,0.99976397,0.9999999,0.99277973,0.99236333,0.96228194,0.96216977 +above,0.9925872,0.99277973,1.0,0.9995711,0.97150266,0.9711201 +under,0.99202883,0.99236333,0.9995711,0.9999999,0.97049063,0.9700897 +far,0.96131456,0.96228194,0.97150266,0.97049063,0.9999999,0.99971235 +close,0.9612564,0.96216977,0.9711201,0.9700897,0.99971235,0.99999964 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L24.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L24.csv new file mode 100644 index 0000000000000000000000000000000000000000..6522b79dbd1f72431fbb626b1d9aa3e05666b690 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L24.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close 
+left,0.99999946,0.9993346,0.9908675,0.98955053,0.9614693,0.961163 +right,0.9993346,0.99999994,0.9913343,0.99062204,0.96290696,0.9624785 +above,0.9908675,0.9913343,0.99999994,0.999084,0.96746826,0.9668733 +under,0.98955053,0.99062204,0.999084,1.0000005,0.9667615,0.9660672 +far,0.9614693,0.96290696,0.96746826,0.9667615,1.0,0.99967754 +close,0.961163,0.9624785,0.9668733,0.9660672,0.99967754,1.0000004 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L26.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L26.csv new file mode 100644 index 0000000000000000000000000000000000000000..2f6a4d0feb9ad5d0d60dec4b764c994e81dd9b02 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L26.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9974345,0.9883921,0.9867337,0.95545715,0.95513105 +right,0.9974345,1.0000005,0.9898561,0.98948836,0.9545716,0.9541896 +above,0.9883921,0.9898561,0.99999976,0.99844027,0.95897925,0.957747 +under,0.9867337,0.98948836,0.99844027,0.9999998,0.9568786,0.9559021 +far,0.95545715,0.9545716,0.95897925,0.9568786,1.0000002,0.9994968 +close,0.95513105,0.9541896,0.957747,0.9559021,0.9994968,0.99999976 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L33.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L33.csv new file mode 100644 index 0000000000000000000000000000000000000000..2ade223eb5689bd5f6baa4b8ee8720198a5dd2ba --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L33.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9990598,0.98291564,0.98156786,0.9457929,0.9446182 +right,0.9990598,1.0,0.9829129,0.9820812,0.9437456,0.94259566 +above,0.98291564,0.9829129,1.0000002,0.9988637,0.93830496,0.9362183 +under,0.98156786,0.9820812,0.9988637,1.0000006,0.938463,0.9373274 +far,0.9457929,0.9437456,0.93830496,0.938463,0.9999999,0.99878216 +close,0.9446182,0.94259566,0.9362183,0.9373274,0.99878216,0.9999995 diff --git 
a/exp2a_modified/results_all_layers/qwen/similarity_80k_L34.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L34.csv new file mode 100644 index 0000000000000000000000000000000000000000..a870906edc8999eff8c7aed4fd0bef4759f48894 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L34.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999946,0.9992671,0.98676854,0.98558205,0.9560134,0.95518816 +right,0.9992671,1.0000002,0.9868725,0.986134,0.9545964,0.95380795 +above,0.98676854,0.9868725,1.0,0.99891776,0.9505547,0.9488404 +under,0.98558205,0.986134,0.99891776,1.0000002,0.95090795,0.95008534 +far,0.9560134,0.9545964,0.9505547,0.95090795,0.99999994,0.9989349 +close,0.95518816,0.95380795,0.9488404,0.95008534,0.9989349,0.9999999 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_80k_L7.csv b/exp2a_modified/results_all_layers/qwen/similarity_80k_L7.csv new file mode 100644 index 0000000000000000000000000000000000000000..3b8073b0164f2a78a2435456a97dab5bd1a0008e --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_80k_L7.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999994,0.99993646,0.99620664,0.99586976,0.9840237,0.9839864 +right,0.99993646,1.0,0.9962809,0.9960503,0.9841163,0.9839929 +above,0.99620664,0.9962809,1.0,0.9998498,0.9885977,0.98840046 +under,0.99586976,0.9960503,0.9998498,1.0000001,0.98879474,0.98850304 +far,0.9840237,0.9841163,0.9885977,0.98879474,1.0000002,0.99992657 +close,0.9839864,0.9839929,0.98840046,0.98850304,0.99992657,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L0.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L0.csv new file mode 100644 index 0000000000000000000000000000000000000000..935384431de205626d76915c8f60621a3e74831a --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L0.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close 
+left,0.9999996,0.9999809,0.9964006,0.99639076,0.9947318,0.994771 +right,0.9999809,1.0000001,0.99629366,0.9963068,0.9946768,0.9946947 +above,0.9964006,0.99629366,0.9999999,0.9999533,0.9943122,0.99437153 +under,0.99639076,0.9963068,0.9999533,1.0,0.9944073,0.99442804 +far,0.9947318,0.9946768,0.9943122,0.9944073,1.0000002,0.9999715 +close,0.994771,0.9946947,0.99437153,0.99442804,0.9999715,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L13.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L13.csv new file mode 100644 index 0000000000000000000000000000000000000000..2bba8d72da1f64323565bb2c98243bec376fb46c --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L13.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0,0.9998866,0.99638027,0.996217,0.9767302,0.9763456 +right,0.9998866,1.0000002,0.996529,0.9963802,0.97718465,0.9767773 +above,0.99638027,0.996529,1.0000004,0.9997895,0.9843622,0.98402786 +under,0.996217,0.9963802,0.9997895,0.99999976,0.9834894,0.9830793 +far,0.9767302,0.97718465,0.9843622,0.9834894,1.0,0.9998983 +close,0.9763456,0.9767773,0.98402786,0.9830793,0.9998983,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L15.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L15.csv new file mode 100644 index 0000000000000000000000000000000000000000..47c6fa9ab2a2519575891c9f9bb27a6e61245823 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L15.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.9998824,0.99530697,0.9951682,0.97280705,0.97265506 +right,0.9998824,0.9999997,0.9956616,0.99554044,0.97390836,0.9737184 +above,0.99530697,0.9956616,0.9999999,0.99978304,0.98303926,0.9829526 +under,0.9951682,0.99554044,0.99978304,1.0,0.98212075,0.9819604 +far,0.97280705,0.97390836,0.98303926,0.98212075,0.9999999,0.9999 +close,0.97265506,0.9737184,0.9829526,0.9819604,0.9999,1.0 diff --git 
a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L17.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L17.csv new file mode 100644 index 0000000000000000000000000000000000000000..b8eb9444930f86e8f81d71ff17170faef4341000 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L17.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.99986565,0.9945604,0.99425375,0.96971893,0.9699175 +right,0.99986565,1.0000001,0.9949876,0.99472845,0.97107923,0.971228 +above,0.9945604,0.9949876,1.0,0.99977803,0.98140824,0.98151416 +under,0.99425375,0.99472845,0.99977803,1.0000002,0.98072654,0.9807515 +far,0.96971893,0.97107923,0.98140824,0.98072654,1.0000002,0.99989426 +close,0.9699175,0.971228,0.98151416,0.9807515,0.99989426,1.0 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L19.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L19.csv new file mode 100644 index 0000000000000000000000000000000000000000..814cecf3931be7bb8ac1e8db041ab367f0ed6d7e --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L19.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999964,0.99983704,0.99200815,0.9915658,0.9646212,0.9649466 +right,0.99983704,0.9999998,0.99265784,0.9922899,0.96632683,0.96660006 +above,0.99200815,0.99265784,1.0000002,0.9997119,0.97971606,0.9798732 +under,0.9915658,0.9922899,0.9997119,1.0000002,0.9787948,0.97885394 +far,0.9646212,0.96632683,0.97971606,0.9787948,0.99999964,0.99986744 +close,0.9649466,0.96660006,0.9798732,0.97885394,0.99986744,1.0000002 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L21.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L21.csv new file mode 100644 index 0000000000000000000000000000000000000000..f6e192c3541b86d07bbc117d48525b7ff8e98180 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L21.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close 
+left,1.0000002,0.99971116,0.9923471,0.9917683,0.9674295,0.967368 +right,0.99971116,1.0,0.99296165,0.9925066,0.9690616,0.96897626 +above,0.9923471,0.99296165,1.0000005,0.9995874,0.97954905,0.9790876 +under,0.9917683,0.9925066,0.9995874,0.9999997,0.9786419,0.97820413 +far,0.9674295,0.9690616,0.97954905,0.9786419,1.0000002,0.99976456 +close,0.967368,0.96897626,0.9790876,0.97820413,0.99976456,1.0000001 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L24.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L24.csv new file mode 100644 index 0000000000000000000000000000000000000000..58c0444439446608ff4f70b48f2a4388ad1f942d --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L24.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000002,0.9991637,0.99021405,0.9888708,0.96523786,0.96511954 +right,0.9991637,1.0000002,0.99088657,0.9901332,0.96652216,0.96623725 +above,0.99021405,0.99088657,1.0000005,0.999221,0.9777443,0.9772632 +under,0.9888708,0.9901332,0.999221,0.99999976,0.976744,0.97616464 +far,0.96523786,0.96652216,0.9777443,0.976744,1.0000002,0.9997183 +close,0.96511954,0.96623725,0.9772632,0.97616464,0.9997183,1.0000001 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L34.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L34.csv new file mode 100644 index 0000000000000000000000000000000000000000..b376919a054b07d17ce03b7c98f0516b6d603a42 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L34.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,1.0000001,0.9990366,0.98988014,0.98878044,0.96655,0.9658309 +right,0.9990366,0.99999934,0.9896452,0.98895586,0.96440226,0.9635564 +above,0.98988014,0.9896452,0.9999997,0.9993703,0.9675567,0.9662877 +under,0.98878044,0.98895586,0.9993703,1.0,0.9674345,0.9663628 +far,0.96655,0.96440226,0.9675567,0.9674345,0.99999976,0.9995395 +close,0.9658309,0.9635564,0.9662877,0.9663628,0.9995395,0.9999999 
diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L35.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L35.csv new file mode 100644 index 0000000000000000000000000000000000000000..4e093bf4a334cd859ebc97a05cf300a8abb6f3c1 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L35.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.9999999,0.99848515,0.9831016,0.9811994,0.93792963,0.93604606 +right,0.99848515,1.0,0.9825343,0.9811074,0.93322587,0.9311956 +above,0.9831016,0.9825343,0.99999994,0.9989464,0.9415084,0.938514 +under,0.9811994,0.9811074,0.9989464,0.9999999,0.94198036,0.93939054 +far,0.93792963,0.93322587,0.9415084,0.94198036,1.0000002,0.99916875 +close,0.93604606,0.9311956,0.938514,0.93939054,0.99916875,1.0000002 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L5.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L5.csv new file mode 100644 index 0000000000000000000000000000000000000000..c3be87f9482cf52631770d2799601fb453bf4861 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L5.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close +left,0.99999976,0.9999653,0.99707854,0.9968866,0.9933731,0.99343836 +right,0.9999653,1.0,0.9971042,0.99696136,0.9934415,0.99346304 +above,0.99707854,0.9971042,1.0,0.99993163,0.99437475,0.9942957 +under,0.9968866,0.99696136,0.99993163,1.0,0.99444014,0.9943154 +far,0.9933731,0.9934415,0.99437475,0.99444014,0.99999964,0.9999603 +close,0.99343836,0.99346304,0.9942957,0.9943154,0.9999603,0.9999997 diff --git a/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L6.csv b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L6.csv new file mode 100644 index 0000000000000000000000000000000000000000..97192d82d4cc66d4eda672533e13a8abfb785836 --- /dev/null +++ b/exp2a_modified/results_all_layers/qwen/similarity_vanilla_L6.csv @@ -0,0 +1,7 @@ +,left,right,above,under,far,close 
+left,0.99999964,0.99994695,0.99714756,0.9969235,0.99164486,0.9916973 +right,0.99994695,0.99999964,0.99718165,0.99704397,0.99172103,0.99170184 +above,0.99714756,0.99718165,1.0000001,0.99988997,0.9928707,0.9927653 +under,0.9969235,0.99704397,0.99988997,0.99999964,0.9930192,0.9928371 +far,0.99164486,0.99172103,0.9928707,0.9930192,1.0,0.999939 +close,0.9916973,0.99170184,0.9927653,0.9928371,0.999939,0.99999976 diff --git a/exp2a_swap_analysis/compute_swap_cosine.py b/exp2a_swap_analysis/compute_swap_cosine.py new file mode 100644 index 0000000000000000000000000000000000000000..534d781e4b6949dfd3bba496bd4bc1f88d87ee60 --- /dev/null +++ b/exp2a_swap_analysis/compute_swap_cosine.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python3 +""" +Post-hoc Analysis: cos(original, swapped) per sample + +Loads saved vectors_{scale}.npz from exp2a_swap_analysis results, +computes cosine similarity between original and swapped embeddings per sample, +and reports category-level statistics. + +This measures whether the model's representation actually changes when +obj1↔obj2 are swapped — the fundamental test of spatial relation encoding. 
+ +Usage: + python compute_swap_cosine.py --model_type molmo + python compute_swap_cosine.py --model_type molmo --results_dir /path/to/results +""" + +import os +import json +import argparse +import numpy as np +import pandas as pd +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt + +CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 'close'] +GROUP_MAP = { + 'left': 'horizontal', 'right': 'horizontal', + 'above': 'vertical', 'under': 'vertical', + 'far': 'distance', 'close': 'distance', +} +GROUP_ORDER = ['horizontal', 'vertical', 'distance'] + +SCALE_COLORS = { + 'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c', + '800k': '#d62728', '2m': '#9467bd', 'roborefer': '#8c564b', +} + + +def cosine_sim_per_sample(orig: np.ndarray, swap: np.ndarray) -> np.ndarray: + """Compute cosine similarity per row (sample).""" + # orig, swap: (N, D) + dot = np.sum(orig * swap, axis=1) + norm_o = np.linalg.norm(orig, axis=1) + norm_s = np.linalg.norm(swap, axis=1) + return dot / (norm_o * norm_s + 1e-10) + + +def analyze_scale(npz_path: str, scale: str) -> dict: + """Analyze one scale's NPZ file. 
Returns dict of results.""" + data = np.load(npz_path, allow_pickle=True) + + # Find available layers + layer_keys = sorted([k for k in data.files if k.startswith('orig_L')]) + layers = [int(k.replace('orig_L', '')) for k in layer_keys] + + scale_results = {} + + for layer in layers: + orig = data.get(f'orig_L{layer}') + swap = data.get(f'swap_L{layer}') + labels = data.get(f'labels_L{layer}') + + if orig is None or swap is None or labels is None: + continue + + labels = np.array([str(l) for l in labels]) + cos_sims = cosine_sim_per_sample(orig, swap) + + layer_result = { + 'overall_mean': float(np.mean(cos_sims)), + 'overall_std': float(np.std(cos_sims)), + 'overall_n': len(cos_sims), + } + + # Per category + for cat in CATEGORY_ORDER: + mask = labels == cat + if mask.any(): + cat_sims = cos_sims[mask] + layer_result[f'{cat}_mean'] = float(np.mean(cat_sims)) + layer_result[f'{cat}_std'] = float(np.std(cat_sims)) + layer_result[f'{cat}_n'] = int(mask.sum()) + + # Per group + for group in GROUP_ORDER: + group_cats = [c for c in CATEGORY_ORDER if GROUP_MAP[c] == group] + mask = np.isin(labels, group_cats) + if mask.any(): + group_sims = cos_sims[mask] + layer_result[f'{group}_mean'] = float(np.mean(group_sims)) + layer_result[f'{group}_std'] = float(np.std(group_sims)) + + scale_results[layer] = layer_result + + return scale_results + + +def plot_swap_cosine_by_layer( + all_results: dict, # {scale: {layer: {category_mean, ...}}} + model_type: str, + save_path: str, +): + """Plot cos(orig, swap) across layers for each scale.""" + fig, axes = plt.subplots(1, 3, figsize=(21, 6)) + scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] + + for idx, group in enumerate(GROUP_ORDER): + ax = axes[idx] + for scale in scale_order: + if scale not in all_results: + continue + results = all_results[scale] + layers = sorted(results.keys()) + vals = [results[l].get(f'{group}_mean', np.nan) for l in layers] + color = SCALE_COLORS.get(scale, 'gray') + ax.plot(layers, 
vals, '-o', color=color, label=scale, linewidth=2, markersize=5) + + ax.set_xlabel('Layer Index', fontsize=11) + ax.set_ylabel('cos(original, swapped)', fontsize=11) + ax.set_title(f'{group}', fontsize=13, fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + ax.set_ylim(None, 1.02) + + fig.suptitle( + f'{model_type.upper()} - cos(original, swapped) Across Layers\n' + f'(Lower = model distinguishes swap more)', + fontsize=14, fontweight='bold', y=1.04 + ) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + print(f"Saved: {save_path}") + + +def plot_swap_cosine_barplot( + all_results: dict, + model_type: str, + save_path: str, +): + """Bar plot at deepest layer: per-category cos(orig, swap) across scales.""" + scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer'] + available_scales = [s for s in scale_order if s in all_results] + + if not available_scales: + return + + # Use deepest layer + sample_layers = sorted(all_results[available_scales[0]].keys()) + deepest = sample_layers[-1] + + fig, ax = plt.subplots(figsize=(14, 6)) + x = np.arange(len(CATEGORY_ORDER)) + width = 0.8 / len(available_scales) + + for i, scale in enumerate(available_scales): + results = all_results[scale] + if deepest not in results: + continue + layer_data = results[deepest] + vals = [layer_data.get(f'{cat}_mean', 0) for cat in CATEGORY_ORDER] + offset = (i - len(available_scales) / 2 + 0.5) * width + color = SCALE_COLORS.get(scale, 'gray') + bars = ax.bar(x + offset, vals, width, label=scale, color=color) + for bar, val in zip(bars, vals): + if val > 0: + ax.annotate(f'{val:.3f}', xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()), + xytext=(0, 2), textcoords='offset points', + ha='center', va='bottom', fontsize=6, rotation=90) + + ax.set_xticks(x) + ax.set_xticklabels(CATEGORY_ORDER, fontsize=11) + ax.set_ylabel('cos(original, swapped)', fontsize=12) + ax.set_title(f'{model_type.upper()} - Layer {deepest}: 
def plot_swap_cosine_barplot(
    all_results: dict,  # {scale: {layer: {f'{cat}_mean': float, ...}}}
    model_type: str,
    save_path: str,
):
    """Bar plot at deepest layer: per-category cos(orig, swap) across scales."""
    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
    available_scales = [s for s in scale_order if s in all_results]

    if not available_scales:
        return

    # Use deepest layer (assumes all scales share the same layer set —
    # TODO confirm; only the first available scale's layers are inspected).
    sample_layers = sorted(all_results[available_scales[0]].keys())
    deepest = sample_layers[-1]

    fig, ax = plt.subplots(figsize=(14, 6))
    x = np.arange(len(CATEGORY_ORDER))
    # Divide an 0.8-wide slot evenly among the scales for grouped bars.
    width = 0.8 / len(available_scales)

    for i, scale in enumerate(available_scales):
        results = all_results[scale]
        if deepest not in results:
            continue
        layer_data = results[deepest]
        vals = [layer_data.get(f'{cat}_mean', 0) for cat in CATEGORY_ORDER]
        # Center the group of bars on the category tick.
        offset = (i - len(available_scales) / 2 + 0.5) * width
        color = SCALE_COLORS.get(scale, 'gray')
        bars = ax.bar(x + offset, vals, width, label=scale, color=color)
        for bar, val in zip(bars, vals):
            if val > 0:  # skip labels on missing (0) entries
                ax.annotate(f'{val:.3f}', xy=(bar.get_x() + bar.get_width() / 2, bar.get_height()),
                            xytext=(0, 2), textcoords='offset points',
                            ha='center', va='bottom', fontsize=6, rotation=90)

    ax.set_xticks(x)
    ax.set_xticklabels(CATEGORY_ORDER, fontsize=11)
    ax.set_ylabel('cos(original, swapped)', fontsize=12)
    ax.set_title(f'{model_type.upper()} - Layer {deepest}: cos(original, swapped) by Category\n'
                 f'(Lower = model representation changes more on swap)',
                 fontsize=13, fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3, axis='y')

    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"Saved: {save_path}")


def plot_swap_cosine_distribution(
    all_results_raw: dict,  # {scale: {layer: {'sims': np.ndarray, 'labels': np.ndarray}}}
    model_type: str,
    save_dir: str,
):
    """Histogram of per-sample cos(orig, swap) at deepest layer, per group.

    Writes one figure per relation group: a row of histograms, one panel per
    available scale, with shared axes for visual comparability.
    """
    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
    available_scales = [s for s in scale_order if s in all_results_raw]

    if not available_scales:
        return

    sample_layers = sorted(all_results_raw[available_scales[0]].keys())
    deepest = sample_layers[-1]

    for group in GROUP_ORDER:
        fig, axes = plt.subplots(1, len(available_scales), figsize=(5 * len(available_scales), 4),
                                 sharey=True, sharex=True)
        if len(available_scales) == 1:
            axes = [axes]  # plt.subplots returns a bare Axes for a 1x1 grid

        for i, scale in enumerate(available_scales):
            ax = axes[i]
            raw = all_results_raw[scale].get(deepest)
            if raw is None:
                continue
            sims = raw['sims']
            labels = raw['labels']
            # Restrict to the categories belonging to this relation group.
            group_cats = [c for c in CATEGORY_ORDER if GROUP_MAP[c] == group]
            mask = np.isin(labels, group_cats)

            if mask.any():
                group_sims = sims[mask]
                ax.hist(group_sims, bins=30, alpha=0.7, color=SCALE_COLORS.get(scale, 'gray'),
                        edgecolor='white', linewidth=0.5)
                ax.axvline(np.mean(group_sims), color='red', linestyle='--', linewidth=1.5,
                           label=f'mean={np.mean(group_sims):.3f}')
                ax.legend(fontsize=8)

            ax.set_title(f'{scale}', fontsize=11, fontweight='bold')
            ax.set_xlabel('cos(orig, swap)', fontsize=9)
            if i == 0:
                ax.set_ylabel('Count', fontsize=9)

        fig.suptitle(f'{model_type.upper()} - {group} - Layer {deepest}: Distribution of cos(orig, swap)',
                     fontsize=13, fontweight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(save_dir, f'swap_cosine_dist_{group}.png'), dpi=200, bbox_inches='tight')
        plt.close()

    print(f"Saved distribution plots to {save_dir}")


def main():
    """Post-hoc analysis entry point.

    Loads per-scale .npz vector dumps from --results_dir/<model_type>,
    computes per-layer cos(orig, swap) statistics, saves JSON/CSV summaries
    and all plots. Scales without an .npz file are silently skipped.
    """
    parser = argparse.ArgumentParser(description='Post-hoc: cos(original, swapped) analysis')
    parser.add_argument('--model_type', type=str, required=True, choices=['molmo', 'nvila', 'qwen'])
    parser.add_argument('--results_dir', type=str,
                        default='/data/shared/Qwen/experiments/exp2a_swap_analysis/results')
    args = parser.parse_args()

    model_dir = os.path.join(args.results_dir, args.model_type)
    plots_dir = os.path.join(model_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']

    all_results = {}      # {scale: {layer: stats_dict}}
    all_results_raw = {}  # {scale: {layer: {'sims': array, 'labels': array}}}

    for scale in scale_order:
        npz_path = os.path.join(model_dir, f'vectors_{scale}.npz')
        if not os.path.exists(npz_path):
            continue

        print(f"\nProcessing {args.model_type} - {scale}")
        results = analyze_scale(npz_path, scale)
        all_results[scale] = results

        # Also extract raw per-sample cosines for distribution plots.
        data = np.load(npz_path, allow_pickle=True)
        raw_layers = {}
        for layer in sorted(results.keys()):
            orig = data.get(f'orig_L{layer}')
            swap = data.get(f'swap_L{layer}')
            labels = data.get(f'labels_L{layer}')
            if orig is not None and swap is not None and labels is not None:
                sims = cosine_sim_per_sample(orig, swap)
                raw_layers[layer] = {
                    'sims': sims,
                    'labels': np.array([str(l) for l in labels]),
                }
        all_results_raw[scale] = raw_layers

        # Print summary for deepest layer.
        deepest = sorted(results.keys())[-1]
        r = results[deepest]
        print(f"  Layer {deepest} (deepest):")
        print(f"    Overall: {r['overall_mean']:.4f} ± {r['overall_std']:.4f} (n={r['overall_n']})")
        for cat in CATEGORY_ORDER:
            m = r.get(f'{cat}_mean')
            s = r.get(f'{cat}_std')
            n = r.get(f'{cat}_n', 0)
            if m is not None:
                print(f"    {cat:>6s}: {m:.4f} ± {s:.4f} (n={n})")

    if not all_results:
        print("No data found. Check results_dir.")
        return

    # Save JSON (layer keys stringified for JSON compatibility).
    json_data = {}
    for scale, layers in all_results.items():
        json_data[scale] = {str(l): v for l, v in layers.items()}
    json_path = os.path.join(model_dir, 'swap_cosine_stats.json')
    with open(json_path, 'w') as f:
        json.dump(json_data, f, indent=2)
    print(f"\nSaved stats: {json_path}")

    # Save CSV (one row per scale×layer).
    csv_rows = []
    for scale, layers in all_results.items():
        for layer, stats in sorted(layers.items()):
            row = {'scale': scale, 'layer': layer}
            row.update(stats)
            csv_rows.append(row)
    csv_path = os.path.join(model_dir, 'swap_cosine_stats.csv')
    pd.DataFrame(csv_rows).to_csv(csv_path, index=False)
    print(f"Saved CSV: {csv_path}")

    # Plots
    plot_swap_cosine_by_layer(all_results, args.model_type,
                              os.path.join(plots_dir, 'swap_cosine_by_layer.png'))
    plot_swap_cosine_barplot(all_results, args.model_type,
                             os.path.join(plots_dir, 'swap_cosine_barplot.png'))
    plot_swap_cosine_distribution(all_results_raw, args.model_type, plots_dir)

    print(f"\nDone. Results in: {model_dir}")


if __name__ == '__main__':
    main()
Cross-group Δ alignment (Δ_vertical vs Δ_distance) for perspective bias + 4. PCA visualization of per-sample embeddings + 5. Scaling effects on all of the above + +Cross-group analysis (perspective bias hypothesis): + For far/close samples, use bbox to determine vertical relationship. + Create vertical swap pairs for the same image+objects. + Measure cos(Δ_vertical, Δ_distance) — high = entangled, low = disentangled. + Expect: vanilla = high alignment, scaled = lower alignment. + +Usage: + # Single scale (for parallel execution) + python exp2a_swap_analysis.py --model_type molmo --scales vanilla --device cuda + + # Merge after all scales finish + python exp2a_swap_analysis.py --model_type molmo --merge +""" + +import os +import sys +import json +import argparse +import base64 +import logging +import random +import re +from io import BytesIO +from collections import defaultdict +from typing import Dict, List, Tuple, Optional, Any +from abc import ABC, abstractmethod + +import torch +import numpy as np +import pandas as pd +from PIL import Image +from tqdm import tqdm +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import seaborn as sns +from sklearn.metrics.pairwise import cosine_similarity +from sklearn.decomposition import PCA + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +# ============================================================================ +# Constants +# ============================================================================ + +CATEGORY_ORDER = ['left', 'right', 'above', 'under', 'far', 'close'] + +OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'under', 'under': 'above', + 'far': 'close', 'close': 'far', +} + +GROUP_MAP = { + 'left': 'horizontal', 'right': 'horizontal', + 'above': 'vertical', 'under': 'vertical', + 'far': 'distance', 'close': 'distance', +} + +GROUP_ORDER = ['horizontal', 'vertical', 'distance'] + 
+SCALE_COLORS = { + 'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c', + '800k': '#d62728', '2m': '#9467bd', 'roborefer': '#8c564b', +} + +MODEL_CONFIGS = { + 'molmo': { + 'vanilla': 'allenai/Molmo-7B-O-0924', + '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared', + '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared', + '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared', + '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared', + }, + 'nvila': { + 'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B', + '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221', + '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221', + '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221', + '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632', + 'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model', + }, + 'qwen': { + 'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct', + '80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221', + '400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221', + '800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221', + '2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517', + }, +} + + +# ============================================================================ +# Data Loading & Swap Pair Creation +# ============================================================================ + +OBJECT_PATTERNS = [ + re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE), + 
# ============================================================================
# Data Loading & Swap Pair Creation
# ============================================================================

# Regexes tried in order to pull the two object phrases out of a benchmark
# question. Earlier patterns are more specific; the first match wins.
OBJECT_PATTERNS = [
    re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE),
    re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE),
    re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
]


def extract_objects(question: str) -> Tuple[str, str]:
    """Extract the two object phrases (obj1, obj2) from a benchmark question.

    Raises:
        ValueError: if no pattern in OBJECT_PATTERNS matches.
    """
    for pattern in OBJECT_PATTERNS:
        m = pattern.search(question)
        if m:
            return m.group(1).strip(), m.group(2).strip()
    raise ValueError(f"Could not extract objects from: {question}")


def decode_base64_image(base64_str: str) -> Image.Image:
    """Decode a base64-encoded image string into an RGB PIL image."""
    image_data = base64.b64decode(base64_str)
    return Image.open(BytesIO(image_data)).convert('RGB')


def check_answer(generated_text: str, expected_category: str) -> bool:
    """Return True if the model's text answers with the expected category.

    Both the expected word and its opposite are searched as substrings
    (case-insensitive); if both occur, the one mentioned FIRST wins.
    NOTE(review): no synonym normalization here (unlike the correct_filter
    variant's "Fix 2") — presumably intentional for this experiment; confirm.
    """
    if not generated_text or not generated_text.strip():
        return False
    text = generated_text.strip().lower()
    expected = expected_category.lower()
    opposite = OPPOSITE_MAP[expected]
    pos_exp = text.find(expected)
    pos_opp = text.find(opposite)
    if pos_exp == -1:
        return False
    if pos_opp == -1:
        return True
    # Both words present: earliest mention decides.
    return pos_exp < pos_opp


def load_swap_pairs(tsv_path: str, seed: int = 42) -> List[dict]:
    """Load EmbSpatialBench TSV and create swap pairs for all samples.

    For left/right/above/under rows the two objects are parsed from the
    question text; for far/close rows the correct option is the target and a
    random distractor option is the reference. Rows whose objects cannot be
    extracted are logged and skipped.

    Args:
        tsv_path: path to the benchmark TSV (columns: index, category,
            question, image, answer, A-D — per the accesses below).
        seed: seed for the private RNG used to pick distance references,
            making pair creation reproducible.
    """
    rng = random.Random(seed)
    df = pd.read_csv(tsv_path, sep='\t')

    pairs = []
    stats = defaultdict(lambda: {'total': 0, 'success': 0})

    for _, row in df.iterrows():
        category = row['category']
        stats[category]['total'] += 1

        try:
            if category in ['left', 'right', 'above', 'under']:
                obj1, obj2 = extract_objects(row['question'])
                if category in ['left', 'right']:
                    template = "Is the {} to the left or right of the {}?"
                else:
                    template = "Is the {} above or under the {}?"

                pair = {
                    'index': row['index'],
                    'image_base64': row['image'],
                    'original_question': template.format(obj1, obj2),
                    'swapped_question': template.format(obj2, obj1),
                    'original_answer': category,
                    # Swapping the objects flips the expected answer.
                    'swapped_answer': OPPOSITE_MAP[category],
                    'group': GROUP_MAP[category],
                    'category': category,
                    'obj1': obj1,
                    'obj2': obj2,
                }

            elif category in ['far', 'close']:
                answer_key = row['answer']
                options = {k: row[k] for k in ['A', 'B', 'C', 'D']}
                target_object = options[answer_key]
                # Reference = a random wrong option (seeded, reproducible).
                candidates = [v for k, v in options.items() if k != answer_key]
                reference_object = rng.choice(candidates)

                pair = {
                    'index': row['index'],
                    'image_base64': row['image'],
                    'original_question': f"Compared to {reference_object}, is {target_object} far or close from you?",
                    'swapped_question': f"Compared to {target_object}, is {reference_object} far or close from you?",
                    'original_answer': category,
                    'swapped_answer': OPPOSITE_MAP[category],
                    'group': 'distance',
                    'category': category,
                    'target_object': target_object,
                    'reference_object': reference_object,
                }
            else:
                # Unknown category — counted in totals but produces no pair.
                continue

            pairs.append(pair)
            stats[category]['success'] += 1

        except Exception as e:
            logger.warning(f"Failed to create swap pair for index {row['index']}: {e}")
            continue

    logger.info("Swap pair creation stats:")
    for cat in CATEGORY_ORDER:
        s = stats[cat]
        logger.info(f"  {cat}: {s['success']}/{s['total']}")
    logger.info(f"  Total pairs: {len(pairs)}")

    return pairs
def build_hf_bbox_cache(hf_dataset_name: str = 'FlagEval/EmbSpatial-Bench') -> Dict[int, dict]:
    """Load HF dataset and build bbox lookup cache keyed by question_id."""
    from datasets import load_dataset
    logger.info(f"Loading HF dataset: {hf_dataset_name}")
    ds = load_dataset(hf_dataset_name, split='test')

    # Keep only the fields the cross-group analysis needs, keyed by question_id
    # (a duplicate question_id keeps the last occurrence, as before).
    kept_fields = ('objects', 'relation', 'data_source', 'answer', 'answer_options')
    cache = {
        item['question_id']: {field: item[field] for field in kept_fields}
        for item in ds
    }

    logger.info(f"Built bbox cache: {len(cache)} entries")
    return cache


def get_bbox_center_y(bbox: list) -> float:
    """BBox [x, y, width, height] -> center y coordinate."""
    top = bbox[1]
    height = bbox[3]
    return top + height / 2


def create_cross_group_quads(
    swap_pairs: List[dict],
    hf_cache: Dict[int, dict],
    threshold_ratio: float = 0.05,
) -> List[dict]:
    """
    For far/close swap pairs, create additional vertical queries using bbox.

    Returns quads: each has distance swap pair + vertical swap pair for same
    image/objects. Only includes samples where the vertical relationship is
    unambiguous (bbox center-y gap exceeds threshold_ratio * image height).
    """
    # Known per-source image heights; anything else falls back to 480.
    IMAGE_HEIGHTS = {'ai2thor': 300, 'mp3d': 480, 'scannet': 968}

    stats = {'total': 0, 'matched': 0, 'ambiguous': 0, 'no_bbox': 0}
    quads = []

    for dist_pair in [p for p in swap_pairs if p['group'] == 'distance']:
        stats['total'] += 1
        idx = dist_pair['index']

        hf_item = hf_cache.get(idx)
        if hf_item is None:
            stats['no_bbox'] += 1
            continue

        target = dist_pair['target_object']
        reference = dist_pair['reference_object']

        # Collect bbox center-y for the two objects of interest.
        # Later occurrences of the same name overwrite earlier ones,
        # matching the original plain-assignment scan.
        centers = {}
        for name, bbox in zip(hf_item['objects']['name'], hf_item['objects']['bbox']):
            if name in (target, reference):
                centers[name] = get_bbox_center_y(bbox)

        target_bbox_y = centers.get(target)
        ref_bbox_y = centers.get(reference)
        if target_bbox_y is None or ref_bbox_y is None:
            stats['no_bbox'] += 1
            continue

        # Vertical gap must clear the per-image threshold to be unambiguous.
        image_height = IMAGE_HEIGHTS.get(hf_item['data_source'], 480)
        y_diff = target_bbox_y - ref_bbox_y
        if abs(y_diff) < image_height * threshold_ratio:
            stats['ambiguous'] += 1
            continue

        # Image y grows downward: a smaller center-y means "above".
        vert_original_answer = 'above' if target_bbox_y < ref_bbox_y else 'under'

        quads.append({
            'index': idx,
            'image_base64': dist_pair['image_base64'],
            # Distance pair (from original swap pair)
            'dist_original_q': dist_pair['original_question'],
            'dist_swapped_q': dist_pair['swapped_question'],
            'dist_original_answer': dist_pair['original_answer'],
            'dist_swapped_answer': dist_pair['swapped_answer'],
            # Vertical pair (newly created from bbox)
            'vert_original_q': f"Is the {target} above or under the {reference}?",
            'vert_swapped_q': f"Is the {reference} above or under the {target}?",
            'vert_original_answer': vert_original_answer,
            'vert_swapped_answer': OPPOSITE_MAP[vert_original_answer],
            # Metadata
            'target_object': target,
            'reference_object': reference,
            'target_bbox_y': target_bbox_y,
            'ref_bbox_y': ref_bbox_y,
            'y_diff': y_diff,
            'data_source': hf_item['data_source'],
        })
        stats['matched'] += 1

    logger.info(f"Cross-group quads: {stats['matched']}/{stats['total']} "
                f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})")
    return quads
# ============================================================================
# Base Extractor (identical to exp2a_correct_filter)
# ============================================================================

class BaseHiddenStateExtractor(ABC):
    """Loads a VLM, hooks its transformer layers, and captures per-layer
    last-token hidden states during the prefill forward pass while also
    generating a short textual answer."""

    def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None):
        self.model_path = model_path
        self.device = device
        self.hidden_states = {}  # {layer_idx: 1-D CPU float tensor} for the last query
        self.hooks = []
        self._load_model()
        num_layers = self._get_num_layers()
        if target_layers is None:
            # Default: hook every layer.
            self.target_layers = list(range(num_layers))
            logger.info(f"Model has {num_layers} layers. Extracting ALL.")
        else:
            self.target_layers = target_layers
        self._register_hooks()

    def _register_hooks(self):
        """Attach a forward hook to each target layer (skips missing modules)."""
        for layer_idx in self.target_layers:
            module = self._get_layer_module(layer_idx)
            if module is not None:
                hook = module.register_forward_hook(self._make_hook(layer_idx))
                self.hooks.append(hook)

    def _make_hook(self, layer_idx: int):
        """Build a hook that stores the last-token hidden state for layer_idx."""
        def hook_fn(module, input, output):
            if isinstance(output, tuple):
                hidden = output[0]
            else:
                hidden = output
            # seq_len > 1 distinguishes the prefill pass from single-token
            # decode steps, so only the prompt's final token is captured.
            if hidden.shape[1] > 1:  # prefill only
                last_token = hidden[:, -1, :].detach().cpu().float()
                self.hidden_states[layer_idx] = last_token.squeeze(0)
        return hook_fn

    @abstractmethod
    def _load_model(self): pass
    @abstractmethod
    def _get_num_layers(self) -> int: pass
    @abstractmethod
    def _get_layer_module(self, layer_idx: int): pass
    @abstractmethod
    def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass

    def cleanup(self):
        """Remove hooks and free the model/processor plus CUDA cache."""
        for hook in self.hooks:
            hook.remove()
        self.hooks = []
        if hasattr(self, 'model'):
            del self.model
        if hasattr(self, 'processor'):
            del self.processor
        torch.cuda.empty_cache()


# ============================================================================
# Molmo Extractor
# ============================================================================

class MolmoExtractor(BaseHiddenStateExtractor):
    """Extractor for Molmo; supports both native (olmo) checkpoints and
    HF-hub checkpoints, chosen by the presence of config.yaml + model.pt."""

    def _load_model(self):
        config_path = os.path.join(self.model_path, "config.yaml")
        checkpoint_path = os.path.join(self.model_path, "model.pt")
        if os.path.exists(config_path) and os.path.exists(checkpoint_path):
            self._load_native_model()
            self.is_native = True
        else:
            self._load_hf_model()
            self.is_native = False

    def _load_native_model(self):
        """Load a native olmo-format Molmo checkpoint plus its preprocessor."""
        from olmo.config import ModelConfig
        from olmo.model import Molmo as NativeMolmoModel
        from olmo.data.model_preprocessor import MultiModalPreprocessor
        from olmo.data.data_formatter import DataFormatter

        # HACK: torch>=2.6 defaults weights_only=True, which breaks loading
        # pickled olmo checkpoints; force the legacy behavior globally.
        _original_load = torch.load
        def _unsafe_load_wrapper(*args, **kwargs):
            if 'weights_only' not in kwargs:
                kwargs['weights_only'] = False
            return _original_load(*args, **kwargs)
        torch.load = _unsafe_load_wrapper

        cfg = ModelConfig.load(
            os.path.join(self.model_path, "config.yaml"),
            key="model", validate_paths=False
        )
        cfg.init_device = "cpu"  # build on CPU, then move once weights are in
        self.model = NativeMolmoModel(cfg)
        state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu")
        self.model.load_state_dict(state_dict)
        self.model = self.model.to(self.device, dtype=torch.bfloat16).eval()
        self.tokenizer = cfg.get_tokenizer()

        v_cfg = cfg.vision_backbone
        h, w = cfg.llm_patches_per_crop()
        image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None)

        # Some configs carry style=None, which the stock formatter rejects;
        # default it to "User" so inference formatting still works.
        class SafeDataFormatter(DataFormatter):
            def get_system_prompt(self, style, for_inference, messages, rng=None):
                if style is None:
                    style = "User"
                return super().get_system_prompt(style, for_inference, messages, rng)

        self.formatter = SafeDataFormatter(
            prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting,
            system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space,
            default_inference_len=cfg.default_inference_len
        )
        self.preprocessor = MultiModalPreprocessor(
            tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type),
            crop_mode=cfg.crop_mode, max_crops=cfg.max_crops,
            overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode,
            use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size,
            image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h,
            image_token_length_w=w, image_token_length_h=h,
            image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask,
            pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting,
        )
        logger.info(f"Loaded native Molmo from {self.model_path}")

    def _load_hf_model(self):
        """Load a HF-format Molmo checkpoint with its trust_remote_code processor."""
        from transformers import AutoModelForCausalLM, AutoProcessor
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path, torch_dtype=torch.bfloat16,
            trust_remote_code=True, device_map=self.device
        ).eval()
        self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
        logger.info(f"Loaded HF Molmo from {self.model_path}")

    def _get_num_layers(self) -> int:
        if self.is_native:
            return len(self.model.transformer.blocks)
        if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
            return len(self.model.model.transformer.blocks)
        return 32  # fallback when the HF wrapper hides the block list — TODO confirm

    def _get_layer_module(self, layer_idx: int):
        if self.is_native:
            return self.model.transformer.blocks[layer_idx]
        return self.model.model.transformer.blocks[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one image+question query; returns (hidden_states, answer text)."""
        self.hidden_states = {}
        if self.is_native:
            example = {"messages": [question], "image": image}
            messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
            batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True)
            # Older preprocessors emit 'input_tokens' instead of 'input_ids'.
            if 'input_ids' not in batch and 'input_tokens' in batch:
                batch['input_ids'] = batch['input_tokens']

            def to_t(x):
                return torch.from_numpy(x) if isinstance(x, np.ndarray) else x

            input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long()
            images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
            image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
            image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device)

            with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
                gen = self.model.generate(
                    input_ids=input_ids, images=images_t,
                    image_masks=image_masks, image_input_idx=image_input_idx,
                    max_steps=20, beam_size=1,
                )
            generated_ids = gen.token_ids[0, 0]
            answer = self.tokenizer.decode(generated_ids.tolist()).strip()
            # Strip any end-of-sequence markers the decoder leaves behind.
            for eos in ['<|endoftext|>', '', '<|end|>']:
                answer = answer.replace(eos, '').strip()
        else:
            from transformers import GenerationConfig
            inputs = self.processor.process(images=[image], text=question)
            processed = {}
            for k, v in inputs.items():
                v = v.to(self.device).unsqueeze(0)
                if v.dtype == torch.float32:
                    v = v.to(dtype=torch.bfloat16)
                processed[k] = v
            with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16):
                output = self.model.generate_from_batch(
                    processed,
                    GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"),
                    tokenizer=self.processor.tokenizer,
                )
            # Decode only the newly generated tokens after the prompt.
            input_len = processed['input_ids'].shape[1]
            answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip()

        return self.hidden_states.copy(), answer


# ============================================================================
# NVILA Extractor
# ============================================================================

class NVILAExtractor(BaseHiddenStateExtractor):
    """Extractor for NVILA via the llava package. Temporarily scrubs any
    RoboRefer paths/modules so the correct llava implementation is imported."""

    def _load_model(self):
        original_sys_path = sys.path.copy()
        # Make sure we import the stock llava, not RoboRefer's fork.
        sys.path = [p for p in sys.path if 'RoboRefer' not in p]
        modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()]
        removed = {m: sys.modules.pop(m) for m in modules_to_remove}
        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            # Restore path and evicted modules before surfacing the failure.
            sys.path = original_sys_path
            for m, mod in removed.items():
                sys.modules[m] = mod
            raise RuntimeError(f"Failed to import llava: {err}")
        sys.path = original_sys_path
        self.LLaVAImage = LLaVAImage
        self.clib = clib
        self.model = llava.load(self.model_path, model_base=None)
        self._find_llm_backbone()
        logger.info(f"Loaded NVILA from {self.model_path}")

    def _find_llm_backbone(self):
        """Locate the LLM transformer layer list across NVILA wrapper variants.

        Probes the common attribute paths first, then falls back to scanning
        named_modules for any non-empty '.layers' container; the first
        candidate found wins.
        """
        candidates = []
        if hasattr(self.model, 'llm'):
            if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'):
                candidates.append(self.model.llm.model.layers)
            if hasattr(self.model.llm, 'layers'):
                candidates.append(self.model.llm.layers)
        if hasattr(self.model, 'model'):
            if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'):
                candidates.append(self.model.model.model.layers)
            if hasattr(self.model.model, 'layers'):
                candidates.append(self.model.model.layers)
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0:
                candidates.append(module)
        if candidates:
            self.llm_backbone = candidates[0]
        else:
            raise ValueError("Could not locate transformer layers in NVILA model")

    def _get_num_layers(self) -> int:
        return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24

    def _get_layer_module(self, layer_idx: int):
        return self.llm_backbone[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one query via generate_content; the image goes through a temp PNG
        because the llava API takes a file path."""
        self.hidden_states = {}
        import tempfile
        with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
            temp_path = f.name
            image.save(temp_path)
        try:
            prompt = [self.LLaVAImage(temp_path), question]
            from transformers import GenerationConfig
            response = self.model.generate_content(
                prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False)
            )
        finally:
            os.unlink(temp_path)  # always remove the temp image
        answer = str(response[0] if isinstance(response, list) else response).strip()
        return self.hidden_states.copy(), answer
class RoboReferExtractor(NVILAExtractor):
    """NVILA variant that loads RoboRefer's llava fork from a pinned path."""

    ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer'

    def _load_model(self):
        original_sys_path = sys.path.copy()
        # Prepend the RoboRefer checkout so ITS llava fork shadows any other.
        if self.ROBOREFER_PATH not in sys.path:
            sys.path.insert(0, self.ROBOREFER_PATH)
        modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()]
        removed = {m: sys.modules.pop(m) for m in modules_to_remove}
        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            # Restore path and evicted modules before surfacing the failure.
            sys.path = original_sys_path
            for m, mod in removed.items():
                sys.modules[m] = mod
            raise RuntimeError(f"Failed to import RoboRefer llava: {err}")
        sys.path = original_sys_path
        self.LLaVAImage = LLaVAImage
        self.clib = clib
        self.model = llava.load(self.model_path, model_base=None)
        self._find_llm_backbone()
        logger.info(f"Loaded RoboRefer from {self.model_path}")


# ============================================================================
# Qwen2.5-VL Extractor
# ============================================================================

class Qwen25VLExtractor(BaseHiddenStateExtractor):
    """Extractor for Qwen2.5-VL. Fine-tuned local checkpoints reuse the base
    model's processor (the fine-tune output dirs lack processor files)."""

    BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct"

    def _load_model(self):
        from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
        try:
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path, torch_dtype=torch.bfloat16, device_map=self.device
            )
        except ImportError:
            # device_map requires accelerate; fall back to manual placement.
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path, torch_dtype=torch.bfloat16
            ).to(self.device)
        self.model.eval()
        # Local (path-like) checkpoints use the base model's processor.
        if self.model_path.startswith('/'):
            self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL)
        else:
            self.processor = AutoProcessor.from_pretrained(self.model_path)
        logger.info(f"Loaded Qwen2.5-VL from {self.model_path}")

    def _get_num_layers(self) -> int:
        return len(self.model.model.layers)

    def _get_layer_module(self, layer_idx: int):
        return self.model.model.layers[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one image+question query; returns (hidden_states, answer text)."""
        self.hidden_states = {}
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": question}
        ]}]
        text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        from qwen_vl_utils import process_vision_info
        image_inputs, video_inputs = process_vision_info(messages)
        inputs = self.processor(
            text=[text], images=image_inputs, videos=video_inputs,
            padding=True, return_tensors="pt"
        ).to(self.device)
        with torch.no_grad():
            output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
        # Decode only the newly generated tokens after the prompt.
        input_len = inputs['input_ids'].shape[1]
        answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip()
        return self.hidden_states.copy(), answer


def get_extractor(model_type: str, model_path: str, scale: str = None, **kwargs):
    """Factory: build the extractor for (model_type, scale).

    The nvila/roborefer combination gets the dedicated RoboRefer subclass;
    otherwise model_type selects the class directly (KeyError on unknown type).
    """
    if model_type == 'nvila' and scale == 'roborefer':
        return RoboReferExtractor(model_path, **kwargs)
    extractors = {'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor}
    return extractors[model_type](model_path, **kwargs)


# ============================================================================
# Feature Extraction Pipeline
# ============================================================================

def run_single_query(extractor, image, question):
    """Run a single forward pass and return (hidden_states_dict, predicted_text).

    Hidden states are flattened to 1-D numpy arrays; layers the hooks did not
    capture (or captured empty) are simply omitted from the result.
    """
    hidden_states, predicted = extractor.extract_and_predict(image, question)
    result = {}
    for layer_idx in extractor.target_layers:
        if layer_idx in hidden_states:
            state = hidden_states[layer_idx].numpy().flatten()
            if state.size > 0:
                result[layer_idx] = state
    return result, predicted


def extract_swap_features(
    extractor: BaseHiddenStateExtractor,
    swap_pairs: List[dict],
    max_samples_per_category: int = 0,
) -> List[dict]:
    """Extract features for all swap pairs. Returns per-sample records with delta vectors.

    Each pair costs two forward passes (original + swapped question on the
    same image). A record carries predictions, correctness flags, raw hidden
    states, and per-layer delta = swap - orig. Failing pairs are logged and
    skipped rather than aborting the run.

    Args:
        max_samples_per_category: if > 0, cap each category at this many
            randomly chosen pairs (fixed seed 42 for reproducibility).
    """
    rng = random.Random(42)

    # Optional: limit samples per category.
    if max_samples_per_category > 0:
        grouped = defaultdict(list)
        for p in swap_pairs:
            grouped[p['category']].append(p)
        limited = []
        for cat in CATEGORY_ORDER:
            samples = grouped[cat]
            if len(samples) > max_samples_per_category:
                samples = rng.sample(samples, max_samples_per_category)
            limited.extend(samples)
        swap_pairs = limited

    records = []
    for pair in tqdm(swap_pairs, desc="Swap pairs"):
        try:
            image = decode_base64_image(pair['image_base64'])

            hs_orig, pred_orig = run_single_query(extractor, image, pair['original_question'])
            hs_swap, pred_swap = run_single_query(extractor, image, pair['swapped_question'])

            is_correct_orig = check_answer(pred_orig, pair['original_answer'])
            is_correct_swap = check_answer(pred_swap, pair['swapped_answer'])

            # Compute delta vectors only for layers captured in BOTH passes.
            delta = {}
            for layer_idx in extractor.target_layers:
                if layer_idx in hs_orig and layer_idx in hs_swap:
                    delta[layer_idx] = hs_swap[layer_idx] - hs_orig[layer_idx]

            record = {
                'index': pair['index'],
                'group': pair['group'],
                'category': pair['category'],
                'original_answer': pair['original_answer'],
                'swapped_answer': pair['swapped_answer'],
                'pred_orig': pred_orig,
                'pred_swap': pred_swap,
                'is_correct_orig': is_correct_orig,
                'is_correct_swap': is_correct_swap,
                'hs_orig': hs_orig,
                'hs_swap': hs_swap,
                'delta': delta,
            }
            records.append(record)

            # Live progress line: O = correct, X = wrong, truncated predictions.
            mark_o = "O" if is_correct_orig else "X"
            mark_s = "O" if is_correct_swap else "X"
            tqdm.write(f"  #{pair['index']:<6} {pair['category']:<6} "
                       f"orig[{mark_o}]=\"{pred_orig[:40]}\" swap[{mark_s}]=\"{pred_swap[:40]}\"")

        except Exception as e:
            logger.warning(f"Error on index {pair['index']}: {e}")
            continue

    logger.info(f"Extracted {len(records)} swap pair records")
    for group in GROUP_ORDER:
        n = sum(1 for r in records if r['group'] == group)
        logger.info(f"  {group}: {n}")
    return records


def extract_cross_group_features(
    extractor: BaseHiddenStateExtractor,
    quads: List[dict],
) -> List[dict]:
    """Extract features for cross-group quads (4 forward passes each).

    For each quad: distance original/swap plus vertical original/swap on the
    same image, yielding per-layer delta_dist and delta_vert vectors along
    with predictions and correctness flags. Failing quads are logged and
    skipped.
    """
    records = []

    for quad in tqdm(quads, desc="Cross-group quads"):
        try:
            image = decode_base64_image(quad['image_base64'])

            hs_d_orig, pred_d_orig = run_single_query(extractor, image, quad['dist_original_q'])
            hs_d_swap, pred_d_swap = run_single_query(extractor, image, quad['dist_swapped_q'])
            hs_v_orig, pred_v_orig = run_single_query(extractor, image, quad['vert_original_q'])
            hs_v_swap, pred_v_swap = run_single_query(extractor, image, quad['vert_swapped_q'])

            # Deltas only where both passes captured the layer.
            delta_dist, delta_vert = {}, {}
            for layer_idx in extractor.target_layers:
                if layer_idx in hs_d_orig and layer_idx in hs_d_swap:
                    delta_dist[layer_idx] = hs_d_swap[layer_idx] - hs_d_orig[layer_idx]
                if layer_idx in hs_v_orig and layer_idx in hs_v_swap:
                    delta_vert[layer_idx] = hs_v_swap[layer_idx] - hs_v_orig[layer_idx]

            record = {
                'index': quad['index'],
                'delta_dist': delta_dist,
                'delta_vert': delta_vert,
                'pred_d_orig': pred_d_orig,
                'pred_d_swap': pred_d_swap,
                'pred_v_orig': pred_v_orig,
                'pred_v_swap': pred_v_swap,
                'is_correct_d_orig': check_answer(pred_d_orig, quad['dist_original_answer']),
                'is_correct_d_swap': check_answer(pred_d_swap, quad['dist_swapped_answer']),
                'is_correct_v_orig': check_answer(pred_v_orig, quad['vert_original_answer']),
                'is_correct_v_swap': check_answer(pred_v_swap, quad['vert_swapped_answer']),
                'data_source': quad['data_source'],
            }
            records.append(record)

            tqdm.write(f"  #{quad['index']:<6} dist=[{pred_d_orig[:20]}/{pred_d_swap[:20]}] "
                       f"vert=[{pred_v_orig[:20]}/{pred_v_swap[:20]}]")

        except Exception as e:
            logger.warning(f"Error on cross-group index {quad['index']}: {e}")
            continue

    logger.info(f"Extracted {len(records)} cross-group quad records")
    return records
# ============================================================================
# Analysis Functions
# ============================================================================

def compute_delta_consistency(records: List[dict], target_layers: List[int]) -> dict:
    """
    For each group × layer, compute average pairwise cosine similarity among Δ vectors.
    High consistency = all swaps point in the same direction = model encodes concept linearly.
    """
    out = {}
    for grp in GROUP_ORDER:
        members = [rec for rec in records if rec['group'] == grp]
        for lyr in target_layers:
            vecs = [rec['delta'][lyr] for rec in members if lyr in rec['delta']]
            n_vecs = len(vecs)
            if n_vecs < 2:
                continue
            pairwise = cosine_similarity(np.array(vecs))
            # Only the strict upper triangle: each unordered pair once.
            upper = pairwise[np.triu_indices(n_vecs, k=1)]
            out[(grp, lyr)] = {
                'mean': float(np.mean(upper)),
                'std': float(np.std(upper)),
                'n': n_vecs,
            }
    return out


def compute_cross_group_alignment(quad_records: List[dict], target_layers: List[int]) -> dict:
    """
    For each layer, compute cos(Δ_vert, Δ_dist) per sample, then average.
    High alignment = perspective bias (vertical ≈ distance in representation).
    Also compute alignment using mean Δ vectors.
    """
    results = {}
    for lyr in target_layers:
        cos_values = []
        vert_vecs, dist_vecs = [], []

        # Keep only samples where both deltas exist and are non-degenerate.
        for rec in quad_records:
            if lyr not in rec['delta_vert'] or lyr not in rec['delta_dist']:
                continue
            dv = rec['delta_vert'][lyr]
            dd = rec['delta_dist'][lyr]
            nv = np.linalg.norm(dv)
            nd = np.linalg.norm(dd)
            if nv > 1e-10 and nd > 1e-10:
                cos_values.append(float(np.dot(dv, dd) / (nv * nd)))
                vert_vecs.append(dv)
                dist_vecs.append(dd)

        if not cos_values:
            continue

        # Alignment of the *mean* delta vectors (direction of the average swap).
        mean_v = np.mean(vert_vecs, axis=0)
        mean_d = np.mean(dist_vecs, axis=0)
        mean_alignment = float(
            np.dot(mean_v, mean_d) / (np.linalg.norm(mean_v) * np.linalg.norm(mean_d) + 1e-10)
        )

        # Permutation control: break the per-sample pairing and recompute.
        rng = np.random.RandomState(42)
        perm_means = []
        for _ in range(100):
            order = rng.permutation(len(dist_vecs))
            cos_perm = []
            for dv, dd in zip(vert_vecs, (dist_vecs[i] for i in order)):
                nv = np.linalg.norm(dv)
                nd = np.linalg.norm(dd)
                if nv > 1e-10 and nd > 1e-10:
                    cos_perm.append(np.dot(dv, dd) / (nv * nd))
            perm_means.append(np.mean(cos_perm))

        results[lyr] = {
            'per_sample_mean': float(np.mean(cos_values)),
            'per_sample_std': float(np.std(cos_values)),
            'mean_delta_alignment': mean_alignment,
            'permutation_mean': float(np.mean(perm_means)),
            'permutation_std': float(np.std(perm_means)),
            'n_samples': len(cos_values),
        }
    return results


def compute_prediction_stats(records: List[dict], scale: str) -> dict:
    """Compute accuracy stats from swap pair records (per group and overall)."""
    stats = {'scale': scale}
    totals = {'orig': 0, 'swap': 0, 'both': 0, 'n': 0}

    for grp in GROUP_ORDER:
        members = [r for r in records if r['group'] == grp]
        n = len(members)
        c_orig = sum(1 for r in members if r['is_correct_orig'])
        c_swap = sum(1 for r in members if r['is_correct_swap'])
        c_both = sum(1 for r in members if r['is_correct_orig'] and r['is_correct_swap'])

        stats[f'{grp}_n'] = n
        stats[f'{grp}_acc_orig'] = c_orig / n if n > 0 else 0
        stats[f'{grp}_acc_swap'] = c_swap / n if n > 0 else 0
        stats[f'{grp}_acc_both'] = c_both / n if n > 0 else 0

        totals['orig'] += c_orig
        totals['swap'] += c_swap
        totals['both'] += c_both
        totals['n'] += n

    n_all = totals['n']
    stats['overall_acc_orig'] = totals['orig'] / n_all if n_all > 0 else 0
    stats['overall_acc_swap'] = totals['swap'] / n_all if n_all > 0 else 0
    stats['overall_acc_both'] = totals['both'] / n_all if n_all > 0 else 0
    stats['overall_n'] = n_all
    return stats
# ============================================================================
# Saving & Loading Intermediate Results
# ============================================================================

def save_scale_results(
    scale: str,
    swap_records: List[dict],
    quad_records: List[dict],
    delta_consistency: dict,
    cross_alignment: dict,
    pred_stats: dict,
    target_layers: List[int],
    output_dir: str,
):
    """Persist every per-scale artifact (CSV, JSONs and NPZ vector dumps)."""
    # 1. Predictions CSV — one row per swap pair, column order fixed.
    pred_df = pd.DataFrame([
        {
            'index': r['index'], 'group': r['group'], 'category': r['category'],
            'pred_orig': r['pred_orig'], 'pred_swap': r['pred_swap'],
            'is_correct_orig': r['is_correct_orig'], 'is_correct_swap': r['is_correct_swap'],
        }
        for r in swap_records
    ])
    pred_df.to_csv(os.path.join(output_dir, f'predictions_{scale}.csv'), index=False)

    # 2. Delta consistency JSON — keys flattened to "<group>_L<layer>".
    with open(os.path.join(output_dir, f'delta_consistency_{scale}.json'), 'w') as fh:
        json.dump({f'{g}_L{l}': vals for (g, l), vals in delta_consistency.items()},
                  fh, indent=2)

    # 3. Cross-group alignment JSON — keys flattened to "L<layer>".
    with open(os.path.join(output_dir, f'cross_alignment_{scale}.json'), 'w') as fh:
        json.dump({f'L{l}': vals for l, vals in cross_alignment.items()}, fh, indent=2)

    # 4. Prediction stats JSON
    with open(os.path.join(output_dir, f'pred_stats_{scale}.json'), 'w') as fh:
        json.dump(pred_stats, fh, indent=2)

    # 5. Delta vectors for representative layers (for PCA) — NPZ
    rep_layers = get_representative_layers(target_layers, n=5)
    payload = {}
    for lyr in rep_layers:
        grp_list, cat_list, vec_list = [], [], []
        for r in swap_records:
            if lyr in r['delta']:
                grp_list.append(r['group'])
                cat_list.append(r['category'])
                vec_list.append(r['delta'][lyr])
        if vec_list:
            payload[f'delta_L{lyr}'] = np.array(vec_list)
            payload[f'groups_L{lyr}'] = np.array(grp_list)
            payload[f'categories_L{lyr}'] = np.array(cat_list)

        # Also save original/swapped embeddings for PCA
        orig_list, swap_list, label_list = [], [], []
        for r in swap_records:
            if lyr in r['hs_orig'] and lyr in r['hs_swap']:
                orig_list.append(r['hs_orig'][lyr])
                swap_list.append(r['hs_swap'][lyr])
                label_list.append(r['category'])
        if orig_list:
            payload[f'orig_L{lyr}'] = np.array(orig_list)
            payload[f'swap_L{lyr}'] = np.array(swap_list)
            payload[f'labels_L{lyr}'] = np.array(label_list)

    np.savez_compressed(os.path.join(output_dir, f'vectors_{scale}.npz'), **payload)

    # 6. Cross-group delta vectors for representative layers
    if quad_records:
        cg_payload = {}
        for lyr in rep_layers:
            vert_list, dist_list = [], []
            for rec in quad_records:
                if lyr in rec['delta_vert'] and lyr in rec['delta_dist']:
                    vert_list.append(rec['delta_vert'][lyr])
                    dist_list.append(rec['delta_dist'][lyr])
            if vert_list:
                cg_payload[f'delta_vert_L{lyr}'] = np.array(vert_list)
                cg_payload[f'delta_dist_L{lyr}'] = np.array(dist_list)
        np.savez_compressed(os.path.join(output_dir, f'cross_group_vectors_{scale}.npz'),
                            **cg_payload)

    logger.info(f"Saved results for scale={scale} to {output_dir}")


def load_scale_consistency(output_dir: str, scale: str) -> dict:
    """Read delta_consistency_{scale}.json back into {(group, layer): stats}."""
    path = os.path.join(output_dir, f'delta_consistency_{scale}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as fh:
        raw = json.load(fh)
    parsed = {}
    for key, vals in raw.items():
        # Invert the "<group>_L<layer>" flattening done at save time.
        group, sep, layer_txt = key.rpartition('_L')
        if sep:
            parsed[(group, int(layer_txt))] = vals
    return parsed


def load_scale_alignment(output_dir: str, scale: str) -> dict:
    """Read cross_alignment_{scale}.json back into {layer: stats}."""
    path = os.path.join(output_dir, f'cross_alignment_{scale}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as fh:
        raw = json.load(fh)
    return {int(key.replace('L', '')): vals for key, vals in raw.items()}


# ============================================================================
# Visualization
# ============================================================================

def get_representative_layers(all_layers: List[int], n: int = 5) -> List[int]:
    """Pick up to `n` evenly spaced layers from `all_layers` (order preserved)."""
    if len(all_layers) <= n:
        return list(all_layers)
    picks = np.linspace(0, len(all_layers) - 1, n, dtype=int)
    return [all_layers[idx] for idx in picks]


def plot_delta_consistency_trajectory(
    delta_consistency: dict,
    scale: str,
    model_type: str,
    save_path: str,
):
    """Plot Δ consistency (mean pairwise cosine of Δ vectors) across layers, per group."""
    fig, ax = plt.subplots(figsize=(12, 6))
    colors = {'horizontal': '#2ca02c', 'vertical': '#ff7f0e', 'distance': '#9467bd'}

    for group in GROUP_ORDER:
        # Collect (layer, mean) pairs for this group, ordered by layer.
        series = sorted(
            (lyr, vals['mean'])
            for (grp, lyr), vals in delta_consistency.items()
            if grp == group
        )
        if not series:
            continue
        xs = [p[0] for p in series]
        ys = [p[1] for p in series]
        ax.plot(xs, ys, '-o', color=colors[group], label=group, linewidth=2, markersize=3)

    ax.set_xlabel('Layer Index', fontsize=12)
    ax.set_ylabel('Δ Consistency (mean pairwise cosine)', fontsize=12)
    ax.set_title(f'{model_type.upper()} ({scale}) - Within-Group Δ Vector Consistency',
                 fontsize=14, fontweight='bold')
    ax.legend(fontsize=11)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")
def plot_cross_group_alignment_trajectory(
    cross_alignment: dict,
    scale: str,
    model_type: str,
    save_path: str,
):
    """Plot cos(Δ_vert, Δ_dist) across layers, with permutation baseline.

    Shows the per-sample mean, the mean-Δ alignment, the shuffled-pairing
    control, and a ±2σ band around the control.
    """
    fig, ax = plt.subplots(figsize=(12, 6))

    layers = sorted(cross_alignment.keys())
    actual = [cross_alignment[l]['per_sample_mean'] for l in layers]
    mean_delta = [cross_alignment[l]['mean_delta_alignment'] for l in layers]
    perm_mean = [cross_alignment[l]['permutation_mean'] for l in layers]
    perm_std = [cross_alignment[l]['permutation_std'] for l in layers]

    ax.plot(layers, actual, '-o', color='#d62728', label='cos(Δ_vert, Δ_dist) per-sample mean',
            linewidth=2.5, markersize=3)
    ax.plot(layers, mean_delta, '--s', color='#e377c2', label='cos(mean_Δ_vert, mean_Δ_dist)',
            linewidth=1.5, markersize=3)
    ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5)
    # ±2 std band around the permutation control.
    ax.fill_between(layers,
                    [m - 2*s for m, s in zip(perm_mean, perm_std)],
                    [m + 2*s for m, s in zip(perm_mean, perm_std)],
                    alpha=0.2, color='gray')

    ax.set_xlabel('Layer Index', fontsize=12)
    ax.set_ylabel('Cosine Alignment', fontsize=12)
    ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Δ Alignment (Perspective Bias)',
                 fontsize=14, fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_pca_embeddings(
    vectors_npz_path: str,
    scale: str,
    model_type: str,
    save_dir: str,
):
    """PCA visualization for representative layers.

    Produces one 3-panel figure per layer: embeddings by category, Δ vectors
    by group, and Δ vectors by category.
    """
    # PCA is not in the visible top-of-file imports (only cosine_similarity
    # is pulled from sklearn); a local import guarantees the name is bound
    # here and is harmless if a module-level import exists elsewhere.
    # NOTE(review): confirm whether sklearn.decomposition.PCA is imported
    # at module level in the unseen part of the file.
    from sklearn.decomposition import PCA

    data = np.load(vectors_npz_path, allow_pickle=True)

    # Find available layers from the "orig_L<layer>" keys written at save time.
    layer_keys = [k for k in data.files if k.startswith('orig_L')]
    layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys])

    for layer in layers:
        orig = data.get(f'orig_L{layer}')
        swap = data.get(f'swap_L{layer}')
        labels = data.get(f'labels_L{layer}')
        deltas = data.get(f'delta_L{layer}')
        groups = data.get(f'groups_L{layer}')
        cats = data.get(f'categories_L{layer}')

        if orig is None or swap is None:
            continue

        # ---- Plot 1: Original embeddings colored by category ----
        fig, axes = plt.subplots(1, 3, figsize=(24, 7))

        pca = PCA(n_components=2)
        all_vecs = np.vstack([orig, swap])
        all_pca = pca.fit_transform(all_vecs)
        orig_pca = all_pca[:len(orig)]
        swap_pca = all_pca[len(orig):]

        cat_colors = {
            'left': '#1f77b4', 'right': '#aec7e8',
            'above': '#ff7f0e', 'under': '#ffbb78',
            'far': '#2ca02c', 'close': '#98df8a',
        }

        ax = axes[0]
        for cat in CATEGORY_ORDER:
            mask = np.array([str(l) == cat for l in labels])
            if mask.any():
                ax.scatter(orig_pca[mask, 0], orig_pca[mask, 1],
                           c=cat_colors.get(cat, 'gray'), label=f'{cat} (orig)',
                           alpha=0.5, s=15, marker='o')
                ax.scatter(swap_pca[mask, 0], swap_pca[mask, 1],
                           c=cat_colors.get(cat, 'gray'),
                           alpha=0.5, s=15, marker='x')
        ax.set_title('Embeddings by Category\n(o=original, x=swapped)', fontsize=11)
        ax.legend(fontsize=7, ncol=2, loc='best')
        ax.grid(True, alpha=0.2)

        # Fit the Δ-vector projection once, up front: the original bound
        # `delta_pca` only inside the `groups is not None` branch and then
        # reused it in the `cats` branch, which would raise NameError when
        # groups is missing but categories is present.
        delta_pca = None
        if deltas is not None:
            delta_pca = PCA(n_components=2).fit_transform(deltas)

        # ---- Plot 2: Δ vectors colored by group ----
        ax = axes[1]
        if delta_pca is not None and groups is not None:
            group_colors = {'horizontal': '#2ca02c', 'vertical': '#ff7f0e', 'distance': '#9467bd'}
            for group in GROUP_ORDER:
                mask = np.array([str(g) == group for g in groups])
                if mask.any():
                    ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
                               c=group_colors.get(group, 'gray'), label=group,
                               alpha=0.5, s=15)
            ax.set_title('Δ Vectors by Group', fontsize=11)
            ax.legend(fontsize=9)
            ax.grid(True, alpha=0.2)

        # ---- Plot 3: Δ vectors colored by specific category ----
        ax = axes[2]
        if delta_pca is not None and cats is not None:
            # Reuse the shared delta projection computed above.
            for cat in CATEGORY_ORDER:
                mask = np.array([str(c) == cat for c in cats])
                if mask.any():
                    ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
                               c=cat_colors.get(cat, 'gray'), label=cat,
                               alpha=0.5, s=15)
            ax.set_title('Δ Vectors by Category', fontsize=11)
            ax.legend(fontsize=8, ncol=2)
            ax.grid(True, alpha=0.2)

        fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - PCA',
                     fontsize=14, fontweight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, bbox_inches='tight')
        plt.close()

    logger.info(f"Saved PCA plots to {save_dir}")
def plot_cross_scale_consistency(
    all_consistency: Dict[str, dict],
    model_type: str,
    save_path: str,
):
    """One panel per group: Δ-consistency trajectories overlaid for every scale."""
    fig, axes = plt.subplots(1, 3, figsize=(21, 6))
    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']

    for panel, group in zip(axes, GROUP_ORDER):
        for scale in scale_order:
            consistency = all_consistency.get(scale)
            if consistency is None:
                continue
            # (layer, mean) pairs for this group, ordered by layer index.
            series = sorted(
                (lyr, vals['mean'])
                for (grp, lyr), vals in consistency.items()
                if grp == group
            )
            if not series:
                continue
            xs = [p[0] for p in series]
            ys = [p[1] for p in series]
            panel.plot(xs, ys, '-', color=SCALE_COLORS.get(scale, 'gray'),
                       label=scale, linewidth=2)

        panel.set_xlabel('Layer Index', fontsize=11)
        panel.set_ylabel('Δ Consistency', fontsize=11)
        panel.set_title(f'{group}', fontsize=13, fontweight='bold')
        panel.legend(fontsize=9)
        panel.grid(True, alpha=0.3)

    fig.suptitle(f'{model_type.upper()} - Δ Consistency Across Scales',
                 fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_cross_scale_alignment(
    all_alignment: Dict[str, dict],
    model_type: str,
    save_path: str,
):
    """Overlay per-sample cross-group alignment trajectories for every scale."""
    fig, ax = plt.subplots(figsize=(12, 6))
    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']

    for scale in scale_order:
        if scale not in all_alignment:
            continue
        alignment = all_alignment[scale]
        layer_ids = sorted(alignment)
        curve = [alignment[lyr]['per_sample_mean'] for lyr in layer_ids]
        ax.plot(layer_ids, curve, '-', color=SCALE_COLORS.get(scale, 'gray'),
                label=scale, linewidth=2)

    ax.set_xlabel('Layer Index', fontsize=12)
    ax.set_ylabel('cos(Δ_vert, Δ_dist)', fontsize=12)
    ax.set_title(f'{model_type.upper()} - Cross-Group Alignment Across Scales\n'
                 f'(High=entangled, Low=disentangled)',
                 fontsize=14, fontweight='bold')
    ax.legend(fontsize=10)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_summary_barplot(
    all_consistency: Dict[str, dict],
    all_alignment: Dict[str, dict],
    model_type: str,
    save_path: str,
):
    """Summary bar plot: for the deepest layer, show Δ consistency per group + alignment."""
    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
    available_scales = [s for s in scale_order if s in all_consistency]

    if not available_scales:
        return

    # Deepest layer, taken from the first available scale's keys.
    max_layer = max(lyr for (_, lyr) in all_consistency[available_scales[0]].keys())

    fig, axes = plt.subplots(1, 2, figsize=(16, 6))

    # Left panel: grouped bars of Δ consistency, one bar cluster per group.
    ax = axes[0]
    x = np.arange(len(GROUP_ORDER))
    width = 0.8 / len(available_scales)
    for i, scale in enumerate(available_scales):
        cons = all_consistency[scale]
        heights = [cons.get((grp, max_layer), {}).get('mean', 0) for grp in GROUP_ORDER]
        offset = (i - len(available_scales) / 2 + 0.5) * width
        ax.bar(x + offset, heights, width, label=scale,
               color=SCALE_COLORS.get(scale, 'gray'))
    ax.set_xticks(x)
    ax.set_xticklabels(GROUP_ORDER)
    ax.set_ylabel('Δ Consistency')
    ax.set_title(f'Δ Consistency at Layer {max_layer}', fontweight='bold')
    ax.legend(fontsize=8)
    ax.grid(True, alpha=0.3, axis='y')

    # Right panel: one cross-group-alignment bar per scale.
    ax = axes[1]
    align_scales = [s for s in available_scales if s in all_alignment]
    if align_scales:
        heights = [all_alignment[s].get(max_layer, {}).get('per_sample_mean', 0)
                   for s in align_scales]
        bar_colors = [SCALE_COLORS.get(s, 'gray') for s in align_scales]

        ax.bar(range(len(heights)), heights, color=bar_colors)
        ax.set_xticks(range(len(heights)))
        ax.set_xticklabels(align_scales, fontsize=10)
        ax.set_ylabel('cos(Δ_vert, Δ_dist)')
        ax.set_title(f'Cross-Group Alignment at Layer {max_layer}\n(Lower=more disentangled)',
                     fontweight='bold')
        ax.grid(True, alpha=0.3, axis='y')

    fig.suptitle(f'{model_type.upper()} - Summary at Deepest Layer',
                 fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


# ============================================================================
# Main Pipeline
# ============================================================================
def process_scale(args, scale: str, swap_pairs: List[dict], quads: List[dict]):
    """Process a single scale: extract features, analyze, save.

    Runs the full per-checkpoint pipeline: feature extraction (Phases A/B),
    similarity analysis (Phase C), artifact persistence (Phase D) and
    per-scale plots (Phase E), then frees the model.
    """
    # Resolve the checkpoint path for this (model_type, scale) pair.
    model_configs = MODEL_CONFIGS[args.model_type]
    model_path = model_configs[scale]

    logger.info(f"\n{'='*60}")
    logger.info(f"Processing {args.model_type} - {scale}")
    logger.info(f"Model path: {model_path}")
    logger.info(f"{'='*60}")

    extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device)
    target_layers = extractor.target_layers

    # Phase A: Extract swap pair features
    logger.info("\n--- Phase A: Extracting swap pair features ---")
    swap_records = extract_swap_features(extractor, swap_pairs,
                                         max_samples_per_category=args.max_samples_per_category)

    # Phase B: Extract cross-group features (skipped when no quads were built)
    logger.info("\n--- Phase B: Extracting cross-group features ---")
    quad_records = extract_cross_group_features(extractor, quads) if quads else []

    # Phase C: Analyze
    logger.info("\n--- Phase C: Analysis ---")
    delta_consistency = compute_delta_consistency(swap_records, target_layers)
    cross_alignment = compute_cross_group_alignment(quad_records, target_layers)
    pred_stats = compute_prediction_stats(swap_records, scale)

    # Log key results at the deepest layer only.
    max_layer = max(target_layers)
    for group in GROUP_ORDER:
        key = (group, max_layer)
        if key in delta_consistency:
            logger.info(f"  Δ consistency [{group}, L{max_layer}]: "
                        f"{delta_consistency[key]['mean']:.4f} ± {delta_consistency[key]['std']:.4f}")
    if max_layer in cross_alignment:
        ca = cross_alignment[max_layer]
        logger.info(f"  Cross-group alignment L{max_layer}: "
                    f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})")

    logger.info(f"  Accuracy orig={pred_stats['overall_acc_orig']:.1%}, "
                f"swap={pred_stats['overall_acc_swap']:.1%}, "
                f"both={pred_stats['overall_acc_both']:.1%}")

    # Phase D: Save results
    logger.info("\n--- Phase D: Saving results ---")
    output_dir = os.path.join(args.output_dir, args.model_type)
    os.makedirs(output_dir, exist_ok=True)

    save_scale_results(
        scale, swap_records, quad_records, delta_consistency,
        cross_alignment, pred_stats, target_layers, output_dir,
    )

    # Phase E: Per-scale plots
    logger.info("\n--- Phase E: Per-scale plots ---")
    plots_dir = os.path.join(output_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    plot_delta_consistency_trajectory(
        delta_consistency, scale, args.model_type,
        os.path.join(plots_dir, f'delta_consistency_{scale}.png')
    )

    if cross_alignment:
        plot_cross_group_alignment_trajectory(
            cross_alignment, scale, args.model_type,
            os.path.join(plots_dir, f'cross_alignment_{scale}.png')
        )

    # PCA plots only when the vectors NPZ was actually written.
    npz_path = os.path.join(output_dir, f'vectors_{scale}.npz')
    if os.path.exists(npz_path):
        pca_dir = os.path.join(plots_dir, 'pca')
        os.makedirs(pca_dir, exist_ok=True)
        plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir)

    # Cleanup: drop large record lists and free the model before the next scale.
    del swap_records, quad_records
    extractor.cleanup()

    logger.info(f"\n  Scale {scale} complete.")


def run_merge(args):
    """Merge mode: load per-scale results, generate cross-scale comparisons.

    Reads back the JSON artifacts written by save_scale_results() for every
    requested scale, then emits cross-scale plots and a summary CSV.
    """
    output_dir = os.path.join(args.output_dir, args.model_type)
    plots_dir = os.path.join(output_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer']
    available_scales = [s for s in scale_order if s in args.scales]

    all_consistency = {}
    all_alignment = {}

    # Re-load the per-scale JSON artifacts from disk.
    for scale in available_scales:
        cons = load_scale_consistency(output_dir, scale)
        if cons:
            all_consistency[scale] = cons
            logger.info(f"Loaded consistency for {scale}: {len(cons)} entries")

        align = load_scale_alignment(output_dir, scale)
        if align:
            all_alignment[scale] = align
            logger.info(f"Loaded alignment for {scale}: {len(align)} entries")

    # Cross-scale plots (need at least two scales to compare).
    if len(all_consistency) > 1:
        plot_cross_scale_consistency(
            all_consistency, args.model_type,
            os.path.join(plots_dir, 'cross_scale_consistency.png')
        )

    if len(all_alignment) > 1:
        plot_cross_scale_alignment(
            all_alignment, args.model_type,
            os.path.join(plots_dir, 'cross_scale_alignment.png')
        )

    if all_consistency:
        plot_summary_barplot(
            all_consistency, all_alignment, args.model_type,
            os.path.join(plots_dir, 'summary_barplot.png')
        )

    # Summary CSV: one row per scale (accuracy stats + deepest-layer alignment).
    summary_rows = []
    for scale in available_scales:
        pred_path = os.path.join(output_dir, f'pred_stats_{scale}.json')
        if os.path.exists(pred_path):
            with open(pred_path) as f:
                row = json.load(f)
            # Add alignment at deepest layer
            if scale in all_alignment:
                max_layer = max(all_alignment[scale].keys())
                row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
                row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
            summary_rows.append(row)

    if summary_rows:
        pd.DataFrame(summary_rows).to_csv(
            os.path.join(output_dir, 'summary.csv'), index=False
        )
        logger.info(f"Saved summary CSV")

    logger.info(f"\n=== Merge Complete ===\nResults in: {output_dir}")
def main():
    """CLI entry point: parse arguments, then run merge mode or the per-scale pipeline."""
    ap = argparse.ArgumentParser(description='Exp 2-A Swap Analysis')
    ap.add_argument('--data_path', type=str,
                    default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv')
    ap.add_argument('--model_type', type=str, required=True,
                    choices=['molmo', 'nvila', 'qwen'])
    ap.add_argument('--scales', type=str, nargs='+',
                    default=['vanilla', '80k', '400k', '800k', '2m'])
    ap.add_argument('--output_dir', type=str,
                    default='/data/shared/Qwen/experiments/exp2a_swap_analysis/results')
    ap.add_argument('--device', type=str, default='cuda')
    ap.add_argument('--seed', type=int, default=42)
    ap.add_argument('--merge', action='store_true',
                    help='Merge mode: read per-scale results and generate cross-scale plots.')
    ap.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer')
    ap.add_argument('--skip-cross-group', action='store_true',
                    help='Skip cross-group analysis (faster, no HF dataset needed).')
    ap.add_argument('--max-samples-per-category', type=int, default=200,
                    help='Limit samples per category (default=200). Set 0 for no limit.')

    args = ap.parse_args()

    # NVILA additionally gets the roborefer checkpoint unless opted out.
    wants_roborefer = (args.model_type == 'nvila'
                       and 'roborefer' not in args.scales
                       and not args.no_auto_roborefer)
    if wants_roborefer:
        args.scales.append('roborefer')

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    # Merge mode: only aggregate existing per-scale results, no inference.
    if args.merge:
        logger.info("\n=== MERGE MODE ===")
        run_merge(args)
        return

    # Normal mode
    logger.info("\n=== Loading & Creating Swap Pairs ===")
    swap_pairs = load_swap_pairs(args.data_path, args.seed)

    # Cross-group quads (best effort: degrade gracefully when HF data is missing).
    quads = []
    if not args.skip_cross_group:
        try:
            hf_cache = build_hf_bbox_cache()
            quads = create_cross_group_quads(swap_pairs, hf_cache)
        except Exception as e:
            logger.warning(f"Cross-group setup failed: {e}. Skipping cross-group analysis.")
            quads = []

    model_configs = MODEL_CONFIGS[args.model_type]

    for scale in args.scales:
        if scale not in model_configs:
            logger.warning(f"Scale {scale} not in config for {args.model_type}, skipping...")
            continue

        model_path = model_configs[scale]
        # Hub-style ids don't exist on disk; only check local paths.
        is_hub_path = model_path.startswith(('Qwen/', 'allenai/'))
        if not is_hub_path and not os.path.exists(model_path):
            logger.warning(f"Model path not found: {model_path}, skipping...")
            continue

        try:
            process_scale(args, scale, swap_pairs, quads)
        except Exception as e:
            logger.error(f"Failed {args.model_type} - {scale}: {e}")
            import traceback
            traceback.print_exc()
            continue

    logger.info(f"\n{'='*60}")
    logger.info("=== All scales complete ===")
    logger.info(f"Results: {os.path.join(args.output_dir, args.model_type)}")
    logger.info(f"{'='*60}")


if __name__ == '__main__':
    main()
0000000000000000000000000000000000000000..35ee16ab16c363dea3c514d8a50e8314f1f57a5f
--- /dev/null
+++ b/exp2a_swap_analysis/run_swap_molmo.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+set -e
+
+SCRIPT="/data/shared/Qwen/experiments/exp2a_swap_analysis/exp2a_swap_analysis.py"
+PYTHON="conda run --no-capture-output -n molmo python"
+MODEL="molmo"
+LOG_DIR="/data/shared/Qwen/experiments/exp2a_swap_analysis/logs/${MODEL}"
+mkdir -p "$LOG_DIR"
+
+# SCALES and GPUS pair by index: scale i runs on GPU i in the background.
+SCALES=("vanilla" "80k" "400k" "800k" "2m")
+GPUS=(7 6 5 1 0)
+
+echo "========================================="
+echo " Molmo Swap Analysis: Launching ${#SCALES[@]} scales in parallel"
+echo "========================================="
+
+PIDS=()
+for i in "${!SCALES[@]}"; do
+    scale="${SCALES[$i]}"
+    gpu="${GPUS[$i]}"
+    log="${LOG_DIR}/${scale}.log"
+
+    echo "[GPU $gpu] $scale -> $log"
+    CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \
+        --model_type $MODEL \
+        --scales $scale \
+        --device cuda \
+        --no-auto-roborefer \
+        > "$log" 2>&1 &
+    PIDS+=($!)
+done
+
+echo ""
+echo "Waiting for all ${#PIDS[@]} processes..."
+echo "PIDs: ${PIDS[*]}"
+echo ""
+
+# Wait per-PID so individual failures are reported without aborting the loop
+# (a failing `wait` inside an `if` condition is exempt from `set -e`).
+FAILED=0
+for i in "${!PIDS[@]}"; do
+    pid="${PIDS[$i]}"
+    scale="${SCALES[$i]}"
+    if wait $pid; then
+        echo "[DONE] $scale (PID $pid) - SUCCESS"
+    else
+        echo "[FAIL] $scale (PID $pid) - EXIT CODE $?"
+        FAILED=$((FAILED + 1))
+    fi
+done
+
+echo ""
+if [ $FAILED -gt 0 ]; then
+    echo "WARNING: $FAILED scale(s) failed. Check logs in $LOG_DIR"
+fi
+
+echo "========================================="
+echo " Molmo Swap Analysis: Running merge"
+echo "========================================="
+# Merge runs even if some scales failed: it aggregates whatever results exist.
+$PYTHON $SCRIPT --model_type $MODEL --merge 2>&1 | tee "${LOG_DIR}/merge.log"
+
+echo ""
+echo "ALL DONE: $MODEL"
+echo "Results: /data/shared/Qwen/experiments/exp2a_swap_analysis/results/${MODEL}/"
\ No newline at end of file
diff --git a/exp2a_swap_analysis/run_swap_nvila.sh b/exp2a_swap_analysis/run_swap_nvila.sh
new file mode 100644
index 0000000000000000000000000000000000000000..737048e3c79d8ed31ca0e512e177baf2ad3f350f
--- /dev/null
+++ b/exp2a_swap_analysis/run_swap_nvila.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+set -e
+
+SCRIPT="/data/shared/Qwen/experiments/exp2a_swap_analysis/exp2a_swap_analysis.py"
+PYTHON="conda run --no-capture-output -n vila python"
+MODEL="nvila"
+LOG_DIR="/data/shared/Qwen/experiments/exp2a_swap_analysis/logs/${MODEL}"
+mkdir -p "$LOG_DIR"
+
+# NVILA has 6 scales (including roborefer)
+# SCALES and GPUS pair by index: scale i runs on GPU i in the background.
+SCALES=("vanilla" "80k" "400k" "800k" "2m" "roborefer")
+GPUS=(7 6 5 4 3 2)
+
+echo "========================================="
+echo " NVILA Swap Analysis: Launching ${#SCALES[@]} scales in parallel"
+echo "========================================="
+
+PIDS=()
+for i in "${!SCALES[@]}"; do
+    scale="${SCALES[$i]}"
+    gpu="${GPUS[$i]}"
+    log="${LOG_DIR}/${scale}.log"
+
+    echo "[GPU $gpu] $scale -> $log"
+    CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \
+        --model_type $MODEL \
+        --scales $scale \
+        --device cuda \
+        --no-auto-roborefer \
+        > "$log" 2>&1 &
+    PIDS+=($!)
+done
+
+echo ""
+echo "Waiting for all ${#PIDS[@]} processes..."
+echo "PIDs: ${PIDS[*]}"
+echo ""
+
+# Wait per-PID so individual failures are reported without aborting the loop.
+FAILED=0
+for i in "${!PIDS[@]}"; do
+    pid="${PIDS[$i]}"
+    scale="${SCALES[$i]}"
+    if wait $pid; then
+        echo "[DONE] $scale (PID $pid) - SUCCESS"
+    else
+        echo "[FAIL] $scale (PID $pid) - EXIT CODE $?"
+        FAILED=$((FAILED + 1))
+    fi
+done
+
+echo ""
+if [ $FAILED -gt 0 ]; then
+    echo "WARNING: $FAILED scale(s) failed. Check logs in $LOG_DIR"
+fi
+
+echo "========================================="
+echo " NVILA Swap Analysis: Running merge"
+echo "========================================="
+# Merge runs even if some scales failed: it aggregates whatever results exist.
+$PYTHON $SCRIPT --model_type $MODEL --merge 2>&1 | tee "${LOG_DIR}/merge.log"
+
+echo ""
+echo "ALL DONE: $MODEL"
+echo "Results: /data/shared/Qwen/experiments/exp2a_swap_analysis/results/${MODEL}/"
\ No newline at end of file
diff --git a/exp2a_swap_analysis/run_swap_qwen.sh b/exp2a_swap_analysis/run_swap_qwen.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5b9a0f2171f849be225c3530bae57974a0fcc51d
--- /dev/null
+++ b/exp2a_swap_analysis/run_swap_qwen.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+set -e
+
+SCRIPT="/data/shared/Qwen/experiments/exp2a_swap_analysis/exp2a_swap_analysis.py"
+PYTHON="/usr/bin/python3"
+MODEL="qwen"
+LOG_DIR="/data/shared/Qwen/experiments/exp2a_swap_analysis/logs/${MODEL}"
+mkdir -p "$LOG_DIR"
+
+# SCALES and GPUS pair by index: scale i runs on GPU i in the background.
+SCALES=("vanilla" "80k" "400k" "800k" "2m")
+GPUS=(7 6 5 4 3)
+
+echo "========================================="
+echo " Qwen Swap Analysis: Launching ${#SCALES[@]} scales in parallel"
+echo "========================================="
+
+PIDS=()
+for i in "${!SCALES[@]}"; do
+    scale="${SCALES[$i]}"
+    gpu="${GPUS[$i]}"
+    log="${LOG_DIR}/${scale}.log"
+
+    echo "[GPU $gpu] $scale -> $log"
+    CUDA_VISIBLE_DEVICES=$gpu $PYTHON $SCRIPT \
+        --model_type $MODEL \
+        --scales $scale \
+        --device cuda \
+        --no-auto-roborefer \
+        > "$log" 2>&1 &
+    PIDS+=($!)
+done
+
+echo ""
+echo "Waiting for all ${#PIDS[@]} processes..."
+echo "PIDs: ${PIDS[*]}"
+echo ""
+
+# Wait per-PID so individual failures are reported without aborting the loop.
+FAILED=0
+for i in "${!PIDS[@]}"; do
+    pid="${PIDS[$i]}"
+    scale="${SCALES[$i]}"
+    if wait $pid; then
+        echo "[DONE] $scale (PID $pid) - SUCCESS"
+    else
+        echo "[FAIL] $scale (PID $pid) - EXIT CODE $?"
+        FAILED=$((FAILED + 1))
+    fi
+done
+
+echo ""
+if [ $FAILED -gt 0 ]; then
+    echo "WARNING: $FAILED scale(s) failed. Check logs in $LOG_DIR"
+fi
+
+echo "========================================="
+echo " Qwen Swap Analysis: Running merge"
+echo "========================================="
+# Merge runs even if some scales failed: it aggregates whatever results exist.
+$PYTHON $SCRIPT --model_type $MODEL --merge 2>&1 | tee "${LOG_DIR}/merge.log"
+
+echo ""
+echo "ALL DONE: $MODEL"
+echo "Results: /data/shared/Qwen/experiments/exp2a_swap_analysis/results/${MODEL}/"
\ No newline at end of file
diff --git a/spatial-representation-probe/README-0226.md b/spatial-representation-probe/README-0226.md
new file mode 100644
index 0000000000000000000000000000000000000000..4da17e872c8682a2ca9abc08b276d4d13c7a5248
--- /dev/null
+++ b/spatial-representation-probe/README-0226.md
@@ -0,0 +1,103 @@
+# Swap Analysis: Minimal Pair Probing for Spatial Representations
+
+This repository contains `swap_analysis.py`, a comprehensive pipeline for evaluating and visualizing how Vision-Language Models (VLMs) represent spatial relationships.
+
+The script works by creating **minimal pairs** from spatial questions. It takes an original query and swaps the target and reference objects, measuring how the model's hidden states and predictions change in response.
+
+**Example Minimal Pair:**
+* *Original:* "Is A to the left or right of B?" ➔ Expected: `left`
+* *Swapped:* "Is B to the left or right of A?" ➔ Expected: `right`
+
+## 🌟 Key Features & Analyses
+
+The script runs inference, extracts hidden states across model layers, and performs the following analyses:
+1. **Difference Vectors (Deltas):** Computes $\Delta = \text{feature(swapped)} - \text{feature(original)}$.
+2. **Within-Category Delta Consistency:** Measures if all swaps of a specific category (e.g., left $\rightarrow$ right) point in the same direction in the latent space.
+3. **Sign-Corrected Group Consistency:** Aligns opposite categories by flipping their vectors to check global axis consistency.
+4. **Cross-Group Delta Alignment:** Compares orthogonal dimensions (e.g., $\Delta_{vertical}$ vs.
$\Delta_{distance}$) to detect perspective bias. +5. **Similarity Heatmaps:** Generates $6 \times 6$ cross-category cosine similarity matrices based on mean deltas. +6. **Prediction Statistics:** Tracks and visualizes original, swapped, and "both-correct" accuracy trajectories across different data scales. +7. **PCA Visualizations:** Plots 2D and 3D PCA projections of per-sample embeddings and delta vectors. +8. **Robust Filtering:** Isolates analyses to "both-correct" samples to ensure representations are tied to successful spatial understanding. + +## 🤖 Supported Models + +The script natively supports hidden-state extraction for multiple model architectures, segmented into legacy base models, new large models, and merge-only configurations (for cross-scale plotting). + +* **Legacy (Qwen2.5-VL-3B scale experiments):** + * `molmo` (Molmo-7B-O variants) + * `nvila` (NVILA-Lite-2B variants) + * `nvila_synthetic` (NVILA mixed-data variants) + * `qwen` (Qwen2.5-VL-3B variants) +* **New Large Models:** + * `molmo_big` (Molmo2-8B) + * `qwen_big` (Qwen3-VL-32B-Instruct) + * `qwen_super` (Qwen3-VL-235B-A22B-Instruct) + * `big_trio` (Molmo2-8B + RoboRefer + Qwen3-VL-32B) +* **Merge-Only (Requires `--merge`):** + * `molmo_all` (Combines `molmo` and `molmo_big` outputs) + * `qwen_all` (Combines `qwen` and `qwen_big` outputs) + * `nvila_synth_compare` (Compares NVILA baselines against synthetic-mix checkpoints) + +## 🚀 Usage + +### 1. Standard Inference +Extract features and generate single-scale analyses for a specific model family. + +```bash +# Legacy model evaluation +python swap_analysis.py --model_type qwen + +# New large model evaluation +python swap_analysis.py --model_type qwen_big + +``` + +### 2. Merge Mode (Cross-Scale Analysis) + +Once you have run inference on individual scales or models, use the `--merge` flag to aggregate the JSON/NPZ data and generate cross-scale trajectory plots. 
+ +```bash +# Combine qwen base scales with qwen_big (Qwen3-32B) results +python swap_analysis.py --model_type qwen_all --merge + +``` + +### Command Line Arguments + +| Argument | Description | Default | +| --- | --- | --- | +| `--model_type` | **(Required)** The model architecture/family to run. | None | +| `--data_path` | Path to the `EmbSpatial-Bench.tsv` dataset. | `/data/.../EmbSpatial-Bench.tsv` | +| `--scales` | Specific scales to process (e.g., `vanilla`, `80k`). If omitted, runs all default scales for the model. | *Model-dependent* | +| `--output_dir` | Base directory for saving CSVs, JSONs, NPZs, and plots. | `/data/.../results` | +| `--merge` | Generates cross-scale/cross-model comparison plots from saved data instead of running inference. | `False` | +| `--question-type` | `mcq` for A/B letter answers or `short` for single-word generation. | `mcq` | +| `--max-samples-per-category` | Limit samples per spatial category for faster debugging/runs. | `200` | +| `--no-filtering` | Disables filtering of 'Unknown' reference objects in distance queries. | `False` | + +## 📂 Output Structure + +Results are saved in your specified `--output_dir` under a subfolder named after the `--model_type`. + +```text +results/{model_type}/ +├── csv/ # Delta heatmaps, prediction rows, and cross-scale summaries +├── json/ # Consistency metrics, alignments, and validity checks per scale +├── npz/ # Raw hidden states and delta vectors for offline analysis +└── plots/ # Visualizations + ├── all/ # Unfiltered analysis plots (PCA, bar charts, heatmaps) + ├── both_correct/ # Strict analysis plots (only pairs where model got both right) + └── accuracy/ # Grouped and per-category accuracy bar/line charts + +``` + +## 🛠 Prerequisites + +* `torch` +* `transformers` +* `pandas`, `numpy`, `scikit-learn` +* `matplotlib`, `seaborn`, `tqdm` +* `Pillow` +* *Model-specific libraries:* `qwen_vl_utils`, `llava`, `olmo` (depending on the models being tested). 
+ diff --git a/spatial-representation-probe/README.md b/spatial-representation-probe/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e17abada3e0d99d0de0f5a3f53c4a1fcd415b1e7 --- /dev/null +++ b/spatial-representation-probe/README.md @@ -0,0 +1,113 @@ +# Swap Analysis: Minimal Pair Probing for Spatial Representations + +This repository contains `swap_analysis.py`, a comprehensive pipeline for evaluating and visualizing how Vision-Language Models (VLMs) represent spatial relationships. + +The script creates **minimal pairs** from spatial questions by swapping the target and reference objects, measuring how the model's hidden states and predictions change in response. + +**Example Minimal Pair:** +* *Original:* "Is A to the left or right of B?" ➔ Expected: `left` +* *Swapped:* "Is B to the left or right of A?" ➔ Expected: `right` + +## 🌟 Key Features & Analyses + +The script runs inference, extracts hidden states across model layers, and performs the following analyses: +1. **Difference Vectors (Deltas):** Computes $\Delta = \text{feature(swapped)} - \text{feature(original)}$. +2. **Within-Category Delta Consistency:** Measures if all swaps of a specific category (e.g., left $\rightarrow$ right) point in the same direction in the latent space. +3. **Sign-Corrected Group Consistency:** Aligns opposite categories by flipping their vectors to check global axis consistency. +4. **Cross-Group Delta Alignment:** Compares orthogonal dimensions (e.g., $\Delta_{vertical}$ vs. $\Delta_{distance}$) to detect perspective bias. +5. **Similarity Heatmaps:** Generates $6 \times 6$ cross-category cosine similarity matrices based on mean deltas. +6. **Prediction Statistics:** Tracks and visualizes original, swapped, and "both-correct" accuracy trajectories across different data scales. +7. **PCA Visualizations:** Plots 2D and 3D PCA projections of per-sample embeddings and delta vectors. +8. 
**Robust Filtering:** Isolates analyses to "both-correct" samples to ensure representations are tied to successful spatial understanding. + +## 🤖 Supported Models + +The pipeline supports multiple model architectures, segmented into legacy base models, new large models, and merge-only configurations for cross-scale evaluations. + +* **Legacy (Qwen2.5-VL-3B scale experiments):** + * `molmo` (Molmo-7B-O variants) + * `nvila` (NVILA-Lite-2B variants, including `roborefer` and `roborefer_depth`) + * `nvila_synthetic` (NVILA mixed-data variants) + * `qwen` (Qwen2.5-VL-3B variants) +* **New Large Models:** + * `molmo_big` (Molmo2-8B) + * `qwen_big` (Qwen3-VL-32B-Instruct) + * `qwen_super` (Qwen3-VL-235B-A22B-Instruct) + * `big_trio` (Molmo2-8B + RoboRefer + Qwen3-VL-32B) +* **Merge-Only (Requires `--merge`):** + * `molmo_all` (Combines `molmo` and `molmo_big` outputs) + * `qwen_all` (Combines `qwen` and `qwen_big` outputs) + * `nvila_synth_compare` (Compares NVILA baselines against synthetic-mix checkpoints) + +## 🚀 Usage + +### 1. Standard Inference +Extract features and generate single-scale analyses. Outputs will be saved in `{question_type}/saved_data/{model_type}_{scale}/`. + +```bash +# Evaluate standard legacy models +python swap_analysis.py --model_type qwen --scales vanilla 80k + +# Evaluate specific modalities (e.g., RoboRefer with depth) +python swap_analysis.py --model_type nvila --scales roborefer_depth + +``` + +### 2. Merge Mode (Cross-Scale Analysis) + +Aggregate JSON/NPZ data from previously run individual scales to generate cross-scale trajectory plots and summaries. + +```bash +# Combine qwen base scales with qwen_big (Qwen3-32B) results into a specific compare group +python swap_analysis.py --model_type qwen_all --merge --group-name qwen_scaling_trajectory + +``` + +## ⚙️ Command Line Arguments + +| Argument | Description | Default | +| --- | --- | --- | +| `--model_type` | **(Required)** The model architecture/family to run. 
| None | +| `--data_path` | Path to the `EmbSpatial-Bench.tsv` dataset. | `/data/.../EmbSpatial-Bench.tsv` | +| `--scales` | Specific scales to process (e.g., `vanilla`, `80k`). If omitted, runs default scales for the chosen model. | *Model-dependent* | +| `--question-type` | `short_answer` (single word output) or `mcq` (A/B letter choice). Dictates root output folder. | `short_answer` | +| `--output_dir` | Root directory for saved data. | `./{question_type}/saved_data` | +| `--merge` | Generates cross-scale comparison plots from saved data instead of running inference. | `False` | +| `--group-name` | Folder name under `compare/` for merged cross-scale outputs. | Same as `--model_type` | +| `--max-samples-per-category` | Limit samples per spatial category for faster debugging/runs. | `200` | +| `--no-filtering` | Disables filtering of 'Unknown' reference objects in distance queries. | `False` | +| `--no-auto-roborefer` | Prevents automatic inclusion of `roborefer` scale when running `nvila`. | `False` | + +## 📂 Output Directory Structure + +The script organizes outputs based on the `question_type`, isolating raw scale data from merged comparison views. 
+ +```text +{question_type}/ +├── logs/ +│ ├── {model_type}_{scale}.log # Per-scale inference logs +│ └── {group_name}.log # Merge/Compare logs +├── saved_data/ +│ └── {model_type}_{scale}/ # Individual scale outputs +│ ├── csv/ # Delta heatmaps, predictions +│ ├── json/ # Consistency metrics, alignment, validity +│ ├── npz/ # Raw hidden states & deltas (vectors) +│ └── plots/ # Single-scale PCA, bar charts, heatmaps +└── compare/ + └── {group_name}/ # Cross-scale merged outputs (via --merge) + ├── csv/ # summary.csv across all scales + └── plots/ + ├── accuracy/ # Trajectory and per-category accuracy + ├── all/ # Unfiltered cross-scale plots + └── both_correct/ # Filtered (both-correct) cross-scale plots + +``` + +## 🛠 Prerequisites + +* `torch` +* `transformers` +* `pandas`, `numpy`, `scikit-learn` +* `matplotlib`, `seaborn`, `tqdm` +* `Pillow` +* *Model-specific libraries:* `qwen_vl_utils`, `llava`, `olmo` (depending on the models being tested). diff --git a/spatial-representation-probe/run_molmo.sh b/spatial-representation-probe/run_molmo.sh new file mode 100644 index 0000000000000000000000000000000000000000..805a73c9a0e76a250d0ddb45ead4f675ce953865 --- /dev/null +++ b/spatial-representation-probe/run_molmo.sh @@ -0,0 +1,69 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n molmo python" +MODEL="molmo" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# GPU plan: Molmo ~25GB each +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(0 1 2 3 4) + +echo "=========================================" +echo " Molmo Swap Analysis: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + 
scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " Molmo Swap Analysis: Running merge" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name molmo \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/molmo_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/molmo_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/molmo/" diff --git a/spatial-representation-probe/run_nvila.sh b/spatial-representation-probe/run_nvila.sh new file mode 100644 index 0000000000000000000000000000000000000000..78e8495be546907a3984b22d5ba9c2dd6f77da64 --- /dev/null +++ b/spatial-representation-probe/run_nvila.sh @@ -0,0 +1,80 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +MODEL="nvila" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +# Shell stdout (conda messages, early crashes) goes here: 
+STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# GPU plan: NVILA ~8GB each +SCALES=("vanilla" "80k" "400k" "800k" "2m" "roborefer") +GPUS=(2 3 4 5 6 7) + +echo "=========================================" +echo " NVILA Swap Analysis: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. 
Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " NVILA Swap Analysis: Merge 1/2 (without roborefer)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name nvila \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/nvila_merge_stdout.log" + +echo "=========================================" +echo " NVILA Swap Analysis: Merge 2/2 (with roborefer)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m roborefer \ + --merge --group-name nvila_with_roborefer \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/nvila_with_roborefer_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/nvila_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/nvila/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/nvila_with_roborefer/" diff --git a/spatial-representation-probe/run_nvila_synthetic_mix.sh b/spatial-representation-probe/run_nvila_synthetic_mix.sh new file mode 100644 index 0000000000000000000000000000000000000000..2b8d690f19208c58bc458953f7bd42ddbd5c2cd6 --- /dev/null +++ b/spatial-representation-probe/run_nvila_synthetic_mix.sh @@ -0,0 +1,77 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# 8 models to run for nvila_synth_compare: +# vanilla / 80k / 400k / 800k → model_type=nvila (fine-tuned baselines) +# 80k-5pct / 80k-10pct / 400k-5pct / 800k-5pct → model_type=nvila_synthetic (synthetic-mix models) +# +# GPU assignment (NVILA
~8GB each): + # GPU 0: nvila/vanilla GPU 1: nvila/80k + # GPU 2: nvila_synthetic/80k-5pct GPU 3: nvila_synthetic/80k-10pct + # GPU 4: nvila/400k GPU 5: nvila_synthetic/400k-5pct + # GPU 6: nvila/800k GPU 7: nvila_synthetic/800k-5pct + +declare -a MODEL_TYPES=("nvila" "nvila" "nvila_synthetic" "nvila_synthetic" "nvila" "nvila_synthetic" "nvila" "nvila_synthetic") +declare -a SCALES=( "vanilla" "80k" "80k-5pct" "80k-10pct" "400k" "400k-5pct" "800k" "800k-5pct") +declare -a GPUS=( 0 1 2 3 4 5 6 7) + +echo "=========================================" +echo " NVILA-Synthetic Mix: Launching models in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + mtype="${MODEL_TYPES[$i]}" + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${mtype}_${scale}_stdout.log" + + echo "[GPU $gpu] ${mtype}/${scale} -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $mtype \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + label="${MODEL_TYPES[$i]}/${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $label (PID $pid) - SUCCESS" + else + echo "[FAIL] $label (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED process(es) failed.
Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " NVILA-Synthetic Mix: Merge (vanilla / 80k / 80k-5pct / 80k-10pct / 400k / 400k-5pct / 800k / 800k-5pct)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type nvila_synth_compare \ + --merge --group-name nvila_synth_compare \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/nvila_synth_compare_merge_stdout.log" + +echo "" +echo "ALL DONE" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/{nvila,nvila_synthetic}_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/nvila_synth_compare/" diff --git a/spatial-representation-probe/run_qwen.sh b/spatial-representation-probe/run_qwen.sh new file mode 100644 index 0000000000000000000000000000000000000000..6d0e35210dd824ef6cb139a888f122c32a2561eb --- /dev/null +++ b/spatial-representation-probe/run_qwen.sh @@ -0,0 +1,70 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="/usr/bin/python3" +MODEL="qwen" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# GPU plan: Qwen ~10GB each +# GPU 5: vanilla GPU 6: 80k + 400k GPU 7: 800k + 2m +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(5 6 6 7 7) + +echo "=========================================" +echo " Qwen Swap Analysis: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + > 
"$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " Qwen Swap Analysis: Running merge" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name qwen \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/qwen_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/qwen_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/qwen/" diff --git a/spatial-representation-probe/swap_analysis.py b/spatial-representation-probe/swap_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..dbb8df2f5705377b16c31e7066ce9fa012669fbc --- /dev/null +++ b/spatial-representation-probe/swap_analysis.py @@ -0,0 +1,3630 @@ +#!/usr/bin/env python3 +""" +Swap Analysis: Minimal Pair Probing for Spatial Representations + +Creates minimal pairs by swapping obj1<->obj2 in spatial questions: + Original: "Is A to the left or right of B?" -> left + Swapped: "Is B to the left or right of A?" 
-> right + +Supported model types +--------------------- + Legacy (Qwen2.5-VL-3B scale experiments): + molmo | nvila | qwen + New large models: + molmo_big : Molmo2-8B + qwen_big : Qwen3-VL-32B-Instruct + qwen_super : Qwen3-VL-235B-A22B-Instruct + big_trio : Molmo2-8B + RoboRefer + Qwen3-VL-32B + Merge-only (--merge required): + molmo_all : molmo (vanilla→2m) + molmo_big (molmo2) + qwen_all : qwen (vanilla→2m) + qwen_big (qwen3_32b) + +Usage examples +-------------- + # Legacy model (Qwen2.5-VL-3B scale) + python swap_analysis.py --model_type qwen + + # New large model (Qwen3-VL-32B) + conda run -n qwen3 python swap_analysis.py --model_type qwen_big + + # Cross-family merge (combine qwen + qwen_big results) + conda run -n qwen3 python swap_analysis.py --model_type qwen_all --merge + +Analyses: + 1. Difference vectors: delta = feature(swapped) - feature(original) + 2. Within-category delta consistency (do all left->right swaps point same direction?) + 3. Sign-corrected group consistency (align opposite categories by flipping) + 4. Cross-group delta alignment (delta_vertical vs delta_distance) for perspective bias + 5. Delta-based 6x6 similarity heatmap (mean delta per category as representation) + 6. Prediction stats visualization (bar chart + cross-scale trajectory) + 7. Both-correct filtering for delta analysis + 8. PCA visualization of per-sample embeddings + 9. Scaling effects on all of the above + +Fixes applied: + Fix 1: "Answer with only one word." 
appended to all prompts + Fix 2: Synonym handling (below/beneath->under, near/nearby->close, distant->far) + Fix 4: Cross-group quads index matching via string normalization + Fix 5: Within-category + sign-corrected delta consistency (replaces wrong group-level) + Fix 6: Prediction stats bar chart + cross-scale line plot + Fix 7: Delta-based 6x6 heatmap and trajectory + Fix 8: Category validity check + both-correct delta filtering +""" + +import os +import sys +import json +import argparse +import base64 +import logging +import random +import re +from io import BytesIO +from collections import defaultdict +from typing import Dict, List, Tuple, Optional, Any +from abc import ABC, abstractmethod + +import torch +import numpy as np +import pandas as pd +from PIL import Image +from tqdm import tqdm +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D # noqa: F401 +import seaborn as sns +from sklearn.metrics.pairwise import cosine_similarity +from sklearn.decomposition import PCA + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +_HERE = os.path.dirname(os.path.abspath(__file__)) + +# ── Local HuggingFace cache helpers ────────────────────────────────────────── + +HF_HUB_DIR = '/data/shared/Qwen/mydisk/huggingface/hub' + + +def resolve_local_path(model_path: str) -> str: + """Return local snapshot path for a HF model ID if cached, else return the ID unchanged.""" + if os.path.isabs(model_path): + return model_path + cache_name = 'models--' + model_path.replace('/', '--') + snapshots_dir = os.path.join(HF_HUB_DIR, cache_name, 'snapshots') + if os.path.isdir(snapshots_dir): + snapshots = sorted(os.listdir(snapshots_dir)) + if snapshots: + local_path = os.path.join(snapshots_dir, snapshots[-1]) + logger.info(f"Local cache found: {model_path} → {local_path}") + return local_path + logger.warning( + f"Model not found in local 
cache: '{model_path}'\n" + f" Expected at: {snapshots_dir}\n" + f" Will fall back to online HuggingFace Hub download.\n" + f" To cache locally first: python -c \"from huggingface_hub import snapshot_download; " + f"snapshot_download('{model_path}', cache_dir='{HF_HUB_DIR}')\"" + ) + return model_path + + +def _setup_file_logging(name: str, log_dir: str) -> str: + """Attach a named FileHandler to the root logger. + + Writes to {log_dir}/{name}.log (append mode). + Returns the log file path. + """ + os.makedirs(log_dir, exist_ok=True) + log_path = os.path.join(log_dir, f'{name}.log') + fh = logging.FileHandler(log_path, mode='a', encoding='utf-8') + fh.setLevel(logging.INFO) + fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) + logging.getLogger().addHandler(fh) + return log_path + + +def get_model_key(model_type: str, scale: str) -> str: + """Return VLM key for (model_type, scale). E.g. nvila_80k, nvila_synthetic_80k-5pct.""" + return f"{model_type}_{scale}" + + +# ============================================================================ +# Constants +# ============================================================================ + +CATEGORY_ORDER = ['left', 'right', 'above', 'below', 'far', 'close'] + +OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'below', 'below': 'above', + 'under': 'above', # short-mode vertical answer + 'far': 'close', 'close': 'far', +} + +# Opposite map for short-answer mode (vertical uses 'above'/'under', not 'above'/'below') +SHORT_OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'below', 'below': 'above', + 'far': 'close', 'close': 'far', +} + +GROUP_MAP = { + 'left': 'horizontal', 'right': 'horizontal', + 'above': 'vertical', 'below': 'vertical', + 'far': 'distance', 'close': 'distance', +} + +GROUP_ORDER = ['horizontal', 'vertical', 'distance'] + +# Fix 5: Canonical categories for sign-corrected consistency +CANONICAL_CATEGORIES = { + 'horizontal': 'left', + 'vertical': 
'above', + 'distance': 'far', +} + +# Fix 2: Synonyms for answer matching +# 'below' is now primary; 'under'/'beneath' recognized as synonyms +SYNONYMS = { + 'below': ['under', 'beneath'], + 'close': ['near', 'nearby'], + 'far': ['distant'], +} + +# ── MCQ question templates (option order alternated per pair for A/B bias control) ── +_Q_TAIL_MCQ = "Answer with a single letter A or B." +MCQ_TEMPLATES = { + 'horizontal': { + 'left_first': "Is the {obj1} to the left or right of the {obj2}? (A) left (B) right " + _Q_TAIL_MCQ, + 'right_first': "Is the {obj1} to the left or right of the {obj2}? (A) right (B) left " + _Q_TAIL_MCQ, + }, + 'vertical': { + 'above_first': "Is the {obj1} above or below the {obj2}? (A) above (B) below " + _Q_TAIL_MCQ, + 'below_first': "Is the {obj1} above or below the {obj2}? (A) below (B) above " + _Q_TAIL_MCQ, + }, + 'distance': { + 'far_first': "Compared to {ref}, is {subj} far or close from you? (A) far (B) close " + _Q_TAIL_MCQ, + 'close_first': "Compared to {ref}, is {subj} far or close from you? 
(A) close (B) far " + _Q_TAIL_MCQ, + }, +} +MCQ_LETTER = { + 'horizontal': { + 'left_first': {'left': 'a', 'right': 'b'}, + 'right_first': {'left': 'b', 'right': 'a'}, + }, + 'vertical': { + 'above_first': {'above': 'a', 'below': 'b'}, + 'below_first': {'above': 'b', 'below': 'a'}, + }, + 'distance': { + 'far_first': {'far': 'a', 'close': 'b'}, + 'close_first': {'far': 'b', 'close': 'a'}, + }, +} + +SCALE_COLORS = { + 'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c', + '800k': '#d62728', '2m': '#9467bd', 'roborefer':'#8c564b', + # New large models + 'molmo2': '#17becf', # cyan + 'qwen3_32b': '#bcbd22', # yellow-green + 'qwen3_235b': '#e377c2', # pink + # Synthetic-mix NVILA at 80k scale (shades of teal, light→dark by mix ratio) + '80k-5pct': '#b2dfdb', # very light teal + '80k-10pct': '#00b894', # teal + '80k-20pct': '#00897b', # darker teal + '80k-30pct': '#004d40', # deep teal + # Synthetic-mix NVILA at 400k scale + '400k-5pct': '#66bb6a', # light green (near 400k's #2ca02c) +} + +# Canonical scale ordering used by accuracy/ylim plots (add new scales here to control x-axis) +SCALE_ORDER = [ + 'vanilla', '80k', '80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', + '400k', '400k-5pct', '800k', '2m', 'roborefer', + 'molmo2', 'qwen3_32b', 'qwen3_235b', +] + +# Human-readable legend labels (only entries that differ from the key are needed) +SCALE_DISPLAY_NAMES = { + '80k-5pct': '80k 5%', + '80k-10pct': '80k 10%', + '80k-20pct': '80k 20%', + '80k-30pct': '80k 30%', + '400k-5pct': '400k 5%', +} +# Category colors aligned with group: horizontal=orange, vertical=green, distance=purple +CAT_COLORS = { + 'left': '#ff7f0e', 'right': '#ffbb78', # horizontal → orange + 'above': '#2ca02c', 'below': '#98df8a', # vertical → green + 'far': '#9467bd', 'close': '#c5b0d5', # distance → purple +} +GROUP_COLORS = { + 'horizontal': '#ff7f0e', + 'vertical': '#2ca02c', + 'distance': '#9467bd', +} + +# Short-answer (non-MCQ) question templates +SHORT_TEMPLATES = { + 'horizontal': 
"Is the {obj1} to the left or right of the {obj2}? Answer with only one word.", + 'vertical': "Is the {obj1} above or below the {obj2}? Answer with only one word.", + 'distance': "Compared to {ref}, is {subj} far or close from you? Answer with only one word.", +} + +MODEL_CONFIGS = { + 'molmo': { + 'vanilla': 'allenai/Molmo-7B-O-0924', + '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared', + '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared', + '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared', + '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared', + }, + 'nvila': { + 'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B', + '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221', + '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221', + '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221', + '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632', + # '80k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-1250', + # '400k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-6250', + # '800k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-12500', + # '2m': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-31250', + 'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model', + 'roborefer_depth': '/data/shared/Qwen/mydisk/RoboRefer_depth_model', # fill in actual path + }, + 'qwen': { + 'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct', + '80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221', + '400k': 
'/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221', + '800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221', + '2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517', + }, + # NVILA trained with synthetic data mixed in at different ratios + 'nvila_synthetic': { + '80k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-1250', + '80k-10pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_10PCT_80K-20260224_234537', + '80k-20pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_20PCT_80K-20260224_232347', + '80k-30pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_30PCT_80K-20260224_232347', + '400k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-6250', + }, +} + +# ── New large / cross-family models ────────────────────────────────────────── +# Each scale maps to (ExtractorClassName, HF-model-ID-or-absolute-path). +# resolve_local_path() converts HF IDs to local snapshot dirs when cached. 
+MODEL_CONFIGS_NEW = { + 'molmo_big': { + 'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'), + }, + 'qwen_big': { + 'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'), + }, + 'qwen_super': { + 'qwen3_235b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-235B-A22B-Instruct'), + }, + 'big_trio': { + 'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'), + 'roborefer': ('RoboReferExtractor', '/data/shared/Qwen/mydisk/RoboRefer_model'), + 'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'), + }, +} + +# ── Merge-only: combine existing per-scale data from multiple source dirs ───── +MERGE_ONLY_CONFIGS = { + 'molmo_all': { + 'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'molmo2'], + 'scale_sources': { + 'vanilla': 'molmo', '80k': 'molmo', '400k': 'molmo', + '800k': 'molmo', '2m': 'molmo', 'molmo2': 'molmo_big', + }, + 'required_dirs': ['molmo', 'molmo_big'], + }, + 'qwen_all': { + 'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'qwen3_32b'], + 'scale_sources': { + 'vanilla': 'qwen', '80k': 'qwen', '400k': 'qwen', + '800k': 'qwen', '2m': 'qwen', 'qwen3_32b': 'qwen_big', + }, + 'required_dirs': ['qwen', 'qwen_big'], + }, + # Compare NVILA baselines against synthetic-mix checkpoints + 'nvila_synth_compare': { + 'scale_order': ['vanilla', '80k', '80k-5pct', '80k-10pct', '400k', '400k-5pct', '800k', '800k-5pct'], + 'scale_sources': { + 'vanilla': 'nvila', + '80k': 'nvila', + '80k-5pct': 'nvila_synthetic', + '80k-10pct': 'nvila_synthetic', + '400k': 'nvila', + '400k-5pct': 'nvila_synthetic', + '800k': 'nvila', + '800k-5pct': 'nvila_synthetic' + }, + 'required_dirs': ['nvila', 'nvila_synthetic'], + }, +} + +# Default scale run order for new runnable types +SCALE_ORDERS_NEW = { + 'molmo_big': ['molmo2'], + 'qwen_big': ['qwen3_32b'], + 'qwen_super': ['qwen3_235b'], + 'big_trio': ['molmo2', 'roborefer', 'qwen3_32b'], +} + +ALL_MODEL_TYPES = ( + list(MODEL_CONFIGS.keys()) + + list(MODEL_CONFIGS_NEW.keys()) + + list(MERGE_ONLY_CONFIGS.keys()) +) + + +# 
# ============================================================================
# Data Loading & Swap Pair Creation
# ============================================================================

# Regexes that pull the two object mentions out of an EmbSpatialBench question.
# Tried in order; the first match wins.
OBJECT_PATTERNS = [
    re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE),
    re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE),
    re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
]


def extract_objects(question: str) -> Tuple[str, str]:
    """Extract the two referenced object phrases from a benchmark question.

    Raises:
        ValueError: if no pattern in OBJECT_PATTERNS matches.
    """
    for pattern in OBJECT_PATTERNS:
        m = pattern.search(question)
        if m:
            return m.group(1).strip(), m.group(2).strip()
    raise ValueError(f"Could not extract objects from: {question}")


def decode_base64_image(base64_str: str) -> "Image.Image":
    """Decode a base64-encoded image string into an RGB PIL image."""
    image_data = base64.b64decode(base64_str)
    return Image.open(BytesIO(image_data)).convert('RGB')


# ============================================================================
# Answer Matching (Fix 2: synonym support)
# ============================================================================

def find_earliest_position(text: str, word: str) -> int:
    """Find earliest position of word or any of its synonyms in text (-1 if absent)."""
    candidates = [word] + SYNONYMS.get(word, [])
    positions = [p for p in (text.find(w) for w in candidates) if p != -1]
    return min(positions) if positions else -1


def check_answer(generated_text: str, expected_category: str, mcq_map: dict = None) -> bool:
    """Decide whether a generated answer matches the expected spatial category.

    Matching is position-based: whichever of the expected/opposite keywords
    (including synonyms and, for MCQ, the inline "(a)"/"(b)" pattern) appears
    earliest in the lower-cased response wins. Standalone MCQ letter replies
    such as "a", "a.", "a)", "a," are resolved first.

    Args:
        generated_text: Raw model output.
        expected_category: Ground-truth category (e.g. 'left', 'below', 'far').
        mcq_map: Optional {category: letter} map from MCQ_LETTER for the
            template variant that produced the question; None for short answers.

    Returns:
        True iff the response is judged to express the expected category.
    """
    if not generated_text or not generated_text.strip():
        return False
    text = generated_text.strip().lower()
    expected = expected_category.lower()
    opposite = OPPOSITE_MAP[expected]

    if mcq_map:
        exp_letter = mcq_map.get(expected)
        opp_letter = mcq_map.get(opposite)
        # Standalone letter response (e.g. "A", "A.", "A)", "B")
        if exp_letter and text in (exp_letter, exp_letter + '.', exp_letter + ')', exp_letter + ','):
            return True
        if opp_letter and text in (opp_letter, opp_letter + '.', opp_letter + ')', opp_letter + ','):
            return False
    else:
        exp_letter = opp_letter = None

    def earliest_with_mcq(word, letter):
        # Synonym-aware keyword search, extended with the MCQ "(x)" inline form.
        positions = [p for p in (find_earliest_position(text, word),
                                 text.find(f'({letter})') if letter else -1)
                     if p != -1]
        return min(positions) if positions else -1

    pos_exp = earliest_with_mcq(expected, exp_letter)
    pos_opp = earliest_with_mcq(opposite, opp_letter)
    if pos_exp == -1:
        return False
    if pos_opp == -1:
        return True
    return pos_exp < pos_opp


# ============================================================================
# Swap Pair Loading (Fix 1: prompt suffix)
# ============================================================================

def load_swap_pairs(tsv_path: str, seed: int = 42, filter_unknown: bool = True,
                    question_type: str = 'mcq') -> List[dict]:
    """Load EmbSpatialBench TSV and create swap pairs for all samples.

    A swap pair is a question plus its object-swapped version, with the
    correspondingly swapped expected answer.

    Args:
        tsv_path: Benchmark TSV with 'index', 'question_id', 'category',
            'question', 'image' (base64) and, for far/close rows, 'answer'
            plus option columns 'A'-'D'.
        seed: Seed for the private RNG used to sample far/close reference objects.
        filter_unknown: If True (default), skip far/close pairs where target_object
            is Unknown/empty, and remove Unknown/empty values from reference_object
            candidates before sampling. Pairs with no valid candidates are dropped.
            Use --no-filtering to disable.
        question_type: 'mcq' (default) uses MCQ A/B templates with letter answers;
            'short_answer' uses the original "Answer with only one word." format.

    Returns:
        List of pair dicts (original/swapped question + answer, group,
        category, object names, MCQ letter map or None).
    """
    rng = random.Random(seed)
    df = pd.read_csv(tsv_path, sep='\t')

    pairs = []
    stats = defaultdict(lambda: {'total': 0, 'success': 0})

    def _valid_obj(v):
        return bool(v) and str(v).strip().lower() not in ('unknown', 'n/a', '')

    for _, row in df.iterrows():
        # Keep the raw TSV category for bookkeeping: `category` is normalized
        # ('under' -> 'below') inside the branches, and the summary below
        # iterates CATEGORY_ORDER (which lists 'under'), so success must be
        # counted under the same key as total — previously 'under' rows were
        # credited to 'below' and 'under' always logged 0 successes.
        tsv_category = row['category']
        category = tsv_category
        stats[tsv_category]['total'] += 1

        try:
            if category in ['left', 'right', 'above', 'under', 'below']:
                obj1, obj2 = extract_objects(row['question'])
                if category in ['left', 'right']:
                    grp = 'horizontal'
                else:
                    grp = 'vertical'

                if question_type == 'short_answer':
                    # Single-word format; normalize 'under' → 'below'
                    if category == 'under':
                        category = 'below'
                    tmpl = SHORT_TEMPLATES[grp]
                    pair = {
                        'index': row['index'],
                        'question_id': str(row['question_id']),
                        'image_base64': row['image'],
                        'original_question': tmpl.format(obj1=obj1, obj2=obj2),
                        'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
                        'original_answer': category,
                        'swapped_answer': SHORT_OPPOSITE_MAP[category],
                        'group': grp,
                        'category': category,
                        'obj1': obj1, 'obj2': obj2,
                        'mcq_map': None,
                    }
                else:
                    # MCQ format; normalize 'under' → 'below'
                    if category == 'under':
                        category = 'below'
                    # Alternate option order across pairs to control A/B bias.
                    variant = ('left_first' if grp == 'horizontal' else 'above_first') \
                        if len(pairs) % 2 == 0 else \
                        ('right_first' if grp == 'horizontal' else 'below_first')
                    tmpl = MCQ_TEMPLATES[grp][variant]
                    mcq_map = MCQ_LETTER[grp][variant]
                    pair = {
                        'index': row['index'],
                        'question_id': str(row['question_id']),
                        'image_base64': row['image'],
                        'original_question': tmpl.format(obj1=obj1, obj2=obj2),
                        'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
                        'original_answer': category,
                        'swapped_answer': OPPOSITE_MAP[category],
                        'group': GROUP_MAP[category],
                        'category': category,
                        'obj1': obj1, 'obj2': obj2,
                        'mcq_map': mcq_map,
                    }

            elif category in ['far', 'close']:
                answer_key = row['answer']
                options = {k: row[k] for k in ['A', 'B', 'C', 'D']}
                target_object = options[answer_key]
                candidates = [v for k, v in options.items() if k != answer_key]

                if filter_unknown:
                    if not _valid_obj(target_object):
                        continue
                    candidates = [v for v in candidates if _valid_obj(v)]
                    if not candidates:
                        continue

                reference_object = rng.choice(candidates)

                if question_type == 'short_answer':
                    tmpl = SHORT_TEMPLATES['distance']
                    pair = {
                        'index': row['index'],
                        'question_id': str(row['question_id']),
                        'image_base64': row['image'],
                        'original_question': tmpl.format(ref=reference_object, subj=target_object),
                        'swapped_question': tmpl.format(ref=target_object, subj=reference_object),
                        'original_answer': category,
                        'swapped_answer': OPPOSITE_MAP[category],
                        'group': 'distance',
                        'category': category,
                        'target_object': target_object,
                        'reference_object': reference_object,
                        'mcq_map': None,
                    }
                else:
                    variant = 'far_first' if len(pairs) % 2 == 0 else 'close_first'
                    tmpl = MCQ_TEMPLATES['distance'][variant]
                    mcq_map = MCQ_LETTER['distance'][variant]
                    pair = {
                        'index': row['index'],
                        'question_id': str(row['question_id']),
                        'image_base64': row['image'],
                        'original_question': tmpl.format(ref=reference_object, subj=target_object),
                        'swapped_question': tmpl.format(ref=target_object, subj=reference_object),
                        'original_answer': category,
                        'swapped_answer': OPPOSITE_MAP[category],
                        'group': 'distance',
                        'category': category,
                        'target_object': target_object,
                        'reference_object': reference_object,
                        'mcq_map': mcq_map,
                    }
            else:
                continue

            pairs.append(pair)
            stats[tsv_category]['success'] += 1

        except Exception as e:
            logger.warning(f"Failed to create swap pair for index {row['index']}: {e}")
            continue

    logger.info("Swap pair creation stats:")
    for cat in CATEGORY_ORDER:
        s = stats[cat]
        logger.info(f"  {cat}: {s['success']}/{s['total']}")
    logger.info(f"  Total pairs: {len(pairs)}")

    return pairs

# ============================================================================
# HF Bbox Cache (Fix 4: string-normalized keys)
# ============================================================================

def build_hf_bbox_cache(hf_dataset_name: str = 'FlagEval/EmbSpatial-Bench') -> Dict[str, dict]:
    """Load HF dataset and build bbox lookup cache keyed by string-normalized question_id.

    Returns:
        Dict mapping str(question_id) -> {'objects', 'relation', 'data_source',
        'answer', 'answer_options'} taken verbatim from the HF test split.
    """
    from datasets import load_dataset
    logger.info(f"Loading HF dataset: {hf_dataset_name}")
    ds = load_dataset(hf_dataset_name, split='test')

    cache = {}
    for item in ds:
        # Fix 4: Normalize key to string for consistent matching
        qid = str(item['question_id'])
        cache[qid] = {
            'objects': item['objects'],
            'relation': item['relation'],
            'data_source': item['data_source'],
            'answer': item['answer'],
            'answer_options': item['answer_options'],
        }

    # Fix 4: Log sample keys for debugging
    sample_keys = list(cache.keys())[:5]
    logger.info(f"Built bbox cache: {len(cache)} entries (sample keys: {sample_keys})")
    return cache


def get_bbox_center_y(bbox: list) -> float:
    # Vertical center of the box; assumes bbox layout is [x, y, w, h]
    # (y + h/2) — TODO confirm against the HF dataset's bbox schema.
    return bbox[1] + bbox[3] / 2


def create_cross_group_quads(
    swap_pairs: List[dict],
    hf_cache: Dict[str, dict],
    threshold_ratio: float = 0.05,
    question_type: str = 'mcq',
) -> List[dict]:
    """For far/close swap pairs, create additional vertical queries using bbox.

    Each resulting "quad" bundles the original distance question pair with a
    derived above/below question pair over the same two objects. Pairs are
    dropped when the bbox lookup fails or the vertical separation is below
    threshold_ratio * image_height (too ambiguous to label).
    """
    # Per-source native image heights used to scale the ambiguity threshold.
    IMAGE_HEIGHTS = {'ai2thor': 300, 'mp3d': 480, 'scannet': 968}

    quads = []
    stats = {'total': 0, 'matched': 0, 'ambiguous': 0, 'no_bbox': 0}

    distance_pairs = [p for p in swap_pairs if p['group'] == 'distance']

    # Fix 4: Use question_id (e.g. 'mp3d_0') to match HF dataset, not integer index
    n_matched_keys = sum(1 for p in distance_pairs if p['question_id'] in hf_cache)
    logger.info(f"Matched {n_matched_keys}/{len(distance_pairs)} question_ids between TSV and HF dataset")

    for pair in distance_pairs:
        stats['total'] += 1
        qid = pair['question_id']

        if qid not in hf_cache:
            stats['no_bbox'] += 1
            continue

        hf_item = hf_cache[qid]
        names = hf_item['objects']['name']
        bboxes = hf_item['objects']['bbox']

        target = pair['target_object']
        reference = pair['reference_object']

        # Find the bbox center-y of both objects by exact name match.
        target_bbox_y, ref_bbox_y = None, None
        for name, bbox in zip(names, bboxes):
            if name == target:
                target_bbox_y = get_bbox_center_y(bbox)
            if name == reference:
                ref_bbox_y = get_bbox_center_y(bbox)

        if target_bbox_y is None or ref_bbox_y is None:
            stats['no_bbox'] += 1
            continue

        # Unknown sources fall back to 480 px.
        image_height = IMAGE_HEIGHTS.get(hf_item['data_source'], 480)
        threshold = image_height * threshold_ratio
        y_diff = target_bbox_y - ref_bbox_y

        if abs(y_diff) < threshold:
            stats['ambiguous'] += 1
            continue

        # Smaller center-y is treated as 'above' (image y-axis grows downward).
        if target_bbox_y < ref_bbox_y:
            vert_original_answer = 'above'
        else:
            vert_original_answer = 'below'

        if question_type == 'short_answer':
            vert_tmpl = SHORT_TEMPLATES['vertical']
            vert_mcq_map = None
            vert_original_q = vert_tmpl.format(obj1=target, obj2=reference)
            vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target)
            vert_swapped_answer = SHORT_OPPOSITE_MAP[vert_original_answer]
        else:
            # Alternate option order across quads for A/B bias control.
            vert_variant = 'above_first' if len(quads) % 2 == 0 else 'below_first'
            vert_tmpl = MCQ_TEMPLATES['vertical'][vert_variant]
            vert_mcq_map = MCQ_LETTER['vertical'][vert_variant]
            vert_original_q = vert_tmpl.format(obj1=target, obj2=reference)
            vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target)
            vert_swapped_answer = OPPOSITE_MAP[vert_original_answer]

        quad = {
            'index': pair['index'],
            'image_base64': pair['image_base64'],
            'dist_original_q': pair['original_question'],
            'dist_swapped_q': pair['swapped_question'],
            'dist_original_answer': pair['original_answer'],
            'dist_swapped_answer': pair['swapped_answer'],
            'dist_mcq_map': pair['mcq_map'],
            'vert_original_q': vert_original_q,
            'vert_swapped_q': vert_swapped_q,
            'vert_original_answer': vert_original_answer,
            'vert_swapped_answer': vert_swapped_answer,
            'vert_mcq_map': vert_mcq_map,
            'target_object': target,
            'reference_object': reference,
            'target_bbox_y': target_bbox_y,
            'ref_bbox_y': ref_bbox_y,
            'y_diff': y_diff,
            'data_source': hf_item['data_source'],
        }
        quads.append(quad)
        stats['matched'] += 1

    logger.info(f"Cross-group quads: {stats['matched']}/{stats['total']} "
                f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})")
    return quads


# ============================================================================
# Base Extractor
# ============================================================================

class BaseHiddenStateExtractor(ABC):
    """Abstract base: loads a model, hooks its transformer layers, and captures
    the last prompt token's hidden state per layer during prefill while also
    generating a short text answer."""

    def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None):
        self.model_path = model_path
        self.device = device
        self.hidden_states = {}   # layer_idx -> CPU float tensor (last prompt token)
        self.hooks = []           # live forward-hook handles, removed in cleanup()
        self._load_model()
        num_layers = self._get_num_layers()
        if target_layers is None:
            # Default: capture every layer.
            self.target_layers = list(range(num_layers))
            logger.info(f"Model has {num_layers} layers. Extracting ALL.")
        else:
            self.target_layers = target_layers
        self._register_hooks()

    def _register_hooks(self):
        # Attach one forward hook per requested layer; layers that cannot be
        # resolved (module is None) are silently skipped.
        for layer_idx in self.target_layers:
            module = self._get_layer_module(layer_idx)
            if module is not None:
                hook = module.register_forward_hook(self._make_hook(layer_idx))
                self.hooks.append(hook)

    def _make_hook(self, layer_idx: int):
        # Returns a hook that stores the final-position hidden state for this
        # layer. Only prefill passes (seq_len > 1) are recorded, so the stored
        # state corresponds to the last token of the prompt, not decode steps.
        def hook_fn(module, input, output):
            if isinstance(output, tuple):
                hidden = output[0]
            else:
                hidden = output
            if hidden.shape[1] > 1:  # prefill only
                last_token = hidden[:, -1, :].detach().cpu().float()
                self.hidden_states[layer_idx] = last_token.squeeze(0)
        return hook_fn

    @abstractmethod
    def _load_model(self): pass
    @abstractmethod
    def _get_num_layers(self) -> int: pass
    @abstractmethod
    def _get_layer_module(self, layer_idx: int): pass
    @abstractmethod
    def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass

    def cleanup(self):
        """Remove hooks, drop model/processor references, and free CUDA memory."""
        for hook in self.hooks:
            hook.remove()
        self.hooks = []
        if hasattr(self, 'model'):
            del self.model
        if hasattr(self, 'processor'):
            del self.processor
        torch.cuda.empty_cache()


# ============================================================================
# Molmo Extractor
# ============================================================================

class MolmoExtractor(BaseHiddenStateExtractor):
    """Extractor for Molmo checkpoints; supports both native (config.yaml +
    model.pt) and Hugging Face layouts, detected from the files on disk."""

    def _load_model(self):
        config_path = os.path.join(self.model_path, "config.yaml")
        checkpoint_path = os.path.join(self.model_path, "model.pt")
        if os.path.exists(config_path) and os.path.exists(checkpoint_path):
            self._load_native_model()
            self.is_native = True
        else:
            self._load_hf_model()
            self.is_native = False

    def _load_native_model(self):
        # Loads a native-format Molmo checkpoint via the `olmo` package and
        # builds the matching prompt formatter + multimodal preprocessor.
        from olmo.config import ModelConfig
        from olmo.model import Molmo as NativeMolmoModel
        from olmo.data.model_preprocessor import MultiModalPreprocessor
        from olmo.data.data_formatter import DataFormatter

        # NOTE(review): this globally monkeypatches torch.load to force
        # weights_only=False (full pickle deserialization — only safe for
        # trusted checkpoints) and never restores the original; every later
        # torch.load call in the process goes through this wrapper.
        _original_load = torch.load
        def _unsafe_load_wrapper(*args, **kwargs):
            if 'weights_only' not in kwargs:
                kwargs['weights_only'] = False
            return _original_load(*args, **kwargs)
        torch.load = _unsafe_load_wrapper

        cfg = ModelConfig.load(
            os.path.join(self.model_path, "config.yaml"),
            key="model", validate_paths=False
        )
        cfg.init_device = "cpu"  # build on CPU first, then move to device below
        self.model = NativeMolmoModel(cfg)
        state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu")
        self.model.load_state_dict(state_dict)
        self.model = self.model.to(self.device, dtype=torch.bfloat16).eval()
        self.tokenizer = cfg.get_tokenizer()

        v_cfg = cfg.vision_backbone
        h, w = cfg.llm_patches_per_crop()
        image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None)

        # Some configs carry a None prompt style; default it to "User" so
        # DataFormatter.get_system_prompt does not fail.
        class SafeDataFormatter(DataFormatter):
            def get_system_prompt(self, style, for_inference, messages, rng=None):
                if style is None:
                    style = "User"
                return super().get_system_prompt(style, for_inference, messages, rng)

        self.formatter = SafeDataFormatter(
            prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting,
            system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space,
            default_inference_len=cfg.default_inference_len
        )
        self.preprocessor = MultiModalPreprocessor(
            tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type),
            crop_mode=cfg.crop_mode, max_crops=cfg.max_crops,
            overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode,
            use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size,
            image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h,
            image_token_length_w=w, image_token_length_h=h,
            image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask,
            pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting,
        )
        logger.info(f"Loaded native Molmo from {self.model_path}")

    def _load_hf_model(self):
        # Standard Hugging Face load path (e.g. allenai/Molmo-7B-O-0924).
        from transformers import AutoModelForCausalLM, AutoProcessor
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path, torch_dtype=torch.bfloat16,
            trust_remote_code=True, device_map=self.device
        ).eval()
        self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
        logger.info(f"Loaded HF Molmo from {self.model_path}")

    def _get_num_layers(self) -> int:
        if self.is_native:
            return len(self.model.transformer.blocks)
        if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
            return len(self.model.model.transformer.blocks)
        return 32  # fallback when the HF wrapper layout is unrecognized

    def _get_layer_module(self, layer_idx: int):
        if self.is_native:
            return self.model.transformer.blocks[layer_idx]
        return self.model.model.transformer.blocks[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one image+question query; returns ({layer: hidden_state}, answer_text)."""
        self.hidden_states = {}
        if self.is_native:
            example = {"messages": [question], "image": image}
            messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
            batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True)
            # Some preprocessor versions emit 'input_tokens' instead of 'input_ids'.
            if 'input_ids' not in batch and 'input_tokens' in batch:
                batch['input_ids'] = batch['input_tokens']

            def to_t(x):
                return torch.from_numpy(x) if isinstance(x, np.ndarray) else x

            input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long()
            images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
            image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
            image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device)

            with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
                gen = self.model.generate(
                    input_ids=input_ids, images=images_t,
                    image_masks=image_masks, image_input_idx=image_input_idx,
                    max_steps=20, beam_size=1,
                )
            generated_ids = gen.token_ids[0, 0]
            answer = self.tokenizer.decode(generated_ids.tolist()).strip()
            # Strip terminator strings from the decoded text.
            # NOTE(review): the '' entry is a no-op (replace('', '') returns the
            # string unchanged) — possibly a special token lost in editing.
            for eos in ['<|endoftext|>', '', '<|end|>']:
                answer = answer.replace(eos, '').strip()
        else:
            from transformers import GenerationConfig
            inputs = self.processor.process(images=[image], text=question)
            processed = {}
            for k, v in inputs.items():
                v = v.to(self.device).unsqueeze(0)
                if v.dtype == torch.float32:
                    v = v.to(dtype=torch.bfloat16)
                processed[k] = v
            with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16):
                output = self.model.generate_from_batch(
                    processed,
                    GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"),
                    tokenizer=self.processor.tokenizer,
                )
            # Decode only the newly generated tokens.
            input_len = processed['input_ids'].shape[1]
            answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip()

        return self.hidden_states.copy(), answer


# ============================================================================
# NVILA Extractor
# ============================================================================

class NVILAExtractor(BaseHiddenStateExtractor):
    """Extractor for NVILA checkpoints loaded through the `llava` package."""

    def _load_model(self):
        # Evict any previously imported llava variant (e.g. RoboRefer's fork)
        # and any RoboRefer sys.path entries so the plain llava is imported;
        # both are restored on failure.
        original_sys_path = sys.path.copy()
        sys.path = [p for p in sys.path if 'RoboRefer' not in p]
        modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()]
        removed = {m: sys.modules.pop(m) for m in modules_to_remove}
        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            sys.path = original_sys_path
            for m, mod in removed.items():
                sys.modules[m] = mod
            raise RuntimeError(f"Failed to import llava: {err}")
        sys.path = original_sys_path
        self.LLaVAImage = LLaVAImage
        self.clib = clib
        self.model = llava.load(self.model_path, model_base=None)
        self._find_llm_backbone()
        logger.info(f"Loaded NVILA from {self.model_path}")

    def _find_llm_backbone(self):
        # Probe known attribute layouts first (ordered most- to least-likely),
        # then fall back to scanning named_modules for any '*.layers' list.
        # The first candidate found wins.
        candidates = []
        if hasattr(self.model, 'llm'):
            if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'):
                candidates.append(self.model.llm.model.layers)
            if hasattr(self.model.llm, 'layers'):
                candidates.append(self.model.llm.layers)
        if hasattr(self.model, 'model'):
            if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'):
                candidates.append(self.model.model.model.layers)
            if hasattr(self.model.model, 'layers'):
                candidates.append(self.model.model.layers)
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0:
                candidates.append(module)
        if candidates:
            self.llm_backbone = candidates[0]
        else:
            raise ValueError("Could not locate transformer layers in NVILA model")

    def _get_num_layers(self) -> int:
        return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24

    def _get_layer_module(self, layer_idx: int):
        return self.llm_backbone[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one image+question query; returns ({layer: hidden_state}, answer_text)."""
        self.hidden_states = {}
        import tempfile
        # llava's media API takes a file path, so round-trip the PIL image
        # through a temp PNG; the file is removed in the finally block.
        with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
            temp_path = f.name
            image.save(temp_path)
        try:
            prompt = [self.LLaVAImage(temp_path), question]
            from transformers import GenerationConfig
            response = self.model.generate_content(
                prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False)
            )
        finally:
            os.unlink(temp_path)
        answer = str(response[0] if isinstance(response, list) else response).strip()
        return self.hidden_states.copy(), answer


class RoboReferExtractor(NVILAExtractor):
    """NVILAExtractor variant that imports RoboRefer's llava fork instead."""

    ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer'

    def _load_model(self):
        # Mirror of NVILAExtractor._load_model, but *prepends* the RoboRefer
        # repo to sys.path so its llava fork shadows the plain one.
        original_sys_path = sys.path.copy()
        if self.ROBOREFER_PATH not in sys.path:
            sys.path.insert(0, self.ROBOREFER_PATH)
        modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()]
        removed = {m: sys.modules.pop(m) for m in modules_to_remove}
        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            sys.path = original_sys_path
            for m, mod in removed.items():
                sys.modules[m] = mod
            raise RuntimeError(f"Failed to import RoboRefer llava: {err}")
        sys.path = original_sys_path
        self.LLaVAImage = LLaVAImage
        self.clib = clib
        self.model = llava.load(self.model_path, model_base=None)
        self._find_llm_backbone()
        logger.info(f"Loaded RoboRefer from {self.model_path}")


class RoboReferDepthExtractor(RoboReferExtractor):
    """RoboRefer with depth-image input instead of RGB.

    Usage: pass the depth PIL image to extract_and_predict() instead of the RGB image.
    In practice this means loading depth images in load_swap_pairs() / extract_swap_features()
    rather than changing anything here. If the depth image is stored as a separate column in
    the dataset, add a 'depth_image_base64' key to the pair dict and decode it before calling
    run_single_query().

    TODO: confirm depth image path / format with the actual dataset layout.
    """
    # Inherits extract_and_predict() from NVILAExtractor (via RoboReferExtractor) unchanged.
    # The caller is responsible for passing the correct (depth) PIL Image.
+ + +# ============================================================================ +# Qwen2.5-VL Extractor +# ============================================================================ + +class Qwen25VLExtractor(BaseHiddenStateExtractor): + BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct" + + def _load_model(self): + from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor + try: + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16, device_map=self.device + ) + except ImportError: + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16 + ).to(self.device) + self.model.eval() + if self.model_path.startswith('/'): + self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL) + else: + self.processor = AutoProcessor.from_pretrained(self.model_path) + logger.info(f"Loaded Qwen2.5-VL from {self.model_path}") + + def _get_num_layers(self) -> int: + return len(self.model.model.layers) + + def _get_layer_module(self, layer_idx: int): + return self.model.model.layers[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + messages = [{"role": "user", "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": question} + ]}] + text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + from qwen_vl_utils import process_vision_info + image_inputs, video_inputs = process_vision_info(messages) + inputs = self.processor( + text=[text], images=image_inputs, videos=video_inputs, + padding=True, return_tensors="pt" + ).to(self.device) + with torch.no_grad(): + output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) + input_len = inputs['input_ids'].shape[1] + answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip() + return self.hidden_states.copy(), answer + + +# 
# ============================================================================
# New Extractors: Molmo2-8B and Qwen3-VL family
# ============================================================================

class Molmo2Extractor(BaseHiddenStateExtractor):
    """Extractor for allenai/Molmo2-8B (AutoModelForImageTextToText, messages-dict input)."""

    def _load_model(self):
        """Load Molmo2 with auto dtype/device placement and locate its LLM layers."""
        from transformers import AutoProcessor, AutoModelForImageTextToText
        self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
        self.model = AutoModelForImageTextToText.from_pretrained(
            self.model_path, trust_remote_code=True, torch_dtype='auto', device_map='auto',
        ).eval()
        self._find_llm_layers()
        logger.info(f"Loaded Molmo2 from {self.model_path}")

    def _find_llm_layers(self):
        """Locate the LLM decoder-layer container: known attribute paths first,
        then a scan over all submodules for the largest '*.layers' module list.

        Raises:
            ValueError: if no candidate path nor scanned module matches.
        """
        candidates = [
            ['model', 'layers'],
            ['language_model', 'model', 'layers'],
            ['model', 'model', 'layers'],
        ]
        for path in candidates:
            obj = self.model
            for attr in path:
                obj = getattr(obj, attr, None)
                if obj is None:
                    break
            if obj is not None and hasattr(obj, '__len__') and len(obj) > 0:
                self.llm_layers = obj
                logger.info(f"Molmo2: layers at '{'.'.join(path)}', count={len(obj)}")
                return
        # FIX: remember the *matching* module's name. The previous code logged
        # the loop variable `name` after the loop, which reported the last
        # module visited (not the best match) and raised NameError when
        # named_modules() yielded nothing; it also logged "via scan" even on
        # failure. Log only on success, with the winning name.
        best, best_name, best_len = None, None, 0
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len:
                best, best_name, best_len = module, name, len(module)
        if best is not None:
            logger.info(f"Molmo2: layers via scan at '{best_name}', count={best_len}")
            self.llm_layers = best
            return
        raise ValueError("Could not find transformer layers in Molmo2 model")

    def _get_num_layers(self) -> int:
        return len(self.llm_layers)

    def _get_layer_module(self, layer_idx: int):
        return self.llm_layers[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one greedy query; returns (captured hidden states copy, answer str)."""
        self.hidden_states = {}
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": question},
        ]}]
        # Molmo2's processor tokenizes directly from the chat template.
        inputs = self.processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True,
            return_tensors="pt", return_dict=True,
        )
        inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
        with torch.inference_mode():
            generated_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
        input_len = inputs['input_ids'].shape[1]
        answer = self.processor.tokenizer.decode(
            generated_ids[0, input_len:], skip_special_tokens=True).strip()
        return self.hidden_states.copy(), answer


class Qwen3VLExtractor(BaseHiddenStateExtractor):
    """Extractor for Qwen3-VL family (32B dense, 235B MoE).

    Key differences from Qwen25VLExtractor:
    - AutoModelForImageTextToText + trust_remote_code=True
    - process_vision_info requires image_patch_size=16
    - processor call requires do_resize=False
    - 32×32 px patches → different min/max_pixels
    """

    MIN_PIXELS = 256 * 32 * 32    # 262,144 (mp3d/scannet → natural res; ai2thor → ~256 tokens)
    MAX_PIXELS = 16384 * 32 * 32  # 16,777,216

    def _load_model(self):
        """Load Qwen3-VL with flash-attention and locate its LLM layers."""
        from transformers import AutoProcessor, AutoModelForImageTextToText
        self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
        self.model = AutoModelForImageTextToText.from_pretrained(
            self.model_path, trust_remote_code=True, torch_dtype='auto',
            device_map='auto', attn_implementation='flash_attention_2',
        ).eval()
        self._find_llm_layers()
        logger.info(f"Loaded Qwen3-VL from {self.model_path}")

    def _find_llm_layers(self):
        """Locate the LLM decoder-layer container (same strategy/fix as Molmo2Extractor)."""
        candidates = [
            ['model', 'language_model', 'model', 'layers'],  # Qwen3-VL expected
            ['language_model', 'model', 'layers'],
            ['model', 'model', 'layers'],
            ['model', 'layers'],
        ]
        for path in candidates:
            obj = self.model
            for attr in path:
                obj = getattr(obj, attr, None)
                if obj is None:
                    break
            if obj is not None and hasattr(obj, '__len__') and len(obj) > 0:
                self.llm_layers = obj
                logger.info(f"Qwen3-VL: layers at '{'.'.join(path)}', count={len(obj)}")
                return
        # FIX: log the matching module name, only on success (see Molmo2Extractor).
        best, best_name, best_len = None, None, 0
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len:
                best, best_name, best_len = module, name, len(module)
        if best is not None:
            logger.info(f"Qwen3-VL: layers via scan at '{best_name}', count={best_len}")
            self.llm_layers = best
            return
        raise ValueError("Could not find transformer layers in Qwen3-VL model")

    def _get_num_layers(self) -> int:
        return len(self.llm_layers)

    def _get_layer_module(self, layer_idx: int):
        return self.llm_layers[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one greedy query; returns (captured hidden states copy, answer str)."""
        self.hidden_states = {}
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image,
             "min_pixels": self.MIN_PIXELS, "max_pixels": self.MAX_PIXELS},
            {"type": "text", "text": question},
        ]}]
        text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        from qwen_vl_utils import process_vision_info
        # Qwen3-VL uses 16px vision patches; request video kwargs/metadata to
        # match the 3-tuple return signature.
        images, videos, _ = process_vision_info(
            messages, image_patch_size=16, return_video_kwargs=True, return_video_metadata=True,
        )
        inputs = self.processor(
            text=text, images=images, videos=videos, do_resize=False, return_tensors="pt",
        ).to(self.model.device)
        with torch.no_grad():
            output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
        input_len = inputs['input_ids'].shape[1]
        answer = self.processor.tokenizer.decode(
            output_ids[0, input_len:], skip_special_tokens=True).strip()
        return self.hidden_states.copy(), answer


# Registry used by get_extractor for the new-large model configs.
EXTRACTOR_CLASSES = {
    'MolmoExtractor': MolmoExtractor,
    'NVILAExtractor': NVILAExtractor,
    'RoboReferExtractor': RoboReferExtractor,
    'Qwen25VLExtractor': Qwen25VLExtractor,
    'Molmo2Extractor': Molmo2Extractor,
    'Qwen3VLExtractor': Qwen3VLExtractor,
}


def get_extractor(model_type: str, model_path: str = None, scale: str = None, **kwargs):
    """Create an extractor for any model_type (legacy or new-large)."""
    # New large models: (ExtractorClass, path) tuples in MODEL_CONFIGS_NEW
    if model_type in MODEL_CONFIGS_NEW:
        cls_name, raw_path = MODEL_CONFIGS_NEW[model_type][scale]
        resolved = resolve_local_path(raw_path)
        logger.info(f"Creating {cls_name} for scale='{scale}' from {resolved}")
        return EXTRACTOR_CLASSES[cls_name](resolved, **kwargs)
    # Legacy models
    if model_type == 'nvila' and scale == 'roborefer':
        return RoboReferExtractor(model_path, **kwargs)
    if model_type == 'nvila' and scale == 'roborefer_depth':
        return RoboReferDepthExtractor(model_path, **kwargs)
    legacy = {
        'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor,
        'nvila_synthetic': NVILAExtractor,
    }
    return legacy[model_type](model_path, **kwargs)


# ============================================================================
# Feature Extraction Pipeline
# ============================================================================

def run_single_query(extractor, image, question):
    """One forward pass; returns ({layer_idx -> flat np.ndarray}, predicted answer)."""
    hidden_states, predicted = extractor.extract_and_predict(image, question)
    result = {}
    for layer_idx in extractor.target_layers:
        if layer_idx in hidden_states:
            state = hidden_states[layer_idx].numpy().flatten()
            if state.size > 0:
                result[layer_idx] = state
    return result, predicted


def extract_swap_features(
    extractor: BaseHiddenStateExtractor,
    swap_pairs: List[dict],
    max_samples_per_category: int = 0,
) -> List[dict]:
    """Extract features for all swap pairs.

    For each pair, runs the original and swapped questions, records per-layer
    hidden states, their delta (swap - orig), predictions, and correctness.
    With max_samples_per_category > 0, subsamples each category with a fixed
    seed for reproducibility.
    """
    rng = random.Random(42)

    if max_samples_per_category > 0:
        grouped = defaultdict(list)
        for p in swap_pairs:
            grouped[p['category']].append(p)
        limited = []
        for cat in CATEGORY_ORDER:
            samples = grouped[cat]
            if len(samples) > max_samples_per_category:
                samples = rng.sample(samples, max_samples_per_category)
            limited.extend(samples)
        swap_pairs = limited

    records = []
    for pair in tqdm(swap_pairs, desc="Swap pairs"):
        try:
            image = decode_base64_image(pair['image_base64'])
            hs_orig, pred_orig = run_single_query(extractor, image, pair['original_question'])
            hs_swap, pred_swap = run_single_query(extractor, image, pair['swapped_question'])

            is_correct_orig = check_answer(pred_orig, pair['original_answer'], pair['mcq_map'])
            is_correct_swap = check_answer(pred_swap, pair['swapped_answer'], pair['mcq_map'])

            delta = {}
            for layer_idx in extractor.target_layers:
                if layer_idx in hs_orig and layer_idx in hs_swap:
                    delta[layer_idx] = hs_swap[layer_idx] - hs_orig[layer_idx]

            record = {
                'index': pair['index'],
                'group': pair['group'],
                'category': pair['category'],
                'original_answer': pair['original_answer'],
                'swapped_answer': pair['swapped_answer'],
                'pred_orig': pred_orig,
                'pred_swap': pred_swap,
                'is_correct_orig': is_correct_orig,
                'is_correct_swap': is_correct_swap,
                'hs_orig': hs_orig,
                'hs_swap': hs_swap,
                'delta': delta,
            }
            records.append(record)

            mark_o = "O" if is_correct_orig else "X"
            mark_s = "O" if is_correct_swap else "X"
            logger.info(f"  #{pair['index']:<6} {pair['category']:<6} "
                        f"orig[{mark_o}]=\"{pred_orig[:40]}\" swap[{mark_s}]=\"{pred_swap[:40]}\""
                        + (f" [{len(records)}/{len(swap_pairs)}]" if len(records) % 50 == 0 else ""))

        except Exception as e:
            # Best-effort: skip pairs that fail decode/inference, keep going.
            logger.warning(f"Error on index {pair['index']}: {e}")
            continue

    logger.info(f"Extracted {len(records)} swap pair records")

    # Fix 8: Per-category accuracy logging
    for cat in CATEGORY_ORDER:
        cat_recs = [r for r in records if r['category'] == cat]
        n = len(cat_recs)
        if n == 0:
            continue
        c_orig = sum(1 for r in cat_recs if r['is_correct_orig'])
        c_swap = sum(1 for r in cat_recs if r['is_correct_swap'])
        c_both = sum(1 for r in cat_recs if r['is_correct_orig'] and r['is_correct_swap'])
        logger.info(f"  {cat:>6s} (n={n}): acc_orig={c_orig/n:.1%}, acc_swap={c_swap/n:.1%}, "
                    f"acc_both={c_both/n:.1%}")

    return records


def extract_cross_group_features(
    extractor: BaseHiddenStateExtractor,
    quads: List[dict],
) -> List[dict]:
    """Extract features for cross-group quads (4 forward passes each)."""
    records = []
    for quad in tqdm(quads, desc="Cross-group quads"):
        try:
            image = decode_base64_image(quad['image_base64'])
            hs_d_orig, pred_d_orig = run_single_query(extractor, image, quad['dist_original_q'])
            hs_d_swap, pred_d_swap = run_single_query(extractor, image, quad['dist_swapped_q'])
            hs_v_orig, pred_v_orig = run_single_query(extractor, image, quad['vert_original_q'])
            hs_v_swap, pred_v_swap = run_single_query(extractor, image, quad['vert_swapped_q'])

            delta_dist, delta_vert = {}, {}
            for layer_idx in extractor.target_layers:
                if layer_idx in hs_d_orig and layer_idx in hs_d_swap:
                    delta_dist[layer_idx] = hs_d_swap[layer_idx] - hs_d_orig[layer_idx]
                if layer_idx in hs_v_orig and layer_idx in hs_v_swap:
                    delta_vert[layer_idx] = hs_v_swap[layer_idx] - hs_v_orig[layer_idx]

            record = {
                'index': quad['index'],
                'delta_dist': delta_dist,
                'delta_vert': delta_vert,
                'pred_d_orig': pred_d_orig, 'pred_d_swap': pred_d_swap,
                'pred_v_orig': pred_v_orig, 'pred_v_swap': pred_v_swap,
                'is_correct_d_orig': check_answer(pred_d_orig, quad['dist_original_answer'], quad['dist_mcq_map']),
                'is_correct_d_swap': check_answer(pred_d_swap, quad['dist_swapped_answer'], quad['dist_mcq_map']),
                'is_correct_v_orig': check_answer(pred_v_orig, quad['vert_original_answer'], quad['vert_mcq_map']),
                'is_correct_v_swap': check_answer(pred_v_swap, quad['vert_swapped_answer'], quad['vert_mcq_map']),
                'data_source': quad['data_source'],
            }
            records.append(record)

            tqdm.write(f"  #{quad['index']:<6} dist=[{pred_d_orig[:20]}/{pred_d_swap[:20]}] "
                       f"vert=[{pred_v_orig[:20]}/{pred_v_swap[:20]}]")

        except Exception as e:
            logger.warning(f"Error on cross-group index {quad['index']}: {e}")
            continue

    logger.info(f"Extracted {len(records)} cross-group quad records")
    return records


# ============================================================================
# Analysis Functions
# ============================================================================

# Fix 5: Within-category + sign-corrected delta consistency

def compute_delta_consistency(records: List[dict], target_layers: List[int]):
    """Compute TWO types of delta consistency.

    Args:
        records: swap-pair records carrying 'group', 'category', and a
            per-layer 'delta' dict of vectors.
        target_layers: layer indices to evaluate.

    Returns:
        within_cat_results: {(category, layer) -> {mean, std, n}}
        sign_corrected_results: {(group, layer) -> {mean, std, n}}
    """
    within_cat_results = {}
    sign_corrected_results = {}

    for group in GROUP_ORDER:
        canonical = CANONICAL_CATEGORIES[group]
        opposite = OPPOSITE_MAP[canonical]
        group_recs = [r for r in records if r['group'] == group]

        for layer in target_layers:
            # (a) Within-category consistency: mean pairwise cosine similarity
            # of delta vectors inside one category (upper triangle only).
            for cat in [canonical, opposite]:
                cat_deltas = [r['delta'][layer] for r in group_recs
                              if r['category'] == cat and layer in r['delta']]
                if len(cat_deltas) >= 2:
                    arr = np.array(cat_deltas)
                    sim = cosine_similarity(arr)
                    upper = sim[np.triu_indices(len(cat_deltas), k=1)]
                    within_cat_results[(cat, layer)] = {
                        'mean': float(np.mean(upper)),
                        'std': float(np.std(upper)),
                        'n': len(cat_deltas),
                    }

            # (b) Sign-corrected group consistency: opposite-category deltas
            # are negated so all deltas point along the canonical direction.
            all_deltas = []
            for r in group_recs:
                if layer not in r['delta']:
                    continue
                d = r['delta'][layer]
                if r['category'] == opposite:
                    d = -d  # flip to align with canonical direction
                all_deltas.append(d)

            if len(all_deltas) >= 2:
                arr = np.array(all_deltas)
                sim = cosine_similarity(arr)
                upper = sim[np.triu_indices(len(all_deltas), k=1)]
                sign_corrected_results[(group, layer)] = {
                    'mean': float(np.mean(upper)),
                    'std': float(np.std(upper)),
                    'n': len(all_deltas),
                }

    return within_cat_results, sign_corrected_results


# Fix 7: Delta-based similarity matrix

def compute_delta_similarity_matrix(records: List[dict], layer: int) -> Optional[pd.DataFrame]:
    """Compute 6x6 cosine similarity using mean delta per category.

    Returns None if fewer than two categories have deltas at this layer.
    """
    cat_deltas = {}
    for cat in CATEGORY_ORDER:
        deltas = [r['delta'][layer] for r in records if r['category'] == cat and layer in r['delta']]
        if deltas:
            cat_deltas[cat] = np.mean(deltas, axis=0)

    available = [c for c in CATEGORY_ORDER if c in cat_deltas]
    if len(available) < 2:
        return None

    vectors = np.array([cat_deltas[c] for c in available])
    sim = cosine_similarity(vectors)
    return pd.DataFrame(sim, index=available, columns=available)


# Fix 8: Both-correct filtering

def filter_both_correct(records: List[dict]) -> List[dict]:
    """Filter to pairs where both orig and swap predictions are correct."""
    return [r for r in records if r['is_correct_orig'] and r['is_correct_swap']]


# Fix 8: Category validity check

def check_category_validity(records: List[dict], scale: str) -> Dict[str, dict]:
    """Check per-category accuracy and flag unreliable categories.

    A category is 'reliable' when both orig and swap accuracy are >= 50%.
    """
    validity = {}
    for cat in CATEGORY_ORDER:
        cat_recs = [r for r in records if r['category'] == cat]
        n = len(cat_recs)
        if n == 0:
            validity[cat] = {'n': 0, 'acc_orig': 0, 'acc_swap': 0, 'reliable': False}
            continue
        acc_orig = sum(1 for r in cat_recs if r['is_correct_orig']) / n
        acc_swap = sum(1 for r in cat_recs if r['is_correct_swap']) / n
        reliable = acc_orig >= 0.5 and acc_swap >= 0.5
        validity[cat] = {
            'n': n, 'acc_orig': acc_orig, 'acc_swap': acc_swap,
            'reliable': reliable,
        }
        if not reliable:
            logger.warning(f"  [!] Category '{cat}' unreliable at scale={scale}: "
                           f"acc_orig={acc_orig:.1%}, acc_swap={acc_swap:.1%}")
    return validity


def compute_cross_group_alignment(quad_records: List[dict], target_layers: List[int]) -> dict:
    """Per-layer cosine alignment between delta_vert and delta_dist, with a
    100-round permutation control (fixed RandomState seed 42)."""
    results = {}
    for layer in target_layers:
        per_sample = []
        delta_verts, delta_dists = [], []

        for rec in quad_records:
            if layer in rec['delta_vert'] and layer in rec['delta_dist']:
                dv = rec['delta_vert'][layer]
                dd = rec['delta_dist'][layer]
                norm_v, norm_d = np.linalg.norm(dv), np.linalg.norm(dd)
                if norm_v > 1e-10 and norm_d > 1e-10:
                    per_sample.append(float(np.dot(dv, dd) / (norm_v * norm_d)))
                    delta_verts.append(dv)
                    delta_dists.append(dd)

        if not per_sample:
            continue

        mean_dv = np.mean(delta_verts, axis=0)
        mean_dd = np.mean(delta_dists, axis=0)
        norm_mv, norm_md = np.linalg.norm(mean_dv), np.linalg.norm(mean_dd)
        mean_alignment = float(np.dot(mean_dv, mean_dd) / (norm_mv * norm_md + 1e-10))

        # Permutation control: shuffle the dist deltas relative to vert deltas.
        rng = np.random.RandomState(42)
        perm_alignments = []
        for _ in range(100):
            shuffled_dd = [delta_dists[i] for i in rng.permutation(len(delta_dists))]
            perm_cos = []
            for dv, dd in zip(delta_verts, shuffled_dd):
                nv, nd = np.linalg.norm(dv), np.linalg.norm(dd)
                if nv > 1e-10 and nd > 1e-10:
                    perm_cos.append(np.dot(dv, dd) / (nv * nd))
            perm_alignments.append(np.mean(perm_cos))

        results[layer] = {
            'per_sample_mean': float(np.mean(per_sample)),
            'per_sample_std': float(np.std(per_sample)),
            'mean_delta_alignment': mean_alignment,
            'permutation_mean': float(np.mean(perm_alignments)),
            'permutation_std': float(np.std(perm_alignments)),
            'n_samples': len(per_sample),
        }
    return results


def compute_prediction_stats(records: List[dict], scale: str) -> dict:
    """Flat dict of per-group and overall accuracy (orig / swap / both)."""
    stats = {'scale': scale}
    total_correct_orig, total_correct_swap, total_both, total_n = 0, 0, 0, 0

    for group in GROUP_ORDER:
        group_recs = [r for r in records if r['group'] == group]
        n = len(group_recs)
        c_orig = sum(1 for r in group_recs if r['is_correct_orig'])
        c_swap = sum(1 for r in group_recs if r['is_correct_swap'])
        c_both = sum(1 for r in group_recs if r['is_correct_orig'] and r['is_correct_swap'])
        stats[f'{group}_n'] = n
        stats[f'{group}_acc_orig'] = c_orig / n if n > 0 else 0
        stats[f'{group}_acc_swap'] = c_swap / n if n > 0 else 0
        stats[f'{group}_acc_both'] = c_both / n if n > 0 else 0
        total_correct_orig += c_orig
        total_correct_swap += c_swap
        total_both += c_both
        total_n += n

    stats['overall_acc_orig'] = total_correct_orig / total_n if total_n > 0 else 0
    stats['overall_acc_swap'] = total_correct_swap / total_n if total_n > 0 else 0
    stats['overall_acc_both'] = total_both / total_n if total_n > 0 else 0
    stats['overall_n'] = total_n
    return stats


# ============================================================================
# Saving & Loading
# ============================================================================

def get_representative_layers(all_layers, n=5):
    """Pick n layers evenly spaced across all_layers (all of them if <= n)."""
    if len(all_layers) <= n:
        return list(all_layers)
    indices = np.linspace(0, len(all_layers) - 1, n, dtype=int)
    return [all_layers[i] for i in indices]


def save_scale_results(
    scale, swap_records, quad_records,
    within_cat_consistency, sign_corrected_consistency,
    cross_alignment, pred_stats, target_layers,
    category_validity, delta_heatmaps,
    output_dir, both_correct_tag="all_pairs",
):
    """Save all per-scale results to disk (csv/ and json/ subdirs)."""
    csv_dir = os.path.join(output_dir, 'csv')
    json_dir = os.path.join(output_dir, 'json')
    os.makedirs(csv_dir, exist_ok=True)
    os.makedirs(json_dir, exist_ok=True)

    # 1. Predictions CSV (tagged so all_pairs and both_correct don't overwrite each other)
    pred_rows = []
    for r in swap_records:
        pred_rows.append({
            'index': r['index'], 'group': r['group'], 'category': r['category'],
            'pred_orig': r['pred_orig'], 'pred_swap': r['pred_swap'],
            'is_correct_orig': r['is_correct_orig'], 'is_correct_swap': r['is_correct_swap'],
        })
    pd.DataFrame(pred_rows).to_csv(
        os.path.join(csv_dir, f'predictions_{scale}_{both_correct_tag}.csv'), index=False)

    # 2. Within-category consistency JSON (tuple keys flattened to '{cat}_L{layer}')
    wc_data = {}
    for (cat, layer), vals in within_cat_consistency.items():
        wc_data[f'{cat}_L{layer}'] = vals
    with open(os.path.join(json_dir, f'within_cat_consistency_{scale}_{both_correct_tag}.json'), 'w') as f:
        json.dump(wc_data, f, indent=2)

    # 3. Sign-corrected consistency JSON
    sc_data = {}
    for (group, layer), vals in sign_corrected_consistency.items():
        sc_data[f'{group}_L{layer}'] = vals
    with open(os.path.join(json_dir, f'sign_corrected_consistency_{scale}_{both_correct_tag}.json'), 'w') as f:
        json.dump(sc_data, f, indent=2)

    # 4. Cross-group alignment JSON
    alignment_data = {}
    for layer, vals in cross_alignment.items():
        alignment_data[f'L{layer}'] = vals
    with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f:
        json.dump(alignment_data, f, indent=2)

    # 5. Prediction stats JSON
    with open(os.path.join(json_dir, f'pred_stats_{scale}.json'), 'w') as f:
        json.dump(pred_stats, f, indent=2)

    # 6. Category validity JSON (Fix 8)
    with open(os.path.join(json_dir, f'category_validity_{scale}.json'), 'w') as f:
        json.dump(category_validity, f, indent=2)

    # 7. Delta heatmap CSVs (Fix 7)
    for layer, df in delta_heatmaps.items():
        if df is not None:
            df.to_csv(os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{both_correct_tag}.csv'))

    logger.info(f"Saved results for scale={scale} ({both_correct_tag}) to {output_dir}")


def save_vectors_npz(scale, swap_records, quad_records, target_layers, output_dir):
    """Save ALL vectors with correctness metadata to NPZ (once per scale).

    This enables post-hoc filtering (both_correct, all_with_validity) from saved data.
    """
    rep_layers = list(target_layers)  # save ALL layers (not just 5 representative)
    delta_data = {}
    for layer in rep_layers:
        groups_list, categories_list, vectors = [], [], []
        orig_vecs, swap_vecs, labels = [], [], []
        correct_orig_list, correct_swap_list, indices_list = [], [], []
        for r in swap_records:
            if layer in r['delta']:
                groups_list.append(r['group'])
                categories_list.append(r['category'])
                vectors.append(r['delta'][layer])
                correct_orig_list.append(r['is_correct_orig'])
                correct_swap_list.append(r['is_correct_swap'])
                indices_list.append(r['index'])
            if layer in r['hs_orig'] and layer in r['hs_swap']:
                orig_vecs.append(r['hs_orig'][layer])
                swap_vecs.append(r['hs_swap'][layer])
                labels.append(r['category'])
        if vectors:
            delta_data[f'delta_L{layer}'] = np.array(vectors)
            delta_data[f'groups_L{layer}'] = np.array(groups_list)
            delta_data[f'categories_L{layer}'] = np.array(categories_list)
            delta_data[f'is_correct_orig_L{layer}'] = np.array(correct_orig_list)
            delta_data[f'is_correct_swap_L{layer}'] = np.array(correct_swap_list)
            delta_data[f'indices_L{layer}'] = np.array(indices_list)
        if orig_vecs:
            delta_data[f'orig_L{layer}'] = np.array(orig_vecs)
            delta_data[f'swap_L{layer}'] = np.array(swap_vecs)
            delta_data[f'labels_L{layer}'] = np.array(labels)

    npz_dir = os.path.join(output_dir, 'npz')
    os.makedirs(npz_dir, exist_ok=True)
    np.savez_compressed(os.path.join(npz_dir, f'vectors_{scale}.npz'), **delta_data)
    logger.info(f"Saved vectors NPZ with correctness metadata for scale={scale}")

    # Cross-group delta vectors
    if quad_records:
        cg_data = {}
        for layer in rep_layers:
            dverts, ddists = [], []
            for rec in quad_records:
                if layer in rec['delta_vert'] and layer in rec['delta_dist']:
                    dverts.append(rec['delta_vert'][layer])
                    ddists.append(rec['delta_dist'][layer])
            if dverts:
                cg_data[f'delta_vert_L{layer}'] = np.array(dverts)
                cg_data[f'delta_dist_L{layer}'] = np.array(ddists)
        np.savez_compressed(os.path.join(npz_dir, f'cross_group_vectors_{scale}.npz'), **cg_data)


def load_scale_consistency(output_dir, scale, tag='all_pairs'):
    """Load sign-corrected consistency; keys rebuilt as (group, layer) tuples."""
    path = os.path.join(output_dir, 'json', f'sign_corrected_consistency_{scale}_{tag}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        raw = json.load(f)
    result = {}
    for key, vals in raw.items():
        parts = key.rsplit('_L', 1)
        if len(parts) == 2:
            result[(parts[0], int(parts[1]))] = vals
    return result


def load_within_cat_consistency(output_dir, scale, tag='all_pairs'):
    """Load within-category consistency; keys rebuilt as (category, layer) tuples."""
    path = os.path.join(output_dir, 'json', f'within_cat_consistency_{scale}_{tag}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        raw = json.load(f)
    result = {}
    for key, vals in raw.items():
        parts = key.rsplit('_L', 1)
        if len(parts) == 2:
            result[(parts[0], int(parts[1]))] = vals
    return result


def load_scale_alignment(output_dir, scale):
    """Load cross-group alignment; 'L{n}' keys converted to int layer indices."""
    path = os.path.join(output_dir, 'json', f'cross_alignment_{scale}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        raw = json.load(f)
    result = {}
    for key, vals in raw.items():
        result[int(key.replace('L', ''))] = vals
    return result


def load_delta_heatmaps(output_dir, scale, tag='all_pairs'):
    """Load delta-similarity CSVs as {layer -> DataFrame}."""
    import glob as glob_mod
    pattern = os.path.join(output_dir, 'csv', f'delta_similarity_{scale}_L*_{tag}.csv')
    files = glob_mod.glob(pattern)
    result = {}
    for fpath in files:
        basename = os.path.basename(fpath)
        # delta_similarity_{scale}_L{layer}_{tag}.csv
        part = basename.replace(f'delta_similarity_{scale}_L', '').replace(f'_{tag}.csv', '')
        try:
            layer = int(part)
        except ValueError:
            continue
        result[layer] = pd.read_csv(fpath, index_col=0)
    return result


# ============================================================================
# Visualization
# ============================================================================

def plot_within_cat_consistency_trajectory(within_cat, scale, model_type, save_path):
    """Plot within-category delta consistency across layers."""
    fig, ax = plt.subplots(figsize=(12, 6))
    cat_colors = CAT_COLORS
    for cat in CATEGORY_ORDER:
        layers, vals = [], []
        for (c, l), v in sorted(within_cat.items(), key=lambda x: x[0][1]):
            if c == cat:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=cat_colors[cat], label=cat, linewidth=2, markersize=3)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Within-Category Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency', fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_sign_corrected_consistency_trajectory(sign_corrected, scale, model_type, save_path):
    """Plot sign-corrected group consistency across layers."""
    fig, ax = plt.subplots(figsize=(12, 6))
    colors = GROUP_COLORS
    for group in GROUP_ORDER:
        layers, vals = [], []
        for (g, l), v in sorted(sign_corrected.items(), key=lambda x: x[0][1]):
            if g == group:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=colors[group], label=group, linewidth=2, markersize=3)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Sign-Corrected Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency', fontweight='bold')
    ax.legend(fontsize=11)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_cross_group_alignment_trajectory(cross_alignment, scale, model_type, save_path):
    """Plot cross-group alignment vs the permutation control (±2σ band)."""
    fig, ax = plt.subplots(figsize=(12, 6))
    layers = sorted(cross_alignment.keys())
    actual = [cross_alignment[l]['per_sample_mean'] for l in layers]
    mean_delta = [cross_alignment[l]['mean_delta_alignment'] for l in layers]
    perm_mean = [cross_alignment[l]['permutation_mean'] for l in layers]
    perm_std = [cross_alignment[l]['permutation_std'] for l in layers]

    ax.plot(layers, actual, '-o', color='#d62728', label='cos(d_vert, d_dist) per-sample mean',
            linewidth=2.5, markersize=3)
    ax.plot(layers, mean_delta, '--s', color='#e377c2', label='cos(mean_d_vert, mean_d_dist)',
            linewidth=1.5, markersize=3)
    ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5)
    ax.fill_between(layers,
                    [m - 2*s for m, s in zip(perm_mean, perm_std)],
                    [m + 2*s for m, s in zip(perm_mean, perm_std)],
                    alpha=0.2, color='gray')
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Cosine Alignment')
    ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)', fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


# Fix 7: Delta heatmap visualization

def plot_delta_heatmap(sim_df, title, save_path):
    """Plot delta-based similarity heatmap."""
    plt.figure(figsize=(10, 8))
    available_order = [c for c in CATEGORY_ORDER if c in sim_df.index]
    sim_df_ordered = sim_df.loc[available_order, available_order]

    annot = sim_df_ordered.round(4).astype(str)
    sns.heatmap(sim_df_ordered, annot=annot, fmt='', cmap='RdBu_r',
                center=0, vmin=-1, vmax=1, square=True, linewidths=0.5,
                cbar_kws={'label': 'Cosine Similarity'})
    plt.title(title, fontsize=14, fontweight='bold')
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved delta heatmap: {save_path}")


# Fix 6: Prediction stats visualization

def plot_pred_stats_bars(all_pred_stats, model_type, save_path):
    """Bar chart: per-group accuracy (orig/swap/both) across scales."""
    fig, axes = plt.subplots(1, len(GROUP_ORDER), figsize=(7 * len(GROUP_ORDER), 6))
    if len(GROUP_ORDER) == 1:
        axes = [axes]

    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)]
    if not available:
        # Fallback: use whatever scales are present (preserves insertion order)
        seen = []
        for d in all_pred_stats:
            if d['scale'] not in seen:
                seen.append(d['scale'])
        available = seen

    for idx, group in enumerate(GROUP_ORDER):
        ax = axes[idx]
        x = np.arange(3)  # orig, swap, both
        width = 0.8 / len(available)
        for i, scale in enumerate(available):
            entry = next((d for d in all_pred_stats if d['scale'] == scale), None)
            if entry is None:
                continue
            vals = [entry.get(f'{group}_acc_orig', 0),
                    entry.get(f'{group}_acc_swap', 0),
                    entry.get(f'{group}_acc_both', 0)]
            offset = (i - len(available) / 2 + 0.5) * width
            color = SCALE_COLORS.get(scale, 'gray')
            ax.bar(x + offset, vals, width, label=scale, color=color)
        ax.set_xticks(x)
        ax.set_xticklabels(['orig', 'swap', 'both'])
        ax.set_ylabel('Accuracy')
        ax.set_title(group, fontweight='bold')
        ax.legend(fontsize=7)
        ax.set_ylim(0, 1.1)
        ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
        ax.grid(True, alpha=0.3, axis='y')

    fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_pred_stats_trajectory(all_pred_stats, 
model_type, save_path):
    """Line plot: acc_both trajectory across scales per group."""
    fig, ax = plt.subplots(figsize=(10, 6))
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)]
    if not available:
        # Fallback: preserve insertion order of whatever scales were recorded.
        seen = []
        for d in all_pred_stats:
            if d['scale'] not in seen:
                seen.append(d['scale'])
        available = seen
    colors = GROUP_COLORS

    for group in GROUP_ORDER:
        x_vals, y_vals = [], []
        for i, scale in enumerate(available):
            entry = next((d for d in all_pred_stats if d['scale'] == scale), None)
            if entry:
                x_vals.append(i)
                y_vals.append(entry.get(f'{group}_acc_both', 0))
        if x_vals:
            ax.plot(x_vals, y_vals, '-o', color=colors[group], label=group, linewidth=2.5, markersize=6)

    ax.set_xticks(range(len(available)))
    ax.set_xticklabels(available)
    ax.set_xlabel('Scale')
    ax.set_ylabel('Accuracy (both correct)')
    ax.set_title(f'{model_type.upper()} - Both-Correct Accuracy Across Scales', fontweight='bold')
    ax.legend(fontsize=10)
    ax.set_ylim(0, 1.05)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_pca_embeddings(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
    """3-panel 2D PCA per layer: embeddings by category, deltas by group,
    deltas by category. With bc_only=True, keeps only both-correct pairs
    (using the correctness masks stored in the NPZ)."""
    data = np.load(vectors_npz_path, allow_pickle=True)
    layer_keys = [k for k in data.files if k.startswith('orig_L')]
    layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys])

    cat_colors = CAT_COLORS

    for layer in layers:
        orig = data.get(f'orig_L{layer}')
        swap = data.get(f'swap_L{layer}')
        labels = data.get(f'labels_L{layer}')
        deltas = data.get(f'delta_L{layer}')
        cats = data.get(f'categories_L{layer}')
        groups = data.get(f'groups_L{layer}')

        if bc_only and deltas is not None:
            co = data.get(f'is_correct_orig_L{layer}')
            cs = data.get(f'is_correct_swap_L{layer}')
            if co is not None and cs is not None:
                bc_mask = co.astype(bool) & cs.astype(bool)
                # Length checks: orig/swap and delta arrays may differ in count
                # (they are collected under separate conditions in save_vectors_npz).
                if orig is not None and len(orig) == len(bc_mask):
                    orig = orig[bc_mask]
                    swap = swap[bc_mask]
                    labels = labels[bc_mask] if labels is not None else None
                if len(deltas) == len(bc_mask):
                    deltas = deltas[bc_mask]
                    cats = cats[bc_mask] if cats is not None else None
                    groups = groups[bc_mask] if groups is not None else None

        if orig is None or swap is None or len(orig) == 0:
            continue

        fig, axes = plt.subplots(1, 3, figsize=(24, 7))

        # Fit one PCA on orig+swap jointly so both live in the same plane.
        pca = PCA(n_components=2)
        all_vecs = np.vstack([orig, swap])
        all_pca = pca.fit_transform(all_vecs)
        orig_pca = all_pca[:len(orig)]
        swap_pca = all_pca[len(orig):]

        ax = axes[0]
        for cat in CATEGORY_ORDER:
            mask = np.array([str(l) == cat for l in labels])
            if mask.any():
                ax.scatter(orig_pca[mask, 0], orig_pca[mask, 1],
                           c=cat_colors.get(cat, 'gray'), label=f'{cat} (orig)',
                           alpha=0.5, s=15, marker='o')
                ax.scatter(swap_pca[mask, 0], swap_pca[mask, 1],
                           c=cat_colors.get(cat, 'gray'),
                           alpha=0.5, s=15, marker='x')
        ax.set_title('Embeddings by Category\n(o=orig, x=swap)', fontsize=11)
        ax.legend(fontsize=7, ncol=2)
        ax.grid(True, alpha=0.2)

        ax = axes[1]
        if deltas is not None and cats is not None:
            pca_d = PCA(n_components=2)
            delta_pca = pca_d.fit_transform(deltas)
            group_colors = GROUP_COLORS
            if groups is not None:
                for group in GROUP_ORDER:
                    mask = np.array([str(g) == group for g in groups])
                    if mask.any():
                        ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
                                   c=group_colors.get(group, 'gray'), label=group, alpha=0.5, s=15)
            ax.set_title('Delta Vectors by Group', fontsize=11)
            ax.legend(fontsize=9)
            ax.grid(True, alpha=0.2)

        ax = axes[2]
        if deltas is not None and cats is not None:
            # delta_pca is guaranteed here: same guard as the panel above.
            for cat in CATEGORY_ORDER:
                mask = np.array([str(c) == cat for c in cats])
                if mask.any():
                    ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
                               c=cat_colors.get(cat, 'gray'), label=cat, alpha=0.5, s=15)
            ax.set_title('Delta Vectors by Category', fontsize=11)
            ax.legend(fontsize=8, ncol=2)
            ax.grid(True, alpha=0.2)

        fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - PCA', fontweight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, bbox_inches='tight')
        plt.close()

    logger.info(f"Saved PCA plots to {save_dir}")


def plot_pca_3d(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
    """Generate 3-panel 3D PCA figure per representative layer."""
    data = np.load(vectors_npz_path, allow_pickle=True)
    layer_keys = [k for k in data.files if k.startswith('orig_L')]
    layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys])

    if not layers:
        logger.info(f"  [pca_3d] No orig_L* keys found in {vectors_npz_path}")
        return

    os.makedirs(save_dir, exist_ok=True)

    def scatter3d(ax, xs, ys, zs, c, label, alpha=0.45, s=12, marker='o'):
        ax.scatter(xs, ys, zs, c=c, label=label, alpha=alpha, s=s, marker=marker)

    for layer in layers:
        orig = data.get(f'orig_L{layer}')
        swap = data.get(f'swap_L{layer}')
        labels = data.get(f'labels_L{layer}')
        deltas = data.get(f'delta_L{layer}')
        cats = data.get(f'categories_L{layer}')
        groups = data.get(f'groups_L{layer}')

        if bc_only and deltas is not None:
            co = data.get(f'is_correct_orig_L{layer}')
            cs = data.get(f'is_correct_swap_L{layer}')
            if co is not None and cs is not None:
                bc_mask = co.astype(bool) & cs.astype(bool)
                if orig is not None and len(orig) == len(bc_mask):
                    orig = orig[bc_mask]
                    swap = swap[bc_mask]
                    labels = labels[bc_mask] if labels is not None else None
                if len(deltas) == len(bc_mask):
                    deltas = deltas[bc_mask]
                    cats = cats[bc_mask] if cats is not None else None
                    groups = groups[bc_mask] if groups is not None else None

        if orig is None or swap is None or len(orig) == 0:
            continue

        # Panel 1: embeddings
        pca_emb = PCA(n_components=3)
        all_vecs = np.vstack([orig, swap])
        all_proj = pca_emb.fit_transform(all_vecs)
        orig_proj = 
all_proj[:len(orig)] + swap_proj = all_proj[len(orig):] + ev1 = pca_emb.explained_variance_ratio_ + + # Panels 2/3: delta vectors + has_delta = (deltas is not None and len(deltas) >= 3) + if has_delta: + pca_d = PCA(n_components=3) + delta_proj = pca_d.fit_transform(deltas) + ev2 = pca_d.explained_variance_ratio_ + else: + delta_proj = None + ev2 = None + + fig = plt.figure(figsize=(30, 8)) + + ax1 = fig.add_subplot(131, projection='3d') + for cat in CATEGORY_ORDER: + mask = np.array([str(l) == cat for l in labels]) + if not mask.any(): + continue + c = CAT_COLORS.get(cat, 'gray') + scatter3d(ax1, orig_proj[mask, 0], orig_proj[mask, 1], orig_proj[mask, 2], + c=c, label=f'{cat} (orig)', marker='o') + scatter3d(ax1, swap_proj[mask, 0], swap_proj[mask, 1], swap_proj[mask, 2], + c=c, label=f'{cat} (swap)', marker='^') + ax1.set_title('Embeddings by Category\n(o=orig, ^=swap)', fontsize=10) + ax1.set_xlabel(f'PC1 ({ev1[0]:.1%})', fontsize=8) + ax1.set_ylabel(f'PC2 ({ev1[1]:.1%})', fontsize=8) + ax1.set_zlabel(f'PC3 ({ev1[2]:.1%})', fontsize=8) + ax1.legend(fontsize=6, ncol=2, loc='upper left') + + ax2 = fig.add_subplot(132, projection='3d') + if has_delta and groups is not None: + for group in GROUP_ORDER: + mask = np.array([str(g) == group for g in groups]) + if not mask.any(): + continue + scatter3d(ax2, delta_proj[mask, 0], delta_proj[mask, 1], delta_proj[mask, 2], + c=GROUP_COLORS.get(group, 'gray'), label=group) + ax2.set_title('Delta Vectors by Group', fontsize=10) + ax2.set_xlabel(f'PC1 ({ev2[0]:.1%})', fontsize=8) + ax2.set_ylabel(f'PC2 ({ev2[1]:.1%})', fontsize=8) + ax2.set_zlabel(f'PC3 ({ev2[2]:.1%})', fontsize=8) + ax2.legend(fontsize=8) + else: + ax2.set_title('Delta Vectors by Group\n(no data)', fontsize=10) + + ax3 = fig.add_subplot(133, projection='3d') + if has_delta and cats is not None: + for cat in CATEGORY_ORDER: + mask = np.array([str(c) == cat for c in cats]) + if not mask.any(): + continue + scatter3d(ax3, delta_proj[mask, 0], delta_proj[mask, 1], 
delta_proj[mask, 2], + c=CAT_COLORS.get(cat, 'gray'), label=cat) + ax3.set_title('Delta Vectors by Category', fontsize=10) + ax3.set_xlabel(f'PC1 ({ev2[0]:.1%})', fontsize=8) + ax3.set_ylabel(f'PC2 ({ev2[1]:.1%})', fontsize=8) + ax3.set_zlabel(f'PC3 ({ev2[2]:.1%})', fontsize=8) + ax3.legend(fontsize=7, ncol=2) + else: + ax3.set_title('Delta Vectors by Category\n(no data)', fontsize=10) + + fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - 3D PCA', fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, + bbox_inches='tight', pad_inches=0.4) + plt.close() + + logger.info(f"Saved 3D PCA plots to {save_dir}") + + +# Cross-scale plots + +def plot_cross_scale_consistency(all_consistency, model_type, save_path, title_prefix='Sign-Corrected'): + fig, axes = plt.subplots(1, 3, figsize=(21, 6)) + + for idx, group in enumerate(GROUP_ORDER): + ax = axes[idx] + for scale in SCALE_ORDER: + if scale not in all_consistency: + continue + consistency = all_consistency[scale] + layers, vals = [], [] + for (g, l), v in sorted(consistency.items(), key=lambda x: x[0][1]): + if g == group: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Consistency') + ax.set_title(group, fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + + fig.suptitle(f'{model_type.upper()} - {title_prefix} Consistency Across Scales', + fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_cross_scale_within_cat_consistency(all_within_cat, model_type, save_path): + """Cross-scale within-category consistency.""" + fig, axes = plt.subplots(2, 3, figsize=(21, 12)) + + for idx, cat in enumerate(CATEGORY_ORDER): + ax = 
axes[idx // 3][idx % 3] + for scale in SCALE_ORDER: + if scale not in all_within_cat: + continue + wc = all_within_cat[scale] + layers, vals = [], [] + for (c, l), v in sorted(wc.items(), key=lambda x: x[0][1]): + if c == cat: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Consistency') + ax.set_title(cat, fontweight='bold') + ax.legend(fontsize=8) + ax.grid(True, alpha=0.3) + + fig.suptitle(f'{model_type.upper()} - Within-Category Consistency Across Scales', + fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_cross_scale_alignment(all_alignment, model_type, save_path): + fig, ax = plt.subplots(figsize=(12, 6)) + for scale in SCALE_ORDER: + if scale not in all_alignment: + continue + alignment = all_alignment[scale] + layers = sorted(alignment.keys()) + vals = [alignment[l]['per_sample_mean'] for l in layers] + ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('cos(d_vert, d_dist)') + ax.set_title(f'{model_type.upper()} - Cross-Group Alignment Across Scales\n' + f'(High=entangled, Low=disentangled)', fontweight='bold') + ax.legend(fontsize=10) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +# Fix 7: Delta-based trajectory (cross-layer, per-scale) + +def plot_delta_trajectory(all_delta_heatmaps, model_type, save_path): + """Cross-layer trajectory of delta-based similarities for key pairs.""" + pairs = [ + ('above', 'far', 'above-far'), ('below', 'close', 'below-close'), + ('left', 'right', 'left-right'), + ] + fig, axes = 
plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6)) + if len(pairs) == 1: + axes = [axes] + + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + for scale in SCALE_ORDER: + if scale not in all_delta_heatmaps: + continue + hm = all_delta_heatmaps[scale] + layers = sorted(hm.keys()) + vals = [] + valid_layers = [] + for l in layers: + df = hm[l] + if df is not None and cat1 in df.index and cat2 in df.columns: + valid_layers.append(l) + vals.append(df.loc[cat1, cat2]) + if valid_layers: + ax.plot(valid_layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Delta Cosine Similarity') + ax.set_title(label, fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + ax.axhline(y=0, color='gray', linestyle='--', alpha=0.5) + + fig.suptitle(f'{model_type.upper()} - Delta-Based Similarity Trajectory', + fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_summary_barplot(all_consistency, all_alignment, model_type, save_path): + available_scales = [s for s in SCALE_ORDER if s in all_consistency] + if not available_scales: + return + + sample_cons = all_consistency[available_scales[0]] + max_layer = max(l for (_, l) in sample_cons.keys()) + + fig, axes = plt.subplots(1, 2, figsize=(16, 6)) + + ax = axes[0] + x = np.arange(len(GROUP_ORDER)) + width = 0.8 / len(available_scales) + for i, scale in enumerate(available_scales): + cons = all_consistency[scale] + vals = [cons.get((g, max_layer), {}).get('mean', 0) for g in GROUP_ORDER] + offset = (i - len(available_scales) / 2 + 0.5) * width + ax.bar(x + offset, vals, width, + label=SCALE_DISPLAY_NAMES.get(scale, scale), + color=SCALE_COLORS.get(scale, 'gray')) + ax.set_xticks(x) + ax.set_xticklabels(GROUP_ORDER) + ax.set_ylabel('Sign-Corrected Consistency') + 
ax.set_title(f'Consistency at Layer {max_layer}', fontweight='bold') + ax.legend(fontsize=8) + ax.grid(True, alpha=0.3, axis='y') + + ax = axes[1] + available_align = [s for s in available_scales if s in all_alignment] + if available_align: + vals = [all_alignment[s].get(max_layer, {}).get('per_sample_mean', 0) for s in available_align] + colors = [SCALE_COLORS.get(s, 'gray') for s in available_align] + ax.bar(range(len(vals)), vals, color=colors) + ax.set_xticks(range(len(vals))) + ax.set_xticklabels([SCALE_DISPLAY_NAMES.get(s, s) for s in available_align]) + ax.set_ylabel('cos(d_vert, d_dist)') + ax.set_title(f'Cross-Group Alignment at L{max_layer}\n(Lower=disentangled)', fontweight='bold') + ax.grid(True, alpha=0.3, axis='y') + + fig.suptitle(f'{model_type.upper()} - Summary at Deepest Layer', fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +# ============================================================================ +# Main Pipeline +# ============================================================================ + +def process_scale(args, scale, swap_pairs, quads): + # Resolve model path from the correct config dict + if args.model_type in MODEL_CONFIGS_NEW: + cls_name, model_path = MODEL_CONFIGS_NEW[args.model_type][scale] + else: + model_path = MODEL_CONFIGS[args.model_type][scale] + cls_name = None + + logger.info(f"\n{'='*60}") + logger.info(f"Processing {args.model_type} - {scale}" + + (f" [{cls_name}]" if cls_name else "")) + logger.info(f"Model path: {model_path}") + logger.info(f"{'='*60}") + + extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device) + target_layers = extractor.target_layers + + vlm_key = get_model_key(args.model_type, scale) + output_dir = os.path.join(args.output_dir, vlm_key) + plots_dir = os.path.join(output_dir, 'plots') + os.makedirs(plots_dir, exist_ok=True) + + # Phase A: 
Extract swap pair features + logger.info("\n--- Phase A: Extracting swap pair features ---") + swap_records = extract_swap_features(extractor, swap_pairs, + max_samples_per_category=args.max_samples_per_category) + + # Phase B: Extract cross-group features + logger.info("\n--- Phase B: Extracting cross-group features ---") + quad_records = extract_cross_group_features(extractor, quads) if quads else [] + + # Phase C: Analysis + + # Fix 8: Category validity check + logger.info("\n--- Phase C: Analysis ---") + category_validity = check_category_validity(swap_records, scale) + unreliable_cats = [c for c, v in category_validity.items() if not v['reliable']] + if unreliable_cats: + logger.warning(f" Unreliable categories: {unreliable_cats}") + + # Fix 5: Two types of consistency (all pairs) + within_cat_all, sign_corrected_all = compute_delta_consistency(swap_records, target_layers) + + # Fix 8: Both-correct filtered consistency + both_correct_records = filter_both_correct(swap_records) + logger.info(f" Both-correct pairs: {len(both_correct_records)}/{len(swap_records)}") + within_cat_bc, sign_corrected_bc = compute_delta_consistency(both_correct_records, target_layers) + + # Cross-group alignment + cross_alignment = compute_cross_group_alignment(quad_records, target_layers) + pred_stats = compute_prediction_stats(swap_records, scale) + + # Fix 7: Delta-based heatmaps (for all layers) + delta_heatmaps_all = {} + delta_heatmaps_bc = {} + for layer in target_layers: + delta_heatmaps_all[layer] = compute_delta_similarity_matrix(swap_records, layer) + if both_correct_records: + delta_heatmaps_bc[layer] = compute_delta_similarity_matrix(both_correct_records, layer) + + # Log key results + max_layer = max(target_layers) + for group in GROUP_ORDER: + key = (group, max_layer) + if key in sign_corrected_all: + logger.info(f" Sign-corrected [{group}, L{max_layer}]: " + f"{sign_corrected_all[key]['mean']:.4f} +/- {sign_corrected_all[key]['std']:.4f}") + if max_layer in 
cross_alignment: + ca = cross_alignment[max_layer] + logger.info(f" Cross-group alignment L{max_layer}: " + f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})") + logger.info(f" Accuracy orig={pred_stats['overall_acc_orig']:.1%}, " + f"swap={pred_stats['overall_acc_swap']:.1%}, " + f"both={pred_stats['overall_acc_both']:.1%}") + + # Phase D: Save results (both all_pairs and both_correct) + logger.info("\n--- Phase D: Saving results ---") + + # Save vectors NPZ ONCE with all records + correctness metadata + save_vectors_npz(scale, swap_records, quad_records, target_layers, output_dir) + + save_scale_results( + scale, swap_records, quad_records, + within_cat_all, sign_corrected_all, + cross_alignment, pred_stats, target_layers, + category_validity, delta_heatmaps_all, + output_dir, both_correct_tag='all_pairs', + ) + if both_correct_records: + save_scale_results( + scale, both_correct_records, quad_records, + within_cat_bc, sign_corrected_bc, + cross_alignment, pred_stats, target_layers, + category_validity, delta_heatmaps_bc, + output_dir, both_correct_tag='both_correct', + ) + + # Phase E: Per-scale plots (generate into separate subdirs) + logger.info("\n--- Phase E: Per-scale plots ---") + + for condition, wc_data, sc_data in [ + ('all', within_cat_all, sign_corrected_all), + ('both_correct', within_cat_bc, sign_corrected_bc), + ]: + if condition == 'both_correct' and not both_correct_records: + continue + + cond_dir = os.path.join(plots_dir, condition) + os.makedirs(cond_dir, exist_ok=True) + + wc_dir = os.path.join(cond_dir, 'within_cat_consistency') + sc_dir = os.path.join(cond_dir, 'sign_corrected') + ca_dir = os.path.join(cond_dir, 'cross_alignment') + os.makedirs(wc_dir, exist_ok=True) + os.makedirs(sc_dir, exist_ok=True) + os.makedirs(ca_dir, exist_ok=True) + + # Within-category consistency + plot_within_cat_consistency_trajectory( + wc_data, scale, args.model_type, + os.path.join(wc_dir, f'within_cat_consistency_{scale}.png')) + + # 
Sign-corrected consistency + plot_sign_corrected_consistency_trajectory( + sc_data, scale, args.model_type, + os.path.join(sc_dir, f'sign_corrected_consistency_{scale}.png')) + + # Cross-group alignment + if cross_alignment: + plot_cross_group_alignment_trajectory( + cross_alignment, scale, args.model_type, + os.path.join(ca_dir, f'cross_alignment_{scale}.png')) + + # PCA (from full NPZ) — 2D and 3D, all-pairs and both-correct + npz_path = os.path.join(output_dir, 'npz', f'vectors_{scale}.npz') + if os.path.exists(npz_path): + pca_dir = os.path.join(plots_dir, 'all', 'pca') + pca_3d_dir = os.path.join(plots_dir, 'all', 'pca_3d') + bc_pca_dir = os.path.join(plots_dir, 'both_correct', 'pca') + bc_pca_3d_dir = os.path.join(plots_dir, 'both_correct', 'pca_3d') + for d in (pca_dir, pca_3d_dir, bc_pca_dir, bc_pca_3d_dir): + os.makedirs(d, exist_ok=True) + plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir) + plot_pca_3d(npz_path, scale, args.model_type, pca_3d_dir) + plot_pca_embeddings(npz_path, scale, args.model_type, bc_pca_dir, bc_only=True) + plot_pca_3d(npz_path, scale, args.model_type, bc_pca_3d_dir, bc_only=True) + + # Prediction stats bar (per-scale) + if pred_stats: + pred_plot_dir = os.path.join(plots_dir, 'all', 'pred_stats') + os.makedirs(pred_plot_dir, exist_ok=True) + plot_pred_stats_bars([pred_stats], args.model_type, + os.path.join(pred_plot_dir, f'pred_stats_{scale}.png')) + + # Per-scale accuracy charts + if pred_stats: + acc_dir = os.path.join(output_dir, 'accuracy') + logger.info(f"\n--- Accuracy Charts [{scale}] ---") + run_accuracy_charts([pred_stats], {scale: category_validity}, args.model_type, acc_dir) + + # Per-scale all-layer heatmaps + PCA (moved from merge phase) + logger.info(f"\n--- All-Layer Heatmaps [{scale}] ---") + run_all_layer_heatmaps(output_dir, args.model_type, [scale]) + logger.info(f"\n--- All-Layer PCA [{scale}] ---") + run_all_layer_pca(output_dir, args.model_type, [scale]) + + # Cleanup + del swap_records, 
quad_records, both_correct_records + extractor.cleanup() + + logger.info(f"\n Scale {scale} complete.") + + +# ============================================================================ +# Accuracy Chart (integrated from accuracy_chart.py) +# ============================================================================ + +def _acc_plot_group_bars(pred_stats, model_type, ax_list): + available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)] + x = np.arange(3) + width = 0.8 / max(len(available), 1) + for idx, group in enumerate(GROUP_ORDER): + ax = ax_list[idx] + for i, scale in enumerate(available): + entry = next((d for d in pred_stats if d['scale'] == scale), None) + if entry is None: + continue + vals = [entry.get(f'{group}_acc_orig', 0), + entry.get(f'{group}_acc_swap', 0), + entry.get(f'{group}_acc_both', 0)] + offset = (i - len(available) / 2 + 0.5) * width + ax.bar(x + offset, vals, width, label=scale, + color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85) + ax.set_xticks(x) + ax.set_xticklabels(['orig', 'swap', 'both'], fontsize=10) + ax.set_ylabel('Accuracy', fontsize=9) + ax.set_title(group.capitalize(), fontweight='bold', fontsize=11, + color=GROUP_COLORS.get(group, 'black')) + ax.legend(fontsize=7, ncol=2) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3, axis='y') + + +def _acc_plot_both_trajectory(pred_stats, model_type, ax): + available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)] + x_ticks = range(len(available)) + for group in GROUP_ORDER: + y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(f'{group}_acc_both', 0) + for s in available] + ax.plot(x_ticks, y_vals, '-o', color=GROUP_COLORS.get(group, 'gray'), + label=group, linewidth=2.5, markersize=7) + y_overall = [next((d for d in pred_stats if d['scale'] == s), {}).get('overall_acc_both', 0) + for s in available] + ax.plot(x_ticks, y_overall, '--s', 
color='black', label='overall', + linewidth=2, markersize=6, alpha=0.7) + ax.set_xticks(list(x_ticks)) + ax.set_xticklabels(available, fontsize=9) + ax.set_xlabel('Scale', fontsize=9) + ax.set_ylabel('Accuracy (both correct)', fontsize=9) + ax.set_title('Both-Correct Accuracy Trajectory', fontweight='bold', fontsize=11) + ax.legend(fontsize=9) + ax.set_ylim(0, 1.05) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3) + + +def _acc_plot_overall_trajectory(pred_stats, model_type, ax): + available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)] + x_ticks = range(len(available)) + for metric, label, ls in [ + ('overall_acc_orig', 'orig', '-o'), + ('overall_acc_swap', 'swap', '-s'), + ('overall_acc_both', 'both', '-^'), + ]: + y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(metric, 0) + for s in available] + ax.plot(x_ticks, y_vals, ls, label=label, linewidth=2.2, markersize=6) + ax.set_xticks(list(x_ticks)) + ax.set_xticklabels(available, fontsize=9) + ax.set_xlabel('Scale', fontsize=9) + ax.set_ylabel('Overall Accuracy', fontsize=9) + ax.set_title('Overall Accuracy Trajectory', fontweight='bold', fontsize=11) + ax.legend(fontsize=9) + ax.set_ylim(0, 1.05) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3) + + +def _acc_plot_category_accuracy(cat_validity, model_type, ax_orig, ax_swap, pred_stats=None): + available = [s for s in SCALE_ORDER if s in cat_validity] + cats_with_overall = CATEGORY_ORDER + ['overall'] + x = np.arange(len(cats_with_overall)) + width = 0.8 / max(len(available), 1) + overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'} + for ax, metric, title in [ + (ax_orig, 'acc_orig', 'Per-Category Accuracy (orig)'), + (ax_swap, 'acc_swap', 'Per-Category Accuracy (swap)'), + ]: + for i, scale in enumerate(available): + cv = cat_validity[scale] + vals = [cv.get(cat, {}).get(metric, 0) for cat in 
CATEGORY_ORDER] + if pred_stats is not None: + entry = next((d for d in pred_stats if d['scale'] == scale), None) + vals.append(entry.get(overall_key[metric], 0) if entry else 0) + else: + vals.append(0) + offset = (i - len(available) / 2 + 0.5) * width + ax.bar(x + offset, vals, width, label=scale, + color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85) + for j, cat in enumerate(CATEGORY_ORDER): + ax.axvspan(j - 0.45, j + 0.45, color=CAT_COLORS.get(cat, 'gray'), alpha=0.06, linewidth=0) + ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', linewidth=1.2, linestyle=':', alpha=0.6) + ax.set_xticks(x) + ax.set_xticklabels(cats_with_overall, fontsize=9, rotation=15) + ax.set_ylabel('Accuracy', fontsize=9) + ax.set_title(title, fontweight='bold', fontsize=11) + ax.legend(fontsize=7, ncol=2) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3, axis='y') + if available: + last_cv = cat_validity[available[-1]] + for j, cat in enumerate(CATEGORY_ORDER): + if not last_cv.get(cat, {}).get('reliable', True): + ax.text(j, 1.08, '✗', ha='center', va='center', + fontsize=9, color='red', fontweight='bold') + + +def _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=None): + cats_with_overall = CATEGORY_ORDER + ['overall'] + overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'} + for scale in sorted(cat_validity.keys(), + key=lambda s: SCALE_ORDER.index(s) if s in SCALE_ORDER else 99): + cv = cat_validity[scale] + ps_entry = next((d for d in pred_stats if d['scale'] == scale), None) if pred_stats else None + fig, axes = plt.subplots(1, 2, figsize=(16, 5)) + x = np.arange(len(cats_with_overall)) + width = 0.55 + for ax, metric, title in [ + (axes[0], 'acc_orig', f'acc_orig ({scale})'), + (axes[1], 'acc_swap', f'acc_swap ({scale})'), + ]: + vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER] + overall_val = ps_entry.get(overall_key[metric], 0) if 
ps_entry else 0 + vals.append(overall_val) + colors = [CAT_COLORS.get(cat, 'gray') for cat in CATEGORY_ORDER] + ['#333333'] + bars = ax.bar(x, vals, width, color=colors, alpha=0.85, edgecolor='white') + ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', + linewidth=1.2, linestyle=':', alpha=0.6) + ax.set_xticks(x) + ax.set_xticklabels(cats_with_overall, fontsize=10) + ax.set_ylabel('Accuracy', fontsize=10) + ax.set_title(title, fontweight='bold', fontsize=12) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) + ax.grid(True, alpha=0.3, axis='y') + for bar, cat in zip(bars, cats_with_overall): + reliable = cv.get(cat, {}).get('reliable', True) if cat != 'overall' else True + h = bar.get_height() + ax.text(bar.get_x() + bar.get_width() / 2, h + 0.02, + f'{h:.2f}' + ('' if reliable else ' ✗'), + ha='center', va='bottom', fontsize=8, + color='red' if not reliable else 'black') + fig.suptitle(f'{model_type.upper()} - Category Accuracy ({scale})', + fontsize=13, fontweight='bold') + plt.tight_layout() + out = os.path.join(save_dir, f'category_accuracy_{scale}.png') + plt.savefig(out, dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {out}") + + +def run_accuracy_charts(pred_stats, cat_validity, model_type, save_dir): + """Generate all accuracy chart plots into save_dir.""" + os.makedirs(save_dir, exist_ok=True) + + # Group bars + fig, axes = plt.subplots(1, 3, figsize=(21, 6)) + _acc_plot_group_bars(pred_stats, model_type, axes) + fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', + fontsize=15, fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, 'accuracy_group_bars.png'), dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_group_bars.png')}") + + # Trajectory + fig, axes = plt.subplots(1, 2, figsize=(16, 6)) + _acc_plot_both_trajectory(pred_stats, model_type, axes[0]) + _acc_plot_overall_trajectory(pred_stats, model_type, 
axes[1]) + fig.suptitle(f'{model_type.upper()} - Accuracy Trajectory Across Scales', + fontsize=14, fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, 'accuracy_trajectory.png'), dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_trajectory.png')}") + + if cat_validity: + # Category bars (all scales overlay) + fig, axes = plt.subplots(1, 2, figsize=(20, 6)) + _acc_plot_category_accuracy(cat_validity, model_type, axes[0], axes[1], + pred_stats=pred_stats) + fig.suptitle(f'{model_type.upper()} - Per-Category Accuracy Across Scales', + fontsize=14, fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, 'accuracy_category.png'), dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_category.png')}") + + # Per-scale category bars + _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=pred_stats) + + # Combined accuracy_chart.png + fig = plt.figure(figsize=(24, 14)) + ax_h = fig.add_subplot(3, 3, 1) + ax_v = fig.add_subplot(3, 3, 2) + ax_d = fig.add_subplot(3, 3, 3) + _acc_plot_group_bars(pred_stats, model_type, [ax_h, ax_v, ax_d]) + ax_tb = fig.add_subplot(3, 3, 4) + ax_to = fig.add_subplot(3, 3, 5) + _acc_plot_both_trajectory(pred_stats, model_type, ax_tb) + _acc_plot_overall_trajectory(pred_stats, model_type, ax_to) + ax_note = fig.add_subplot(3, 3, 6) + ax_note.axis('off') + available_scales = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)] + ax_note.text(0.1, 0.6, + f'Scales: {", ".join(available_scales)}\n\n✗ = unreliable category\n-- = 0.5 chance level', + transform=ax_note.transAxes, fontsize=11, va='top', family='monospace') + if cat_validity: + ax_co = fig.add_subplot(3, 2, 5) + ax_cs = fig.add_subplot(3, 2, 6) + _acc_plot_category_accuracy(cat_validity, model_type, ax_co, ax_cs, pred_stats=pred_stats) + fig.suptitle(f'{model_type.upper()} — Accuracy Summary', + fontsize=17, 
fontweight='bold', y=1.01) + plt.tight_layout() + plt.savefig(os.path.join(save_dir, 'accuracy_chart.png'), dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_chart.png')}") + + +# ============================================================================ +# Unify Consistency Y-axis (integrated from unify_consistency_ylim.py) +# ============================================================================ + +def _ylim_compute(all_vals, margin_ratio=0.08): + if not all_vals: + return -1, 1 + ymin, ymax = min(all_vals), max(all_vals) + margin = (ymax - ymin) * margin_ratio + return ymin - margin, ymax + margin + + +def _ylim_load_keyed_json(path): + if not os.path.exists(path): + return None + with open(path) as f: + raw = json.load(f) + if not raw: + return None + result = {} + for key, vals in raw.items(): + parts = key.rsplit('_L', 1) + if len(parts) == 2: + result[(parts[0], int(parts[1]))] = vals + return result if result else None + + +def _ylim_load_alignment_json(path): + if not os.path.exists(path): + return None + with open(path) as f: + raw = json.load(f) + if not raw: + return None + result = {int(k[1:]): v for k, v in raw.items() if k.startswith('L')} + return result if result else None + + +def _ylim_plot_sign_corrected(data, scale, model_type, save_path, ylim): + fig, ax = plt.subplots(figsize=(12, 6)) + for group in GROUP_ORDER: + layers, vals = [], [] + for (g, l), v in sorted(data.items(), key=lambda x: x[0][1]): + if g == group: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-o', color=GROUP_COLORS[group], + label=group, linewidth=2, markersize=3) + ax.set_ylim(ylim) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Sign-Corrected Consistency') + ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency', + fontweight='bold') + ax.legend(fontsize=11) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, 
def _ylim_plot_within_cat(data, scale, model_type, save_path, ylim):
    """Redraw the within-category consistency trajectories with a fixed y-range.

    data maps (category, layer) -> {'mean': ...}; one line is drawn per
    category in CATEGORY_ORDER, using the shared CAT_COLORS palette.
    """
    fig, ax = plt.subplots(figsize=(12, 6))
    ordered = sorted(data.items(), key=lambda item: item[0][1])
    for cat in CATEGORY_ORDER:
        xs = [layer for (c, layer), _ in ordered if c == cat]
        ys = [stats['mean'] for (c, _), stats in ordered if c == cat]
        if xs:
            ax.plot(xs, ys, '-o', color=CAT_COLORS[cat],
                    label=cat, linewidth=2, markersize=3)
    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Within-Category Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency',
                 fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_plot_cross_alignment(data, scale, model_type, save_path, ylim):
    """Redraw the cross-group alignment trajectory with a fixed y-range.

    Plots the per-sample alignment, the mean-delta alignment, and a
    permutation-control band (mean +/- 2 std) per layer.
    """
    fig, ax = plt.subplots(figsize=(12, 6))
    layers = sorted(data.keys())
    per_sample = [data[l]['per_sample_mean'] for l in layers]
    mean_delta = [data[l]['mean_delta_alignment'] for l in layers]
    perm_mean = [data[l]['permutation_mean'] for l in layers]
    perm_std = [data[l]['permutation_std'] for l in layers]

    ax.plot(layers, per_sample, '-o', color='#d62728',
            label='cos(d_vert, d_dist) per-sample mean', linewidth=2.5, markersize=3)
    ax.plot(layers, mean_delta, '--s', color='#e377c2',
            label='cos(mean_d_vert, mean_d_dist)', linewidth=1.5, markersize=3)
    ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5)
    band_lo = [m - 2 * s for m, s in zip(perm_mean, perm_std)]
    band_hi = [m + 2 * s for m, s in zip(perm_mean, perm_std)]
    ax.fill_between(layers, band_lo, band_hi, alpha=0.2, color='gray')

    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Cosine Alignment')
    ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)',
                 fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_process_plot_type(scale_dir_map, plots_dir, conditions, model_type,
                            plot_name, json_pattern, loader, val_gatherer, plotter,
                            subfolder=None):
    """Re-plot one plot type across all conditions with a unified y-axis.

    scale_dir_map: {scale: path_to_saved_data/vlm_key} (ordered dict)
    """
    logger.info(f" [unify ylim] {plot_name}")
    for condition, condition_tag in conditions:
        cond_plot_dir = os.path.join(plots_dir, condition)
        if not os.path.isdir(cond_plot_dir):
            continue
        save_dir = os.path.join(cond_plot_dir, subfolder) if subfolder else cond_plot_dir
        os.makedirs(save_dir, exist_ok=True)

        # Load every scale that has the relevant JSON for this condition.
        all_data = {}
        for scale, scale_dir in scale_dir_map.items():
            json_path = os.path.join(scale_dir, 'json',
                                     json_pattern.format(scale=scale, tag=condition_tag))
            loaded = loader(json_path)
            if loaded:
                all_data[scale] = loaded
        if not all_data:
            continue

        # One shared y-range across all scales, then re-plot each scale.
        ylim = _ylim_compute(val_gatherer(all_data))
        for scale, data in all_data.items():
            plotter(data, scale, model_type,
                    os.path.join(save_dir, f'{plot_name}_{scale}.png'), ylim)
        logger.info(f" {condition}: y=[{ylim[0]:.4f}, {ylim[1]:.4f}], {len(all_data)} scales")


def run_unify_ylim(scale_dir_map: dict, plots_dir: str, model_type: str):
    """Unify y-axis for sign_corrected, within_cat, and cross_alignment plots.

    scale_dir_map: {scale: path_to_saved_data/vlm_key}
    plots_dir: compare/{group_name}/plots/ (where unified plots are written)
    """
    conditions = [
        ('all', 'all_pairs'),
        ('both_correct', 'both_correct'),
    ]

    def gather_keyed(all_data):
        # Flatten every per-(category, layer) mean across all scales.
        return [v['mean'] for data in all_data.values() for v in data.values()]

    def gather_alignment(all_data):
        # Collect every curve the alignment plot draws, including the band edges.
        vals = []
        for data in all_data.values():
            for v in data.values():
                vals += [v['per_sample_mean'], v['mean_delta_alignment'],
                         v['permutation_mean'] + 2 * v['permutation_std'],
                         v['permutation_mean'] - 2 * v['permutation_std']]
        return vals

    # One spec per plot family; processed in the same order as before.
    plot_specs = [
        dict(plot_name='sign_corrected_consistency',
             json_pattern='sign_corrected_consistency_{scale}_{tag}.json',
             loader=_ylim_load_keyed_json,
             val_gatherer=gather_keyed,
             plotter=_ylim_plot_sign_corrected,
             subfolder='sign_corrected'),
        dict(plot_name='within_cat_consistency',
             json_pattern='within_cat_consistency_{scale}_{tag}.json',
             loader=_ylim_load_keyed_json,
             val_gatherer=gather_keyed,
             plotter=_ylim_plot_within_cat,
             subfolder='within_cat_consistency'),
        dict(plot_name='cross_alignment',
             json_pattern='cross_alignment_{scale}.json',
             loader=_ylim_load_alignment_json,
             val_gatherer=gather_alignment,
             plotter=_ylim_plot_cross_alignment,
             subfolder='cross_alignment'),
    ]
    for spec in plot_specs:
        _ylim_process_plot_type(scale_dir_map, plots_dir, conditions, model_type, **spec)
+ """ + mc = MERGE_ONLY_CONFIGS[model_type] + ok = True + for req_model_type in mc['required_dirs']: + # Look for any saved_data/{req_model_type}_{scale}/ directories + if not os.path.isdir(output_dir): + logger.warning( + f"[{model_type}] output_dir not found: {output_dir}\n" + f" → Run inference first: python swap_analysis.py --model_type {req_model_type}" + ) + ok = False + continue + matching = [ + d for d in os.listdir(output_dir) + if d.startswith(f'{req_model_type}_') + and os.path.isdir(os.path.join(output_dir, d)) + ] + if not matching: + logger.warning( + f"[{model_type}] No '{req_model_type}_*' directories found in {output_dir}\n" + f" → Run inference first: python swap_analysis.py --model_type {req_model_type}" + ) + ok = False + continue + any_data = False + for d in matching: + json_dir = os.path.join(output_dir, d, 'json') + if os.path.isdir(json_dir) and any( + f.startswith('pred_stats_') for f in os.listdir(json_dir) + ): + scale = d[len(req_model_type) + 1:] # strip "{req_model_type}_" prefix + logger.info(f" [{req_model_type}/{scale}] found data in {d}/") + any_data = True + if not any_data: + logger.warning( + f"[{model_type}] No pred_stats JSON found in any '{req_model_type}_*' directory.\n" + f" → Inference may not have completed for '{req_model_type}'." + ) + ok = False + return ok + + +def _load_scale_data_multi(output_dir: str, model_type: str, scale: str, scale_sources: dict): + """Load per-scale data for one scale, looking in the correct source directory. + + With the new per-scale layout, data lives at {output_dir}/{src_model_type}_{scale}/. + Returns (sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc). + Any unavailable item is None / {}. 
+ """ + src_model_type = scale_sources.get(scale, model_type) + src_dir = os.path.join(output_dir, get_model_key(src_model_type, scale)) + + sc = load_scale_consistency(src_dir, scale, 'all_pairs') + sc_bc = load_scale_consistency(src_dir, scale, 'both_correct') + wc = load_within_cat_consistency(src_dir, scale, 'all_pairs') + wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct') + align = load_scale_alignment(src_dir, scale) + + pred_stat = None + pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json') + if os.path.exists(pred_path): + with open(pred_path) as f: + pred_stat = json.load(f) + + cat_validity = None + cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json') + if os.path.exists(cv_path): + with open(cv_path) as f: + cat_validity = json.load(f) + + dh = load_delta_heatmaps(src_dir, scale, 'all_pairs') + dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct') + + return sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc + + +# --------------------------------------------------------------------------- +# All-layer heatmap + PCA helpers (called from run_merge / run_merge_extended) +# --------------------------------------------------------------------------- + +def _get_csv_layers(csv_dir: str, scale: str, tag: str) -> list: + """Return sorted list of layer indices that have a delta_similarity CSV.""" + import glob as _glob + pattern = os.path.join(csv_dir, f'delta_similarity_{scale}_L*_{tag}.csv') + layers = [] + for fpath in _glob.glob(pattern): + m = re.search( + rf'delta_similarity_{re.escape(scale)}_L(\d+)_{re.escape(tag)}\.csv$', + os.path.basename(fpath)) + if m: + layers.append(int(m.group(1))) + return sorted(layers) + + +def run_all_layer_heatmaps(model_dir: str, model_type: str, scales: list): + """Generate delta-similarity heatmaps for ALL layers from pre-computed CSVs. 
def run_all_layer_heatmaps(model_dir: str, model_type: str, scales: list):
    """Generate delta-similarity heatmaps for ALL layers from pre-computed CSVs.

    Reads {model_dir}/csv/delta_similarity_{scale}_L{n}_{tag}.csv
    Writes {model_dir}/plots/all/heatmap/heatmap_{scale}_L{n}.png (all_pairs)
           {model_dir}/plots/both_correct/heatmap/heatmap_{scale}_L{n}.png (both_correct)

    Skips a scale if the NPZ is missing or any all_pairs CSV is absent
    (indicates inference was not fully completed for that scale).
    """
    tag_dirs = {
        'all_pairs': os.path.join(model_dir, 'plots', 'all', 'heatmap'),
        'both_correct': os.path.join(model_dir, 'plots', 'both_correct', 'heatmap'),
    }
    csv_dir = os.path.join(model_dir, 'csv')

    for scale in scales:
        npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
        if not os.path.exists(npz_path):
            logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping heatmaps.')
            continue

        # Discover which layers were saved, from the NPZ key names (orig_L{n}).
        npz = np.load(npz_path, allow_pickle=True)
        npz_layers = sorted(
            int(key.replace('orig_L', ''))
            for key in npz.files if key.startswith('orig_L')
        )
        npz.close()

        if not npz_layers:
            logger.warning(f' [{model_type}/{scale}] No orig_L* keys in NPZ, skipping heatmaps.')
            continue

        # Require a complete set of all_pairs CSVs before plotting anything.
        missing = set(npz_layers) - set(_get_csv_layers(csv_dir, scale, 'all_pairs'))
        if missing:
            logger.warning(
                f' [{model_type}/{scale}] {len(missing)} NPZ layers lack CSVs '
                f'(e.g. L{sorted(missing)[:5]}). Skipping all-layer heatmaps.')
            continue

        for out_dir in tag_dirs.values():
            os.makedirs(out_dir, exist_ok=True)

        logger.info(f' [{model_type}/{scale}] Generating heatmaps for {len(npz_layers)} layers...')
        saved = 0
        for layer in npz_layers:
            for tag, out_dir in tag_dirs.items():
                csv_path = os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{tag}.csv')
                if not os.path.exists(csv_path):
                    continue  # both_correct CSV may be absent for some layers
                frame = pd.read_csv(csv_path, index_col=0)
                cats = [c for c in CATEGORY_ORDER if c in frame.index]
                if not cats:
                    continue
                frame = frame.loc[cats, cats]
                label = 'both-correct' if tag == 'both_correct' else 'all pairs'
                title = (
                    f'{model_type.upper()} ({scale}) \u2014 Delta Heatmap L{layer} '
                    f'({label})'
                )
                plot_delta_heatmap(frame, title,
                                   os.path.join(out_dir, f'heatmap_{scale}_L{layer}.png'))
                saved += 1
        logger.info(f' [{model_type}/{scale}] Saved {saved} heatmaps')
def run_all_layer_pca(model_dir: str, model_type: str, scales: list):
    """Generate 2D and 3D PCA plots for ALL layers from saved NPZ files.

    Writes {model_dir}/plots/all/pca/pca_{scale}_L{n}.png (all pairs)
           {model_dir}/plots/all/pca_3d/pca_{scale}_L{n}.png
           {model_dir}/plots/both_correct/pca/pca_{scale}_L{n}.png (both-correct only)
           {model_dir}/plots/both_correct/pca_3d/pca_{scale}_L{n}.png
    """
    for scale in scales:
        npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
        if not os.path.exists(npz_path):
            logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping PCA.')
            continue

        # All-pairs PCA
        pca_2d_dir = os.path.join(model_dir, 'plots', 'all', 'pca')
        pca_3d_dir = os.path.join(model_dir, 'plots', 'all', 'pca_3d')
        os.makedirs(pca_2d_dir, exist_ok=True)
        os.makedirs(pca_3d_dir, exist_ok=True)
        logger.info(f' [{model_type}/{scale}] Generating all-layer 2D PCA...')
        plot_pca_embeddings(npz_path, scale, model_type, pca_2d_dir)
        logger.info(f' [{model_type}/{scale}] Generating all-layer 3D PCA...')
        plot_pca_3d(npz_path, scale, model_type, pca_3d_dir)

        # Both-correct PCA (same NPZ, filtered to both-correct samples via bc_only)
        bc_pca_2d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca')
        bc_pca_3d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca_3d')
        os.makedirs(bc_pca_2d_dir, exist_ok=True)
        os.makedirs(bc_pca_3d_dir, exist_ok=True)
        logger.info(f' [{model_type}/{scale}] Generating both-correct 2D PCA...')
        plot_pca_embeddings(npz_path, scale, model_type, bc_pca_2d_dir, bc_only=True)
        logger.info(f' [{model_type}/{scale}] Generating both-correct 3D PCA...')
        plot_pca_3d(npz_path, scale, model_type, bc_pca_3d_dir, bc_only=True)


def run_merge(args):
    """Merge mode for legacy model_types: build cross-scale comparison plots.

    Reads per-scale JSON artifacts from saved_data/{model_type}_{scale}/ and
    writes combined plots + a summary CSV under compare/{group_name}/.
    """
    # Per-scale data lives in saved_data/{model_type}_{scale}/
    def _scale_dir(scale):
        return os.path.join(args.output_dir, get_model_key(args.model_type, scale))

    # Cross-scale (compare) output: {question_type}/compare/{group_name}/
    group_name = args.group_name or args.model_type
    if args.merge_output_dir:
        merge_out = args.merge_output_dir
    else:
        qt_root = os.path.dirname(args.output_dir.rstrip('/'))  # one level up from saved_data/
        merge_out = os.path.join(qt_root, 'compare', group_name)
    plots_dir = os.path.join(merge_out, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    # Canonical display order; only scales actually requested are merged.
    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer',
                   'roborefer_depth', '10pct', '20pct', '30pct']
    available_scales = [s for s in scale_order if s in args.scales]

    # Load per-scale results (each loader tolerates missing files)
    all_sign_corrected = {}
    all_sign_corrected_bc = {}
    all_within_cat = {}
    all_within_cat_bc = {}
    all_alignment = {}
    all_pred_stats = []
    all_cat_validity = {}
    all_delta_heatmaps = {}
    all_delta_heatmaps_bc = {}

    for scale in available_scales:
        sd = _scale_dir(scale)
        sc = load_scale_consistency(sd, scale, 'all_pairs')
        if sc:
            all_sign_corrected[scale] = sc
        sc_bc = load_scale_consistency(sd, scale, 'both_correct')
        if sc_bc:
            all_sign_corrected_bc[scale] = sc_bc
        wc = load_within_cat_consistency(sd, scale, 'all_pairs')
        if wc:
            all_within_cat[scale] = wc
        wc_bc = load_within_cat_consistency(sd, scale, 'both_correct')
        if wc_bc:
            all_within_cat_bc[scale] = wc_bc
        align = load_scale_alignment(sd, scale)
        if align:
            all_alignment[scale] = align
        pred_path = os.path.join(sd, 'json', f'pred_stats_{scale}.json')
        if os.path.exists(pred_path):
            with open(pred_path) as f:
                all_pred_stats.append(json.load(f))
        cv_path = os.path.join(sd, 'json', f'category_validity_{scale}.json')
        if os.path.exists(cv_path):
            with open(cv_path) as f:
                all_cat_validity[scale] = json.load(f)
        dh = load_delta_heatmaps(sd, scale, 'all_pairs')
        if dh:
            all_delta_heatmaps[scale] = dh
        dh_bc = load_delta_heatmaps(sd, scale, 'both_correct')
        if dh_bc:
            all_delta_heatmaps_bc[scale] = dh_bc

        logger.info(f" Loaded data for {scale}")

    # Generate cross-scale plots into condition subdirs ('all' vs 'both_correct')
    for condition, sc_data, wc_data, dh_data, tag_label in [
        ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
        ('both_correct', all_sign_corrected_bc, all_within_cat_bc,
         all_delta_heatmaps_bc, 'both-correct'),
    ]:
        cond_dir = os.path.join(plots_dir, condition)
        sc_dir = os.path.join(cond_dir, 'sign_corrected')
        wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
        dt_dir = os.path.join(cond_dir, 'delta_trajectory')
        os.makedirs(sc_dir, exist_ok=True)
        os.makedirs(wc_dir, exist_ok=True)
        os.makedirs(dt_dir, exist_ok=True)

        # Cross-scale plots only make sense with >1 scale of data.
        if len(sc_data) > 1:
            plot_cross_scale_consistency(
                sc_data, args.model_type,
                os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
                title_prefix=f'Sign-Corrected ({tag_label})')

        if len(wc_data) > 1:
            plot_cross_scale_within_cat_consistency(
                wc_data, args.model_type,
                os.path.join(wc_dir, 'cross_scale_within_cat.png'))

        if dh_data:
            plot_delta_trajectory(dh_data, args.model_type,
                                  os.path.join(dt_dir, 'delta_trajectory.png'))

    # Cross-scale alignment + pred stats + summary (shared across conditions)
    all_cond_dir = os.path.join(plots_dir, 'all')
    ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
    pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats')
    summary_dir = os.path.join(all_cond_dir, 'summary')
    os.makedirs(ca_dir, exist_ok=True)
    os.makedirs(pred_stats_dir, exist_ok=True)
    os.makedirs(summary_dir, exist_ok=True)

    if len(all_alignment) > 1:
        plot_cross_scale_alignment(
            all_alignment, args.model_type,
            os.path.join(ca_dir, 'cross_scale_alignment.png'))

    # Prediction stats plots
    if all_pred_stats:
        plot_pred_stats_bars(all_pred_stats, args.model_type,
                             os.path.join(pred_stats_dir, 'pred_stats_bars.png'))
        plot_pred_stats_trajectory(all_pred_stats, args.model_type,
                                   os.path.join(pred_stats_dir, 'pred_stats_trajectory.png'))

    # Summary barplot
    if all_sign_corrected:
        plot_summary_barplot(
            all_sign_corrected, all_alignment, args.model_type,
            os.path.join(summary_dir, 'summary_barplot.png'))

    # Summary CSV: one row per scale (pred stats + deepest-layer alignment)
    summary_rows = []
    for scale in available_scales:
        ps = next((p for p in all_pred_stats if p.get('scale') == scale), None)
        if ps is None:
            continue
        row = dict(ps)
        if scale in all_alignment:
            max_layer = max(all_alignment[scale].keys())
            row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
            row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
        summary_rows.append(row)

    if summary_rows:
        csv_dir = os.path.join(merge_out, 'csv')
        os.makedirs(csv_dir, exist_ok=True)
        pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False)

    # Accuracy charts (cross-scale)
    if all_pred_stats:
        acc_dir = os.path.join(plots_dir, 'accuracy')
        logger.info("\n--- Accuracy Charts ---")
        run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir)

    # Unify y-axis across scales for per-scale trajectory plots
    logger.info("\n--- Unifying Y-axis ---")
    scale_dir_map = {s: _scale_dir(s) for s in available_scales}
    run_unify_ylim(scale_dir_map, plots_dir, args.model_type)

    # All-layer heatmaps + PCA are now done per-scale in process_scale(); skip here.

    logger.info(f"\n=== Merge Complete ===\nResults in: {merge_out}")
def run_merge_extended(args):
    """Generate cross-scale plots for new / merge-only model_types.

    - Runnable types (molmo_big, qwen_big, qwen_super, big_trio):
        loads all data from results/{model_type}/ and saves plots there.
    - Merge-only types (molmo_all, qwen_all):
        loads per-scale data from the respective source directories,
        saves all cross-scale plots to results/{model_type}/.
    """
    is_merge_only = args.model_type in MERGE_ONLY_CONFIGS

    # ── Determine scale order and data source strategy ────────────────────────
    if is_merge_only:
        mc = MERGE_ONLY_CONFIGS[args.model_type]
        scale_order = mc['scale_order']
        scale_sources = mc['scale_sources']

        logger.info(f"\n=== MERGE-ONLY mode: {args.model_type} ===")
        logger.info("Checking required source directories...")
        sources_ok = _check_merge_only_sources(args.output_dir, args.model_type)
        if not sources_ok:
            # Soft failure: continue with whatever data exists.
            logger.warning(
                f"\n[WARNING] One or more source directories are missing or incomplete.\n"
                f" Cross-scale plots for '{args.model_type}' may be partial.\n"
                f" Run the missing model types first (see warnings above), then retry merge."
            )
    else:
        scale_order = SCALE_ORDERS_NEW.get(
            args.model_type, list(MODEL_CONFIGS_NEW[args.model_type]))
        scale_sources = None  # all data lives in results/{model_type}/

    available_scales = [s for s in scale_order if s in args.scales]
    logger.info(f"Merging scales (in order): {available_scales}")

    # ── Determine output dir (compare/{group_name}/) ──────────────────────────
    group_name = args.group_name or args.model_type
    if args.merge_output_dir:
        merge_out = args.merge_output_dir
    else:
        qt_root = os.path.dirname(args.output_dir.rstrip('/'))
        merge_out = os.path.join(qt_root, 'compare', group_name)
    plots_dir = os.path.join(merge_out, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    # ── Per-scale data directory resolver ─────────────────────────────────────
    def _scale_dir(scale):
        # Merge-only types pull each scale from its original source model_type.
        if is_merge_only:
            src_model_type = scale_sources[scale]
        else:
            src_model_type = args.model_type
        return os.path.join(args.output_dir, get_model_key(src_model_type, scale))

    # ── Load per-scale data ───────────────────────────────────────────────────
    all_sign_corrected = {}
    all_sign_corrected_bc = {}
    all_within_cat = {}
    all_within_cat_bc = {}
    all_alignment = {}
    all_pred_stats = []
    all_cat_validity = {}
    all_delta_heatmaps = {}
    all_delta_heatmaps_bc = {}

    for scale in available_scales:
        sd = _scale_dir(scale)
        sc = load_scale_consistency(sd, scale, 'all_pairs')
        sc_bc = load_scale_consistency(sd, scale, 'both_correct')
        wc = load_within_cat_consistency(sd, scale, 'all_pairs')
        wc_bc = load_within_cat_consistency(sd, scale, 'both_correct')
        align = load_scale_alignment(sd, scale)

        pred_stat = None
        pred_path = os.path.join(sd, 'json', f'pred_stats_{scale}.json')
        if os.path.exists(pred_path):
            with open(pred_path) as f:
                pred_stat = json.load(f)

        cat_validity = None
        cv_path = os.path.join(sd, 'json', f'category_validity_{scale}.json')
        if os.path.exists(cv_path):
            with open(cv_path) as f:
                cat_validity = json.load(f)

        dh = load_delta_heatmaps(sd, scale, 'all_pairs')
        dh_bc = load_delta_heatmaps(sd, scale, 'both_correct')

        # Only record non-empty results so downstream len() checks work.
        if sc:
            all_sign_corrected[scale] = sc
        if sc_bc:
            all_sign_corrected_bc[scale] = sc_bc
        if wc:
            all_within_cat[scale] = wc
        if wc_bc:
            all_within_cat_bc[scale] = wc_bc
        if align:
            all_alignment[scale] = align
        if pred_stat is not None:
            all_pred_stats.append(pred_stat)
        if cat_validity is not None:
            all_cat_validity[scale] = cat_validity
        if dh:
            all_delta_heatmaps[scale] = dh
        if dh_bc:
            all_delta_heatmaps_bc[scale] = dh_bc

        logger.info(f" Loaded data for '{scale}'" +
                    (f" (from '{scale_sources[scale]}')" if is_merge_only else ""))

    # ── Cross-scale plots ─────────────────────────────────────────────────────
    for condition, sc_data, wc_data, dh_data, tag_label in [
        ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
        ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'),
    ]:
        cond_dir = os.path.join(plots_dir, condition)
        sc_dir = os.path.join(cond_dir, 'sign_corrected')
        wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
        dt_dir = os.path.join(cond_dir, 'delta_trajectory')
        os.makedirs(sc_dir, exist_ok=True)
        os.makedirs(wc_dir, exist_ok=True)
        os.makedirs(dt_dir, exist_ok=True)

        if len(sc_data) > 1:
            plot_cross_scale_consistency(
                sc_data, args.model_type,
                os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
                title_prefix=f'Sign-Corrected ({tag_label})')

        if len(wc_data) > 1:
            plot_cross_scale_within_cat_consistency(
                wc_data, args.model_type,
                os.path.join(wc_dir, 'cross_scale_within_cat.png'))

        if dh_data:
            plot_delta_trajectory(
                dh_data, args.model_type,
                os.path.join(dt_dir, 'delta_trajectory.png'))

    # ── Alignment and prediction stats ────────────────────────────────────────
    all_cond_dir = os.path.join(plots_dir, 'all')
    ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
    pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats')
    summary_dir = os.path.join(all_cond_dir, 'summary')
    os.makedirs(ca_dir, exist_ok=True)
    os.makedirs(pred_stats_dir, exist_ok=True)
    os.makedirs(summary_dir, exist_ok=True)

    if len(all_alignment) > 1:
        plot_cross_scale_alignment(
            all_alignment, args.model_type,
            os.path.join(ca_dir, 'cross_scale_alignment.png'))

    if all_pred_stats:
        plot_pred_stats_bars(
            all_pred_stats, args.model_type,
            os.path.join(pred_stats_dir, 'pred_stats_bars.png'))
        plot_pred_stats_trajectory(
            all_pred_stats, args.model_type,
            os.path.join(pred_stats_dir, 'pred_stats_trajectory.png'))

    if all_sign_corrected:
        plot_summary_barplot(
            all_sign_corrected, all_alignment, args.model_type,
            os.path.join(summary_dir, 'summary_barplot.png'))

    # ── Summary CSV ───────────────────────────────────────────────────────────
    summary_rows = []
    for scale in available_scales:
        ps = next((p for p in all_pred_stats if p.get('scale') == scale), None)
        if ps is None:
            continue
        row = dict(ps)
        if scale in all_alignment:
            # Deepest recorded layer serves as the headline alignment number.
            max_layer = max(all_alignment[scale].keys())
            row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
            row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
        summary_rows.append(row)
    if summary_rows:
        csv_dir = os.path.join(merge_out, 'csv')
        os.makedirs(csv_dir, exist_ok=True)
        pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False)

    # ── Accuracy charts ───────────────────────────────────────────────────────
    if all_pred_stats:
        acc_dir = os.path.join(plots_dir, 'accuracy')
        logger.info("\n--- Accuracy Charts ---")
        run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir)

    # ── Unify y-axis ──────────────────────────────────────────────────────────
    logger.info("\n--- Unifying Y-axis ---")
    scale_dir_map = {s: _scale_dir(s) for s in available_scales}
    run_unify_ylim(scale_dir_map, plots_dir, args.model_type)

    # All-layer heatmaps + PCA are now done per-scale in process_scale(); skip here.

    logger.info(f"\n=== Merge Complete ===\nResults saved to: {merge_out}")
def main():
    """CLI entry point: parse arguments, then dispatch to merge mode or
    per-scale inference mode."""
    # Default scales per legacy model_type (new types use their own defaults)
    _LEGACY_DEFAULT_SCALES = {
        'molmo': ['vanilla', '80k', '400k', '800k', '2m'],
        'nvila': ['vanilla', '80k', '400k', '800k', '2m'],
        'qwen': ['vanilla', '80k', '400k', '800k', '2m'],
        'nvila_synthetic': ['80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '400k-5pct'],
    }

    parser = argparse.ArgumentParser(
        description='Swap Analysis — Spatial Representation Probing',
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--data_path', type=str,
                        default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv')
    parser.add_argument('--model_type', type=str, required=True,
                        choices=ALL_MODEL_TYPES,
                        help=(
                            'Legacy: molmo | nvila | qwen\n'
                            'Synthetic: nvila_synthetic\n'
                            'New large: molmo_big | qwen_big | qwen_super | big_trio\n'
                            'Merge-only (--merge required): molmo_all | qwen_all'
                        ))
    parser.add_argument('--scales', type=str, nargs='+', default=None,
                        help='Scales to process (default: all for the given model_type).')
    parser.add_argument('--output_dir', type=str, default=None,
                        help='Root for saved_data/. Defaults to '
                             '{script_dir}/{question_type}/saved_data.')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--merge', action='store_true',
                        help='Merge mode: generate cross-scale plots from saved per-scale data.')
    parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir',
                        help='(Deprecated) Override output dir for cross-scale plots. '
                             'Use --group-name instead.')
    parser.add_argument('--group-name', type=str, default=None, dest='group_name',
                        help='Folder name under compare/ for merged output. '
                             'Defaults to model_type.')
    parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer',
                        help='Disable automatic inclusion of roborefer scale for nvila.')
    parser.add_argument('--skip-cross-group', action='store_true')
    parser.add_argument('--max-samples-per-category', type=int, default=200,
                        dest='max_samples_per_category')
    parser.add_argument('--no-filtering', action='store_true', dest='no_filtering',
                        help='Disable Unknown/empty filtering for far/close reference objects.'
                             ' By default, Unknown candidates are removed before sampling.')
    parser.add_argument('--question-type', type=str, default='short_answer',
                        choices=['short_answer', 'mcq'], dest='question_type',
                        help='short_answer (default): "Answer with only one word." format; '
                             'mcq: MCQ A/B format with letter answers.')

    args = parser.parse_args()

    # ── Compute output_dir and log_dir from question_type ────────────────────
    _HERE_UPDATED = os.path.dirname(os.path.abspath(__file__))
    if args.output_dir is None:
        args.output_dir = os.path.join(_HERE_UPDATED, args.question_type, 'saved_data')
    log_dir = os.path.join(_HERE_UPDATED, args.question_type, 'logs')

    # ── Validate: merge-only types require --merge ────────────────────────────
    if args.model_type in MERGE_ONLY_CONFIGS and not args.merge:
        parser.error(
            f"'{args.model_type}' is a merge-only type. Add --merge to run it.\n"
            f" Example: python swap_analysis.py --model_type {args.model_type} --merge"
        )

    # ── Default scales ────────────────────────────────────────────────────────
    if args.scales is None:
        if args.model_type in MERGE_ONLY_CONFIGS:
            args.scales = MERGE_ONLY_CONFIGS[args.model_type]['scale_order']
        elif args.model_type in MODEL_CONFIGS_NEW:
            args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys())
        else:
            args.scales = _LEGACY_DEFAULT_SCALES.get(
                args.model_type, ['vanilla', '80k', '400k', '800k', '2m'])

    # Legacy nvila: auto-include roborefer
    if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer:
        args.scales.append('roborefer')

    # Seed all RNG sources for reproducible sampling.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    # ── Merge mode ───────────────────────────────────────────────────────────
    if args.merge:
        group_name = args.group_name or args.model_type
        log_path = _setup_file_logging(group_name, log_dir)
        logger.info(f"Logging to: {log_path}")
        logger.info("\n=== MERGE MODE ===")
        if args.model_type in MODEL_CONFIGS_NEW or args.model_type in MERGE_ONLY_CONFIGS:
            run_merge_extended(args)
        else:
            run_merge(args)
        return

    # ── Inference mode ────────────────────────────────────────────────────────
    logger.info("\n=== Loading & Creating Swap Pairs ===")
    swap_pairs = load_swap_pairs(args.data_path, args.seed,
                                 filter_unknown=not args.no_filtering,
                                 question_type=args.question_type)

    # Cross-group quads are optional; failure falls back to an empty list.
    quads = []
    if not args.skip_cross_group:
        try:
            hf_cache = build_hf_bbox_cache()
            quads = create_cross_group_quads(swap_pairs, hf_cache,
                                             question_type=args.question_type)
        except Exception as e:
            logger.warning(f"Cross-group setup failed: {e}. Skipping.")
            quads = []

    # ── Resolve config for the chosen model_type ─────────────────────────────
    if args.model_type in MODEL_CONFIGS_NEW:
        model_configs = MODEL_CONFIGS_NEW[args.model_type]
    else:
        model_configs = MODEL_CONFIGS[args.model_type]

    for scale in args.scales:
        if scale not in model_configs:
            logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.")
            continue

        # Per-scale log file
        vlm_key = get_model_key(args.model_type, scale)
        log_path = _setup_file_logging(vlm_key, log_dir)
        logger.info(f"Logging to: {log_path}")

        # Validate model path exists (skip HF IDs that start with org/ prefix)
        if args.model_type in MODEL_CONFIGS_NEW:
            _, raw_path = model_configs[scale]
        else:
            raw_path = model_configs[scale]
        if not os.path.isabs(raw_path) and not raw_path.startswith(('Qwen/', 'allenai/')):
            if not os.path.exists(raw_path):
                logger.warning(f"Model path not found: {raw_path} (scale='{scale}'), skipping.")
                continue

        # One scale failing must not abort the remaining scales.
        try:
            process_scale(args, scale, swap_pairs, quads)
        except Exception as e:
            logger.error(f"Failed {args.model_type} - {scale}: {e}")
            import traceback
            traceback.print_exc()
            continue

    logger.info(f"\n{'='*60}")
    logger.info("=== All scales complete ===")
    logger.info(f"Results: {args.output_dir}")
    logger.info(f"{'='*60}")


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""
Swap Analysis: Minimal Pair Probing for Spatial Representations

Creates minimal pairs by swapping obj1<->obj2 in spatial questions:
  Original: "Is A to the left or right of B?" -> left
  Swapped:  "Is B to the left or right of A?" -> right

Supported model types
---------------------
  Legacy (Qwen2.5-VL-3B scale experiments):
    molmo | nvila | qwen
  New large models:
    molmo_big  : Molmo2-8B
    qwen_big   : Qwen3-VL-32B-Instruct
    qwen_super : Qwen3-VL-235B-A22B-Instruct
    big_trio   : Molmo2-8B + RoboRefer + Qwen3-VL-32B
  Merge-only (--merge required):
    molmo_all : molmo (vanilla→2m) + molmo_big (molmo2)
    qwen_all  : qwen (vanilla→2m) + qwen_big (qwen3_32b)

Usage examples
--------------
  # Legacy model (Qwen2.5-VL-3B scale)
  python swap_analysis.py --model_type qwen

  # New large model (Qwen3-VL-32B)
  conda run -n qwen3 python swap_analysis.py --model_type qwen_big

  # Cross-family merge (combine qwen + qwen_big results)
  conda run -n qwen3 python swap_analysis.py --model_type qwen_all --merge

Analyses:
  1. Difference vectors: delta = feature(swapped) - feature(original)
  2. Within-category delta consistency (do all left->right swaps point same direction?)
  3. Sign-corrected group consistency (align opposite categories by flipping)
  4. Cross-group delta alignment (delta_vertical vs delta_distance) for perspective bias
  5. Delta-based 6x6 similarity heatmap (mean delta per category as representation)
  6. Prediction stats visualization (bar chart + cross-scale trajectory)
  7. Both-correct filtering for delta analysis
  8. PCA visualization of per-sample embeddings
  9. Scaling effects on all of the above

Fixes applied:
  Fix 1: "Answer with only one word." appended to all prompts
  Fix 2: Synonym handling (below/beneath->under, near/nearby->close, distant->far)
  Fix 4: Cross-group quads index matching via string normalization
  Fix 5: Within-category + sign-corrected delta consistency (replaces wrong group-level)
  Fix 6: Prediction stats bar chart + cross-scale line plot
  Fix 7: Delta-based 6x6 heatmap and trajectory
  Fix 8: Category validity check + both-correct delta filtering
"""

# ── Standard library ─────────────────────────────────────────────────────────
import os
import sys
import json
import argparse
import base64
import logging
import random
import re
from io import BytesIO
from collections import defaultdict
from typing import Dict, List, Tuple, Optional, Any
from abc import ABC, abstractmethod

# ── Third-party ──────────────────────────────────────────────────────────────
import torch
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
import matplotlib
# Headless backend: must be selected before pyplot is imported.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
import seaborn as sns
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import PCA

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Directory of this script; used for logs/ and output defaults.
_HERE = os.path.dirname(os.path.abspath(__file__))

# ── Local HuggingFace cache helpers ──────────────────────────────────────────

# Shared on-disk HF hub cache searched before any online download.
HF_HUB_DIR = '/data/shared/Qwen/mydisk/huggingface/hub'
cache: '{model_path}'\n" + f" Expected at: {snapshots_dir}\n" + f" Will fall back to online HuggingFace Hub download.\n" + f" To cache locally first: python -c \"from huggingface_hub import snapshot_download; " + f"snapshot_download('{model_path}', cache_dir='{HF_HUB_DIR}')\"" + ) + return model_path + + +def _setup_file_logging(model_type: str) -> str: + """Attach a per-model-type FileHandler to the root logger. + + Writes to /logs/{model_type}.log (append mode). + Returns the log file path. + """ + log_dir = os.path.join(_HERE, 'logs') + os.makedirs(log_dir, exist_ok=True) + log_path = os.path.join(log_dir, f'{model_type}.log') + fh = logging.FileHandler(log_path, mode='a', encoding='utf-8') + fh.setLevel(logging.INFO) + fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) + logging.getLogger().addHandler(fh) + return log_path + + +# ============================================================================ +# Constants +# ============================================================================ + +CATEGORY_ORDER = ['left', 'right', 'above', 'below', 'far', 'close'] + +OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'below', 'below': 'above', + 'under': 'above', # short-mode vertical answer + 'far': 'close', 'close': 'far', +} + +# Opposite map for short-answer mode (vertical uses 'above'/'under', not 'above'/'below') +SHORT_OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'below', 'below': 'above', + 'far': 'close', 'close': 'far', +} + +GROUP_MAP = { + 'left': 'horizontal', 'right': 'horizontal', + 'above': 'vertical', 'below': 'vertical', + 'far': 'distance', 'close': 'distance', +} + +GROUP_ORDER = ['horizontal', 'vertical', 'distance'] + +# Fix 5: Canonical categories for sign-corrected consistency +CANONICAL_CATEGORIES = { + 'horizontal': 'left', + 'vertical': 'above', + 'distance': 'far', +} + +# Fix 2: Synonyms for answer matching +# 'below' is now primary; 'under'/'beneath' recognized as 
synonyms
# Words accepted as equivalent to a category keyword during answer matching.
SYNONYMS = {
    'below': ['under', 'beneath'],
    'close': ['near', 'nearby'],
    'far': ['distant'],
}

# ── MCQ question templates (option order alternated per pair for A/B bias control) ──
_Q_TAIL_MCQ = "Answer with a single letter A or B."
MCQ_TEMPLATES = {
    'horizontal': {
        'left_first': "Is the {obj1} to the left or right of the {obj2}? (A) left (B) right " + _Q_TAIL_MCQ,
        'right_first': "Is the {obj1} to the left or right of the {obj2}? (A) right (B) left " + _Q_TAIL_MCQ,
    },
    'vertical': {
        'above_first': "Is the {obj1} above or below the {obj2}? (A) above (B) below " + _Q_TAIL_MCQ,
        'below_first': "Is the {obj1} above or below the {obj2}? (A) below (B) above " + _Q_TAIL_MCQ,
    },
    'distance': {
        'far_first': "Compared to {ref}, is {subj} far or close from you? (A) far (B) close " + _Q_TAIL_MCQ,
        'close_first': "Compared to {ref}, is {subj} far or close from you? (A) close (B) far " + _Q_TAIL_MCQ,
    },
}
# Per MCQ variant: {category -> correct lowercase letter}; consumed by check_answer().
MCQ_LETTER = {
    'horizontal': {
        'left_first': {'left': 'a', 'right': 'b'},
        'right_first': {'left': 'b', 'right': 'a'},
    },
    'vertical': {
        'above_first': {'above': 'a', 'below': 'b'},
        'below_first': {'above': 'b', 'below': 'a'},
    },
    'distance': {
        'far_first': {'far': 'a', 'close': 'b'},
        'close_first': {'far': 'b', 'close': 'a'},
    },
}

# Fixed color per training scale / model so every plot uses the same palette.
SCALE_COLORS = {
    'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c',
    '800k': '#d62728', '2m': '#9467bd', 'roborefer': '#8c564b',
    # New large models
    'molmo2': '#17becf',      # cyan
    'qwen3_32b': '#bcbd22',   # yellow-green
    'qwen3_235b': '#e377c2',  # pink
    # Synthetic-mix NVILA at 80k scale (shades of teal, light→dark by mix ratio)
    '80k-5pct': '#b2dfdb',    # very light teal
    '80k-10pct': '#00b894',   # teal
    '80k-20pct': '#00897b',   # darker teal
    '80k-30pct': '#004d40',   # deep teal
    # Synthetic-mix NVILA at 400k scale
    '400k-5pct': '#66bb6a',   # light green (near 400k's #2ca02c)
}

# Canonical scale ordering used by accuracy/ylim plots (add new scales here to control x-axis)
SCALE_ORDER = [
    'vanilla', '80k', '80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct',
    '400k', '400k-5pct', '800k', '2m', 'roborefer',
    'molmo2', 'qwen3_32b', 'qwen3_235b',
]

# Human-readable legend labels (only entries that differ from the key are needed)
SCALE_DISPLAY_NAMES = {
    '80k-5pct': '80k 5%',
    '80k-10pct': '80k 10%',
    '80k-20pct': '80k 20%',
    '80k-30pct': '80k 30%',
    '400k-5pct': '400k 5%',
}
# Category colors aligned with group: horizontal=orange, vertical=green, distance=purple
CAT_COLORS = {
    'left': '#ff7f0e', 'right': '#ffbb78',   # horizontal → orange
    'above': '#2ca02c', 'below': '#98df8a',  # vertical → green
    'far': '#9467bd', 'close': '#c5b0d5',    # distance → purple
}
GROUP_COLORS = {
    'horizontal': '#ff7f0e',
    'vertical': '#2ca02c',
    'distance': '#9467bd',
}

# Short-answer (non-MCQ) question templates
SHORT_TEMPLATES = {
    'horizontal': "Is the {obj1} to the left or right of the {obj2}? Answer with only one word.",
    'vertical': "Is the {obj1} above or below the {obj2}? Answer with only one word.",
    'distance': "Compared to {ref}, is {subj} far or close from you? Answer with only one word.",
}

# Legacy model checkpoints: model_type -> {scale -> HF ID or local checkpoint path}.
MODEL_CONFIGS = {
    'molmo': {
        'vanilla': 'allenai/Molmo-7B-O-0924',
        '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared',
        '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared',
        '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared',
        '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared',
    },
    'nvila': {
        'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B',
        '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221',
        '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221',
        '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221',
        '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632',
        # Alternate single-run checkpoints (kept for reference):
        # '80k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-1250',
        # '400k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-6250',
        # '800k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-12500',
        # '2m': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-31250',
        'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model',
    },
    'qwen': {
        'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct',
        '80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221',
        '400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221',
        '800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221',
        '2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517',
    },
    # NVILA trained with synthetic data mixed in at different ratios
    'nvila_synthetic': {
        '80k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-1250',
        '80k-10pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_10PCT_80K-20260224_234537',
        '80k-20pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_20PCT_80K-20260224_232347',
        '80k-30pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_30PCT_80K-20260224_232347',
        '400k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-6250',
    },
}

# ── New large / cross-family models ────────────────────────────────────────
# Each scale maps to (ExtractorClassName, HF-model-ID-or-absolute-path).
# resolve_local_path() converts HF IDs to local snapshot dirs when cached.
MODEL_CONFIGS_NEW = {
    'molmo_big': {
        'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'),
    },
    'qwen_big': {
        'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'),
    },
    'qwen_super': {
        'qwen3_235b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-235B-A22B-Instruct'),
    },
    'big_trio': {
        'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'),
        'roborefer': ('RoboReferExtractor', '/data/shared/Qwen/mydisk/RoboRefer_model'),
        'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'),
    },
}

# ── Merge-only: combine existing per-scale data from multiple source dirs ─────
# These model types cannot be run directly (--merge required); each scale is
# read back from the result directory of the model type named in scale_sources.
MERGE_ONLY_CONFIGS = {
    'molmo_all': {
        'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'molmo2'],
        'scale_sources': {
            'vanilla': 'molmo', '80k': 'molmo', '400k': 'molmo',
            '800k': 'molmo', '2m': 'molmo', 'molmo2': 'molmo_big',
        },
        'required_dirs': ['molmo', 'molmo_big'],
    },
    'qwen_all': {
        'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'qwen3_32b'],
        'scale_sources': {
            'vanilla': 'qwen', '80k': 'qwen', '400k': 'qwen',
            '800k': 'qwen', '2m': 'qwen', 'qwen3_32b': 'qwen_big',
        },
        'required_dirs': ['qwen', 'qwen_big'],
    },
    # Compare NVILA baselines against synthetic-mix checkpoints
    'nvila_synth_compare': {
        'scale_order': ['vanilla', '80k', '80k-5pct', '80k-10pct', '400k', '400k-5pct'],
        'scale_sources': {
            'vanilla': 'nvila', '80k': 'nvila',
            '80k-5pct': 'nvila_synthetic', '80k-10pct': 'nvila_synthetic',
            '400k': 'nvila',
            '400k-5pct': 'nvila_synthetic',
        },
        'required_dirs': ['nvila', 'nvila_synthetic'],
    },
}

# Default scale run order for new runnable types
SCALE_ORDERS_NEW = {
    'molmo_big': ['molmo2'],
    'qwen_big': ['qwen3_32b'],
    'qwen_super': ['qwen3_235b'],
    'big_trio': ['molmo2', 'roborefer', 'qwen3_32b'],
}

# Every accepted --model_type value (runnable legacy + runnable new + merge-only).
ALL_MODEL_TYPES = (
    list(MODEL_CONFIGS.keys())
    + list(MODEL_CONFIGS_NEW.keys())
    + list(MERGE_ONLY_CONFIGS.keys())
)


# ============================================================================
# Data Loading & Swap Pair Creation
# ============================================================================

# Regexes that pull the two object mentions out of an EmbSpatialBench question;
# tried in order, first match wins.
OBJECT_PATTERNS = [
    re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE),
    re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE),
    re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
]


def extract_objects(question: str) -> Tuple[str, str]:
    """Return the (obj1, obj2) mentions extracted from a benchmark question.

    Raises:
        ValueError: if no OBJECT_PATTERNS regex matches the question.
    """
    for pattern in OBJECT_PATTERNS:
        m = pattern.search(question)
        if m:
            return m.group(1).strip(), m.group(2).strip()
    raise ValueError(f"Could not extract objects from: {question}")


def decode_base64_image(base64_str: str) -> Image.Image:
    """Decode a base64 string into an RGB PIL image."""
    image_data = base64.b64decode(base64_str)
    return Image.open(BytesIO(image_data)).convert('RGB')


# ============================================================================
# Answer Matching (Fix 2: synonym support)
# ============================================================================

def find_earliest_position(text: str, word: str) -> int:
    """Find earliest position of word or any of its synonyms in text; -1 if absent."""
    positions = []
    pos = text.find(word)
    if pos != -1:
        positions.append(pos)
    for syn in SYNONYMS.get(word, []):
        pos = text.find(syn)
        if pos != -1:
            positions.append(pos)
    return min(positions) if positions else -1


def check_answer(generated_text: str, expected_category: str, mcq_map: Optional[dict] = None) -> bool:
    """Return True iff the model's free-form answer matches the expected category.

    Matching strategy: for MCQ questions, a bare letter response ("A", "A.",
    "A)", "A,") is resolved immediately via mcq_map; otherwise the earliest
    occurrence of the expected keyword (or a SYNONYMS entry, or an inline
    "(a)"/"(b)" marker) is compared against the earliest occurrence of the
    opposite keyword — whichever is mentioned first wins.
    """
    if not generated_text or not generated_text.strip():
        return False
    text = generated_text.strip().lower()
    expected = expected_category.lower()
    opposite = OPPOSITE_MAP[expected]

    if mcq_map:
        exp_letter = mcq_map.get(expected)
        opp_letter = mcq_map.get(opposite)
        # Standalone letter response (e.g. "A", "A.", "A)", "B")
        if exp_letter and text in (exp_letter, exp_letter+'.', exp_letter+')', exp_letter+','):
            return True
        if opp_letter and text in (opp_letter, opp_letter+'.', opp_letter+')', opp_letter+','):
            return False
    else:
        exp_letter = opp_letter = None

    # MCQ inline pattern "(a)"/"(b)" — variant-aware
    mcq_exp = f'({exp_letter})' if exp_letter else None
    mcq_opp = f'({opp_letter})' if opp_letter else None

    # NOTE(review): this duplicates find_earliest_position() plus the optional
    # MCQ "(letter)" pattern — consider folding the two helpers together.
    def earliest_with_mcq(word, mcq_pat=None):
        positions = []
        pos = text.find(word)
        if pos != -1:
            positions.append(pos)
        for syn in SYNONYMS.get(word, []):
            pos = text.find(syn)
            if pos != -1:
                positions.append(pos)
        if mcq_pat:
            pos = text.find(mcq_pat)
            if pos != -1:
                positions.append(pos)
        return min(positions) if positions else -1

    pos_exp = earliest_with_mcq(expected, mcq_exp)
    pos_opp = earliest_with_mcq(opposite, mcq_opp)
    if pos_exp == -1:
        return False
    if pos_opp == -1:
        return True
    # Both mentioned: the one the model said first is taken as its answer.
    return pos_exp < pos_opp


# ============================================================================
# Swap Pair Loading (Fix 1: prompt suffix)
# ============================================================================

def load_swap_pairs(tsv_path: str, seed: int = 42, filter_unknown: bool = 
True,
                    question_type: str = 'mcq') -> List[dict]:
    """Load EmbSpatialBench TSV and create swap pairs for all samples.

    Args:
        tsv_path: path to the benchmark TSV (tab-separated, base64 images).
        seed: seeds the local RNG used to sample a distance reference object.
        filter_unknown: If True (default), skip far/close pairs where target_object
            is Unknown/empty, and remove Unknown/empty values from reference_object
            candidates before sampling. Pairs with no valid candidates are dropped.
            Use --no-filtering to disable.
        question_type: 'mcq' (default) uses MCQ A/B templates with letter answers;
            'short' uses the original "Answer with only one word." format.
    """
    rng = random.Random(seed)
    df = pd.read_csv(tsv_path, sep='\t')

    pairs = []
    stats = defaultdict(lambda: {'total': 0, 'success': 0})

    # A value counts as a usable object name only if non-empty and not a placeholder.
    def _valid_obj(v):
        return bool(v) and str(v).strip().lower() not in ('unknown', 'n/a', '')

    for _, row in df.iterrows():
        category = row['category']
        stats[category]['total'] += 1

        try:
            if category in ['left', 'right', 'above', 'under', 'below']:
                obj1, obj2 = extract_objects(row['question'])
                if category in ['left', 'right']:
                    grp = 'horizontal'
                else:
                    grp = 'vertical'

                if question_type == 'short':
                    # Single-word format; normalize 'under' → 'below'
                    if category == 'under':
                        category = 'below'
                    tmpl = SHORT_TEMPLATES[grp]
                    pair = {
                        'index': row['index'],
                        'question_id': str(row['question_id']),
                        'image_base64': row['image'],
                        'original_question': tmpl.format(obj1=obj1, obj2=obj2),
                        'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
                        'original_answer': category,
                        'swapped_answer': SHORT_OPPOSITE_MAP[category],
                        'group': grp,
                        'category': category,
                        'obj1': obj1, 'obj2': obj2,
                        'mcq_map': None,
                    }
                else:
                    # MCQ format; normalize 'under' → 'below'
                    if category == 'under':
                        category = 'below'
                    # Alternate option order per pair so A/B position bias cancels out.
                    variant = ('left_first' if grp == 'horizontal' else 'above_first') \
                        if len(pairs) % 2 == 0 else \
                        ('right_first' if grp == 'horizontal' else 'below_first')
                    tmpl = MCQ_TEMPLATES[grp][variant]
                    mcq_map = MCQ_LETTER[grp][variant]
                    pair = {
                        'index': row['index'],
                        'question_id': str(row['question_id']),
                        'image_base64': row['image'],
                        'original_question': tmpl.format(obj1=obj1, obj2=obj2),
                        'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
                        'original_answer': category,
                        'swapped_answer': OPPOSITE_MAP[category],
                        'group': GROUP_MAP[category],
                        'category': category,
                        'obj1': obj1, 'obj2': obj2,
                        'mcq_map': mcq_map,
                    }

            elif category in ['far', 'close']:
                # Distance rows are multiple-choice in the TSV: the answer option
                # is the target, one of the remaining options becomes the reference.
                answer_key = row['answer']
                options = {k: row[k] for k in ['A', 'B', 'C', 'D']}
                target_object = options[answer_key]
                candidates = [v for k, v in options.items() if k != answer_key]

                if filter_unknown:
                    if not _valid_obj(target_object):
                        continue
                    candidates = [v for v in candidates if _valid_obj(v)]
                    if not candidates:
                        continue

                reference_object = rng.choice(candidates)

                if question_type == 'short':
                    tmpl = SHORT_TEMPLATES['distance']
                    pair = {
                        'index': row['index'],
                        'question_id': str(row['question_id']),
                        'image_base64': row['image'],
                        'original_question': tmpl.format(ref=reference_object, subj=target_object),
                        'swapped_question': tmpl.format(ref=target_object, subj=reference_object),
                        'original_answer': category,
                        'swapped_answer': OPPOSITE_MAP[category],
                        'group': 'distance',
                        'category': category,
                        'target_object': target_object,
                        'reference_object': reference_object,
                        'mcq_map': None,
                    }
                else:
                    variant = 'far_first' if len(pairs) % 2 == 0 else 'close_first'
                    tmpl = MCQ_TEMPLATES['distance'][variant]
                    mcq_map = MCQ_LETTER['distance'][variant]
                    pair = {
                        'index': row['index'],
                        'question_id': str(row['question_id']),
                        'image_base64': row['image'],
                        'original_question': tmpl.format(ref=reference_object, subj=target_object),
                        'swapped_question': tmpl.format(ref=target_object, subj=reference_object),
                        'original_answer': category,
                        'swapped_answer': OPPOSITE_MAP[category],
                        'group': 'distance',
                        'category': category,
                        'target_object': target_object,
                        'reference_object': reference_object,
                        'mcq_map': mcq_map,
                    }
            else:
                # Unrecognized category: count in 'total' but create no pair.
                continue

            pairs.append(pair)
            stats[category]['success'] += 1

        except Exception as e:
            # Best-effort: a malformed row is logged and skipped, not fatal.
            logger.warning(f"Failed to create swap pair for index {row['index']}: {e}")
            continue

    logger.info("Swap pair creation stats:")
    for cat in CATEGORY_ORDER:
        s = stats[cat]
        logger.info(f" {cat}: {s['success']}/{s['total']}")
    logger.info(f" Total pairs: {len(pairs)}")

    return pairs


# ============================================================================
# HF Bbox Cache (Fix 4: string-normalized keys)
# ============================================================================

def build_hf_bbox_cache(hf_dataset_name: str = 'FlagEval/EmbSpatial-Bench') -> Dict[str, dict]:
    """Load HF dataset and build bbox lookup cache keyed by string-normalized question_id."""
    from datasets import load_dataset
    logger.info(f"Loading HF dataset: {hf_dataset_name}")
    ds = load_dataset(hf_dataset_name, split='test')

    cache = {}
    for item in ds:
        # Fix 4: Normalize key to string for consistent matching
        qid = str(item['question_id'])
        cache[qid] = {
            'objects': item['objects'],
            'relation': item['relation'],
            'data_source': item['data_source'],
            'answer': item['answer'],
            'answer_options': item['answer_options'],
        }

    # Fix 4: Log sample keys for debugging
    sample_keys = list(cache.keys())[:5]
    logger.info(f"Built bbox cache: {len(cache)} entries (sample keys: {sample_keys})")
    return cache


def get_bbox_center_y(bbox: list) -> float:
    # Assumes bbox is [x, y, w, h] (center-y = top + half height) — TODO confirm
    # against the HF dataset's bbox convention.
    return bbox[1] + bbox[3] / 2


def create_cross_group_quads(
    swap_pairs: List[dict],
    hf_cache: Dict[str, dict],
    threshold_ratio: float = 0.05,
    question_type: str = 'mcq',
) -> List[dict]:
    """For far/close swap pairs, create additional vertical queries using bbox."""
    # Per-source image heights used to scale the vertical-ambiguity threshold.
    IMAGE_HEIGHTS = {'ai2thor': 300, 'mp3d': 480, 'scannet': 968}

    quads = []
    stats = {'total': 0, 'matched': 0, 'ambiguous': 0, 'no_bbox': 0}

    distance_pairs = [p for p in swap_pairs if p['group'] == 'distance']

    # Fix 4: Use question_id (e.g. 'mp3d_0') to match HF dataset, not integer index
    n_matched_keys = sum(1 for p in distance_pairs if p['question_id'] in hf_cache)
    logger.info(f"Matched {n_matched_keys}/{len(distance_pairs)} question_ids between TSV and HF dataset")

    for pair in distance_pairs:
        stats['total'] += 1
        qid = pair['question_id']

        if qid not in hf_cache:
            stats['no_bbox'] += 1
            continue

        hf_item = hf_cache[qid]
        names = hf_item['objects']['name']
        bboxes = hf_item['objects']['bbox']

        target = pair['target_object']
        reference = pair['reference_object']

        # Find the bbox center-y of both objects by exact name match.
        target_bbox_y, ref_bbox_y = None, None
        for name, bbox in zip(names, bboxes):
            if name == target:
                target_bbox_y = get_bbox_center_y(bbox)
            if name == reference:
                ref_bbox_y = get_bbox_center_y(bbox)

        if target_bbox_y is None or ref_bbox_y is None:
            stats['no_bbox'] += 1
            continue

        image_height = IMAGE_HEIGHTS.get(hf_item['data_source'], 480)
        threshold = image_height * threshold_ratio
        y_diff = target_bbox_y - ref_bbox_y

        # Skip pairs whose vertical separation is within the ambiguity band.
        if abs(y_diff) < threshold:
            stats['ambiguous'] += 1
            continue

        # Smaller y is higher in image coordinates.
        if target_bbox_y < ref_bbox_y:
            vert_original_answer = 'above'
        else:
            vert_original_answer = 'below'

        if question_type == 'short':
            vert_tmpl = SHORT_TEMPLATES['vertical']
            vert_mcq_map = None
            vert_original_q = vert_tmpl.format(obj1=target, obj2=reference)
            vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target)
            vert_swapped_answer = SHORT_OPPOSITE_MAP[vert_original_answer]
        else:
            # Alternate MCQ option order per quad (A/B bias control).
            vert_variant = 'above_first' if len(quads) % 2 == 0 else 'below_first'
            vert_tmpl = MCQ_TEMPLATES['vertical'][vert_variant]
            vert_mcq_map = MCQ_LETTER['vertical'][vert_variant]
            vert_original_q = vert_tmpl.format(obj1=target, obj2=reference)
            vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target)
            vert_swapped_answer = OPPOSITE_MAP[vert_original_answer]

        quad = {
            'index': pair['index'],
            'image_base64': pair['image_base64'],
            'dist_original_q': pair['original_question'],
            'dist_swapped_q': pair['swapped_question'],
            'dist_original_answer': pair['original_answer'],
            'dist_swapped_answer': pair['swapped_answer'],
            'dist_mcq_map': pair['mcq_map'],
            'vert_original_q': vert_original_q,
            'vert_swapped_q': vert_swapped_q,
            'vert_original_answer': vert_original_answer,
            'vert_swapped_answer': vert_swapped_answer,
            'vert_mcq_map': vert_mcq_map,
            'target_object': target,
            'reference_object': reference,
            'target_bbox_y': target_bbox_y,
            'ref_bbox_y': ref_bbox_y,
            'y_diff': y_diff,
            'data_source': hf_item['data_source'],
        }
        quads.append(quad)
        stats['matched'] += 1

    logger.info(f"Cross-group quads: {stats['matched']}/{stats['total']} "
                f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})")
    return quads


# ============================================================================
# Base Extractor
# ============================================================================

class BaseHiddenStateExtractor(ABC):
    """Abstract base for per-model hidden-state extractors.

    Loads the model, registers forward hooks on the requested decoder layers,
    and captures the last-token hidden state of each hooked layer during the
    prefill pass of generation.
    """

    def __init__(self, model_path: str, device: str = 'cuda', target_layers: Optional[List[int]] = None):
        self.model_path = model_path
        self.device = device
        self.hidden_states = {}  # layer_idx -> last-token hidden state (CPU float tensor)
        self.hooks = []          # forward-hook handles, removed by cleanup()
        self._load_model()
        num_layers = self._get_num_layers()
        if target_layers is None:
            self.target_layers = list(range(num_layers))
            logger.info(f"Model has {num_layers} layers. Extracting ALL.")
        else:
            self.target_layers = target_layers
        self._register_hooks()

    def _register_hooks(self):
        # Hook every requested layer; _get_layer_module may return None for
        # layers the concrete model does not expose.
        for layer_idx in self.target_layers:
            module = self._get_layer_module(layer_idx)
            if module is not None:
                hook = module.register_forward_hook(self._make_hook(layer_idx))
                self.hooks.append(hook)

    def _make_hook(self, layer_idx: int):
        """Build a forward hook that stores the layer's last-token hidden state."""
        def hook_fn(module, input, output):
            # Decoder layers may return a tuple (hidden_states, ...) or a tensor.
            if isinstance(output, tuple):
                hidden = output[0]
            else:
                hidden = output
            if hidden.shape[1] > 1:  # prefill only
                last_token = hidden[:, -1, :].detach().cpu().float()
                self.hidden_states[layer_idx] = last_token.squeeze(0)
        return hook_fn

    @abstractmethod
    def _load_model(self): pass
    @abstractmethod
    def _get_num_layers(self) -> int: pass
    @abstractmethod
    def _get_layer_module(self, layer_idx: int): pass
    @abstractmethod
    def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass

    def cleanup(self):
        """Remove hooks, drop model/processor references, and free CUDA memory."""
        for hook in self.hooks:
            hook.remove()
        self.hooks = []
        if hasattr(self, 'model'):
            del self.model
        if hasattr(self, 'processor'):
            del self.processor
        torch.cuda.empty_cache()


# ============================================================================
# Molmo Extractor
# ============================================================================

class MolmoExtractor(BaseHiddenStateExtractor):
    """Extractor for Molmo checkpoints (native OLMo format or HF format)."""

    def _load_model(self):
        # A native checkpoint directory contains config.yaml + model.pt;
        # anything else is treated as a HuggingFace-format model.
        config_path = os.path.join(self.model_path, "config.yaml")
        checkpoint_path = os.path.join(self.model_path, "model.pt")
        if os.path.exists(config_path) and os.path.exists(checkpoint_path):
            self._load_native_model()
            self.is_native = True
        else:
            self._load_hf_model()
            self.is_native = False

    def _load_native_model(self):
        from olmo.config import ModelConfig
        from olmo.model import Molmo as NativeMolmoModel
        from olmo.data.model_preprocessor import MultiModalPreprocessor
        from olmo.data.data_formatter import DataFormatter

        # Patch torch.load to default weights_only=False: the native checkpoint
        # stores pickled objects that newer torch versions refuse by default.
        _original_load = 
torch.load
        def _unsafe_load_wrapper(*args, **kwargs):
            if 'weights_only' not in kwargs:
                kwargs['weights_only'] = False
            return _original_load(*args, **kwargs)
        torch.load = _unsafe_load_wrapper

        cfg = ModelConfig.load(
            os.path.join(self.model_path, "config.yaml"),
            key="model", validate_paths=False
        )
        cfg.init_device = "cpu"
        self.model = NativeMolmoModel(cfg)
        state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu")
        self.model.load_state_dict(state_dict)
        self.model = self.model.to(self.device, dtype=torch.bfloat16).eval()
        self.tokenizer = cfg.get_tokenizer()

        v_cfg = cfg.vision_backbone
        h, w = cfg.llm_patches_per_crop()
        image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None)

        # Guard against a None prompt style coming from the config by
        # substituting the "User" style before delegating.
        class SafeDataFormatter(DataFormatter):
            def get_system_prompt(self, style, for_inference, messages, rng=None):
                if style is None:
                    style = "User"
                return super().get_system_prompt(style, for_inference, messages, rng)

        self.formatter = SafeDataFormatter(
            prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting,
            system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space,
            default_inference_len=cfg.default_inference_len
        )
        self.preprocessor = MultiModalPreprocessor(
            tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type),
            crop_mode=cfg.crop_mode, max_crops=cfg.max_crops,
            overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode,
            use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size,
            image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h,
            image_token_length_w=w, image_token_length_h=h,
            image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask,
            pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting,
        )
        logger.info(f"Loaded native Molmo from {self.model_path}")

    def _load_hf_model(self):
        from transformers import AutoModelForCausalLM, AutoProcessor
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path, torch_dtype=torch.bfloat16,
            trust_remote_code=True, device_map=self.device
        ).eval()
        self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
        logger.info(f"Loaded HF Molmo from {self.model_path}")

    def _get_num_layers(self) -> int:
        if self.is_native:
            return len(self.model.transformer.blocks)
        if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
            return len(self.model.model.transformer.blocks)
        return 32  # fallback when the HF wrapper does not expose the blocks

    def _get_layer_module(self, layer_idx: int):
        # Native model exposes blocks directly; the HF wrapper nests them one level deeper.
        if self.is_native:
            return self.model.transformer.blocks[layer_idx]
        return self.model.model.transformer.blocks[layer_idx]

    def extract_and_predict(self, image, question):
        """Run generation on (image, question); hooks capture per-layer hidden states."""
        self.hidden_states = {}
        if self.is_native:
            example = {"messages": [question], "image": image}
            messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
            batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True)
            if 'input_ids' not in batch and 'input_tokens' in batch:
                batch['input_ids'] = batch['input_tokens']

            def to_t(x):
                return torch.from_numpy(x) if isinstance(x, np.ndarray) else x

            input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long()
            images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
            image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
            image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device)

            with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
                gen = self.model.generate(
                    input_ids=input_ids, images=images_t,
                    image_masks=image_masks, image_input_idx=image_input_idx,
                    max_steps=20, beam_size=1,
                )
            generated_ids = gen.token_ids[0, 0]
            answer = self.tokenizer.decode(generated_ids.tolist()).strip()
            for eos in 
['<|endoftext|>', '', '<|end|>']: + answer = answer.replace(eos, '').strip() + else: + from transformers import GenerationConfig + inputs = self.processor.process(images=[image], text=question) + processed = {} + for k, v in inputs.items(): + v = v.to(self.device).unsqueeze(0) + if v.dtype == torch.float32: + v = v.to(dtype=torch.bfloat16) + processed[k] = v + with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16): + output = self.model.generate_from_batch( + processed, + GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"), + tokenizer=self.processor.tokenizer, + ) + input_len = processed['input_ids'].shape[1] + answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip() + + return self.hidden_states.copy(), answer + + +# ============================================================================ +# NVILA Extractor +# ============================================================================ + +class NVILAExtractor(BaseHiddenStateExtractor): + def _load_model(self): + original_sys_path = sys.path.copy() + sys.path = [p for p in sys.path if 'RoboRefer' not in p] + modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] + removed = {m: sys.modules.pop(m) for m in modules_to_remove} + try: + import llava + from llava.media import Image as LLaVAImage + from llava import conversation as clib + except Exception as err: + sys.path = original_sys_path + for m, mod in removed.items(): + sys.modules[m] = mod + raise RuntimeError(f"Failed to import llava: {err}") + sys.path = original_sys_path + self.LLaVAImage = LLaVAImage + self.clib = clib + self.model = llava.load(self.model_path, model_base=None) + self._find_llm_backbone() + logger.info(f"Loaded NVILA from {self.model_path}") + + def _find_llm_backbone(self): + candidates = [] + if hasattr(self.model, 'llm'): + if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'): + 
candidates.append(self.model.llm.model.layers) + if hasattr(self.model.llm, 'layers'): + candidates.append(self.model.llm.layers) + if hasattr(self.model, 'model'): + if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'): + candidates.append(self.model.model.model.layers) + if hasattr(self.model.model, 'layers'): + candidates.append(self.model.model.layers) + for name, module in self.model.named_modules(): + if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0: + candidates.append(module) + if candidates: + self.llm_backbone = candidates[0] + else: + raise ValueError("Could not locate transformer layers in NVILA model") + + def _get_num_layers(self) -> int: + return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24 + + def _get_layer_module(self, layer_idx: int): + return self.llm_backbone[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + import tempfile + with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f: + temp_path = f.name + image.save(temp_path) + try: + prompt = [self.LLaVAImage(temp_path), question] + from transformers import GenerationConfig + response = self.model.generate_content( + prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False) + ) + finally: + os.unlink(temp_path) + answer = str(response[0] if isinstance(response, list) else response).strip() + return self.hidden_states.copy(), answer + + +class RoboReferExtractor(NVILAExtractor): + ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer' + + def _load_model(self): + original_sys_path = sys.path.copy() + if self.ROBOREFER_PATH not in sys.path: + sys.path.insert(0, self.ROBOREFER_PATH) + modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] + removed = {m: sys.modules.pop(m) for m in modules_to_remove} + try: + import llava + from llava.media import Image as LLaVAImage + from llava import conversation as clib + except Exception 
as err: + sys.path = original_sys_path + for m, mod in removed.items(): + sys.modules[m] = mod + raise RuntimeError(f"Failed to import RoboRefer llava: {err}") + sys.path = original_sys_path + self.LLaVAImage = LLaVAImage + self.clib = clib + self.model = llava.load(self.model_path, model_base=None) + self._find_llm_backbone() + logger.info(f"Loaded RoboRefer from {self.model_path}") + + +# ============================================================================ +# Qwen2.5-VL Extractor +# ============================================================================ + +class Qwen25VLExtractor(BaseHiddenStateExtractor): + BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct" + + def _load_model(self): + from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor + try: + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16, device_map=self.device + ) + except ImportError: + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16 + ).to(self.device) + self.model.eval() + if self.model_path.startswith('/'): + self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL) + else: + self.processor = AutoProcessor.from_pretrained(self.model_path) + logger.info(f"Loaded Qwen2.5-VL from {self.model_path}") + + def _get_num_layers(self) -> int: + return len(self.model.model.layers) + + def _get_layer_module(self, layer_idx: int): + return self.model.model.layers[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + messages = [{"role": "user", "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": question} + ]}] + text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + from qwen_vl_utils import process_vision_info + image_inputs, video_inputs = process_vision_info(messages) + inputs = self.processor( + text=[text], images=image_inputs, videos=video_inputs, 
+ padding=True, return_tensors="pt" + ).to(self.device) + with torch.no_grad(): + output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) + input_len = inputs['input_ids'].shape[1] + answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip() + return self.hidden_states.copy(), answer + + +# ============================================================================ +# New Extractors: Molmo2-8B and Qwen3-VL family +# ============================================================================ + +class Molmo2Extractor(BaseHiddenStateExtractor): + """Extractor for allenai/Molmo2-8B (AutoModelForImageTextToText, messages-dict input).""" + + def _load_model(self): + from transformers import AutoProcessor, AutoModelForImageTextToText + self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) + self.model = AutoModelForImageTextToText.from_pretrained( + self.model_path, trust_remote_code=True, torch_dtype='auto', device_map='auto', + ).eval() + self._find_llm_layers() + logger.info(f"Loaded Molmo2 from {self.model_path}") + + def _find_llm_layers(self): + candidates = [ + ['model', 'layers'], + ['language_model', 'model', 'layers'], + ['model', 'model', 'layers'], + ] + for path in candidates: + obj = self.model + for attr in path: + obj = getattr(obj, attr, None) + if obj is None: + break + if obj is not None and hasattr(obj, '__len__') and len(obj) > 0: + self.llm_layers = obj + logger.info(f"Molmo2: layers at '{'.'.join(path)}', count={len(obj)}") + return + best, best_len = None, 0 + for name, module in self.model.named_modules(): + if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len: + best, best_len = module, len(module) + logger.info(f"Molmo2: layers via scan at '{name}', count={best_len}") + if best is not None: + self.llm_layers = best + return + raise ValueError("Could not find transformer layers in Molmo2 model") + + def 
_get_num_layers(self) -> int: + return len(self.llm_layers) + + def _get_layer_module(self, layer_idx: int): + return self.llm_layers[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + messages = [{"role": "user", "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": question}, + ]}] + inputs = self.processor.apply_chat_template( + messages, tokenize=True, add_generation_prompt=True, + return_tensors="pt", return_dict=True, + ) + inputs = {k: v.to(self.model.device) for k, v in inputs.items()} + with torch.inference_mode(): + generated_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) + input_len = inputs['input_ids'].shape[1] + answer = self.processor.tokenizer.decode( + generated_ids[0, input_len:], skip_special_tokens=True).strip() + return self.hidden_states.copy(), answer + + +class Qwen3VLExtractor(BaseHiddenStateExtractor): + """Extractor for Qwen3-VL family (32B dense, 235B MoE). + + Key differences from Qwen25VLExtractor: + - AutoModelForImageTextToText + trust_remote_code=True + - process_vision_info requires image_patch_size=16 + - processor call requires do_resize=False + - 32×32 px patches → different min/max_pixels + """ + + MIN_PIXELS = 256 * 32 * 32 # 262,144 (mp3d/scannet → natural res; ai2thor → ~256 tokens) + MAX_PIXELS = 16384 * 32 * 32 # 16,777,216 + + def _load_model(self): + from transformers import AutoProcessor, AutoModelForImageTextToText + self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) + self.model = AutoModelForImageTextToText.from_pretrained( + self.model_path, trust_remote_code=True, torch_dtype='auto', + device_map='auto', attn_implementation='flash_attention_2', + ).eval() + self._find_llm_layers() + logger.info(f"Loaded Qwen3-VL from {self.model_path}") + + def _find_llm_layers(self): + candidates = [ + ['model', 'language_model', 'model', 'layers'], # Qwen3-VL expected + ['language_model', 'model', 
'layers'], + ['model', 'model', 'layers'], + ['model', 'layers'], + ] + for path in candidates: + obj = self.model + for attr in path: + obj = getattr(obj, attr, None) + if obj is None: + break + if obj is not None and hasattr(obj, '__len__') and len(obj) > 0: + self.llm_layers = obj + logger.info(f"Qwen3-VL: layers at '{'.'.join(path)}', count={len(obj)}") + return + best, best_len = None, 0 + for name, module in self.model.named_modules(): + if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len: + best, best_len = module, len(module) + logger.info(f"Qwen3-VL: layers via scan at '{name}', count={best_len}") + if best is not None: + self.llm_layers = best + return + raise ValueError("Could not find transformer layers in Qwen3-VL model") + + def _get_num_layers(self) -> int: + return len(self.llm_layers) + + def _get_layer_module(self, layer_idx: int): + return self.llm_layers[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + messages = [{"role": "user", "content": [ + {"type": "image", "image": image, + "min_pixels": self.MIN_PIXELS, "max_pixels": self.MAX_PIXELS}, + {"type": "text", "text": question}, + ]}] + text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + from qwen_vl_utils import process_vision_info + images, videos, _ = process_vision_info( + messages, image_patch_size=16, return_video_kwargs=True, return_video_metadata=True, + ) + inputs = self.processor( + text=text, images=images, videos=videos, do_resize=False, return_tensors="pt", + ).to(self.model.device) + with torch.no_grad(): + output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) + input_len = inputs['input_ids'].shape[1] + answer = self.processor.tokenizer.decode( + output_ids[0, input_len:], skip_special_tokens=True).strip() + return self.hidden_states.copy(), answer + + +EXTRACTOR_CLASSES = { + 'MolmoExtractor': MolmoExtractor, + 'NVILAExtractor': 
NVILAExtractor, + 'RoboReferExtractor': RoboReferExtractor, + 'Qwen25VLExtractor': Qwen25VLExtractor, + 'Molmo2Extractor': Molmo2Extractor, + 'Qwen3VLExtractor': Qwen3VLExtractor, +} + + +def get_extractor(model_type: str, model_path: str = None, scale: str = None, **kwargs): + """Create an extractor for any model_type (legacy or new-large).""" + # New large models: (ExtractorClass, path) tuples in MODEL_CONFIGS_NEW + if model_type in MODEL_CONFIGS_NEW: + cls_name, raw_path = MODEL_CONFIGS_NEW[model_type][scale] + resolved = resolve_local_path(raw_path) + logger.info(f"Creating {cls_name} for scale='{scale}' from {resolved}") + return EXTRACTOR_CLASSES[cls_name](resolved, **kwargs) + # Legacy models + if model_type == 'nvila' and scale == 'roborefer': + return RoboReferExtractor(model_path, **kwargs) + legacy = { + 'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor, + 'nvila_synthetic': NVILAExtractor, + } + return legacy[model_type](model_path, **kwargs) + + +# ============================================================================ +# Feature Extraction Pipeline +# ============================================================================ + +def run_single_query(extractor, image, question): + hidden_states, predicted = extractor.extract_and_predict(image, question) + result = {} + for layer_idx in extractor.target_layers: + if layer_idx in hidden_states: + state = hidden_states[layer_idx].numpy().flatten() + if state.size > 0: + result[layer_idx] = state + return result, predicted + + +def extract_swap_features( + extractor: BaseHiddenStateExtractor, + swap_pairs: List[dict], + max_samples_per_category: int = 0, +) -> List[dict]: + """Extract features for all swap pairs.""" + rng = random.Random(42) + + if max_samples_per_category > 0: + grouped = defaultdict(list) + for p in swap_pairs: + grouped[p['category']].append(p) + limited = [] + for cat in CATEGORY_ORDER: + samples = grouped[cat] + if len(samples) > 
max_samples_per_category: + samples = rng.sample(samples, max_samples_per_category) + limited.extend(samples) + swap_pairs = limited + + records = [] + for pair in tqdm(swap_pairs, desc="Swap pairs"): + try: + image = decode_base64_image(pair['image_base64']) + hs_orig, pred_orig = run_single_query(extractor, image, pair['original_question']) + hs_swap, pred_swap = run_single_query(extractor, image, pair['swapped_question']) + + is_correct_orig = check_answer(pred_orig, pair['original_answer'], pair['mcq_map']) + is_correct_swap = check_answer(pred_swap, pair['swapped_answer'], pair['mcq_map']) + + delta = {} + for layer_idx in extractor.target_layers: + if layer_idx in hs_orig and layer_idx in hs_swap: + delta[layer_idx] = hs_swap[layer_idx] - hs_orig[layer_idx] + + record = { + 'index': pair['index'], + 'group': pair['group'], + 'category': pair['category'], + 'original_answer': pair['original_answer'], + 'swapped_answer': pair['swapped_answer'], + 'pred_orig': pred_orig, + 'pred_swap': pred_swap, + 'is_correct_orig': is_correct_orig, + 'is_correct_swap': is_correct_swap, + 'hs_orig': hs_orig, + 'hs_swap': hs_swap, + 'delta': delta, + } + records.append(record) + + mark_o = "O" if is_correct_orig else "X" + mark_s = "O" if is_correct_swap else "X" + logger.info(f" #{pair['index']:<6} {pair['category']:<6} " + f"orig[{mark_o}]=\"{pred_orig[:40]}\" swap[{mark_s}]=\"{pred_swap[:40]}\"" + + (f" [{len(records)}/{len(swap_pairs)}]" if len(records) % 50 == 0 else "")) + + except Exception as e: + logger.warning(f"Error on index {pair['index']}: {e}") + continue + + logger.info(f"Extracted {len(records)} swap pair records") + + # Fix 8: Per-category accuracy logging + for cat in CATEGORY_ORDER: + cat_recs = [r for r in records if r['category'] == cat] + n = len(cat_recs) + if n == 0: + continue + c_orig = sum(1 for r in cat_recs if r['is_correct_orig']) + c_swap = sum(1 for r in cat_recs if r['is_correct_swap']) + c_both = sum(1 for r in cat_recs if r['is_correct_orig'] 
and r['is_correct_swap']) + logger.info(f" {cat:>6s} (n={n}): acc_orig={c_orig/n:.1%}, acc_swap={c_swap/n:.1%}, " + f"acc_both={c_both/n:.1%}") + + return records + + +def extract_cross_group_features( + extractor: BaseHiddenStateExtractor, + quads: List[dict], +) -> List[dict]: + """Extract features for cross-group quads (4 forward passes each).""" + records = [] + for quad in tqdm(quads, desc="Cross-group quads"): + try: + image = decode_base64_image(quad['image_base64']) + hs_d_orig, pred_d_orig = run_single_query(extractor, image, quad['dist_original_q']) + hs_d_swap, pred_d_swap = run_single_query(extractor, image, quad['dist_swapped_q']) + hs_v_orig, pred_v_orig = run_single_query(extractor, image, quad['vert_original_q']) + hs_v_swap, pred_v_swap = run_single_query(extractor, image, quad['vert_swapped_q']) + + delta_dist, delta_vert = {}, {} + for layer_idx in extractor.target_layers: + if layer_idx in hs_d_orig and layer_idx in hs_d_swap: + delta_dist[layer_idx] = hs_d_swap[layer_idx] - hs_d_orig[layer_idx] + if layer_idx in hs_v_orig and layer_idx in hs_v_swap: + delta_vert[layer_idx] = hs_v_swap[layer_idx] - hs_v_orig[layer_idx] + + record = { + 'index': quad['index'], + 'delta_dist': delta_dist, + 'delta_vert': delta_vert, + 'pred_d_orig': pred_d_orig, 'pred_d_swap': pred_d_swap, + 'pred_v_orig': pred_v_orig, 'pred_v_swap': pred_v_swap, + 'is_correct_d_orig': check_answer(pred_d_orig, quad['dist_original_answer'], quad['dist_mcq_map']), + 'is_correct_d_swap': check_answer(pred_d_swap, quad['dist_swapped_answer'], quad['dist_mcq_map']), + 'is_correct_v_orig': check_answer(pred_v_orig, quad['vert_original_answer'], quad['vert_mcq_map']), + 'is_correct_v_swap': check_answer(pred_v_swap, quad['vert_swapped_answer'], quad['vert_mcq_map']), + 'data_source': quad['data_source'], + } + records.append(record) + + tqdm.write(f" #{quad['index']:<6} dist=[{pred_d_orig[:20]}/{pred_d_swap[:20]}] " + f"vert=[{pred_v_orig[:20]}/{pred_v_swap[:20]}]") + + except Exception 
as e: + logger.warning(f"Error on cross-group index {quad['index']}: {e}") + continue + + logger.info(f"Extracted {len(records)} cross-group quad records") + return records + + +# ============================================================================ +# Analysis Functions +# ============================================================================ + +# Fix 5: Within-category + sign-corrected delta consistency + +def compute_delta_consistency(records: List[dict], target_layers: List[int]): + """Compute TWO types of delta consistency. + + Returns: + within_cat_results: {(category, layer) -> {mean, std, n}} + sign_corrected_results: {(group, layer) -> {mean, std, n}} + """ + within_cat_results = {} + sign_corrected_results = {} + + for group in GROUP_ORDER: + canonical = CANONICAL_CATEGORIES[group] + opposite = OPPOSITE_MAP[canonical] + group_recs = [r for r in records if r['group'] == group] + + for layer in target_layers: + # (a) Within-category consistency + for cat in [canonical, opposite]: + cat_deltas = [r['delta'][layer] for r in group_recs + if r['category'] == cat and layer in r['delta']] + if len(cat_deltas) >= 2: + arr = np.array(cat_deltas) + sim = cosine_similarity(arr) + upper = sim[np.triu_indices(len(cat_deltas), k=1)] + within_cat_results[(cat, layer)] = { + 'mean': float(np.mean(upper)), + 'std': float(np.std(upper)), + 'n': len(cat_deltas), + } + + # (b) Sign-corrected group consistency + all_deltas = [] + for r in group_recs: + if layer not in r['delta']: + continue + d = r['delta'][layer] + if r['category'] == opposite: + d = -d # flip to align with canonical direction + all_deltas.append(d) + + if len(all_deltas) >= 2: + arr = np.array(all_deltas) + sim = cosine_similarity(arr) + upper = sim[np.triu_indices(len(all_deltas), k=1)] + sign_corrected_results[(group, layer)] = { + 'mean': float(np.mean(upper)), + 'std': float(np.std(upper)), + 'n': len(all_deltas), + } + + return within_cat_results, sign_corrected_results + + +# Fix 7: 
Delta-based similarity matrix + +def compute_delta_similarity_matrix(records: List[dict], layer: int) -> Optional[pd.DataFrame]: + """Compute 6x6 cosine similarity using mean delta per category.""" + cat_deltas = {} + for cat in CATEGORY_ORDER: + deltas = [r['delta'][layer] for r in records if r['category'] == cat and layer in r['delta']] + if deltas: + cat_deltas[cat] = np.mean(deltas, axis=0) + + available = [c for c in CATEGORY_ORDER if c in cat_deltas] + if len(available) < 2: + return None + + vectors = np.array([cat_deltas[c] for c in available]) + sim = cosine_similarity(vectors) + return pd.DataFrame(sim, index=available, columns=available) + + +# Fix 8: Both-correct filtering + +def filter_both_correct(records: List[dict]) -> List[dict]: + """Filter to pairs where both orig and swap predictions are correct.""" + return [r for r in records if r['is_correct_orig'] and r['is_correct_swap']] + + +# Fix 8: Category validity check + +def check_category_validity(records: List[dict], scale: str) -> Dict[str, dict]: + """Check per-category accuracy and flag unreliable categories.""" + validity = {} + for cat in CATEGORY_ORDER: + cat_recs = [r for r in records if r['category'] == cat] + n = len(cat_recs) + if n == 0: + validity[cat] = {'n': 0, 'acc_orig': 0, 'acc_swap': 0, 'reliable': False} + continue + acc_orig = sum(1 for r in cat_recs if r['is_correct_orig']) / n + acc_swap = sum(1 for r in cat_recs if r['is_correct_swap']) / n + reliable = acc_orig >= 0.5 and acc_swap >= 0.5 + validity[cat] = { + 'n': n, 'acc_orig': acc_orig, 'acc_swap': acc_swap, + 'reliable': reliable, + } + if not reliable: + logger.warning(f" [!] 
Category '{cat}' unreliable at scale={scale}: " + f"acc_orig={acc_orig:.1%}, acc_swap={acc_swap:.1%}") + return validity + + +def compute_cross_group_alignment(quad_records: List[dict], target_layers: List[int]) -> dict: + results = {} + for layer in target_layers: + per_sample = [] + delta_verts, delta_dists = [], [] + + for rec in quad_records: + if layer in rec['delta_vert'] and layer in rec['delta_dist']: + dv = rec['delta_vert'][layer] + dd = rec['delta_dist'][layer] + norm_v, norm_d = np.linalg.norm(dv), np.linalg.norm(dd) + if norm_v > 1e-10 and norm_d > 1e-10: + per_sample.append(float(np.dot(dv, dd) / (norm_v * norm_d))) + delta_verts.append(dv) + delta_dists.append(dd) + + if not per_sample: + continue + + mean_dv = np.mean(delta_verts, axis=0) + mean_dd = np.mean(delta_dists, axis=0) + norm_mv, norm_md = np.linalg.norm(mean_dv), np.linalg.norm(mean_dd) + mean_alignment = float(np.dot(mean_dv, mean_dd) / (norm_mv * norm_md + 1e-10)) + + rng = np.random.RandomState(42) + perm_alignments = [] + for _ in range(100): + shuffled_dd = [delta_dists[i] for i in rng.permutation(len(delta_dists))] + perm_cos = [] + for dv, dd in zip(delta_verts, shuffled_dd): + nv, nd = np.linalg.norm(dv), np.linalg.norm(dd) + if nv > 1e-10 and nd > 1e-10: + perm_cos.append(np.dot(dv, dd) / (nv * nd)) + perm_alignments.append(np.mean(perm_cos)) + + results[layer] = { + 'per_sample_mean': float(np.mean(per_sample)), + 'per_sample_std': float(np.std(per_sample)), + 'mean_delta_alignment': mean_alignment, + 'permutation_mean': float(np.mean(perm_alignments)), + 'permutation_std': float(np.std(perm_alignments)), + 'n_samples': len(per_sample), + } + return results + + +def compute_prediction_stats(records: List[dict], scale: str) -> dict: + stats = {'scale': scale} + total_correct_orig, total_correct_swap, total_both, total_n = 0, 0, 0, 0 + + for group in GROUP_ORDER: + group_recs = [r for r in records if r['group'] == group] + n = len(group_recs) + c_orig = sum(1 for r in group_recs 
if r['is_correct_orig']) + c_swap = sum(1 for r in group_recs if r['is_correct_swap']) + c_both = sum(1 for r in group_recs if r['is_correct_orig'] and r['is_correct_swap']) + stats[f'{group}_n'] = n + stats[f'{group}_acc_orig'] = c_orig / n if n > 0 else 0 + stats[f'{group}_acc_swap'] = c_swap / n if n > 0 else 0 + stats[f'{group}_acc_both'] = c_both / n if n > 0 else 0 + total_correct_orig += c_orig + total_correct_swap += c_swap + total_both += c_both + total_n += n + + stats['overall_acc_orig'] = total_correct_orig / total_n if total_n > 0 else 0 + stats['overall_acc_swap'] = total_correct_swap / total_n if total_n > 0 else 0 + stats['overall_acc_both'] = total_both / total_n if total_n > 0 else 0 + stats['overall_n'] = total_n + return stats + + +# ============================================================================ +# Saving & Loading +# ============================================================================ + +def get_representative_layers(all_layers, n=5): + if len(all_layers) <= n: + return list(all_layers) + indices = np.linspace(0, len(all_layers) - 1, n, dtype=int) + return [all_layers[i] for i in indices] + + +def save_scale_results( + scale, swap_records, quad_records, + within_cat_consistency, sign_corrected_consistency, + cross_alignment, pred_stats, target_layers, + category_validity, delta_heatmaps, + output_dir, both_correct_tag="all_pairs", +): + """Save all per-scale results to disk.""" + csv_dir = os.path.join(output_dir, 'csv') + json_dir = os.path.join(output_dir, 'json') + os.makedirs(csv_dir, exist_ok=True) + os.makedirs(json_dir, exist_ok=True) + + # 1. 
Predictions CSV (tagged so all_pairs and both_correct don't overwrite each other) + pred_rows = [] + for r in swap_records: + pred_rows.append({ + 'index': r['index'], 'group': r['group'], 'category': r['category'], + 'pred_orig': r['pred_orig'], 'pred_swap': r['pred_swap'], + 'is_correct_orig': r['is_correct_orig'], 'is_correct_swap': r['is_correct_swap'], + }) + pd.DataFrame(pred_rows).to_csv( + os.path.join(csv_dir, f'predictions_{scale}_{both_correct_tag}.csv'), index=False) + + # 2. Within-category consistency JSON + wc_data = {} + for (cat, layer), vals in within_cat_consistency.items(): + wc_data[f'{cat}_L{layer}'] = vals + with open(os.path.join(json_dir, f'within_cat_consistency_{scale}_{both_correct_tag}.json'), 'w') as f: + json.dump(wc_data, f, indent=2) + + # 3. Sign-corrected consistency JSON + sc_data = {} + for (group, layer), vals in sign_corrected_consistency.items(): + sc_data[f'{group}_L{layer}'] = vals + with open(os.path.join(json_dir, f'sign_corrected_consistency_{scale}_{both_correct_tag}.json'), 'w') as f: + json.dump(sc_data, f, indent=2) + + # 4. Cross-group alignment JSON + alignment_data = {} + for layer, vals in cross_alignment.items(): + alignment_data[f'L{layer}'] = vals + with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f: + json.dump(alignment_data, f, indent=2) + + # 5. Prediction stats JSON + with open(os.path.join(json_dir, f'pred_stats_{scale}.json'), 'w') as f: + json.dump(pred_stats, f, indent=2) + + # 6. Category validity JSON (Fix 8) + with open(os.path.join(json_dir, f'category_validity_{scale}.json'), 'w') as f: + json.dump(category_validity, f, indent=2) + + # 7. 
Delta heatmap CSVs (Fix 7) + for layer, df in delta_heatmaps.items(): + if df is not None: + df.to_csv(os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{both_correct_tag}.csv')) + + logger.info(f"Saved results for scale={scale} ({both_correct_tag}) to {output_dir}") + + +def save_vectors_npz(scale, swap_records, quad_records, target_layers, output_dir): + """Save ALL vectors with correctness metadata to NPZ (once per scale). + + This enables post-hoc filtering (both_correct, all_with_validity) from saved data. + """ + rep_layers = list(target_layers) # save ALL layers (not just 5 representative) + delta_data = {} + for layer in rep_layers: + groups_list, categories_list, vectors = [], [], [] + orig_vecs, swap_vecs, labels = [], [], [] + correct_orig_list, correct_swap_list, indices_list = [], [], [] + for r in swap_records: + if layer in r['delta']: + groups_list.append(r['group']) + categories_list.append(r['category']) + vectors.append(r['delta'][layer]) + correct_orig_list.append(r['is_correct_orig']) + correct_swap_list.append(r['is_correct_swap']) + indices_list.append(r['index']) + if layer in r['hs_orig'] and layer in r['hs_swap']: + orig_vecs.append(r['hs_orig'][layer]) + swap_vecs.append(r['hs_swap'][layer]) + labels.append(r['category']) + if vectors: + delta_data[f'delta_L{layer}'] = np.array(vectors) + delta_data[f'groups_L{layer}'] = np.array(groups_list) + delta_data[f'categories_L{layer}'] = np.array(categories_list) + delta_data[f'is_correct_orig_L{layer}'] = np.array(correct_orig_list) + delta_data[f'is_correct_swap_L{layer}'] = np.array(correct_swap_list) + delta_data[f'indices_L{layer}'] = np.array(indices_list) + if orig_vecs: + delta_data[f'orig_L{layer}'] = np.array(orig_vecs) + delta_data[f'swap_L{layer}'] = np.array(swap_vecs) + delta_data[f'labels_L{layer}'] = np.array(labels) + + npz_dir = os.path.join(output_dir, 'npz') + os.makedirs(npz_dir, exist_ok=True) + np.savez_compressed(os.path.join(npz_dir, f'vectors_{scale}.npz'), 
**delta_data) + logger.info(f"Saved vectors NPZ with correctness metadata for scale={scale}") + + # Cross-group delta vectors + if quad_records: + cg_data = {} + for layer in rep_layers: + dverts, ddists = [], [] + for rec in quad_records: + if layer in rec['delta_vert'] and layer in rec['delta_dist']: + dverts.append(rec['delta_vert'][layer]) + ddists.append(rec['delta_dist'][layer]) + if dverts: + cg_data[f'delta_vert_L{layer}'] = np.array(dverts) + cg_data[f'delta_dist_L{layer}'] = np.array(ddists) + np.savez_compressed(os.path.join(npz_dir, f'cross_group_vectors_{scale}.npz'), **cg_data) + + +def load_scale_consistency(output_dir, scale, tag='all_pairs'): + """Load sign-corrected consistency.""" + path = os.path.join(output_dir, 'json', f'sign_corrected_consistency_{scale}_{tag}.json') + if not os.path.exists(path): + return {} + with open(path) as f: + raw = json.load(f) + result = {} + for key, vals in raw.items(): + parts = key.rsplit('_L', 1) + if len(parts) == 2: + result[(parts[0], int(parts[1]))] = vals + return result + + +def load_within_cat_consistency(output_dir, scale, tag='all_pairs'): + path = os.path.join(output_dir, 'json', f'within_cat_consistency_{scale}_{tag}.json') + if not os.path.exists(path): + return {} + with open(path) as f: + raw = json.load(f) + result = {} + for key, vals in raw.items(): + parts = key.rsplit('_L', 1) + if len(parts) == 2: + result[(parts[0], int(parts[1]))] = vals + return result + + +def load_scale_alignment(output_dir, scale): + path = os.path.join(output_dir, 'json', f'cross_alignment_{scale}.json') + if not os.path.exists(path): + return {} + with open(path) as f: + raw = json.load(f) + result = {} + for key, vals in raw.items(): + result[int(key.replace('L', ''))] = vals + return result + + +def load_delta_heatmaps(output_dir, scale, tag='all_pairs'): + import glob as glob_mod + pattern = os.path.join(output_dir, 'csv', f'delta_similarity_{scale}_L*_{tag}.csv') + files = glob_mod.glob(pattern) + result = {} + 
for fpath in files: + basename = os.path.basename(fpath) + # delta_similarity_{scale}_L{layer}_{tag}.csv + part = basename.replace(f'delta_similarity_{scale}_L', '').replace(f'_{tag}.csv', '') + try: + layer = int(part) + except ValueError: + continue + result[layer] = pd.read_csv(fpath, index_col=0) + return result + + +# ============================================================================ +# Visualization +# ============================================================================ + +def plot_within_cat_consistency_trajectory(within_cat, scale, model_type, save_path): + """Plot within-category delta consistency across layers.""" + fig, ax = plt.subplots(figsize=(12, 6)) + cat_colors = CAT_COLORS + for cat in CATEGORY_ORDER: + layers, vals = [], [] + for (c, l), v in sorted(within_cat.items(), key=lambda x: x[0][1]): + if c == cat: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-o', color=cat_colors[cat], label=cat, linewidth=2, markersize=3) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Within-Category Consistency') + ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency', fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_sign_corrected_consistency_trajectory(sign_corrected, scale, model_type, save_path): + """Plot sign-corrected group consistency across layers.""" + fig, ax = plt.subplots(figsize=(12, 6)) + colors = GROUP_COLORS + for group in GROUP_ORDER: + layers, vals = [], [] + for (g, l), v in sorted(sign_corrected.items(), key=lambda x: x[0][1]): + if g == group: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-o', color=colors[group], label=group, linewidth=2, markersize=3) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Sign-Corrected Consistency') + ax.set_title(f'{model_type.upper()} 
({scale}) - Sign-Corrected Group Consistency', fontweight='bold') + ax.legend(fontsize=11) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_cross_group_alignment_trajectory(cross_alignment, scale, model_type, save_path): + fig, ax = plt.subplots(figsize=(12, 6)) + layers = sorted(cross_alignment.keys()) + actual = [cross_alignment[l]['per_sample_mean'] for l in layers] + mean_delta = [cross_alignment[l]['mean_delta_alignment'] for l in layers] + perm_mean = [cross_alignment[l]['permutation_mean'] for l in layers] + perm_std = [cross_alignment[l]['permutation_std'] for l in layers] + + ax.plot(layers, actual, '-o', color='#d62728', label='cos(d_vert, d_dist) per-sample mean', + linewidth=2.5, markersize=3) + ax.plot(layers, mean_delta, '--s', color='#e377c2', label='cos(mean_d_vert, mean_d_dist)', + linewidth=1.5, markersize=3) + ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5) + ax.fill_between(layers, + [m - 2*s for m, s in zip(perm_mean, perm_std)], + [m + 2*s for m, s in zip(perm_mean, perm_std)], + alpha=0.2, color='gray') + ax.set_xlabel('Layer Index') + ax.set_ylabel('Cosine Alignment') + ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)', fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +# Fix 7: Delta heatmap visualization + +def plot_delta_heatmap(sim_df, title, save_path): + """Plot delta-based similarity heatmap.""" + plt.figure(figsize=(10, 8)) + available_order = [c for c in CATEGORY_ORDER if c in sim_df.index] + sim_df_ordered = sim_df.loc[available_order, available_order] + + annot = sim_df_ordered.round(4).astype(str) + sns.heatmap(sim_df_ordered, annot=annot, fmt='', cmap='RdBu_r', + center=0, vmin=-1, 
vmax=1, square=True, linewidths=0.5, + cbar_kws={'label': 'Cosine Similarity'}) + plt.title(title, fontsize=14, fontweight='bold') + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved delta heatmap: {save_path}") + + +# Fix 6: Prediction stats visualization + +def plot_pred_stats_bars(all_pred_stats, model_type, save_path): + """Bar chart: per-group accuracy (orig/swap/both) across scales.""" + fig, axes = plt.subplots(1, len(GROUP_ORDER), figsize=(7 * len(GROUP_ORDER), 6)) + if len(GROUP_ORDER) == 1: + axes = [axes] + + available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)] + if not available: + # Fallback: use whatever scales are present (preserves insertion order) + seen = [] + for d in all_pred_stats: + if d['scale'] not in seen: + seen.append(d['scale']) + available = seen + + for idx, group in enumerate(GROUP_ORDER): + ax = axes[idx] + x = np.arange(3) # orig, swap, both + width = 0.8 / len(available) + for i, scale in enumerate(available): + entry = next((d for d in all_pred_stats if d['scale'] == scale), None) + if entry is None: + continue + vals = [entry.get(f'{group}_acc_orig', 0), + entry.get(f'{group}_acc_swap', 0), + entry.get(f'{group}_acc_both', 0)] + offset = (i - len(available) / 2 + 0.5) * width + color = SCALE_COLORS.get(scale, 'gray') + ax.bar(x + offset, vals, width, label=scale, color=color) + ax.set_xticks(x) + ax.set_xticklabels(['orig', 'swap', 'both']) + ax.set_ylabel('Accuracy') + ax.set_title(group, fontweight='bold') + ax.legend(fontsize=7) + ax.set_ylim(0, 1.1) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) + ax.grid(True, alpha=0.3, axis='y') + + fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_pred_stats_trajectory(all_pred_stats, 
model_type, save_path): + """Line plot: acc_both trajectory across scales per group.""" + fig, ax = plt.subplots(figsize=(10, 6)) + available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)] + if not available: + seen = [] + for d in all_pred_stats: + if d['scale'] not in seen: + seen.append(d['scale']) + available = seen + colors = GROUP_COLORS + + for group in GROUP_ORDER: + x_vals, y_vals = [], [] + for i, scale in enumerate(available): + entry = next((d for d in all_pred_stats if d['scale'] == scale), None) + if entry: + x_vals.append(i) + y_vals.append(entry.get(f'{group}_acc_both', 0)) + if x_vals: + ax.plot(x_vals, y_vals, '-o', color=colors[group], label=group, linewidth=2.5, markersize=6) + + ax.set_xticks(range(len(available))) + ax.set_xticklabels(available) + ax.set_xlabel('Scale') + ax.set_ylabel('Accuracy (both correct)') + ax.set_title(f'{model_type.upper()} - Both-Correct Accuracy Across Scales', fontweight='bold') + ax.legend(fontsize=10) + ax.set_ylim(0, 1.05) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_pca_embeddings(vectors_npz_path, scale, model_type, save_dir, bc_only=False): + data = np.load(vectors_npz_path, allow_pickle=True) + layer_keys = [k for k in data.files if k.startswith('orig_L')] + layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys]) + + cat_colors = CAT_COLORS + + for layer in layers: + orig = data.get(f'orig_L{layer}') + swap = data.get(f'swap_L{layer}') + labels = data.get(f'labels_L{layer}') + deltas = data.get(f'delta_L{layer}') + cats = data.get(f'categories_L{layer}') + groups = data.get(f'groups_L{layer}') + + if bc_only and deltas is not None: + co = data.get(f'is_correct_orig_L{layer}') + cs = data.get(f'is_correct_swap_L{layer}') + if co is not None and cs is not None: + bc_mask = co.astype(bool) & 
cs.astype(bool) + if orig is not None and len(orig) == len(bc_mask): + orig = orig[bc_mask] + swap = swap[bc_mask] + labels = labels[bc_mask] if labels is not None else None + if len(deltas) == len(bc_mask): + deltas = deltas[bc_mask] + cats = cats[bc_mask] if cats is not None else None + groups = groups[bc_mask] if groups is not None else None + + if orig is None or swap is None or len(orig) == 0: + continue + + fig, axes = plt.subplots(1, 3, figsize=(24, 7)) + + pca = PCA(n_components=2) + all_vecs = np.vstack([orig, swap]) + all_pca = pca.fit_transform(all_vecs) + orig_pca = all_pca[:len(orig)] + swap_pca = all_pca[len(orig):] + + ax = axes[0] + for cat in CATEGORY_ORDER: + mask = np.array([str(l) == cat for l in labels]) + if mask.any(): + ax.scatter(orig_pca[mask, 0], orig_pca[mask, 1], + c=cat_colors.get(cat, 'gray'), label=f'{cat} (orig)', + alpha=0.5, s=15, marker='o') + ax.scatter(swap_pca[mask, 0], swap_pca[mask, 1], + c=cat_colors.get(cat, 'gray'), + alpha=0.5, s=15, marker='x') + ax.set_title('Embeddings by Category\n(o=orig, x=swap)', fontsize=11) + ax.legend(fontsize=7, ncol=2) + ax.grid(True, alpha=0.2) + + ax = axes[1] + if deltas is not None and cats is not None: + pca_d = PCA(n_components=2) + delta_pca = pca_d.fit_transform(deltas) + group_colors = GROUP_COLORS + if groups is not None: + for group in GROUP_ORDER: + mask = np.array([str(g) == group for g in groups]) + if mask.any(): + ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1], + c=group_colors.get(group, 'gray'), label=group, alpha=0.5, s=15) + ax.set_title('Delta Vectors by Group', fontsize=11) + ax.legend(fontsize=9) + ax.grid(True, alpha=0.2) + + ax = axes[2] + if deltas is not None and cats is not None: + for cat in CATEGORY_ORDER: + mask = np.array([str(c) == cat for c in cats]) + if mask.any(): + ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1], + c=cat_colors.get(cat, 'gray'), label=cat, alpha=0.5, s=15) + ax.set_title('Delta Vectors by Category', fontsize=11) + 
ax.legend(fontsize=8, ncol=2) + ax.grid(True, alpha=0.2) + + fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - PCA', fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, bbox_inches='tight') + plt.close() + + logger.info(f"Saved PCA plots to {save_dir}") + + +def plot_pca_3d(vectors_npz_path, scale, model_type, save_dir, bc_only=False): + """Generate 3-panel 3D PCA figure per representative layer.""" + data = np.load(vectors_npz_path, allow_pickle=True) + layer_keys = [k for k in data.files if k.startswith('orig_L')] + layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys]) + + if not layers: + logger.info(f" [pca_3d] No orig_L* keys found in {vectors_npz_path}") + return + + os.makedirs(save_dir, exist_ok=True) + + def scatter3d(ax, xs, ys, zs, c, label, alpha=0.45, s=12, marker='o'): + ax.scatter(xs, ys, zs, c=c, label=label, alpha=alpha, s=s, marker=marker) + + for layer in layers: + orig = data.get(f'orig_L{layer}') + swap = data.get(f'swap_L{layer}') + labels = data.get(f'labels_L{layer}') + deltas = data.get(f'delta_L{layer}') + cats = data.get(f'categories_L{layer}') + groups = data.get(f'groups_L{layer}') + + if bc_only and deltas is not None: + co = data.get(f'is_correct_orig_L{layer}') + cs = data.get(f'is_correct_swap_L{layer}') + if co is not None and cs is not None: + bc_mask = co.astype(bool) & cs.astype(bool) + if orig is not None and len(orig) == len(bc_mask): + orig = orig[bc_mask] + swap = swap[bc_mask] + labels = labels[bc_mask] if labels is not None else None + if len(deltas) == len(bc_mask): + deltas = deltas[bc_mask] + cats = cats[bc_mask] if cats is not None else None + groups = groups[bc_mask] if groups is not None else None + + if orig is None or swap is None or len(orig) == 0: + continue + + # Panel 1: embeddings + pca_emb = PCA(n_components=3) + all_vecs = np.vstack([orig, swap]) + all_proj = pca_emb.fit_transform(all_vecs) + orig_proj = 
all_proj[:len(orig)] + swap_proj = all_proj[len(orig):] + ev1 = pca_emb.explained_variance_ratio_ + + # Panels 2/3: delta vectors + has_delta = (deltas is not None and len(deltas) >= 3) + if has_delta: + pca_d = PCA(n_components=3) + delta_proj = pca_d.fit_transform(deltas) + ev2 = pca_d.explained_variance_ratio_ + else: + delta_proj = None + ev2 = None + + fig = plt.figure(figsize=(30, 8)) + + ax1 = fig.add_subplot(131, projection='3d') + for cat in CATEGORY_ORDER: + mask = np.array([str(l) == cat for l in labels]) + if not mask.any(): + continue + c = CAT_COLORS.get(cat, 'gray') + scatter3d(ax1, orig_proj[mask, 0], orig_proj[mask, 1], orig_proj[mask, 2], + c=c, label=f'{cat} (orig)', marker='o') + scatter3d(ax1, swap_proj[mask, 0], swap_proj[mask, 1], swap_proj[mask, 2], + c=c, label=f'{cat} (swap)', marker='^') + ax1.set_title('Embeddings by Category\n(o=orig, ^=swap)', fontsize=10) + ax1.set_xlabel(f'PC1 ({ev1[0]:.1%})', fontsize=8) + ax1.set_ylabel(f'PC2 ({ev1[1]:.1%})', fontsize=8) + ax1.set_zlabel(f'PC3 ({ev1[2]:.1%})', fontsize=8) + ax1.legend(fontsize=6, ncol=2, loc='upper left') + + ax2 = fig.add_subplot(132, projection='3d') + if has_delta and groups is not None: + for group in GROUP_ORDER: + mask = np.array([str(g) == group for g in groups]) + if not mask.any(): + continue + scatter3d(ax2, delta_proj[mask, 0], delta_proj[mask, 1], delta_proj[mask, 2], + c=GROUP_COLORS.get(group, 'gray'), label=group) + ax2.set_title('Delta Vectors by Group', fontsize=10) + ax2.set_xlabel(f'PC1 ({ev2[0]:.1%})', fontsize=8) + ax2.set_ylabel(f'PC2 ({ev2[1]:.1%})', fontsize=8) + ax2.set_zlabel(f'PC3 ({ev2[2]:.1%})', fontsize=8) + ax2.legend(fontsize=8) + else: + ax2.set_title('Delta Vectors by Group\n(no data)', fontsize=10) + + ax3 = fig.add_subplot(133, projection='3d') + if has_delta and cats is not None: + for cat in CATEGORY_ORDER: + mask = np.array([str(c) == cat for c in cats]) + if not mask.any(): + continue + scatter3d(ax3, delta_proj[mask, 0], delta_proj[mask, 1], 
delta_proj[mask, 2], + c=CAT_COLORS.get(cat, 'gray'), label=cat) + ax3.set_title('Delta Vectors by Category', fontsize=10) + ax3.set_xlabel(f'PC1 ({ev2[0]:.1%})', fontsize=8) + ax3.set_ylabel(f'PC2 ({ev2[1]:.1%})', fontsize=8) + ax3.set_zlabel(f'PC3 ({ev2[2]:.1%})', fontsize=8) + ax3.legend(fontsize=7, ncol=2) + else: + ax3.set_title('Delta Vectors by Category\n(no data)', fontsize=10) + + fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - 3D PCA', fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, + bbox_inches='tight', pad_inches=0.4) + plt.close() + + logger.info(f"Saved 3D PCA plots to {save_dir}") + + +# Cross-scale plots + +def plot_cross_scale_consistency(all_consistency, model_type, save_path, title_prefix='Sign-Corrected'): + fig, axes = plt.subplots(1, 3, figsize=(21, 6)) + + for idx, group in enumerate(GROUP_ORDER): + ax = axes[idx] + for scale in SCALE_ORDER: + if scale not in all_consistency: + continue + consistency = all_consistency[scale] + layers, vals = [], [] + for (g, l), v in sorted(consistency.items(), key=lambda x: x[0][1]): + if g == group: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Consistency') + ax.set_title(group, fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + + fig.suptitle(f'{model_type.upper()} - {title_prefix} Consistency Across Scales', + fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_cross_scale_within_cat_consistency(all_within_cat, model_type, save_path): + """Cross-scale within-category consistency.""" + fig, axes = plt.subplots(2, 3, figsize=(21, 12)) + + for idx, cat in enumerate(CATEGORY_ORDER): + ax = 
axes[idx // 3][idx % 3] + for scale in SCALE_ORDER: + if scale not in all_within_cat: + continue + wc = all_within_cat[scale] + layers, vals = [], [] + for (c, l), v in sorted(wc.items(), key=lambda x: x[0][1]): + if c == cat: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Consistency') + ax.set_title(cat, fontweight='bold') + ax.legend(fontsize=8) + ax.grid(True, alpha=0.3) + + fig.suptitle(f'{model_type.upper()} - Within-Category Consistency Across Scales', + fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_cross_scale_alignment(all_alignment, model_type, save_path): + fig, ax = plt.subplots(figsize=(12, 6)) + for scale in SCALE_ORDER: + if scale not in all_alignment: + continue + alignment = all_alignment[scale] + layers = sorted(alignment.keys()) + vals = [alignment[l]['per_sample_mean'] for l in layers] + ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('cos(d_vert, d_dist)') + ax.set_title(f'{model_type.upper()} - Cross-Group Alignment Across Scales\n' + f'(High=entangled, Low=disentangled)', fontweight='bold') + ax.legend(fontsize=10) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +# Fix 7: Delta-based trajectory (cross-layer, per-scale) + +def plot_delta_trajectory(all_delta_heatmaps, model_type, save_path): + """Cross-layer trajectory of delta-based similarities for key pairs.""" + pairs = [ + ('above', 'far', 'above-far'), ('below', 'close', 'below-close'), + ('left', 'right', 'left-right'), + ] + fig, axes = 
plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6)) + if len(pairs) == 1: + axes = [axes] + + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + for scale in SCALE_ORDER: + if scale not in all_delta_heatmaps: + continue + hm = all_delta_heatmaps[scale] + layers = sorted(hm.keys()) + vals = [] + valid_layers = [] + for l in layers: + df = hm[l] + if df is not None and cat1 in df.index and cat2 in df.columns: + valid_layers.append(l) + vals.append(df.loc[cat1, cat2]) + if valid_layers: + ax.plot(valid_layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Delta Cosine Similarity') + ax.set_title(label, fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + ax.axhline(y=0, color='gray', linestyle='--', alpha=0.5) + + fig.suptitle(f'{model_type.upper()} - Delta-Based Similarity Trajectory', + fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_summary_barplot(all_consistency, all_alignment, model_type, save_path): + available_scales = [s for s in SCALE_ORDER if s in all_consistency] + if not available_scales: + return + + sample_cons = all_consistency[available_scales[0]] + max_layer = max(l for (_, l) in sample_cons.keys()) + + fig, axes = plt.subplots(1, 2, figsize=(16, 6)) + + ax = axes[0] + x = np.arange(len(GROUP_ORDER)) + width = 0.8 / len(available_scales) + for i, scale in enumerate(available_scales): + cons = all_consistency[scale] + vals = [cons.get((g, max_layer), {}).get('mean', 0) for g in GROUP_ORDER] + offset = (i - len(available_scales) / 2 + 0.5) * width + ax.bar(x + offset, vals, width, + label=SCALE_DISPLAY_NAMES.get(scale, scale), + color=SCALE_COLORS.get(scale, 'gray')) + ax.set_xticks(x) + ax.set_xticklabels(GROUP_ORDER) + ax.set_ylabel('Sign-Corrected Consistency') + 
ax.set_title(f'Consistency at Layer {max_layer}', fontweight='bold') + ax.legend(fontsize=8) + ax.grid(True, alpha=0.3, axis='y') + + ax = axes[1] + available_align = [s for s in available_scales if s in all_alignment] + if available_align: + vals = [all_alignment[s].get(max_layer, {}).get('per_sample_mean', 0) for s in available_align] + colors = [SCALE_COLORS.get(s, 'gray') for s in available_align] + ax.bar(range(len(vals)), vals, color=colors) + ax.set_xticks(range(len(vals))) + ax.set_xticklabels([SCALE_DISPLAY_NAMES.get(s, s) for s in available_align]) + ax.set_ylabel('cos(d_vert, d_dist)') + ax.set_title(f'Cross-Group Alignment at L{max_layer}\n(Lower=disentangled)', fontweight='bold') + ax.grid(True, alpha=0.3, axis='y') + + fig.suptitle(f'{model_type.upper()} - Summary at Deepest Layer', fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +# ============================================================================ +# Main Pipeline +# ============================================================================ + +def process_scale(args, scale, swap_pairs, quads): + # Resolve model path from the correct config dict + if args.model_type in MODEL_CONFIGS_NEW: + cls_name, model_path = MODEL_CONFIGS_NEW[args.model_type][scale] + else: + model_path = MODEL_CONFIGS[args.model_type][scale] + cls_name = None + + logger.info(f"\n{'='*60}") + logger.info(f"Processing {args.model_type} - {scale}" + + (f" [{cls_name}]" if cls_name else "")) + logger.info(f"Model path: {model_path}") + logger.info(f"{'='*60}") + + extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device) + target_layers = extractor.target_layers + + output_dir = os.path.join(args.output_dir, args.model_type) + plots_dir = os.path.join(output_dir, 'plots') + os.makedirs(plots_dir, exist_ok=True) + + # Phase A: Extract swap pair features + 
logger.info("\n--- Phase A: Extracting swap pair features ---") + swap_records = extract_swap_features(extractor, swap_pairs, + max_samples_per_category=args.max_samples_per_category) + + # Phase B: Extract cross-group features + logger.info("\n--- Phase B: Extracting cross-group features ---") + quad_records = extract_cross_group_features(extractor, quads) if quads else [] + + # Phase C: Analysis + + # Fix 8: Category validity check + logger.info("\n--- Phase C: Analysis ---") + category_validity = check_category_validity(swap_records, scale) + unreliable_cats = [c for c, v in category_validity.items() if not v['reliable']] + if unreliable_cats: + logger.warning(f" Unreliable categories: {unreliable_cats}") + + # Fix 5: Two types of consistency (all pairs) + within_cat_all, sign_corrected_all = compute_delta_consistency(swap_records, target_layers) + + # Fix 8: Both-correct filtered consistency + both_correct_records = filter_both_correct(swap_records) + logger.info(f" Both-correct pairs: {len(both_correct_records)}/{len(swap_records)}") + within_cat_bc, sign_corrected_bc = compute_delta_consistency(both_correct_records, target_layers) + + # Cross-group alignment + cross_alignment = compute_cross_group_alignment(quad_records, target_layers) + pred_stats = compute_prediction_stats(swap_records, scale) + + # Fix 7: Delta-based heatmaps (for all layers) + delta_heatmaps_all = {} + delta_heatmaps_bc = {} + for layer in target_layers: + delta_heatmaps_all[layer] = compute_delta_similarity_matrix(swap_records, layer) + if both_correct_records: + delta_heatmaps_bc[layer] = compute_delta_similarity_matrix(both_correct_records, layer) + + # Log key results + max_layer = max(target_layers) + for group in GROUP_ORDER: + key = (group, max_layer) + if key in sign_corrected_all: + logger.info(f" Sign-corrected [{group}, L{max_layer}]: " + f"{sign_corrected_all[key]['mean']:.4f} +/- {sign_corrected_all[key]['std']:.4f}") + if max_layer in cross_alignment: + ca = 
cross_alignment[max_layer] + logger.info(f" Cross-group alignment L{max_layer}: " + f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})") + logger.info(f" Accuracy orig={pred_stats['overall_acc_orig']:.1%}, " + f"swap={pred_stats['overall_acc_swap']:.1%}, " + f"both={pred_stats['overall_acc_both']:.1%}") + + # Phase D: Save results (both all_pairs and both_correct) + logger.info("\n--- Phase D: Saving results ---") + + # Save vectors NPZ ONCE with all records + correctness metadata + save_vectors_npz(scale, swap_records, quad_records, target_layers, output_dir) + + save_scale_results( + scale, swap_records, quad_records, + within_cat_all, sign_corrected_all, + cross_alignment, pred_stats, target_layers, + category_validity, delta_heatmaps_all, + output_dir, both_correct_tag='all_pairs', + ) + if both_correct_records: + save_scale_results( + scale, both_correct_records, quad_records, + within_cat_bc, sign_corrected_bc, + cross_alignment, pred_stats, target_layers, + category_validity, delta_heatmaps_bc, + output_dir, both_correct_tag='both_correct', + ) + + # Phase E: Per-scale plots (generate into separate subdirs) + logger.info("\n--- Phase E: Per-scale plots ---") + + for condition, wc_data, sc_data in [ + ('all', within_cat_all, sign_corrected_all), + ('both_correct', within_cat_bc, sign_corrected_bc), + ]: + if condition == 'both_correct' and not both_correct_records: + continue + + cond_dir = os.path.join(plots_dir, condition) + os.makedirs(cond_dir, exist_ok=True) + + wc_dir = os.path.join(cond_dir, 'within_cat_consistency') + sc_dir = os.path.join(cond_dir, 'sign_corrected') + ca_dir = os.path.join(cond_dir, 'cross_alignment') + os.makedirs(wc_dir, exist_ok=True) + os.makedirs(sc_dir, exist_ok=True) + os.makedirs(ca_dir, exist_ok=True) + + # Within-category consistency + plot_within_cat_consistency_trajectory( + wc_data, scale, args.model_type, + os.path.join(wc_dir, f'within_cat_consistency_{scale}.png')) + + # Sign-corrected consistency + 
plot_sign_corrected_consistency_trajectory( + sc_data, scale, args.model_type, + os.path.join(sc_dir, f'sign_corrected_consistency_{scale}.png')) + + # Cross-group alignment + if cross_alignment: + plot_cross_group_alignment_trajectory( + cross_alignment, scale, args.model_type, + os.path.join(ca_dir, f'cross_alignment_{scale}.png')) + + # PCA (from full NPZ) — 2D and 3D, all-pairs and both-correct + npz_path = os.path.join(output_dir, 'npz', f'vectors_{scale}.npz') + if os.path.exists(npz_path): + pca_dir = os.path.join(plots_dir, 'all', 'pca') + pca_3d_dir = os.path.join(plots_dir, 'all', 'pca_3d') + bc_pca_dir = os.path.join(plots_dir, 'both_correct', 'pca') + bc_pca_3d_dir = os.path.join(plots_dir, 'both_correct', 'pca_3d') + for d in (pca_dir, pca_3d_dir, bc_pca_dir, bc_pca_3d_dir): + os.makedirs(d, exist_ok=True) + plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir) + plot_pca_3d(npz_path, scale, args.model_type, pca_3d_dir) + plot_pca_embeddings(npz_path, scale, args.model_type, bc_pca_dir, bc_only=True) + plot_pca_3d(npz_path, scale, args.model_type, bc_pca_3d_dir, bc_only=True) + + # Prediction stats bar (per-scale) + if pred_stats: + pred_plot_dir = os.path.join(plots_dir, 'all', 'pred_stats') + os.makedirs(pred_plot_dir, exist_ok=True) + plot_pred_stats_bars([pred_stats], args.model_type, + os.path.join(pred_plot_dir, f'pred_stats_{scale}.png')) + + # Cleanup + del swap_records, quad_records, both_correct_records + extractor.cleanup() + + logger.info(f"\n Scale {scale} complete.") + + +# ============================================================================ +# Accuracy Chart (integrated from accuracy_chart.py) +# ============================================================================ + +def _acc_plot_group_bars(pred_stats, model_type, ax_list): + available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)] + x = np.arange(3) + width = 0.8 / max(len(available), 1) + for idx, group in enumerate(GROUP_ORDER): + ax = 
ax_list[idx] + for i, scale in enumerate(available): + entry = next((d for d in pred_stats if d['scale'] == scale), None) + if entry is None: + continue + vals = [entry.get(f'{group}_acc_orig', 0), + entry.get(f'{group}_acc_swap', 0), + entry.get(f'{group}_acc_both', 0)] + offset = (i - len(available) / 2 + 0.5) * width + ax.bar(x + offset, vals, width, label=scale, + color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85) + ax.set_xticks(x) + ax.set_xticklabels(['orig', 'swap', 'both'], fontsize=10) + ax.set_ylabel('Accuracy', fontsize=9) + ax.set_title(group.capitalize(), fontweight='bold', fontsize=11, + color=GROUP_COLORS.get(group, 'black')) + ax.legend(fontsize=7, ncol=2) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3, axis='y') + + +def _acc_plot_both_trajectory(pred_stats, model_type, ax): + available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)] + x_ticks = range(len(available)) + for group in GROUP_ORDER: + y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(f'{group}_acc_both', 0) + for s in available] + ax.plot(x_ticks, y_vals, '-o', color=GROUP_COLORS.get(group, 'gray'), + label=group, linewidth=2.5, markersize=7) + y_overall = [next((d for d in pred_stats if d['scale'] == s), {}).get('overall_acc_both', 0) + for s in available] + ax.plot(x_ticks, y_overall, '--s', color='black', label='overall', + linewidth=2, markersize=6, alpha=0.7) + ax.set_xticks(list(x_ticks)) + ax.set_xticklabels(available, fontsize=9) + ax.set_xlabel('Scale', fontsize=9) + ax.set_ylabel('Accuracy (both correct)', fontsize=9) + ax.set_title('Both-Correct Accuracy Trajectory', fontweight='bold', fontsize=11) + ax.legend(fontsize=9) + ax.set_ylim(0, 1.05) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3) + + +def _acc_plot_overall_trajectory(pred_stats, model_type, ax): + available = [s for s in SCALE_ORDER if 
any(d['scale'] == s for d in pred_stats)] + x_ticks = range(len(available)) + for metric, label, ls in [ + ('overall_acc_orig', 'orig', '-o'), + ('overall_acc_swap', 'swap', '-s'), + ('overall_acc_both', 'both', '-^'), + ]: + y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(metric, 0) + for s in available] + ax.plot(x_ticks, y_vals, ls, label=label, linewidth=2.2, markersize=6) + ax.set_xticks(list(x_ticks)) + ax.set_xticklabels(available, fontsize=9) + ax.set_xlabel('Scale', fontsize=9) + ax.set_ylabel('Overall Accuracy', fontsize=9) + ax.set_title('Overall Accuracy Trajectory', fontweight='bold', fontsize=11) + ax.legend(fontsize=9) + ax.set_ylim(0, 1.05) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3) + + +def _acc_plot_category_accuracy(cat_validity, model_type, ax_orig, ax_swap, pred_stats=None): + available = [s for s in SCALE_ORDER if s in cat_validity] + cats_with_overall = CATEGORY_ORDER + ['overall'] + x = np.arange(len(cats_with_overall)) + width = 0.8 / max(len(available), 1) + overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'} + for ax, metric, title in [ + (ax_orig, 'acc_orig', 'Per-Category Accuracy (orig)'), + (ax_swap, 'acc_swap', 'Per-Category Accuracy (swap)'), + ]: + for i, scale in enumerate(available): + cv = cat_validity[scale] + vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER] + if pred_stats is not None: + entry = next((d for d in pred_stats if d['scale'] == scale), None) + vals.append(entry.get(overall_key[metric], 0) if entry else 0) + else: + vals.append(0) + offset = (i - len(available) / 2 + 0.5) * width + ax.bar(x + offset, vals, width, label=scale, + color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85) + for j, cat in enumerate(CATEGORY_ORDER): + ax.axvspan(j - 0.45, j + 0.45, color=CAT_COLORS.get(cat, 'gray'), alpha=0.06, linewidth=0) + ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', linewidth=1.2, linestyle=':', 
alpha=0.6) + ax.set_xticks(x) + ax.set_xticklabels(cats_with_overall, fontsize=9, rotation=15) + ax.set_ylabel('Accuracy', fontsize=9) + ax.set_title(title, fontweight='bold', fontsize=11) + ax.legend(fontsize=7, ncol=2) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3, axis='y') + if available: + last_cv = cat_validity[available[-1]] + for j, cat in enumerate(CATEGORY_ORDER): + if not last_cv.get(cat, {}).get('reliable', True): + ax.text(j, 1.08, '✗', ha='center', va='center', + fontsize=9, color='red', fontweight='bold') + + +def _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=None): + cats_with_overall = CATEGORY_ORDER + ['overall'] + overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'} + for scale in sorted(cat_validity.keys(), + key=lambda s: SCALE_ORDER.index(s) if s in SCALE_ORDER else 99): + cv = cat_validity[scale] + ps_entry = next((d for d in pred_stats if d['scale'] == scale), None) if pred_stats else None + fig, axes = plt.subplots(1, 2, figsize=(16, 5)) + x = np.arange(len(cats_with_overall)) + width = 0.55 + for ax, metric, title in [ + (axes[0], 'acc_orig', f'acc_orig ({scale})'), + (axes[1], 'acc_swap', f'acc_swap ({scale})'), + ]: + vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER] + overall_val = ps_entry.get(overall_key[metric], 0) if ps_entry else 0 + vals.append(overall_val) + colors = [CAT_COLORS.get(cat, 'gray') for cat in CATEGORY_ORDER] + ['#333333'] + bars = ax.bar(x, vals, width, color=colors, alpha=0.85, edgecolor='white') + ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', + linewidth=1.2, linestyle=':', alpha=0.6) + ax.set_xticks(x) + ax.set_xticklabels(cats_with_overall, fontsize=10) + ax.set_ylabel('Accuracy', fontsize=10) + ax.set_title(title, fontweight='bold', fontsize=12) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) + ax.grid(True, alpha=0.3, 
def run_accuracy_charts(pred_stats, cat_validity, model_type, save_dir):
    """Generate all accuracy chart plots into save_dir.

    Produces: accuracy_group_bars.png, accuracy_trajectory.png,
    accuracy_category.png (+ per-scale category figures) when cat_validity
    is non-empty, and a combined accuracy_chart.png dashboard.
    """
    os.makedirs(save_dir, exist_ok=True)

    # Group bars
    fig, axes = plt.subplots(1, 3, figsize=(21, 6))
    _acc_plot_group_bars(pred_stats, model_type, axes)
    fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group',
                 fontsize=15, fontweight='bold')
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'accuracy_group_bars.png'), dpi=200, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_group_bars.png')}")

    # Trajectory
    fig, axes = plt.subplots(1, 2, figsize=(16, 6))
    _acc_plot_both_trajectory(pred_stats, model_type, axes[0])
    _acc_plot_overall_trajectory(pred_stats, model_type, axes[1])
    fig.suptitle(f'{model_type.upper()} - Accuracy Trajectory Across Scales',
                 fontsize=14, fontweight='bold')
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'accuracy_trajectory.png'), dpi=200, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_trajectory.png')}")

    if cat_validity:
        # Category bars (all scales overlay)
        fig, axes = plt.subplots(1, 2, figsize=(20, 6))
        _acc_plot_category_accuracy(cat_validity, model_type, axes[0], axes[1],
                                    pred_stats=pred_stats)
        fig.suptitle(f'{model_type.upper()} - Per-Category Accuracy Across Scales',
                     fontsize=14, fontweight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(save_dir, 'accuracy_category.png'), dpi=200, bbox_inches='tight')
        plt.close()
        logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_category.png')}")

        # Per-scale category bars
        _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=pred_stats)

    # Combined accuracy_chart.png
    fig = plt.figure(figsize=(24, 14))
    ax_h = fig.add_subplot(3, 3, 1)
    ax_v = fig.add_subplot(3, 3, 2)
    ax_d = fig.add_subplot(3, 3, 3)
    _acc_plot_group_bars(pred_stats, model_type, [ax_h, ax_v, ax_d])
    ax_tb = fig.add_subplot(3, 3, 4)
    ax_to = fig.add_subplot(3, 3, 5)
    _acc_plot_both_trajectory(pred_stats, model_type, ax_tb)
    _acc_plot_overall_trajectory(pred_stats, model_type, ax_to)
    # Sixth cell carries a plain-text legend explaining the chart markers.
    ax_note = fig.add_subplot(3, 3, 6)
    ax_note.axis('off')
    available_scales = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    ax_note.text(0.1, 0.6,
                 f'Scales: {", ".join(available_scales)}\n\n✗ = unreliable category\n-- = 0.5 chance level',
                 transform=ax_note.transAxes, fontsize=11, va='top', family='monospace')
    if cat_validity:
        ax_co = fig.add_subplot(3, 2, 5)
        ax_cs = fig.add_subplot(3, 2, 6)
        _acc_plot_category_accuracy(cat_validity, model_type, ax_co, ax_cs, pred_stats=pred_stats)
    fig.suptitle(f'{model_type.upper()} — Accuracy Summary',
                 fontsize=17, fontweight='bold', y=1.01)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'accuracy_chart.png'), dpi=200, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_chart.png')}")


# ============================================================================
# Unify Consistency Y-axis (integrated from unify_consistency_ylim.py)
# ============================================================================

def _ylim_compute(all_vals, margin_ratio=0.08):
    # Return a (ymin, ymax) pair spanning all_vals with a proportional margin;
    # falls back to (-1, 1) for an empty value list.
    if not all_vals:
        return -1, 1
    ymin, ymax = min(all_vals), max(all_vals)
    margin = (ymax - ymin) * margin_ratio
    return ymin - margin, ymax + margin
max(all_vals) + margin = (ymax - ymin) * margin_ratio + return ymin - margin, ymax + margin + + +def _ylim_load_keyed_json(path): + if not os.path.exists(path): + return None + with open(path) as f: + raw = json.load(f) + if not raw: + return None + result = {} + for key, vals in raw.items(): + parts = key.rsplit('_L', 1) + if len(parts) == 2: + result[(parts[0], int(parts[1]))] = vals + return result if result else None + + +def _ylim_load_alignment_json(path): + if not os.path.exists(path): + return None + with open(path) as f: + raw = json.load(f) + if not raw: + return None + result = {int(k[1:]): v for k, v in raw.items() if k.startswith('L')} + return result if result else None + + +def _ylim_plot_sign_corrected(data, scale, model_type, save_path, ylim): + fig, ax = plt.subplots(figsize=(12, 6)) + for group in GROUP_ORDER: + layers, vals = [], [] + for (g, l), v in sorted(data.items(), key=lambda x: x[0][1]): + if g == group: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-o', color=GROUP_COLORS[group], + label=group, linewidth=2, markersize=3) + ax.set_ylim(ylim) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Sign-Corrected Consistency') + ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency', + fontweight='bold') + ax.legend(fontsize=11) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + + +def _ylim_plot_within_cat(data, scale, model_type, save_path, ylim): + fig, ax = plt.subplots(figsize=(12, 6)) + for cat in CATEGORY_ORDER: + layers, vals = [], [] + for (c, l), v in sorted(data.items(), key=lambda x: x[0][1]): + if c == cat: + layers.append(l) + vals.append(v['mean']) + if layers: + ax.plot(layers, vals, '-o', color=CAT_COLORS[cat], + label=cat, linewidth=2, markersize=3) + ax.set_ylim(ylim) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Within-Category Consistency') + ax.set_title(f'{model_type.upper()} ({scale}) - 
def _ylim_plot_cross_alignment(data, scale, model_type, save_path, ylim):
    """Plot cross-group cosine alignment vs. layer (per-sample mean, mean-delta
    alignment, and a ±2σ permutation-control band) with a fixed y-range.

    data maps layer -> {'per_sample_mean', 'mean_delta_alignment',
    'permutation_mean', 'permutation_std'}.
    """
    fig, ax = plt.subplots(figsize=(12, 6))
    layers = sorted(data.keys())
    ax.plot(layers, [data[l]['per_sample_mean'] for l in layers], '-o', color='#d62728',
            label='cos(d_vert, d_dist) per-sample mean', linewidth=2.5, markersize=3)
    ax.plot(layers, [data[l]['mean_delta_alignment'] for l in layers], '--s', color='#e377c2',
            label='cos(mean_d_vert, mean_d_dist)', linewidth=1.5, markersize=3)
    perm_mean = [data[l]['permutation_mean'] for l in layers]
    perm_std = [data[l]['permutation_std'] for l in layers]
    ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5)
    # Shaded ±2σ band around the permutation control.
    ax.fill_between(layers,
                    [m - 2*s for m, s in zip(perm_mean, perm_std)],
                    [m + 2*s for m, s in zip(perm_mean, perm_std)],
                    alpha=0.2, color='gray')
    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Cosine Alignment')
    ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)',
                 fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_process_plot_type(data_dir, plots_dir, conditions, model_type,
                            plot_name, json_pattern, loader, val_gatherer, plotter,
                            subfolder=None):
    """Re-plot one plot type across all conditions with a unified y-axis.

    For each (condition, tag) pair: load every scale's JSON via `loader`,
    pool all values via `val_gatherer`, compute a shared ylim, then re-render
    each scale with `plotter` into {cond_plot_dir}/{subfolder}/.
    """
    logger.info(f" [unify ylim] {plot_name}")
    for condition, condition_tag in conditions:
        cond_plot_dir = os.path.join(plots_dir, condition)
        if not os.path.isdir(cond_plot_dir):
            continue
        save_dir = os.path.join(cond_plot_dir, subfolder) if subfolder else cond_plot_dir
        os.makedirs(save_dir, exist_ok=True)
        all_data = {}
        for scale in SCALE_ORDER:
            path = os.path.join(data_dir, 'json',
                                json_pattern.format(scale=scale, tag=condition_tag))
            loaded = loader(path)
            if loaded:
                all_data[scale] = loaded
        if not all_data:
            continue
        all_vals = val_gatherer(all_data)
        # One shared y-range across every scale of this condition.
        ylim = _ylim_compute(all_vals)
        for scale, data in all_data.items():
            save_path = os.path.join(save_dir, f'{plot_name}_{scale}.png')
            plotter(data, scale, model_type, save_path, ylim)
        logger.info(f" {condition}: y=[{ylim[0]:.4f}, {ylim[1]:.4f}], {len(all_data)} scales")
def run_unify_ylim(data_dir, plots_dir, model_type):
    """Unify y-axis for sign_corrected, within_cat, and cross_alignment plots."""
    conditions = [
        ('all', 'all_pairs'),
        ('both_correct', 'both_correct'),
    ]

    def gather_keyed(all_data):
        # Pool every per-(key, layer) mean across all scales.
        pooled = []
        for data in all_data.values():
            for v in data.values():
                pooled.append(v['mean'])
        return pooled

    def gather_alignment(all_data):
        # Pool the alignment curves plus the permutation band edges (±2σ)
        # so the unified y-range keeps the shaded envelope fully in view.
        pooled = []
        for data in all_data.values():
            for v in data.values():
                pooled += [v['per_sample_mean'], v['mean_delta_alignment'],
                           v['permutation_mean'] + 2 * v['permutation_std'],
                           v['permutation_mean'] - 2 * v['permutation_std']]
        return pooled

    # One spec per plot family: (name, json pattern, loader, gatherer, plotter, subfolder).
    plot_specs = [
        ('sign_corrected_consistency',
         'sign_corrected_consistency_{scale}_{tag}.json',
         _ylim_load_keyed_json, gather_keyed,
         _ylim_plot_sign_corrected, 'sign_corrected'),
        ('within_cat_consistency',
         'within_cat_consistency_{scale}_{tag}.json',
         _ylim_load_keyed_json, gather_keyed,
         _ylim_plot_within_cat, 'within_cat_consistency'),
        ('cross_alignment',
         'cross_alignment_{scale}.json',
         _ylim_load_alignment_json, gather_alignment,
         _ylim_plot_cross_alignment, 'cross_alignment'),
    ]
    for name, pattern, loader, gatherer, plotter, sub in plot_specs:
        _ylim_process_plot_type(
            data_dir, plots_dir, conditions, model_type,
            plot_name=name,
            json_pattern=pattern,
            loader=loader,
            val_gatherer=gatherer,
            plotter=plotter,
            subfolder=sub,
        )
def _check_merge_only_sources(output_dir: str, model_type: str) -> bool:
    """Verify required source directories have data for a merge-only model_type.
    Returns True if all sources look healthy, False (with warnings) if not.
    """
    mc = MERGE_ONLY_CONFIGS[model_type]
    ok = True
    for req_dir in mc['required_dirs']:
        src_path = os.path.join(output_dir, req_dir)
        json_dir = os.path.join(src_path, 'json')
        if not os.path.isdir(src_path):
            # The original branched on `req_dir in MODEL_CONFIGS_NEW` but both
            # branches built the identical hint string; collapsed to one line.
            hint = f"python swap_analysis.py --model_type {req_dir}"
            logger.warning(
                f"[{model_type}] Required source directory not found: {src_path}\n"
                f" → Run inference first: {hint}"
            )
            ok = False
        elif not os.path.isdir(json_dir) or not any(
            f.startswith('pred_stats_') for f in os.listdir(json_dir)
        ):
            logger.warning(
                f"[{model_type}] Source directory exists but has no pred_stats JSON: {json_dir}\n"
                f" → Inference may not have completed for '{req_dir}'."
            )
            ok = False
        else:
            # Healthy source: report which scales have prediction stats.
            scales_found = [
                f.replace('pred_stats_', '').replace('.json', '')
                for f in os.listdir(json_dir)
                if f.startswith('pred_stats_')
            ]
            logger.info(f" [{req_dir}] found scales: {scales_found}")
    return ok


def _load_scale_data_multi(output_dir: str, model_type: str, scale: str, scale_sources: dict):
    """Load per-scale data for one scale, looking in the correct source directory.

    Returns (sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc).
    Any unavailable item is None / {}.
    """
    # Fall back to the model_type's own directory when the scale has no mapping.
    src_dir = os.path.join(output_dir, scale_sources.get(scale, model_type))

    sc = load_scale_consistency(src_dir, scale, 'all_pairs')
    sc_bc = load_scale_consistency(src_dir, scale, 'both_correct')
    wc = load_within_cat_consistency(src_dir, scale, 'all_pairs')
    wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct')
    align = load_scale_alignment(src_dir, scale)

    pred_stat = None
    pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json')
    if os.path.exists(pred_path):
        with open(pred_path) as f:
            pred_stat = json.load(f)

    cat_validity = None
    cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json')
    if os.path.exists(cv_path):
        with open(cv_path) as f:
            cat_validity = json.load(f)

    dh = load_delta_heatmaps(src_dir, scale, 'all_pairs')
    dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct')

    return sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc
+ """ + src_dir = os.path.join(output_dir, scale_sources.get(scale, model_type)) + + sc = load_scale_consistency(src_dir, scale, 'all_pairs') + sc_bc = load_scale_consistency(src_dir, scale, 'both_correct') + wc = load_within_cat_consistency(src_dir, scale, 'all_pairs') + wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct') + align = load_scale_alignment(src_dir, scale) + + pred_stat = None + pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json') + if os.path.exists(pred_path): + with open(pred_path) as f: + pred_stat = json.load(f) + + cat_validity = None + cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json') + if os.path.exists(cv_path): + with open(cv_path) as f: + cat_validity = json.load(f) + + dh = load_delta_heatmaps(src_dir, scale, 'all_pairs') + dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct') + + return sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc + + +# --------------------------------------------------------------------------- +# All-layer heatmap + PCA helpers (called from run_merge / run_merge_extended) +# --------------------------------------------------------------------------- + +def _get_csv_layers(csv_dir: str, scale: str, tag: str) -> list: + """Return sorted list of layer indices that have a delta_similarity CSV.""" + import glob as _glob + pattern = os.path.join(csv_dir, f'delta_similarity_{scale}_L*_{tag}.csv') + layers = [] + for fpath in _glob.glob(pattern): + m = re.search( + rf'delta_similarity_{re.escape(scale)}_L(\d+)_{re.escape(tag)}\.csv$', + os.path.basename(fpath)) + if m: + layers.append(int(m.group(1))) + return sorted(layers) + + +def run_all_layer_heatmaps(model_dir: str, model_type: str, scales: list): + """Generate delta-similarity heatmaps for ALL layers from pre-computed CSVs. 
+ + Reads {model_dir}/csv/delta_similarity_{scale}_L{n}_{tag}.csv + Writes {model_dir}/plots/all/heatmap/heatmap_{scale}_L{n}.png (all_pairs) + {model_dir}/plots/both_correct/heatmap/heatmap_{scale}_L{n}.png (both_correct) + + Skips a scale if the NPZ is missing or any all_pairs CSV is absent + (indicates inference was not fully completed for that scale). + """ + TAG_TO_DIR = { + 'all_pairs': os.path.join(model_dir, 'plots', 'all', 'heatmap'), + 'both_correct': os.path.join(model_dir, 'plots', 'both_correct', 'heatmap'), + } + + for scale in scales: + npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz') + csv_dir = os.path.join(model_dir, 'csv') + + if not os.path.exists(npz_path): + logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping heatmaps.') + continue + + data = np.load(npz_path, allow_pickle=True) + npz_layers = sorted( + int(k.replace('orig_L', '')) + for k in data.files if k.startswith('orig_L') + ) + data.close() + + if not npz_layers: + logger.warning(f' [{model_type}/{scale}] No orig_L* keys in NPZ, skipping heatmaps.') + continue + + csv_layers = _get_csv_layers(csv_dir, scale, 'all_pairs') + missing = set(npz_layers) - set(csv_layers) + if missing: + logger.warning( + f' [{model_type}/{scale}] {len(missing)} NPZ layers lack CSVs ' + f'(e.g. L{sorted(missing)[:5]}). 
Skipping all-layer heatmaps.') + continue + + for out_dir in TAG_TO_DIR.values(): + os.makedirs(out_dir, exist_ok=True) + + logger.info(f' [{model_type}/{scale}] Generating heatmaps for {len(npz_layers)} layers...') + saved = 0 + for layer in npz_layers: + for tag, out_dir in TAG_TO_DIR.items(): + csv_path = os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{tag}.csv') + if not os.path.exists(csv_path): + continue # both_correct CSV may be absent for some layers + df = pd.read_csv(csv_path, index_col=0) + available = [c for c in CATEGORY_ORDER if c in df.index] + if not available: + continue + df = df.loc[available, available] + title = ( + f'{model_type.upper()} ({scale}) \u2014 Delta Heatmap L{layer} ' + f'({"both-correct" if tag == "both_correct" else "all pairs"})' + ) + out_path = os.path.join(out_dir, f'heatmap_{scale}_L{layer}.png') + plot_delta_heatmap(df, title, out_path) + saved += 1 + logger.info(f' [{model_type}/{scale}] Saved {saved} heatmaps') + + +def run_all_layer_pca(model_dir: str, model_type: str, scales: list): + """Generate 2D and 3D PCA plots for ALL layers from saved NPZ files. 
def run_merge(args):
    """Merge mode for legacy model_types: load saved per-scale artifacts and
    generate cross-scale plots, summary CSV, accuracy charts, unified-ylim
    re-plots, and all-layer heatmaps/PCA."""
    # Per-scale data is always read from the standard results dir
    data_dir = os.path.join(args.output_dir, args.model_type)
    # Cross-scale plots go to merge_output_dir if specified, else same as data_dir
    output_dir = args.merge_output_dir if args.merge_output_dir else data_dir
    plots_dir = os.path.join(output_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    scale_order = ['vanilla', '80k', '400k', '800k', '2m',
                   'roborefer', '10pct', '20pct', '30pct']
    available_scales = [s for s in scale_order if s in args.scales]

    # Load per-scale results
    all_sign_corrected = {}
    all_sign_corrected_bc = {}
    all_within_cat = {}
    all_within_cat_bc = {}
    all_alignment = {}
    all_pred_stats = []
    all_cat_validity = {}
    all_delta_heatmaps = {}
    all_delta_heatmaps_bc = {}

    for scale in available_scales:
        sc = load_scale_consistency(data_dir, scale, 'all_pairs')
        if sc:
            all_sign_corrected[scale] = sc
        sc_bc = load_scale_consistency(data_dir, scale, 'both_correct')
        if sc_bc:
            all_sign_corrected_bc[scale] = sc_bc
        wc = load_within_cat_consistency(data_dir, scale, 'all_pairs')
        if wc:
            all_within_cat[scale] = wc
        wc_bc = load_within_cat_consistency(data_dir, scale, 'both_correct')
        if wc_bc:
            all_within_cat_bc[scale] = wc_bc
        align = load_scale_alignment(data_dir, scale)
        if align:
            all_alignment[scale] = align
        pred_path = os.path.join(data_dir, 'json', f'pred_stats_{scale}.json')
        if os.path.exists(pred_path):
            with open(pred_path) as f:
                all_pred_stats.append(json.load(f))
        cv_path = os.path.join(data_dir, 'json', f'category_validity_{scale}.json')
        if os.path.exists(cv_path):
            with open(cv_path) as f:
                all_cat_validity[scale] = json.load(f)
        dh = load_delta_heatmaps(data_dir, scale, 'all_pairs')
        if dh:
            all_delta_heatmaps[scale] = dh
        dh_bc = load_delta_heatmaps(data_dir, scale, 'both_correct')
        if dh_bc:
            all_delta_heatmaps_bc[scale] = dh_bc

        logger.info(f" Loaded data for {scale}")

    # Generate cross-scale plots into condition subdirs
    for condition, sc_data, wc_data, dh_data, tag_label in [
        ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
        ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'),
    ]:
        cond_dir = os.path.join(plots_dir, condition)
        sc_dir = os.path.join(cond_dir, 'sign_corrected')
        wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
        dt_dir = os.path.join(cond_dir, 'delta_trajectory')
        os.makedirs(sc_dir, exist_ok=True)
        os.makedirs(wc_dir, exist_ok=True)
        os.makedirs(dt_dir, exist_ok=True)

        # Cross-scale overlays only make sense with >= 2 scales.
        if len(sc_data) > 1:
            plot_cross_scale_consistency(
                sc_data, args.model_type,
                os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
                title_prefix=f'Sign-Corrected ({tag_label})')

        if len(wc_data) > 1:
            plot_cross_scale_within_cat_consistency(
                wc_data, args.model_type,
                os.path.join(wc_dir, 'cross_scale_within_cat.png'))

        if dh_data:
            plot_delta_trajectory(dh_data, args.model_type,
                                  os.path.join(dt_dir, 'delta_trajectory.png'))

    # Cross-scale alignment + pred stats + summary (shared across conditions)
    all_cond_dir = os.path.join(plots_dir, 'all')
    ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
    pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats')
    summary_dir = os.path.join(all_cond_dir, 'summary')
    os.makedirs(ca_dir, exist_ok=True)
    os.makedirs(pred_stats_dir, exist_ok=True)
    os.makedirs(summary_dir, exist_ok=True)

    if len(all_alignment) > 1:
        plot_cross_scale_alignment(
            all_alignment, args.model_type,
            os.path.join(ca_dir, 'cross_scale_alignment.png'))

    # Prediction stats plots
    if all_pred_stats:
        plot_pred_stats_bars(all_pred_stats, args.model_type,
                             os.path.join(pred_stats_dir, 'pred_stats_bars.png'))
        plot_pred_stats_trajectory(all_pred_stats, args.model_type,
                                   os.path.join(pred_stats_dir, 'pred_stats_trajectory.png'))

    # Summary barplot
    if all_sign_corrected:
        plot_summary_barplot(
            all_sign_corrected, all_alignment, args.model_type,
            os.path.join(summary_dir, 'summary_barplot.png'))

    # Summary CSV: one row per scale, pred stats + deepest-layer alignment.
    summary_rows = []
    for scale in available_scales:
        pred_path = os.path.join(data_dir, 'json', f'pred_stats_{scale}.json')
        if os.path.exists(pred_path):
            with open(pred_path) as f:
                row = json.load(f)
            if scale in all_alignment:
                max_layer = max(all_alignment[scale].keys())
                row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
                row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
            summary_rows.append(row)

    if summary_rows:
        csv_dir = os.path.join(output_dir, 'csv')
        os.makedirs(csv_dir, exist_ok=True)
        pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False)

    # Accuracy charts
    if all_pred_stats:
        acc_dir = os.path.join(plots_dir, 'accuracy')
        logger.info("\n--- Accuracy Charts ---")
        run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir)

    # Unify y-axis across scales for per-scale trajectory plots
    logger.info("\n--- Unifying Y-axis ---")
    run_unify_ylim(data_dir, plots_dir, args.model_type)

    # All-layer heatmaps + PCA (from saved CSVs / NPZ)
    logger.info("\n--- All-Layer Heatmaps ---")
    run_all_layer_heatmaps(data_dir, args.model_type, available_scales)

    logger.info("\n--- All-Layer PCA ---")
    run_all_layer_pca(data_dir, args.model_type, available_scales)

    logger.info(f"\n=== Merge Complete ===\nResults in: {output_dir}")
def run_merge_extended(args):
    """Generate cross-scale plots for new / merge-only model_types.

    - Runnable types (molmo_big, qwen_big, qwen_super, big_trio):
      loads all data from results/{model_type}/ and saves plots there.
    - Merge-only types (molmo_all, qwen_all):
      loads per-scale data from the respective source directories,
      saves all cross-scale plots to results/{model_type}/.
    """
    is_merge_only = args.model_type in MERGE_ONLY_CONFIGS

    # ── Determine scale order and data source strategy ────────────────────────
    if is_merge_only:
        mc = MERGE_ONLY_CONFIGS[args.model_type]
        scale_order = mc['scale_order']
        scale_sources = mc['scale_sources']

        logger.info(f"\n=== MERGE-ONLY mode: {args.model_type} ===")
        logger.info("Checking required source directories...")
        sources_ok = _check_merge_only_sources(args.output_dir, args.model_type)
        if not sources_ok:
            # Proceed anyway; plots will simply omit the missing scales.
            logger.warning(
                f"\n[WARNING] One or more source directories are missing or incomplete.\n"
                f" Cross-scale plots for '{args.model_type}' may be partial.\n"
                f" Run the missing model types first (see warnings above), then retry merge."
            )
    else:
        scale_order = SCALE_ORDERS_NEW.get(
            args.model_type, list(MODEL_CONFIGS_NEW[args.model_type]))
        scale_sources = None  # all data lives in results/{model_type}/

    available_scales = [s for s in scale_order if s in args.scales]
    logger.info(f"Merging scales (in order): {available_scales}")

    out_dir = os.path.join(args.output_dir, args.model_type)
    plots_dir = os.path.join(out_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    # ── Load per-scale data ───────────────────────────────────────────────────
    all_sign_corrected = {}
    all_sign_corrected_bc = {}
    all_within_cat = {}
    all_within_cat_bc = {}
    all_alignment = {}
    all_pred_stats = []
    all_cat_validity = {}
    all_delta_heatmaps = {}
    all_delta_heatmaps_bc = {}

    for scale in available_scales:
        if is_merge_only:
            (sc, sc_bc, wc, wc_bc, align,
             pred_stat, cat_validity, dh, dh_bc) = _load_scale_data_multi(
                args.output_dir, args.model_type, scale, scale_sources)
        else:
            src_dir = out_dir
            sc = load_scale_consistency(src_dir, scale, 'all_pairs')
            sc_bc = load_scale_consistency(src_dir, scale, 'both_correct')
            wc = load_within_cat_consistency(src_dir, scale, 'all_pairs')
            wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct')
            align = load_scale_alignment(src_dir, scale)

            pred_stat = None
            pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json')
            if os.path.exists(pred_path):
                with open(pred_path) as f:
                    pred_stat = json.load(f)

            cat_validity = None
            cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json')
            if os.path.exists(cv_path):
                with open(cv_path) as f:
                    cat_validity = json.load(f)

            dh = load_delta_heatmaps(src_dir, scale, 'all_pairs')
            dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct')

        if sc:
            all_sign_corrected[scale] = sc
        if sc_bc:
            all_sign_corrected_bc[scale] = sc_bc
        if wc:
            all_within_cat[scale] = wc
        if wc_bc:
            all_within_cat_bc[scale] = wc_bc
        if align:
            all_alignment[scale] = align
        if pred_stat is not None:
            all_pred_stats.append(pred_stat)
        if cat_validity is not None:
            all_cat_validity[scale] = cat_validity
        if dh:
            all_delta_heatmaps[scale] = dh
        if dh_bc:
            all_delta_heatmaps_bc[scale] = dh_bc

        logger.info(f" Loaded data for '{scale}'" +
                    (f" (from '{scale_sources[scale]}')" if is_merge_only else ""))

    # ── Cross-scale plots ─────────────────────────────────────────────────────
    for condition, sc_data, wc_data, dh_data, tag_label in [
        ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
        ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'),
    ]:
        cond_dir = os.path.join(plots_dir, condition)
        sc_dir = os.path.join(cond_dir, 'sign_corrected')
        wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
        dt_dir = os.path.join(cond_dir, 'delta_trajectory')
        os.makedirs(sc_dir, exist_ok=True)
        os.makedirs(wc_dir, exist_ok=True)
        os.makedirs(dt_dir, exist_ok=True)

        # Cross-scale overlays require at least two scales' worth of data.
        if len(sc_data) > 1:
            plot_cross_scale_consistency(
                sc_data, args.model_type,
                os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
                title_prefix=f'Sign-Corrected ({tag_label})')

        if len(wc_data) > 1:
            plot_cross_scale_within_cat_consistency(
                wc_data, args.model_type,
                os.path.join(wc_dir, 'cross_scale_within_cat.png'))

        if dh_data:
            plot_delta_trajectory(
                dh_data, args.model_type,
                os.path.join(dt_dir, 'delta_trajectory.png'))

    # ── Alignment and prediction stats ────────────────────────────────────────
    all_cond_dir = os.path.join(plots_dir, 'all')
    ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
    pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats')
    summary_dir = os.path.join(all_cond_dir, 'summary')
    os.makedirs(ca_dir, exist_ok=True)
    os.makedirs(pred_stats_dir, exist_ok=True)
    os.makedirs(summary_dir, exist_ok=True)

    if len(all_alignment) > 1:
        plot_cross_scale_alignment(
            all_alignment, args.model_type,
            os.path.join(ca_dir, 'cross_scale_alignment.png'))

    if all_pred_stats:
        plot_pred_stats_bars(
            all_pred_stats, args.model_type,
            os.path.join(pred_stats_dir, 'pred_stats_bars.png'))
        plot_pred_stats_trajectory(
            all_pred_stats, args.model_type,
            os.path.join(pred_stats_dir, 'pred_stats_trajectory.png'))

    if all_sign_corrected:
        plot_summary_barplot(
            all_sign_corrected, all_alignment, args.model_type,
            os.path.join(summary_dir, 'summary_barplot.png'))

    # ── Summary CSV ───────────────────────────────────────────────────────────
    summary_rows = []
    for scale in available_scales:
        ps = next((p for p in all_pred_stats if p.get('scale') == scale), None)
        if ps is None:
            continue
        row = dict(ps)
        if scale in all_alignment:
            # Report the deepest layer's alignment vs. its permutation control.
            max_layer = max(all_alignment[scale].keys())
            row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
            row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
        summary_rows.append(row)
    if summary_rows:
        csv_dir = os.path.join(out_dir, 'csv')
        os.makedirs(csv_dir, exist_ok=True)
        pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False)

    # ── Accuracy charts ───────────────────────────────────────────────────────
    if all_pred_stats:
        acc_dir = os.path.join(plots_dir, 'accuracy')
        logger.info("\n--- Accuracy Charts ---")
        run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir)

    # ── Unify y-axis ──────────────────────────────────────────────────────────
    # For merge-only types, per-scale JSON files span multiple source dirs,
    # so run_unify_ylim (which expects all JSON in one dir) is skipped.
    if not is_merge_only:
        logger.info("\n--- Unifying Y-axis ---")
        run_unify_ylim(out_dir, plots_dir, args.model_type)
    else:
        logger.info("\n--- Skipping y-axis unification (per-scale data spans multiple source dirs) ---")

    # ── All-layer heatmaps + PCA ──────────────────────────────────────────────
    if not is_merge_only:
        logger.info("\n--- All-Layer Heatmaps ---")
        run_all_layer_heatmaps(out_dir, args.model_type, available_scales)
        logger.info("\n--- All-Layer PCA ---")
        run_all_layer_pca(out_dir, args.model_type, available_scales)
    else:
        # Merge-only types: NPZ/CSV files live in separate source directories
        from collections import defaultdict as _defaultdict
        mc_cfg = MERGE_ONLY_CONFIGS[args.model_type]
        src_to_scales = _defaultdict(list)
        for scale in available_scales:
            src_to_scales[mc_cfg['scale_sources'][scale]].append(scale)

        logger.info("\n--- All-Layer Heatmaps (per source) ---")
        for src_key, src_scales in src_to_scales.items():
            run_all_layer_heatmaps(
                os.path.join(args.output_dir, src_key), src_key, src_scales)

        logger.info("\n--- All-Layer PCA (per source) ---")
        for src_key, src_scales in src_to_scales.items():
            run_all_layer_pca(
                os.path.join(args.output_dir, src_key), src_key, src_scales)

    logger.info(f"\n=== Merge Complete ===\nResults saved to: {out_dir}")
─────────────────────────────────────────────────────── + if all_pred_stats: + acc_dir = os.path.join(plots_dir, 'accuracy') + logger.info("\n--- Accuracy Charts ---") + run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir) + + # ── Unify y-axis ────────────────────────────────────────────────────────── + # For merge-only types, per-scale JSON files span multiple source dirs, + # so run_unify_ylim (which expects all JSON in one dir) is skipped. + if not is_merge_only: + logger.info("\n--- Unifying Y-axis ---") + run_unify_ylim(out_dir, plots_dir, args.model_type) + else: + logger.info("\n--- Skipping y-axis unification (per-scale data spans multiple source dirs) ---") + + # ── All-layer heatmaps + PCA ────────────────────────────────────────────── + if not is_merge_only: + logger.info("\n--- All-Layer Heatmaps ---") + run_all_layer_heatmaps(out_dir, args.model_type, available_scales) + logger.info("\n--- All-Layer PCA ---") + run_all_layer_pca(out_dir, args.model_type, available_scales) + else: + # Merge-only types: NPZ/CSV files live in separate source directories + from collections import defaultdict as _defaultdict + mc_cfg = MERGE_ONLY_CONFIGS[args.model_type] + src_to_scales = _defaultdict(list) + for scale in available_scales: + src_to_scales[mc_cfg['scale_sources'][scale]].append(scale) + + logger.info("\n--- All-Layer Heatmaps (per source) ---") + for src_key, src_scales in src_to_scales.items(): + run_all_layer_heatmaps( + os.path.join(args.output_dir, src_key), src_key, src_scales) + + logger.info("\n--- All-Layer PCA (per source) ---") + for src_key, src_scales in src_to_scales.items(): + run_all_layer_pca( + os.path.join(args.output_dir, src_key), src_key, src_scales) + + logger.info(f"\n=== Merge Complete ===\nResults saved to: {out_dir}") + + +def main(): + # Default scales per legacy model_type (new types use their own defaults) + _LEGACY_DEFAULT_SCALES = { + 'molmo': ['vanilla', '80k', '400k', '800k', '2m'], + 'nvila': 
['vanilla', '80k', '400k', '800k', '2m'], + 'qwen': ['vanilla', '80k', '400k', '800k', '2m'], + 'nvila_synthetic': ['80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '400k-5pct'], + } + + parser = argparse.ArgumentParser( + description='Swap Analysis — Spatial Representation Probing', + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument('--data_path', type=str, + default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv') + parser.add_argument('--model_type', type=str, required=True, + choices=ALL_MODEL_TYPES, + help=( + 'Legacy: molmo | nvila | qwen\n' + 'Synthetic: nvila_synthetic\n' + 'New large: molmo_big | qwen_big | qwen_super | big_trio\n' + 'Merge-only (--merge required): molmo_all | qwen_all' + )) + parser.add_argument('--scales', type=str, nargs='+', default=None, + help='Scales to process (default: all for the given model_type).') + parser.add_argument('--output_dir', type=str, + default='/data/shared/Qwen/experiments/swap_analysis/results') + parser.add_argument('--device', type=str, default='cuda') + parser.add_argument('--seed', type=int, default=42) + parser.add_argument('--merge', action='store_true', + help='Merge mode: generate cross-scale plots from saved per-scale data.') + parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir', + help='Override output dir for cross-scale plots (NVILA dual-merge).') + parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer', + help='Disable automatic inclusion of roborefer scale for nvila.') + parser.add_argument('--skip-cross-group', action='store_true') + parser.add_argument('--max-samples-per-category', type=int, default=200, + dest='max_samples_per_category') + parser.add_argument('--no-filtering', action='store_true', dest='no_filtering', + help='Disable Unknown/empty filtering for far/close reference objects.' 
+ ' By default, Unknown candidates are removed before sampling.') + parser.add_argument('--question-type', type=str, default='mcq', + choices=['mcq', 'short'], dest='question_type', + help='mcq (default): MCQ A/B format with letter answers; ' + 'short: original "Answer with only one word." format.') + + args = parser.parse_args() + + # ── Per-model-type log file ─────────────────────────────────────────────── + log_path = _setup_file_logging(args.model_type) + logger.info(f"Logging to: {log_path}") + + # ── Validate: merge-only types require --merge ──────────────────────────── + if args.model_type in MERGE_ONLY_CONFIGS and not args.merge: + parser.error( + f"'{args.model_type}' is a merge-only type. Add --merge to run it.\n" + f" Example: python swap_analysis.py --model_type {args.model_type} --merge" + ) + + # ── Default scales ──────────────────────────────────────────────────────── + if args.scales is None: + if args.model_type in MERGE_ONLY_CONFIGS: + args.scales = MERGE_ONLY_CONFIGS[args.model_type]['scale_order'] + elif args.model_type in MODEL_CONFIGS_NEW: + args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys()) + else: + args.scales = _LEGACY_DEFAULT_SCALES.get( + args.model_type, ['vanilla', '80k', '400k', '800k', '2m']) + + # Legacy nvila: auto-include roborefer + if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer: + args.scales.append('roborefer') + + np.random.seed(args.seed) + torch.manual_seed(args.seed) + random.seed(args.seed) + + # ── Merge mode ─────────────────────────────────────────────────────────── + if args.merge: + logger.info("\n=== MERGE MODE ===") + if args.model_type in MODEL_CONFIGS_NEW or args.model_type in MERGE_ONLY_CONFIGS: + run_merge_extended(args) + else: + run_merge(args) + return + + # ── Inference mode ──────────────────────────────────────────────────────── + logger.info("\n=== Loading & Creating Swap Pairs ===") + swap_pairs = load_swap_pairs(args.data_path, args.seed, + 
filter_unknown=not args.no_filtering, + question_type=args.question_type) + + quads = [] + if not args.skip_cross_group: + try: + hf_cache = build_hf_bbox_cache() + quads = create_cross_group_quads(swap_pairs, hf_cache, + question_type=args.question_type) + except Exception as e: + logger.warning(f"Cross-group setup failed: {e}. Skipping.") + quads = [] + + # ── Resolve config for the chosen model_type ───────────────────────────── + if args.model_type in MODEL_CONFIGS_NEW: + model_configs = MODEL_CONFIGS_NEW[args.model_type] + else: + model_configs = MODEL_CONFIGS[args.model_type] + + for scale in args.scales: + if scale not in model_configs: + logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.") + continue + + # Validate model path exists (skip HF IDs that start with org/ prefix) + if args.model_type in MODEL_CONFIGS_NEW: + _, raw_path = model_configs[scale] + else: + raw_path = model_configs[scale] + if not os.path.isabs(raw_path) and not raw_path.startswith(('Qwen/', 'allenai/')): + if not os.path.exists(raw_path): + logger.warning(f"Model path not found: {raw_path} (scale='{scale}'), skipping.") + continue + + try: + process_scale(args, scale, swap_pairs, quads) + except Exception as e: + logger.error(f"Failed {args.model_type} - {scale}: {e}") + import traceback + traceback.print_exc() + continue + + logger.info(f"\n{'='*60}") + logger.info("=== All scales complete ===") + logger.info(f"Results: {os.path.join(args.output_dir, args.model_type)}") + logger.info(f"{'='*60}") + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/swap_analysis_both_scripts/run_molmo_both.sh b/swap_analysis_both_scripts/run_molmo_both.sh new file mode 100644 index 0000000000000000000000000000000000000000..2f2cb04a3d906298da6f5fc61edea9915907a469 --- /dev/null +++ b/swap_analysis_both_scripts/run_molmo_both.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +SA_SCRIPT="$_DIR/swap_analysis/run_molmo.sh" +SAS_SCRIPT="$_DIR/swap_analysis_synthetic/run_molmo.sh" + +echo "=========================================" +echo " [1/2] swap_analysis/run_molmo.sh" +echo "=========================================" +bash "$SA_SCRIPT" + +echo "" +echo "Waiting 10 seconds before synthetic run..." +sleep 10 + +echo "=========================================" +echo " [2/2] swap_analysis_synthetic/run_molmo.sh" +echo "=========================================" +bash "$SAS_SCRIPT" + +echo "" +echo "ALL DONE (real + synthetic): molmo" diff --git a/swap_analysis_both_scripts/run_nvila_both.sh b/swap_analysis_both_scripts/run_nvila_both.sh new file mode 100644 index 0000000000000000000000000000000000000000..18e10004e1d6a0263e0394a95dd3560943e1d881 --- /dev/null +++ b/swap_analysis_both_scripts/run_nvila_both.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +SA_SCRIPT="$_DIR/swap_analysis/run_nvila.sh" +SAS_SCRIPT="$_DIR/swap_analysis_synthetic/run_nvila.sh" + +echo "=========================================" +echo " [1/2] swap_analysis/run_nvila.sh" +echo "=========================================" +bash "$SA_SCRIPT" + +echo "" +echo "Waiting 10 seconds before synthetic run..." +sleep 10 + +echo "=========================================" +echo " [2/2] swap_analysis_synthetic/run_nvila.sh" +echo "=========================================" +bash "$SAS_SCRIPT" + +echo "" +echo "ALL DONE (real + synthetic): nvila" diff --git a/swap_analysis_both_scripts/run_qwen_both.sh b/swap_analysis_both_scripts/run_qwen_both.sh new file mode 100644 index 0000000000000000000000000000000000000000..c0fc4d4afd9a011e3be4d0d95b1cc5f55e6407ba --- /dev/null +++ b/swap_analysis_both_scripts/run_qwen_both.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +SA_SCRIPT="$_DIR/swap_analysis/run_qwen.sh" +SAS_SCRIPT="$_DIR/swap_analysis_synthetic/run_qwen.sh" + +echo "=========================================" +echo " [1/2] swap_analysis/run_qwen.sh" +echo "=========================================" +bash "$SA_SCRIPT" + +echo "" +echo "Waiting 10 seconds before synthetic run..." +sleep 10 + +echo "=========================================" +echo " [2/2] swap_analysis_synthetic/run_qwen.sh" +echo "=========================================" +bash "$SAS_SCRIPT" + +echo "" +echo "ALL DONE (real + synthetic): qwen" diff --git a/swap_analysis_cvbench/run_molmo.sh b/swap_analysis_cvbench/run_molmo.sh new file mode 100644 index 0000000000000000000000000000000000000000..0f1628fb84b8082055cd4c906a9794f6dd8d60a2 --- /dev/null +++ b/swap_analysis_cvbench/run_molmo.sh @@ -0,0 +1,76 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n molmo python" +MODEL="molmo" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +OUTPUT_DIR="$SCRIPT_DIR/$QUESTION_TYPE/saved_data" +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" "$OUTPUT_DIR" + +# GPU plan: Molmo ~25GB each +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(0 1 2 3 4) + +echo "=========================================" +echo " Molmo CV-Bench Swap Analysis: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + --output_dir "$OUTPUT_DIR" \ + 
--skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) + + # Stagger launches to avoid I/O contention on large CV-Bench TSV files + sleep 30 +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " Molmo CV-Bench Swap Analysis: Running merge" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name molmo \ + --question-type $QUESTION_TYPE \ + --output_dir "$OUTPUT_DIR" \ + 2>&1 | tee "${STDOUT_LOG_DIR}/molmo_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/molmo_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/molmo/" diff --git a/swap_analysis_cvbench/run_nvila.sh b/swap_analysis_cvbench/run_nvila.sh new file mode 100644 index 0000000000000000000000000000000000000000..8beb065b4d4059facb77f7047a42f311f2f8fa16 --- /dev/null +++ b/swap_analysis_cvbench/run_nvila.sh @@ -0,0 +1,87 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +MODEL="nvila" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +OUTPUT_DIR="$SCRIPT_DIR/$QUESTION_TYPE/saved_data" +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" "$OUTPUT_DIR" + +# GPU plan: NVILA ~8GB each +SCALES=("vanilla" "80k" "400k" "800k" "2m" "roborefer") +GPUS=(2 3 4 5 6 7) + +echo 
"=========================================" +echo " NVILA CV-Bench Swap Analysis: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + --output_dir "$OUTPUT_DIR" \ + --skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) + + # Stagger launches to avoid I/O contention on large CV-Bench TSV files + sleep 30 +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. 
Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " NVILA CV-Bench Swap Analysis: Merge 1/2 (without roborefer)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name nvila \ + --question-type $QUESTION_TYPE \ + --output_dir "$OUTPUT_DIR" \ + 2>&1 | tee "${STDOUT_LOG_DIR}/nvila_merge_stdout.log" + +echo "=========================================" +echo " NVILA CV-Bench Swap Analysis: Merge 2/2 (with roborefer)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m roborefer \ + --merge --group-name nvila_with_roborefer \ + --question-type $QUESTION_TYPE \ + --output_dir "$OUTPUT_DIR" \ + 2>&1 | tee "${STDOUT_LOG_DIR}/nvila_with_roborefer_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/nvila_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/nvila/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/nvila_with_roborefer/" diff --git a/swap_analysis_cvbench/run_nvila_st.sh b/swap_analysis_cvbench/run_nvila_st.sh new file mode 100644 index 0000000000000000000000000000000000000000..ce52e1ba47792bab23492195cd57d3a63eb5c4fb --- /dev/null +++ b/swap_analysis_cvbench/run_nvila_st.sh @@ -0,0 +1,85 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +OUTPUT_DIR="$SCRIPT_DIR/$QUESTION_TYPE/saved_data" +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" "$OUTPUT_DIR" + +# 3 checkpoints from MCQ-synthetic-trained NVILA run: +# 80k-st → checkpoint-1250 (80k training steps) +# 400k-st → checkpoint-6250 
(400k training steps) +# 800k-st → checkpoint-12500 (800k training steps) +# +# All from: NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354 +# +# GPU assignment (NVILA ~8GB each): +# GPU 0: nvila_st/80k-st +# GPU 1: nvila_st/400k-st +# GPU 2: nvila_st/800k-st + +declare -a MODEL_TYPES=("nvila_st" "nvila_st" "nvila_st") +declare -a SCALES=( "80k-st" "400k-st" "800k-st") +declare -a GPUS=( 0 1 2) + +echo "=========================================" +echo " NVILA-ST CV-Bench: Launching 3 checkpoints in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + mtype="${MODEL_TYPES[$i]}" + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${mtype}_${scale}_stdout.log" + + echo "[GPU $gpu] ${mtype}/${scale} -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $mtype \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + --output_dir "$OUTPUT_DIR" \ + --skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) + + # Stagger launches to avoid I/O contention on large CV-Bench TSV files + sleep 30 +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + label="${MODEL_TYPES[$i]}/${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $label (PID $pid) - SUCCESS" + else + echo "[FAIL] $label (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED process(es) failed. 
Check logs in $STDOUT_LOG_DIR" +fi + +echo "" +echo "ALL DONE (inference only)" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/nvila_st_*/" +echo "" +echo "To merge with nvila baseline (vanilla/80k/400k/800k/2m), run:" +echo " conda run --no-capture-output -n vila python $SCRIPT \\" +echo " --model_type nvila_st_compare --merge --group-name nvila_st_compare \\" +echo " --question-type $QUESTION_TYPE --output_dir $OUTPUT_DIR" +echo "" +echo " (Cross-alignment plots auto-included if all scales have Phase B data.)" +echo " (Run Phase B later by re-running without --skip-phase-b.)" diff --git a/swap_analysis_cvbench/run_qwen.sh b/swap_analysis_cvbench/run_qwen.sh new file mode 100644 index 0000000000000000000000000000000000000000..0db42ec41b876f779ac63b8d71b5705416f2aee5 --- /dev/null +++ b/swap_analysis_cvbench/run_qwen.sh @@ -0,0 +1,78 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="/usr/bin/python3" +MODEL="qwen" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +OUTPUT_DIR="$SCRIPT_DIR/$QUESTION_TYPE/saved_data" +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" "$OUTPUT_DIR" + +# GPU plan: Qwen ~10GB each +# GPU 5: vanilla GPU 6: 80k + 400k GPU 7: 800k + 2m +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(5 6 6 7 7) + +echo "=========================================" +echo " Qwen CV-Bench Swap Analysis: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ 
+ --output_dir "$OUTPUT_DIR" \ + --skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) + + # Stagger launches to avoid I/O contention on large CV-Bench TSV files + # (each process reads the full 3D TSV with 3600 base64 images before GPU use) + sleep 30 +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " Qwen CV-Bench Swap Analysis: Running merge" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name qwen \ + --question-type $QUESTION_TYPE \ + --output_dir "$OUTPUT_DIR" \ + 2>&1 | tee "${STDOUT_LOG_DIR}/qwen_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/qwen_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/qwen/" diff --git a/swap_analysis_cvbench/swap_analysis.py b/swap_analysis_cvbench/swap_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..deb08252f1bc038a325421515c3ac9b77884a707 --- /dev/null +++ b/swap_analysis_cvbench/swap_analysis.py @@ -0,0 +1,4073 @@ +#!/usr/bin/env python3 +""" +Swap Analysis: Minimal Pair Probing for Spatial Representations + +Creates minimal pairs by swapping obj1<->obj2 in spatial questions: + Original: "Is A to the left or right of B?" -> left + Swapped: "Is B to the left or right of A?" 
-> right + +Supported model types +--------------------- + Legacy (Qwen2.5-VL-3B scale experiments): + molmo | nvila | qwen + Synthetic-MCQ-trained NVILA: + nvila_st : NVILA trained with MCQ synthetic mix (80k-st / 400k-st / 800k-st) + New large models: + molmo_big : Molmo2-8B + qwen_big : Qwen3-VL-32B-Instruct + qwen_super : Qwen3-VL-235B-A22B-Instruct + big_trio : Molmo2-8B + RoboRefer + Qwen3-VL-32B + Merge-only (--merge required): + molmo_all : molmo (vanilla→2m) + molmo_big (molmo2) + qwen_all : qwen (vanilla→2m) + qwen_big (qwen3_32b) + nvila_st_compare : nvila (vanilla→2m) + nvila_st (80k-st / 400k-st / 800k-st) + +Usage examples +-------------- + # Legacy model (Qwen2.5-VL-3B scale) + python swap_analysis.py --model_type qwen + + # New large model (Qwen3-VL-32B) + conda run -n qwen3 python swap_analysis.py --model_type qwen_big + + # Cross-family merge (combine qwen + qwen_big results) + conda run -n qwen3 python swap_analysis.py --model_type qwen_all --merge + +Analyses: + 1. Difference vectors: delta = feature(swapped) - feature(original) + 2. Within-category delta consistency (do all left->right swaps point same direction?) + 3. Sign-corrected group consistency (align opposite categories by flipping) + 4. Cross-group delta alignment (delta_vertical vs delta_distance) for perspective bias + 5. Delta-based 6x6 similarity heatmap (mean delta per category as representation) + 6. Prediction stats visualization (bar chart + cross-scale trajectory) + 7. Both-correct filtering for delta analysis + 8. PCA visualization of per-sample embeddings + 9. Scaling effects on all of the above + +Fixes applied: + Fix 1: "Answer with only one word." 
appended to all prompts + Fix 2: Synonym handling (below/beneath->under, near/nearby->close, distant->far) + Fix 4: Cross-group quads index matching via string normalization + Fix 5: Within-category + sign-corrected delta consistency (replaces wrong group-level) + Fix 6: Prediction stats bar chart + cross-scale line plot + Fix 7: Delta-based 6x6 heatmap and trajectory + Fix 8: Category validity check + both-correct delta filtering +""" + +import ast +import os +import sys +import json +import argparse +import base64 +import logging +import random +import re +from io import BytesIO +from collections import defaultdict +from typing import Dict, List, Tuple, Optional, Any +from abc import ABC, abstractmethod + +import torch +import numpy as np +import pandas as pd +from PIL import Image +from tqdm import tqdm +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D # noqa: F401 +import seaborn as sns +from sklearn.metrics.pairwise import cosine_similarity +from sklearn.decomposition import PCA + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +_HERE = os.path.dirname(os.path.abspath(__file__)) + +# ── Local HuggingFace cache helpers ────────────────────────────────────────── + +HF_HUB_DIR = '/data/shared/Qwen/mydisk/huggingface/hub' + + +def resolve_local_path(model_path: str) -> str: + """Return local snapshot path for a HF model ID if cached, else return the ID unchanged.""" + if os.path.isabs(model_path): + return model_path + cache_name = 'models--' + model_path.replace('/', '--') + snapshots_dir = os.path.join(HF_HUB_DIR, cache_name, 'snapshots') + if os.path.isdir(snapshots_dir): + snapshots = sorted(os.listdir(snapshots_dir)) + if snapshots: + local_path = os.path.join(snapshots_dir, snapshots[-1]) + logger.info(f"Local cache found: {model_path} → {local_path}") + return local_path + logger.warning( + f"Model not found 
in local cache: '{model_path}'\n" + f" Expected at: {snapshots_dir}\n" + f" Will fall back to online HuggingFace Hub download.\n" + f" To cache locally first: python -c \"from huggingface_hub import snapshot_download; " + f"snapshot_download('{model_path}', cache_dir='{HF_HUB_DIR}')\"" + ) + return model_path + + +def _setup_file_logging(name: str, log_dir: str) -> str: + """Attach a named FileHandler to the root logger. + + Writes to {log_dir}/{name}.log (append mode). + Returns the log file path. + """ + os.makedirs(log_dir, exist_ok=True) + log_path = os.path.join(log_dir, f'{name}.log') + fh = logging.FileHandler(log_path, mode='a', encoding='utf-8') + fh.setLevel(logging.INFO) + fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) + logging.getLogger().addHandler(fh) + return log_path + + +def get_model_key(model_type: str, scale: str) -> str: + """Return VLM key for (model_type, scale). E.g. nvila_80k, nvila_synthetic_80k-5pct.""" + return f"{model_type}_{scale}" + + +# ============================================================================ +# Constants +# ============================================================================ + +CATEGORY_ORDER = ['left', 'right', 'above', 'below', 'far', 'close'] + +OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'below', 'below': 'above', + 'under': 'above', # short-mode vertical answer + 'far': 'close', 'close': 'far', +} + +# Opposite map for short-answer mode (vertical uses 'above'/'under', not 'above'/'below') +SHORT_OPPOSITE_MAP = { + 'left': 'right', 'right': 'left', + 'above': 'below', 'below': 'above', + 'far': 'close', 'close': 'far', +} + +GROUP_MAP = { + 'left': 'horizontal', 'right': 'horizontal', + 'above': 'vertical', 'below': 'vertical', + 'far': 'distance', 'close': 'distance', +} + +GROUP_ORDER = ['horizontal', 'vertical', 'distance'] + +# Fix 5: Canonical categories for sign-corrected consistency +CANONICAL_CATEGORIES = { + 'horizontal': 'left', + 
'vertical': 'above', + 'distance': 'far', +} + +# Fix 2: Synonyms for answer matching +# 'below' is now primary; 'under'/'beneath' recognized as synonyms +SYNONYMS = { + 'below': ['under', 'beneath'], + 'close': ['near', 'nearby'], + 'far': ['distant'], +} + +# ── MCQ question templates (option order alternated per pair for A/B bias control) ── +_Q_TAIL_MCQ = "Answer with a single letter A or B." +MCQ_TEMPLATES = { + 'horizontal': { + 'left_first': "Is the {obj1} to the left or right of the {obj2}? (A) left (B) right " + _Q_TAIL_MCQ, + 'right_first': "Is the {obj1} to the left or right of the {obj2}? (A) right (B) left " + _Q_TAIL_MCQ, + }, + 'vertical': { + 'above_first': "Is the {obj1} above or below the {obj2}? (A) above (B) below " + _Q_TAIL_MCQ, + 'below_first': "Is the {obj1} above or below the {obj2}? (A) below (B) above " + _Q_TAIL_MCQ, + }, + 'distance': { + 'far_first': "Compared to {ref}, is {subj} far or close from you? (A) far (B) close " + _Q_TAIL_MCQ, + 'close_first': "Compared to {ref}, is {subj} far or close from you? 
(A) close (B) far " + _Q_TAIL_MCQ, + }, +} +MCQ_LETTER = { + 'horizontal': { + 'left_first': {'left': 'a', 'right': 'b'}, + 'right_first': {'left': 'b', 'right': 'a'}, + }, + 'vertical': { + 'above_first': {'above': 'a', 'below': 'b'}, + 'below_first': {'above': 'b', 'below': 'a'}, + }, + 'distance': { + 'far_first': {'far': 'a', 'close': 'b'}, + 'close_first': {'far': 'b', 'close': 'a'}, + }, +} + +SCALE_COLORS = { + 'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c', + '800k': '#d62728', '2m': '#9467bd', 'roborefer':'#8c564b', + # New large models + 'molmo2': '#17becf', # cyan + 'qwen3_32b': '#bcbd22', # yellow-green + 'qwen3_235b': '#e377c2', # pink + # Synthetic-mix NVILA at 80k scale (shades of teal, light→dark by mix ratio) + '80k-5pct': '#b2dfdb', # very light teal + '80k-10pct': '#00b894', # teal + '80k-20pct': '#00897b', # darker teal + '80k-30pct': '#004d40', # deep teal + # Synthetic-mix NVILA at 400k scale + '400k-5pct': '#66bb6a', # light green (near 400k's #2ca02c) + # Synthetic-mix NVILA at 800k scale + '800k-5pct': '#ef9a9a', # light red (near 800k's #d62728) + # MCQ-synthetic-trained NVILA (darker shade of the matching base scale) + '80k-st': '#b85a00', # dark orange (darker than 80k #ff7f0e) + '400k-st': '#1a6b1a', # dark green (darker than 400k #2ca02c) + '800k-st': '#911b1b', # dark red (darker than 800k #d62728) +} + +# Canonical scale ordering used by accuracy/ylim plots (add new scales here to control x-axis) +SCALE_ORDER = [ + 'vanilla', '80k', '80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '80k-st', + '400k', '400k-5pct', '400k-st', '800k', '800k-5pct', '800k-st', '2m', 'roborefer', + 'molmo2', 'qwen3_32b', 'qwen3_235b', +] + +# Human-readable legend labels (only entries that differ from the key are needed) +SCALE_DISPLAY_NAMES = { + '80k-5pct': '80k 5%', + '80k-10pct': '80k 10%', + '80k-20pct': '80k 20%', + '80k-30pct': '80k 30%', + '400k-5pct': '400k 5%', + '800k-5pct': '800k 5%', + '80k-st': '80k ST', + '400k-st': '400k ST', + 
'800k-st': '800k ST', +} +# Category colors aligned with group: horizontal=orange, vertical=green, distance=purple +CAT_COLORS = { + 'left': '#ff7f0e', 'right': '#ffbb78', # horizontal → orange + 'above': '#2ca02c', 'below': '#98df8a', # vertical → green + 'far': '#9467bd', 'close': '#c5b0d5', # distance → purple +} +GROUP_COLORS = { + 'horizontal': '#ff7f0e', + 'vertical': '#2ca02c', + 'distance': '#9467bd', +} + +# Short-answer (non-MCQ) question templates +SHORT_TEMPLATES = { + 'horizontal': "Is the {obj1} to the left or right of the {obj2}? Answer with only one word.", + 'vertical': "Is the {obj1} above or below the {obj2}? Answer with only one word.", + 'distance': "Compared to {ref}, is {subj} far or close from you? Answer with only one word.", +} + +MODEL_CONFIGS = { + 'molmo': { + 'vanilla': 'allenai/Molmo-7B-O-0924', + '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_80k/unshared', + '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_400k/unshared', + '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_800k/unshared', + '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_2m/unshared', + }, + 'nvila': { + 'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B', + '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221', + '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221', + '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221', + '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632', + # '80k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-1250', + # '400k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-6250', + # '800k': 
'/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-12500', + # '2m': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-31250', + 'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model', + 'roborefer_depth': '/data/shared/Qwen/mydisk/RoboRefer_depth_model', # fill in actual path + }, + 'qwen': { + 'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct', + '80k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221', + '400k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221', + '800k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221', + '2m': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517', + }, + # NVILA trained with MCQ synthetic data mix (checkpoints at 80k/400k/800k steps) + 'nvila_st': { + '80k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-1250', + '400k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-6250', + '800k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-12500', + }, + # NVILA trained with synthetic data mixed in at different ratios + 'nvila_synthetic': { + '80k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-1250', + '80k-10pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_10PCT_80K-20260224_234537', + '80k-20pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_20PCT_80K-20260224_232347', + '80k-30pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_30PCT_80K-20260224_232347', + 
'400k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-6250', + '800k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-12500' + }, +} + +# ── New large / cross-family models ────────────────────────────────────────── +# Each scale maps to (ExtractorClassName, HF-model-ID-or-absolute-path). +# resolve_local_path() converts HF IDs to local snapshot dirs when cached. +MODEL_CONFIGS_NEW = { + 'molmo_big': { + 'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'), + }, + 'qwen_big': { + 'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'), + }, + 'qwen_super': { + 'qwen3_235b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-235B-A22B-Instruct'), + }, + 'big_trio': { + 'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'), + 'roborefer': ('RoboReferExtractor', '/data/shared/Qwen/mydisk/RoboRefer_model'), + 'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'), + }, +} + +# ── Merge-only: combine existing per-scale data from multiple source dirs ───── +MERGE_ONLY_CONFIGS = { + 'molmo_all': { + 'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'molmo2'], + 'scale_sources': { + 'vanilla': 'molmo', '80k': 'molmo', '400k': 'molmo', + '800k': 'molmo', '2m': 'molmo', 'molmo2': 'molmo_big', + }, + 'required_dirs': ['molmo', 'molmo_big'], + }, + 'qwen_all': { + 'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'qwen3_32b'], + 'scale_sources': { + 'vanilla': 'qwen', '80k': 'qwen', '400k': 'qwen', + '800k': 'qwen', '2m': 'qwen', 'qwen3_32b': 'qwen_big', + }, + 'required_dirs': ['qwen', 'qwen_big'], + }, + # Compare NVILA baselines against MCQ-synthetic-trained checkpoints + 'nvila_st_compare': { + 'scale_order': ['vanilla', '80k', '80k-st', '400k', '400k-st', '800k', '800k-st', '2m'], + 'scale_sources': { + 'vanilla': 'nvila', + '80k': 'nvila', '80k-st': 'nvila_st', + '400k': 'nvila', '400k-st': 'nvila_st', + '800k': 'nvila', '800k-st': 
'nvila_st', + '2m': 'nvila', + }, + 'required_dirs': ['nvila', 'nvila_st'], + }, + # Compare NVILA baselines against synthetic-mix checkpoints + 'nvila_synth_compare': { + 'scale_order': ['vanilla', '80k', '80k-5pct', '80k-10pct', '400k', '400k-5pct', '800k', '800k-5pct'], + 'scale_sources': { + 'vanilla': 'nvila', + '80k': 'nvila', + '80k-5pct': 'nvila_synthetic', + '80k-10pct': 'nvila_synthetic', + '400k': 'nvila', + '400k-5pct': 'nvila_synthetic', + '800k': 'nvila', + '800k-5pct': 'nvila_synthetic' + }, + 'required_dirs': ['nvila', 'nvila_synthetic'], + }, +} + +# Default scale run order for new runnable types +SCALE_ORDERS_NEW = { + 'molmo_big': ['molmo2'], + 'qwen_big': ['qwen3_32b'], + 'qwen_super': ['qwen3_235b'], + 'big_trio': ['molmo2', 'roborefer', 'qwen3_32b'], +} + +ALL_MODEL_TYPES = ( + list(MODEL_CONFIGS.keys()) + + list(MODEL_CONFIGS_NEW.keys()) + + list(MERGE_ONLY_CONFIGS.keys()) +) + + +# ============================================================================ +# Data Loading & Swap Pair Creation +# ============================================================================ + +# ── CV-Bench data paths ─────────────────────────────────────────────────────── +CV_BENCH_2D_TSV = '/data/shared/Qwen/vlm-lens-mod/spatial/CV-Bench/CV-Bench-2D-Relation-binary.tsv' +CV_BENCH_3D_TSV = '/data/shared/Qwen/vlm-lens-mod/spatial/CV-Bench/CV-Bench-3D.tsv' + +# Maps 2D relation strings to category keys +RELATION_TO_CATEGORY = { + 'above': 'above', 'below': 'below', + 'to the left of': 'left', 'to the right of': 'right', + 'left': 'left', 'right': 'right', +} + +# Regex to extract obj1, obj2 from CV-Bench 2D question text +_CV2D_OBJ_PATTERN = re.compile( + r'relative positions? of (.+?) and (.+?) 
in the image', + re.IGNORECASE, +) +_LEADING_THE = re.compile(r'^the\s+', re.IGNORECASE) + +OBJECT_PATTERNS = [ + re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), + re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE), + re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE), + re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), +] + + +def extract_objects(question: str) -> Tuple[str, str]: + for pattern in OBJECT_PATTERNS: + m = pattern.search(question) + if m: + return m.group(1).strip(), m.group(2).strip() + raise ValueError(f"Could not extract objects from: {question}") + + +def decode_base64_image(base64_str: str) -> Image.Image: + image_data = base64.b64decode(base64_str) + return Image.open(BytesIO(image_data)).convert('RGB') + + +# ============================================================================ +# Answer Matching (Fix 2: synonym support) +# ============================================================================ + +def find_earliest_position(text: str, word: str) -> int: + """Find earliest position of word or any of its synonyms in text.""" + positions = [] + pos = text.find(word) + if pos != -1: + positions.append(pos) + for syn in SYNONYMS.get(word, []): + pos = text.find(syn) + if pos != -1: + positions.append(pos) + return min(positions) if positions else -1 + + +def check_answer(generated_text: str, expected_category: str, mcq_map: dict = None) -> bool: + if not generated_text or not generated_text.strip(): + return False + text = generated_text.strip().lower() + expected = expected_category.lower() + opposite = OPPOSITE_MAP[expected] + + if mcq_map: + exp_letter = mcq_map.get(expected) + opp_letter = mcq_map.get(opposite) + # Standalone letter response (e.g. 
"A", "A.", "A)", "B") + if exp_letter and text in (exp_letter, exp_letter+'.', exp_letter+')', exp_letter+','): + return True + if opp_letter and text in (opp_letter, opp_letter+'.', opp_letter+')', opp_letter+','): + return False + else: + exp_letter = opp_letter = None + + # MCQ inline pattern "(a)"/"(b)" — variant-aware + mcq_exp = f'({exp_letter})' if exp_letter else None + mcq_opp = f'({opp_letter})' if opp_letter else None + + def earliest_with_mcq(word, mcq_pat=None): + positions = [] + pos = text.find(word) + if pos != -1: + positions.append(pos) + for syn in SYNONYMS.get(word, []): + pos = text.find(syn) + if pos != -1: + positions.append(pos) + if mcq_pat: + pos = text.find(mcq_pat) + if pos != -1: + positions.append(pos) + return min(positions) if positions else -1 + + pos_exp = earliest_with_mcq(expected, mcq_exp) + pos_opp = earliest_with_mcq(opposite, mcq_opp) + if pos_exp == -1: + return False + if pos_opp == -1: + return True + return pos_exp < pos_opp + + +# ============================================================================ +# Swap Pair Loading (Fix 1: prompt suffix) +# ============================================================================ + +def load_swap_pairs(tsv_path: str, seed: int = 42, filter_unknown: bool = True, + question_type: str = 'mcq') -> List[dict]: + """Load EmbSpatialBench TSV and create swap pairs for all samples. + + Args: + filter_unknown: If True (default), skip far/close pairs where target_object + is Unknown/empty, and remove Unknown/empty values from reference_object + candidates before sampling. Pairs with no valid candidates are dropped. + Use --no-filtering to disable. + question_type: 'short_answer' (default) uses the original "Answer with only one word." format; + 'mcq' uses MCQ A/B templates with letter answers. 
+ """ + rng = random.Random(seed) + df = pd.read_csv(tsv_path, sep='\t') + + pairs = [] + stats = defaultdict(lambda: {'total': 0, 'success': 0}) + + def _valid_obj(v): + return bool(v) and str(v).strip().lower() not in ('unknown', 'n/a', '') + + for _, row in df.iterrows(): + category = row['category'] + stats[category]['total'] += 1 + + try: + if category in ['left', 'right', 'above', 'under', 'below']: + obj1, obj2 = extract_objects(row['question']) + if category in ['left', 'right']: + grp = 'horizontal' + else: + grp = 'vertical' + + if question_type == 'short_answer': + # Single-word format; normalize 'under' → 'below' + if category == 'under': + category = 'below' + tmpl = SHORT_TEMPLATES[grp] + pair = { + 'index': row['index'], + 'question_id': str(row['question_id']), + 'image_base64': row['image'], + 'original_question': tmpl.format(obj1=obj1, obj2=obj2), + 'swapped_question': tmpl.format(obj1=obj2, obj2=obj1), + 'original_answer': category, + 'swapped_answer': SHORT_OPPOSITE_MAP[category], + 'group': grp, + 'category': category, + 'obj1': obj1, 'obj2': obj2, + 'mcq_map': None, + } + else: + # MCQ format; normalize 'under' → 'below' + if category == 'under': + category = 'below' + variant = ('left_first' if grp == 'horizontal' else 'above_first') \ + if len(pairs) % 2 == 0 else \ + ('right_first' if grp == 'horizontal' else 'below_first') + tmpl = MCQ_TEMPLATES[grp][variant] + mcq_map = MCQ_LETTER[grp][variant] + pair = { + 'index': row['index'], + 'question_id': str(row['question_id']), + 'image_base64': row['image'], + 'original_question': tmpl.format(obj1=obj1, obj2=obj2), + 'swapped_question': tmpl.format(obj1=obj2, obj2=obj1), + 'original_answer': category, + 'swapped_answer': OPPOSITE_MAP[category], + 'group': GROUP_MAP[category], + 'category': category, + 'obj1': obj1, 'obj2': obj2, + 'mcq_map': mcq_map, + } + + elif category in ['far', 'close']: + answer_key = row['answer'] + options = {k: row[k] for k in ['A', 'B', 'C', 'D']} + target_object = 
options[answer_key] + candidates = [v for k, v in options.items() if k != answer_key] + + if filter_unknown: + if not _valid_obj(target_object): + continue + candidates = [v for v in candidates if _valid_obj(v)] + if not candidates: + continue + + reference_object = rng.choice(candidates) + + if question_type == 'short_answer': + tmpl = SHORT_TEMPLATES['distance'] + pair = { + 'index': row['index'], + 'question_id': str(row['question_id']), + 'image_base64': row['image'], + 'original_question': tmpl.format(ref=reference_object, subj=target_object), + 'swapped_question': tmpl.format(ref=target_object, subj=reference_object), + 'original_answer': category, + 'swapped_answer': OPPOSITE_MAP[category], + 'group': 'distance', + 'category': category, + 'target_object': target_object, + 'reference_object': reference_object, + 'mcq_map': None, + } + else: + variant = 'far_first' if len(pairs) % 2 == 0 else 'close_first' + tmpl = MCQ_TEMPLATES['distance'][variant] + mcq_map = MCQ_LETTER['distance'][variant] + pair = { + 'index': row['index'], + 'question_id': str(row['question_id']), + 'image_base64': row['image'], + 'original_question': tmpl.format(ref=reference_object, subj=target_object), + 'swapped_question': tmpl.format(ref=target_object, subj=reference_object), + 'original_answer': category, + 'swapped_answer': OPPOSITE_MAP[category], + 'group': 'distance', + 'category': category, + 'target_object': target_object, + 'reference_object': reference_object, + 'mcq_map': mcq_map, + } + else: + continue + + pairs.append(pair) + stats[category]['success'] += 1 + + except Exception as e: + logger.warning(f"Failed to create swap pair for index {row['index']}: {e}") + continue + + logger.info("Swap pair creation stats:") + for cat in CATEGORY_ORDER: + s = stats[cat] + logger.info(f" {cat}: {s['success']}/{s['total']}") + logger.info(f" Total pairs: {len(pairs)}") + + return pairs + + +# ============================================================================ +# CV-Bench Data 
# ============================================================================
# CV-Bench Data Loading
# ============================================================================

def load_cv_bench_pairs(
    tsv_2d_path: str,
    tsv_3d_path: str,
    seed: int = 42,
    question_type: str = 'short_answer',
) -> List[dict]:
    """Load CV-Bench 2D/3D TSVs and create swap pairs.

    2D (Relation): horizontal (left/right) and vertical (above/below) categories.
      - Uses the same SHORT_TEMPLATES / MCQ_TEMPLATES as the original
        EmbSpatialBench loader.
    3D (Depth): distance (far/close) category.
      - Always uses SHORT_TEMPLATES['distance'] (short-answer format, mcq_map=None).
      - obj_A (red box) = subj, obj_B (blue box) = ref.
      - answer='A' → obj_A closer → category='close'; answer='B' → category='far'.

    Categories are balanced afterwards by downsampling to the smallest
    category count.
    """
    rng = random.Random(seed)
    pairs = []
    stats = defaultdict(lambda: {'total': 0, 'success': 0})

    # ── 2D pairs ──────────────────────────────────────────────────────────────
    df2d = pd.read_csv(tsv_2d_path, sep='\t')
    for idx, row in df2d.iterrows():
        actual_relation = (
            row['original_A'] if str(row['original_answer']).strip() == 'A'
            else row['original_B']
        )
        category = RELATION_TO_CATEGORY.get(str(actual_relation).strip().lower())
        if category is None:
            continue  # skip containment or unknown relations

        grp = GROUP_MAP[category]
        stats[category]['total'] += 1

        try:
            m = _CV2D_OBJ_PATTERN.search(str(row['question']))
            if not m:
                continue
            # Strip leading "the " to avoid "Is the the wall" duplication
            obj1 = _LEADING_THE.sub('', m.group(1).strip())
            obj2 = _LEADING_THE.sub('', m.group(2).strip())

            if question_type == 'short_answer':
                tmpl = SHORT_TEMPLATES[grp]
                pair = {
                    'index': idx,
                    'image_base64': row['image'],
                    'original_question': tmpl.format(obj1=obj1, obj2=obj2),
                    'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
                    'original_answer': category,
                    'swapped_answer': SHORT_OPPOSITE_MAP[category],
                    'group': grp,
                    'category': category,
                    'obj1': obj1, 'obj2': obj2,
                    'mcq_map': None,
                }
            else:  # mcq
                # Alternate A/B option order by running pair parity.
                variant = (
                    ('left_first' if grp == 'horizontal' else 'above_first')
                    if len(pairs) % 2 == 0 else
                    ('right_first' if grp == 'horizontal' else 'below_first')
                )
                tmpl = MCQ_TEMPLATES[grp][variant]
                mcq_map = MCQ_LETTER[grp][variant]
                pair = {
                    'index': idx,
                    'image_base64': row['image'],
                    'original_question': tmpl.format(obj1=obj1, obj2=obj2),
                    'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
                    'original_answer': category,
                    'swapped_answer': OPPOSITE_MAP[category],
                    'group': grp,
                    'category': category,
                    'obj1': obj1, 'obj2': obj2,
                    'mcq_map': mcq_map,
                }

            pairs.append(pair)
            stats[category]['success'] += 1

        except Exception as e:
            logger.warning(f"[2D] Failed row {idx}: {e}")
            continue

    # ── 3D pairs (always short_answer using distance template) ────────────────
    df3d = pd.read_csv(tsv_3d_path, sep='\t')
    tmpl_dist = SHORT_TEMPLATES['distance']

    for idx, row in df3d.iterrows():
        obj_A = str(row['A']).strip()  # red box object
        obj_B = str(row['B']).strip()  # blue box object
        actual_answer = str(row['answer']).strip()

        # obj_A is the subject being asked about (subj), obj_B is the reference
        # answer='A' → obj_A is closer → subj is 'close' relative to ref
        # answer='B' → obj_B is closer → subj (obj_A) is 'far' relative to ref
        if actual_answer == 'A':
            category = 'close'
        elif actual_answer == 'B':
            category = 'far'
        else:
            continue

        stats[category]['total'] += 1

        try:
            pair = {
                'index': idx,
                'image_base64': row['image'],
                'original_question': tmpl_dist.format(ref=obj_B, subj=obj_A),
                'swapped_question': tmpl_dist.format(ref=obj_A, subj=obj_B),
                'original_answer': category,
                'swapped_answer': OPPOSITE_MAP[category],
                'group': 'distance',
                'category': category,
                'obj1': obj_A,  # subj in original question
                'obj2': obj_B,  # ref in original question
                'mcq_map': None,
                'bbox_str': row.get('bbox', None),
            }
            pairs.append(pair)
            stats[category]['success'] += 1

        except Exception as e:
            logger.warning(f"[3D] Failed row {idx}: {e}")
            continue

    logger.info("CV-Bench swap pair creation stats (before balancing):")
    for cat in CATEGORY_ORDER:
        s = stats[cat]
        if s['total'] > 0:
            logger.info(f"  {cat}: {s['success']}/{s['total']}")
    logger.info(f"  Total pairs (before balancing): {len(pairs)}")

    # ── Balance categories: downsample to minimum category count ─────────────
    grouped = defaultdict(list)
    for p in pairs:
        grouped[p['category']].append(p)

    cat_counts = {cat: len(grouped[cat]) for cat in CATEGORY_ORDER if grouped[cat]}
    if cat_counts:
        min_count = min(cat_counts.values())
        logger.info(f"  Balancing categories to {min_count} samples each "
                    f"(min category: {min(cat_counts, key=cat_counts.get)})")
        balanced = []
        for cat in CATEGORY_ORDER:
            samples = grouped[cat]
            if len(samples) > min_count:
                samples = rng.sample(samples, min_count)
            balanced.extend(samples)
        pairs = balanced

    logger.info("CV-Bench swap pair stats (after balancing):")
    for cat in CATEGORY_ORDER:
        n = sum(1 for p in pairs if p['category'] == cat)
        if n > 0:
            logger.info(f"  {cat}: {n}")
    logger.info(f"  Total pairs: {len(pairs)}")

    return pairs


# ============================================================================
# CV-Bench Bbox Helpers
# ============================================================================

def parse_cv_bench_bbox(bbox_str) -> Optional[List]:
    """Parse CV-Bench-3D bbox string: '[[x0,y0,x1,y1],[x0,y0,x1,y1]]' → list.

    Returns None for missing/NaN input or anything that does not parse as a
    Python literal.
    """
    # Fix: `ast` is not imported anywhere in this module, so literal_eval
    # previously raised NameError on every call and the broad except turned
    # EVERY bbox into None, silently disabling cross-group quad creation.
    import ast

    if bbox_str is None:
        return None
    try:
        # A bare float here is a pandas NaN cell (missing bbox column).
        if isinstance(bbox_str, float):
            return None
    except Exception:
        pass
    try:
        if pd.isna(bbox_str):
            return None
    except Exception:
        pass
    try:
        return ast.literal_eval(str(bbox_str))
    except Exception:
        return None


def get_cvbench_bbox_center_y(bbox_list: List, obj_idx: int) -> float:
    """Return vertical center of bbox. obj_idx: 0=A(red), 1=B(blue).

    bbox format: [x0, y0, x1, y1] in absolute pixels.
    """
    bbox = bbox_list[obj_idx]
    return (bbox[1] + bbox[3]) / 2


# ============================================================================
# CV-Bench Cross-Group Quads
# ============================================================================

def create_cv_bench_cross_group_quads(
    depth_pairs: List[dict],
    threshold_ratio: float = 0.05,
) -> List[dict]:
    """For CV-Bench depth (distance) swap pairs, build cross-group quads by adding
    a vertical (above/below) question derived from bbox center-y positions.

    Both distance and vertical questions always use short-answer templates
    (mcq_map=None). Pairs whose vertical separation is within
    threshold_ratio * estimated-image-height are dropped as ambiguous.
    """
    quads = []
    stats = {'total': 0, 'added': 0, 'ambiguous': 0, 'no_bbox': 0}

    vert_tmpl = SHORT_TEMPLATES['vertical']

    for pair in depth_pairs:
        stats['total'] += 1
        bbox_list = parse_cv_bench_bbox(pair.get('bbox_str'))
        if bbox_list is None or len(bbox_list) < 2:
            stats['no_bbox'] += 1
            continue

        center_y_A = get_cvbench_bbox_center_y(bbox_list, 0)  # obj1 = subj (red box)
        center_y_B = get_cvbench_bbox_center_y(bbox_list, 1)  # obj2 = ref (blue box)

        # Estimate image height from bboxes; floor at 1024
        est_height = max(bbox_list[0][3], bbox_list[1][3]) * 1.1
        est_height = max(est_height, 1024)
        threshold = est_height * threshold_ratio

        y_diff = center_y_A - center_y_B
        if abs(y_diff) <= threshold:
            stats['ambiguous'] += 1
            continue

        obj_A = pair['obj1']  # subj in original distance question
        obj_B = pair['obj2']  # ref in original distance question

        # Vertical label for obj_A relative to obj_B (smaller y = higher in image = 'above')
        vert_original_answer = 'above' if center_y_A < center_y_B else 'below'
        vert_swapped_answer = SHORT_OPPOSITE_MAP[vert_original_answer]

        quad = {
            'index': pair['index'],
            'image_base64': pair['image_base64'],
            'dist_original_q': pair['original_question'],
            'dist_swapped_q': pair['swapped_question'],
            'dist_original_answer': pair['original_answer'],
            'dist_swapped_answer': pair['swapped_answer'],
            'dist_mcq_map': None,
            'vert_original_q': vert_tmpl.format(obj1=obj_A, obj2=obj_B),
            'vert_swapped_q': vert_tmpl.format(obj1=obj_B, obj2=obj_A),
            'vert_original_answer': vert_original_answer,
            'vert_swapped_answer': vert_swapped_answer,
            'vert_mcq_map': None,
            'target_object': obj_A,
            'reference_object': obj_B,
            'target_bbox_y': center_y_A,
            'ref_bbox_y': center_y_B,
            'y_diff': y_diff,
            'data_source': 'cv_bench_3d',
        }
        quads.append(quad)
        stats['added'] += 1

    logger.info(f"CV-Bench cross-group quads: {stats['added']}/{stats['total']} "
                f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})")
    return quads


# ============================================================================
# HF Bbox Cache (Fix 4: string-normalized keys)
# ============================================================================

def build_hf_bbox_cache(hf_dataset_name: str = 'FlagEval/EmbSpatial-Bench') -> Dict[str, dict]:
    """Load HF dataset and build bbox lookup cache keyed by string-normalized question_id."""
    from datasets import load_dataset
    logger.info(f"Loading HF dataset: {hf_dataset_name}")
    ds = load_dataset(hf_dataset_name, split='test')

    cache = {}
    for item in ds:
        # Fix 4: Normalize key to string for consistent matching
        qid = str(item['question_id'])
        cache[qid] = {
            'objects': item['objects'],
            'relation': item['relation'],
            'data_source': item['data_source'],
            'answer': item['answer'],
            'answer_options': item['answer_options'],
        }

    # Fix 4: Log sample keys for debugging
    sample_keys = list(cache.keys())[:5]
    logger.info(f"Built bbox cache: {len(cache)} entries (sample keys: {sample_keys})")
    return cache


def get_bbox_center_y(bbox: list) -> float:
    # NOTE(review): due to precedence this computes bbox[1] + bbox[3]/2, i.e.
    # y + h/2 — correct only if HF EmbSpatial bboxes are [x, y, w, h]. The
    # CV-Bench helper uses (y0 + y1)/2 for [x0, y0, x1, y1] boxes. Confirm the
    # HF dataset's bbox convention before changing either formula.
    return bbox[1] + bbox[3] / 2


def create_cross_group_quads(
    swap_pairs: List[dict],
    hf_cache: Dict[str, dict],
    threshold_ratio: float = 0.05,
    question_type: str = 'mcq',
) -> List[dict]:
    """For far/close swap pairs, create additional vertical queries using bbox.

    Matches each distance pair to the HF dataset by question_id, derives an
    above/below label from the two objects' bbox center-y values, and drops
    pairs whose vertical separation is within threshold_ratio of the source
    image height.
    """
    IMAGE_HEIGHTS = {'ai2thor': 300, 'mp3d': 480, 'scannet': 968}

    quads = []
    stats = {'total': 0, 'matched': 0, 'ambiguous': 0, 'no_bbox': 0}

    distance_pairs = [p for p in swap_pairs if p['group'] == 'distance']

    # Fix 4: Use question_id (e.g. 'mp3d_0') to match HF dataset, not integer index
    n_matched_keys = sum(1 for p in distance_pairs if p['question_id'] in hf_cache)
    logger.info(f"Matched {n_matched_keys}/{len(distance_pairs)} question_ids between TSV and HF dataset")

    for pair in distance_pairs:
        stats['total'] += 1
        qid = pair['question_id']

        if qid not in hf_cache:
            stats['no_bbox'] += 1
            continue

        hf_item = hf_cache[qid]
        names = hf_item['objects']['name']
        bboxes = hf_item['objects']['bbox']

        target = pair['target_object']
        reference = pair['reference_object']

        target_bbox_y, ref_bbox_y = None, None
        for name, bbox in zip(names, bboxes):
            if name == target:
                target_bbox_y = get_bbox_center_y(bbox)
            if name == reference:
                ref_bbox_y = get_bbox_center_y(bbox)

        if target_bbox_y is None or ref_bbox_y is None:
            stats['no_bbox'] += 1
            continue

        image_height = IMAGE_HEIGHTS.get(hf_item['data_source'], 480)
        threshold = image_height * threshold_ratio
        y_diff = target_bbox_y - ref_bbox_y

        if abs(y_diff) < threshold:
            stats['ambiguous'] += 1
            continue

        # Smaller y = higher in the image = 'above'
        if target_bbox_y < ref_bbox_y:
            vert_original_answer = 'above'
        else:
            vert_original_answer = 'below'

        if question_type == 'short_answer':
            vert_tmpl = SHORT_TEMPLATES['vertical']
            vert_mcq_map = None
            vert_original_q = vert_tmpl.format(obj1=target, obj2=reference)
            vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target)
            vert_swapped_answer = SHORT_OPPOSITE_MAP[vert_original_answer]
        else:
            # Alternate A/B option order by running quad parity.
            vert_variant = 'above_first' if len(quads) % 2 == 0 else 'below_first'
            vert_tmpl = MCQ_TEMPLATES['vertical'][vert_variant]
            vert_mcq_map = MCQ_LETTER['vertical'][vert_variant]
            vert_original_q = vert_tmpl.format(obj1=target, obj2=reference)
            vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target)
            vert_swapped_answer = OPPOSITE_MAP[vert_original_answer]

        quad = {
            'index': pair['index'],
            'image_base64': pair['image_base64'],
            'dist_original_q': pair['original_question'],
            'dist_swapped_q': pair['swapped_question'],
            'dist_original_answer': pair['original_answer'],
            'dist_swapped_answer': pair['swapped_answer'],
            'dist_mcq_map': pair['mcq_map'],
            'vert_original_q': vert_original_q,
            'vert_swapped_q': vert_swapped_q,
            'vert_original_answer': vert_original_answer,
            'vert_swapped_answer': vert_swapped_answer,
            'vert_mcq_map': vert_mcq_map,
            'target_object': target,
            'reference_object': reference,
            'target_bbox_y': target_bbox_y,
            'ref_bbox_y': ref_bbox_y,
            'y_diff': y_diff,
            'data_source': hf_item['data_source'],
        }
        quads.append(quad)
        stats['matched'] += 1

    logger.info(f"Cross-group quads: {stats['matched']}/{stats['total']} "
                f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})")
    return quads
# ============================================================================
# Base Extractor
# ============================================================================

class BaseHiddenStateExtractor(ABC):
    """Abstract base for extracting per-layer last-token hidden states.

    Registers a forward hook on every target decoder layer; during the
    prefill forward pass each hook snapshots the hidden state of the final
    prompt token (detached, moved to CPU, cast to float).  Subclasses supply
    model loading, layer lookup, and the combined extract+generate step.
    """

    def __init__(self, model_path: str, device: str = 'cuda',
                 target_layers: Optional[List[int]] = None):
        """Load the model and register hooks on the requested layers.

        Args:
            model_path: Checkpoint directory or model id (interpreted by the
                subclass's _load_model).
            device: Torch device string for model placement.
            target_layers: Layer indices to hook; None hooks every layer.
        """
        self.model_path = model_path
        self.device = device
        self.hidden_states = {}  # layer_idx -> last-token hidden state (CPU float tensor)
        self.hooks = []          # live hook handles, removed in cleanup()
        self._load_model()
        num_layers = self._get_num_layers()
        if target_layers is None:
            self.target_layers = list(range(num_layers))
            logger.info(f"Model has {num_layers} layers. Extracting ALL.")
        else:
            self.target_layers = target_layers
        self._register_hooks()

    def _register_hooks(self):
        # One forward hook per target layer; _get_layer_module may return
        # None for unresolvable layers, which are then simply skipped.
        for layer_idx in self.target_layers:
            module = self._get_layer_module(layer_idx)
            if module is not None:
                hook = module.register_forward_hook(self._make_hook(layer_idx))
                self.hooks.append(hook)

    def _make_hook(self, layer_idx: int):
        def hook_fn(module, input, output):
            # Decoder layers may return (hidden_states, ...) tuples.
            if isinstance(output, tuple):
                hidden = output[0]
            else:
                hidden = output
            # seq_len > 1 distinguishes the prefill pass from single-token
            # decode steps, so only the prompt's last token is captured.
            if hidden.shape[1] > 1:  # prefill only
                last_token = hidden[:, -1, :].detach().cpu().float()
                self.hidden_states[layer_idx] = last_token.squeeze(0)
        return hook_fn

    # Subclass contract: load model/processor, report decoder layer count,
    # resolve a layer index to its module, and run extraction + generation.
    @abstractmethod
    def _load_model(self): pass
    @abstractmethod
    def _get_num_layers(self) -> int: pass
    @abstractmethod
    def _get_layer_module(self, layer_idx: int): pass
    @abstractmethod
    def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass

    def cleanup(self):
        """Remove all hooks, drop model/processor references, free CUDA cache."""
        for hook in self.hooks:
            hook.remove()
        self.hooks = []
        if hasattr(self, 'model'):
            del self.model
        if hasattr(self, 'processor'):
            del self.processor
        torch.cuda.empty_cache()


# ============================================================================
# Molmo Extractor
# ============================================================================
torch.load + def _unsafe_load_wrapper(*args, **kwargs): + if 'weights_only' not in kwargs: + kwargs['weights_only'] = False + return _original_load(*args, **kwargs) + torch.load = _unsafe_load_wrapper + + cfg = ModelConfig.load( + os.path.join(self.model_path, "config.yaml"), + key="model", validate_paths=False + ) + cfg.init_device = "cpu" + self.model = NativeMolmoModel(cfg) + state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu") + self.model.load_state_dict(state_dict) + self.model = self.model.to(self.device, dtype=torch.bfloat16).eval() + self.tokenizer = cfg.get_tokenizer() + + v_cfg = cfg.vision_backbone + h, w = cfg.llm_patches_per_crop() + image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None) + + class SafeDataFormatter(DataFormatter): + def get_system_prompt(self, style, for_inference, messages, rng=None): + if style is None: + style = "User" + return super().get_system_prompt(style, for_inference, messages, rng) + + self.formatter = SafeDataFormatter( + prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting, + system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space, + default_inference_len=cfg.default_inference_len + ) + self.preprocessor = MultiModalPreprocessor( + tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type), + crop_mode=cfg.crop_mode, max_crops=cfg.max_crops, + overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode, + use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size, + image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h, + image_token_length_w=w, image_token_length_h=h, + image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask, + pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting, + ) + logger.info(f"Loaded native Molmo from {self.model_path}") + + def _load_hf_model(self): + from transformers 
import AutoModelForCausalLM, AutoProcessor
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path, torch_dtype=torch.bfloat16,
            trust_remote_code=True, device_map=self.device
        ).eval()
        self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
        logger.info(f"Loaded HF Molmo from {self.model_path}")

    def _get_num_layers(self) -> int:
        if self.is_native:
            return len(self.model.transformer.blocks)
        if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
            return len(self.model.model.transformer.blocks)
        return 32  # fallback when structure is unrecognized — presumably a 32-layer LLM; TODO confirm

    def _get_layer_module(self, layer_idx: int):
        if self.is_native:
            return self.model.transformer.blocks[layer_idx]
        return self.model.model.transformer.blocks[layer_idx]

    def extract_and_predict(self, image, question):
        # Reset the hook sink; hooks fill it during the prefill pass of generate().
        self.hidden_states = {}
        if self.is_native:
            # Native path: format + preprocess with the OLMo pipeline, then
            # run the native generate() API.
            example = {"messages": [question], "image": image}
            messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
            batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True)
            if 'input_ids' not in batch and 'input_tokens' in batch:
                batch['input_ids'] = batch['input_tokens']

            def to_t(x):
                # Preprocessor may return numpy arrays or tensors.
                return torch.from_numpy(x) if isinstance(x, np.ndarray) else x

            input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long()
            images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
            image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
            image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device)

            with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
                gen = self.model.generate(
                    input_ids=input_ids, images=images_t,
                    image_masks=image_masks, image_input_idx=image_input_idx,
                    max_steps=20, beam_size=1,
                )
            generated_ids = gen.token_ids[0, 0]
            answer = self.tokenizer.decode(generated_ids.tolist()).strip()
            # Strip end-of-sequence markers from the decoded answer.
            for eos in 
['<|endoftext|>', '', '<|end|>']:
                # NOTE(review): the '' entry is a no-op for str.replace —
                # possibly a token lost in editing; confirm intended list.
                answer = answer.replace(eos, '').strip()
        else:
            # HF path: processor.process + generate_from_batch.
            from transformers import GenerationConfig
            inputs = self.processor.process(images=[image], text=question)
            processed = {}
            for k, v in inputs.items():
                v = v.to(self.device).unsqueeze(0)
                if v.dtype == torch.float32:
                    v = v.to(dtype=torch.bfloat16)
                processed[k] = v
            with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16):
                output = self.model.generate_from_batch(
                    processed,
                    GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"),
                    tokenizer=self.processor.tokenizer,
                )
            input_len = processed['input_ids'].shape[1]
            answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip()

        return self.hidden_states.copy(), answer


# ============================================================================
# NVILA Extractor
# ============================================================================

class NVILAExtractor(BaseHiddenStateExtractor):
    def _load_model(self):
        # NVILA's llava package conflicts with RoboRefer's fork: strip any
        # RoboRefer paths and cached 'llava' modules before importing, and
        # restore them if the import fails.
        original_sys_path = sys.path.copy()
        sys.path = [p for p in sys.path if 'RoboRefer' not in p]
        modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()]
        removed = {m: sys.modules.pop(m) for m in modules_to_remove}
        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception as err:
            sys.path = original_sys_path
            for m, mod in removed.items():
                sys.modules[m] = mod
            raise RuntimeError(f"Failed to import llava: {err}")
        sys.path = original_sys_path
        self.LLaVAImage = LLaVAImage
        self.clib = clib
        self.model = llava.load(self.model_path, model_base=None)
        self._find_llm_backbone()
        logger.info(f"Loaded NVILA from {self.model_path}")

    def _find_llm_backbone(self):
        # Probe known attribute paths first, then fall back to a module scan.
        candidates = []
        if hasattr(self.model, 'llm'):
            if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'):
candidates.append(self.model.llm.model.layers)
            if hasattr(self.model.llm, 'layers'):
                candidates.append(self.model.llm.layers)
        if hasattr(self.model, 'model'):
            if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'):
                candidates.append(self.model.model.model.layers)
            if hasattr(self.model.model, 'layers'):
                candidates.append(self.model.model.layers)
        # Fallback: scan all named modules for a non-empty '.layers' container.
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0:
                candidates.append(module)
        if candidates:
            # First match wins (explicit paths take priority over the scan).
            self.llm_backbone = candidates[0]
        else:
            raise ValueError("Could not locate transformer layers in NVILA model")

    def _get_num_layers(self) -> int:
        return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24

    def _get_layer_module(self, layer_idx: int):
        return self.llm_backbone[layer_idx]

    def extract_and_predict(self, image, question):
        self.hidden_states = {}
        # llava's generate_content takes a file path, so round-trip the PIL
        # image through a temp file; unlink in finally to avoid leaking files.
        import tempfile
        with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
            temp_path = f.name
            image.save(temp_path)
        try:
            prompt = [self.LLaVAImage(temp_path), question]
            from transformers import GenerationConfig
            response = self.model.generate_content(
                prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False)
            )
        finally:
            os.unlink(temp_path)
        answer = str(response[0] if isinstance(response, list) else response).strip()
        return self.hidden_states.copy(), answer


class RoboReferExtractor(NVILAExtractor):
    ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer'

    def _load_model(self):
        # Mirror of NVILAExtractor._load_model, but imports the RoboRefer
        # fork of llava by prepending its path and purging cached modules.
        original_sys_path = sys.path.copy()
        if self.ROBOREFER_PATH not in sys.path:
            sys.path.insert(0, self.ROBOREFER_PATH)
        modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()]
        removed = {m: sys.modules.pop(m) for m in modules_to_remove}
        try:
            import llava
            from llava.media import Image as LLaVAImage
            from llava import conversation as clib
        except Exception 
as err:
            # Restore the import environment before surfacing the failure.
            sys.path = original_sys_path
            for m, mod in removed.items():
                sys.modules[m] = mod
            raise RuntimeError(f"Failed to import RoboRefer llava: {err}")
        sys.path = original_sys_path
        self.LLaVAImage = LLaVAImage
        self.clib = clib
        self.model = llava.load(self.model_path, model_base=None)
        self._find_llm_backbone()
        logger.info(f"Loaded RoboRefer from {self.model_path}")


class RoboReferDepthExtractor(RoboReferExtractor):
    """RoboRefer with depth-image input instead of RGB.

    Usage: pass the depth PIL image to extract_and_predict() instead of the RGB image.
    In practice this means loading depth images in load_swap_pairs() / extract_swap_features()
    rather than changing anything here. If the depth image is stored as a separate column in
    the dataset, add a 'depth_image_base64' key to the pair dict and decode it before calling
    run_single_query().

    TODO: confirm depth image path / format with the actual dataset layout.
    """
    # Inherits extract_and_predict() from NVILAExtractor (via RoboReferExtractor) unchanged.
    # The caller is responsible for passing the correct (depth) PIL Image. 
+ + +# ============================================================================ +# Qwen2.5-VL Extractor +# ============================================================================ + +class Qwen25VLExtractor(BaseHiddenStateExtractor): + BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct" + + def _load_model(self): + from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor + try: + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16, device_map=self.device + ) + except ImportError: + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16 + ).to(self.device) + self.model.eval() + if self.model_path.startswith('/'): + self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL) + else: + self.processor = AutoProcessor.from_pretrained(self.model_path) + logger.info(f"Loaded Qwen2.5-VL from {self.model_path}") + + def _get_num_layers(self) -> int: + return len(self.model.model.layers) + + def _get_layer_module(self, layer_idx: int): + return self.model.model.layers[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + messages = [{"role": "user", "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": question} + ]}] + text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + from qwen_vl_utils import process_vision_info + image_inputs, video_inputs = process_vision_info(messages) + inputs = self.processor( + text=[text], images=image_inputs, videos=video_inputs, + padding=True, return_tensors="pt" + ).to(self.device) + with torch.no_grad(): + output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) + input_len = inputs['input_ids'].shape[1] + answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip() + return self.hidden_states.copy(), answer + + +# 
============================================================================
# New Extractors: Molmo2-8B and Qwen3-VL family
# ============================================================================

class Molmo2Extractor(BaseHiddenStateExtractor):
    """Extractor for allenai/Molmo2-8B (AutoModelForImageTextToText, messages-dict input)."""

    def _load_model(self):
        from transformers import AutoProcessor, AutoModelForImageTextToText
        self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
        self.model = AutoModelForImageTextToText.from_pretrained(
            self.model_path, trust_remote_code=True, torch_dtype='auto', device_map='auto',
        ).eval()
        self._find_llm_layers()
        logger.info(f"Loaded Molmo2 from {self.model_path}")

    def _find_llm_layers(self):
        # Probe likely attribute paths first; fall back to scanning all
        # modules for the largest '.layers' container.
        candidates = [
            ['model', 'layers'],
            ['language_model', 'model', 'layers'],
            ['model', 'model', 'layers'],
        ]
        for path in candidates:
            obj = self.model
            for attr in path:
                obj = getattr(obj, attr, None)
                if obj is None:
                    break
            if obj is not None and hasattr(obj, '__len__') and len(obj) > 0:
                self.llm_layers = obj
                logger.info(f"Molmo2: layers at '{'.'.join(path)}', count={len(obj)}")
                return
        best, best_len = None, 0
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len:
                best, best_len = module, len(module)
                logger.info(f"Molmo2: layers via scan at '{name}', count={best_len}")
        if best is not None:
            self.llm_layers = best
            return
        raise ValueError("Could not find transformer layers in Molmo2 model")

    def _get_num_layers(self) -> int:
        return len(self.llm_layers)

    def _get_layer_module(self, layer_idx: int):
        return self.llm_layers[layer_idx]

    def extract_and_predict(self, image, question):
        self.hidden_states = {}
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": question},
        ]}]
        inputs = 
self.processor.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True,
            return_tensors="pt", return_dict=True,
        )
        inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
        with torch.inference_mode():
            generated_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
        # Decode only the newly generated continuation.
        input_len = inputs['input_ids'].shape[1]
        answer = self.processor.tokenizer.decode(
            generated_ids[0, input_len:], skip_special_tokens=True).strip()
        return self.hidden_states.copy(), answer


class Qwen3VLExtractor(BaseHiddenStateExtractor):
    """Extractor for Qwen3-VL family (32B dense, 235B MoE).

    Key differences from Qwen25VLExtractor:
    - AutoModelForImageTextToText + trust_remote_code=True
    - process_vision_info requires image_patch_size=16
    - processor call requires do_resize=False
    - 32x32 px patches -> different min/max_pixels
    """

    MIN_PIXELS = 256 * 32 * 32 # 262,144 (mp3d/scannet -> natural res; ai2thor -> ~256 tokens)
    MAX_PIXELS = 16384 * 32 * 32 # 16,777,216

    def _load_model(self):
        from transformers import AutoProcessor, AutoModelForImageTextToText
        self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
        self.model = AutoModelForImageTextToText.from_pretrained(
            self.model_path, trust_remote_code=True, torch_dtype='auto',
            device_map='auto', attn_implementation='flash_attention_2',
        ).eval()
        self._find_llm_layers()
        logger.info(f"Loaded Qwen3-VL from {self.model_path}")

    def _find_llm_layers(self):
        # Same probe-then-scan strategy as Molmo2Extractor._find_llm_layers.
        candidates = [
            ['model', 'language_model', 'model', 'layers'], # Qwen3-VL expected
            ['language_model', 'model', 'layers'],
            ['model', 'model', 'layers'],
            ['model', 'layers'],
        ]
        for path in candidates:
            obj = self.model
            for attr in path:
                obj = getattr(obj, attr, None)
                if obj is None:
                    break
            if obj is not None and hasattr(obj, '__len__') and len(obj) > 0:
                self.llm_layers = obj
                logger.info(f"Qwen3-VL: layers at '{'.'.join(path)}', count={len(obj)}")
                return

        best, best_len = None, 0
        for name, module in self.model.named_modules():
            if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len:
                best, best_len = module, len(module)
                logger.info(f"Qwen3-VL: layers via scan at '{name}', count={best_len}")
        if best is not None:
            self.llm_layers = best
            return
        raise ValueError("Could not find transformer layers in Qwen3-VL model")

    def _get_num_layers(self) -> int:
        return len(self.llm_layers)

    def _get_layer_module(self, layer_idx: int):
        return self.llm_layers[layer_idx]

    def extract_and_predict(self, image, question):
        self.hidden_states = {}
        # Per-image pixel budget pins the visual token count (32x32 patches).
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image,
             "min_pixels": self.MIN_PIXELS, "max_pixels": self.MAX_PIXELS},
            {"type": "text", "text": question},
        ]}]
        text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        from qwen_vl_utils import process_vision_info
        images, videos, _ = process_vision_info(
            messages, image_patch_size=16, return_video_kwargs=True, return_video_metadata=True,
        )
        inputs = self.processor(
            text=text, images=images, videos=videos, do_resize=False, return_tensors="pt",
        ).to(self.model.device)
        with torch.no_grad():
            output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
        input_len = inputs['input_ids'].shape[1]
        answer = self.processor.tokenizer.decode(
            output_ids[0, input_len:], skip_special_tokens=True).strip()
        return self.hidden_states.copy(), answer


# Registry used by get_extractor() to resolve class names from MODEL_CONFIGS_NEW.
EXTRACTOR_CLASSES = {
    'MolmoExtractor': MolmoExtractor,
    'NVILAExtractor': NVILAExtractor,
    'RoboReferExtractor': RoboReferExtractor,
    'Qwen25VLExtractor': Qwen25VLExtractor,
    'Molmo2Extractor': Molmo2Extractor,
    'Qwen3VLExtractor': Qwen3VLExtractor,
}


def get_extractor(model_type: str, model_path: str = None, scale: str = None, **kwargs):
    """Create an extractor for any model_type (legacy or new-large)."""
    # New large models: 
(ExtractorClass, path) tuples in MODEL_CONFIGS_NEW
    if model_type in MODEL_CONFIGS_NEW:
        cls_name, raw_path = MODEL_CONFIGS_NEW[model_type][scale]
        resolved = resolve_local_path(raw_path)
        logger.info(f"Creating {cls_name} for scale='{scale}' from {resolved}")
        return EXTRACTOR_CLASSES[cls_name](resolved, **kwargs)
    # Legacy models
    if model_type == 'nvila' and scale == 'roborefer':
        return RoboReferExtractor(model_path, **kwargs)
    if model_type == 'nvila' and scale == 'roborefer_depth':
        return RoboReferDepthExtractor(model_path, **kwargs)
    legacy = {
        'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor,
        'nvila_synthetic': NVILAExtractor, 'nvila_st': NVILAExtractor,
    }
    return legacy[model_type](model_path, **kwargs)


# ============================================================================
# Feature Extraction Pipeline
# ============================================================================

def run_single_query(extractor, image, question):
    # One forward pass: returns ({layer: flat hidden-state array}, answer str).
    hidden_states, predicted = extractor.extract_and_predict(image, question)
    result = {}
    for layer_idx in extractor.target_layers:
        if layer_idx in hidden_states:
            state = hidden_states[layer_idx].numpy().flatten()
            if state.size > 0:
                result[layer_idx] = state
    return result, predicted


def extract_swap_features(
    extractor: BaseHiddenStateExtractor,
    swap_pairs: List[dict],
    max_samples_per_category: int = 0,
) -> List[dict]:
    """Extract features for all swap pairs."""
    # Fixed seed so per-category subsampling is reproducible across scales.
    rng = random.Random(42)

    if max_samples_per_category > 0:
        grouped = defaultdict(list)
        for p in swap_pairs:
            grouped[p['category']].append(p)
        limited = []
        for cat in CATEGORY_ORDER:
            samples = grouped[cat]
            if len(samples) > max_samples_per_category:
                samples = rng.sample(samples, max_samples_per_category)
            limited.extend(samples)
        swap_pairs = limited

    records = []
    for pair in tqdm(swap_pairs, desc="Swap pairs"):
        try:
            image = 
decode_base64_image(pair['image_base64'])
            # Two passes per pair: original question and its swapped variant.
            hs_orig, pred_orig = run_single_query(extractor, image, pair['original_question'])
            hs_swap, pred_swap = run_single_query(extractor, image, pair['swapped_question'])

            is_correct_orig = check_answer(pred_orig, pair['original_answer'], pair['mcq_map'])
            is_correct_swap = check_answer(pred_swap, pair['swapped_answer'], pair['mcq_map'])

            # Delta vector = swapped minus original hidden state, per layer.
            delta = {}
            for layer_idx in extractor.target_layers:
                if layer_idx in hs_orig and layer_idx in hs_swap:
                    delta[layer_idx] = hs_swap[layer_idx] - hs_orig[layer_idx]

            record = {
                'index': pair['index'],
                'group': pair['group'],
                'category': pair['category'],
                'original_answer': pair['original_answer'],
                'swapped_answer': pair['swapped_answer'],
                'pred_orig': pred_orig,
                'pred_swap': pred_swap,
                'is_correct_orig': is_correct_orig,
                'is_correct_swap': is_correct_swap,
                'hs_orig': hs_orig,
                'hs_swap': hs_swap,
                'delta': delta,
            }
            records.append(record)

            mark_o = "O" if is_correct_orig else "X"
            mark_s = "O" if is_correct_swap else "X"
            logger.info(f" #{pair['index']:<6} {pair['category']:<6} "
                        f"orig[{mark_o}]=\"{pred_orig[:40]}\" swap[{mark_s}]=\"{pred_swap[:40]}\""
                        + (f" [{len(records)}/{len(swap_pairs)}]" if len(records) % 50 == 0 else ""))

        except Exception as e:
            # Best-effort: skip failed pairs but keep processing the rest.
            logger.warning(f"Error on index {pair['index']}: {e}")
            continue

    logger.info(f"Extracted {len(records)} swap pair records")

    # Fix 8: Per-category accuracy logging
    for cat in CATEGORY_ORDER:
        cat_recs = [r for r in records if r['category'] == cat]
        n = len(cat_recs)
        if n == 0:
            continue
        c_orig = sum(1 for r in cat_recs if r['is_correct_orig'])
        c_swap = sum(1 for r in cat_recs if r['is_correct_swap'])
        c_both = sum(1 for r in cat_recs if r['is_correct_orig'] and r['is_correct_swap'])
        logger.info(f" {cat:>6s} (n={n}): acc_orig={c_orig/n:.1%}, acc_swap={c_swap/n:.1%}, "
                    f"acc_both={c_both/n:.1%}")

    return records


def extract_cross_group_features(
    extractor: 
BaseHiddenStateExtractor,
    quads: List[dict],
) -> List[dict]:
    """Extract features for cross-group quads (4 forward passes each)."""
    records = []
    for quad in tqdm(quads, desc="Cross-group quads"):
        try:
            image = decode_base64_image(quad['image_base64'])
            # Distance pair and vertical pair, each original + swapped.
            hs_d_orig, pred_d_orig = run_single_query(extractor, image, quad['dist_original_q'])
            hs_d_swap, pred_d_swap = run_single_query(extractor, image, quad['dist_swapped_q'])
            hs_v_orig, pred_v_orig = run_single_query(extractor, image, quad['vert_original_q'])
            hs_v_swap, pred_v_swap = run_single_query(extractor, image, quad['vert_swapped_q'])

            delta_dist, delta_vert = {}, {}
            for layer_idx in extractor.target_layers:
                if layer_idx in hs_d_orig and layer_idx in hs_d_swap:
                    delta_dist[layer_idx] = hs_d_swap[layer_idx] - hs_d_orig[layer_idx]
                if layer_idx in hs_v_orig and layer_idx in hs_v_swap:
                    delta_vert[layer_idx] = hs_v_swap[layer_idx] - hs_v_orig[layer_idx]

            record = {
                'index': quad['index'],
                'delta_dist': delta_dist,
                'delta_vert': delta_vert,
                'pred_d_orig': pred_d_orig, 'pred_d_swap': pred_d_swap,
                'pred_v_orig': pred_v_orig, 'pred_v_swap': pred_v_swap,
                'is_correct_d_orig': check_answer(pred_d_orig, quad['dist_original_answer'], quad['dist_mcq_map']),
                'is_correct_d_swap': check_answer(pred_d_swap, quad['dist_swapped_answer'], quad['dist_mcq_map']),
                'is_correct_v_orig': check_answer(pred_v_orig, quad['vert_original_answer'], quad['vert_mcq_map']),
                'is_correct_v_swap': check_answer(pred_v_swap, quad['vert_swapped_answer'], quad['vert_mcq_map']),
                'data_source': quad['data_source'],
            }
            records.append(record)

            tqdm.write(f" #{quad['index']:<6} dist=[{pred_d_orig[:20]}/{pred_d_swap[:20]}] "
                       f"vert=[{pred_v_orig[:20]}/{pred_v_swap[:20]}]")

        except Exception as e:
            # Best-effort: skip failed quads but keep processing the rest.
            logger.warning(f"Error on cross-group index {quad['index']}: {e}")
            continue

    logger.info(f"Extracted {len(records)} cross-group quad records")
    return records


# 
============================================================================
# Analysis Functions
# ============================================================================

# Fix 5: Within-category + sign-corrected delta consistency

def compute_delta_consistency(records: List[dict], target_layers: List[int]):
    """Compute TWO types of delta consistency.

    Returns:
        within_cat_results: {(category, layer) -> {mean, std, n}}
        sign_corrected_results: {(group, layer) -> {mean, std, n}}
    """
    within_cat_results = {}
    sign_corrected_results = {}

    for group in GROUP_ORDER:
        canonical = CANONICAL_CATEGORIES[group]
        opposite = OPPOSITE_MAP[canonical]
        group_recs = [r for r in records if r['group'] == group]

        for layer in target_layers:
            # (a) Within-category consistency: mean pairwise cosine of delta
            # vectors inside one category (upper triangle, no self-pairs).
            for cat in [canonical, opposite]:
                cat_deltas = [r['delta'][layer] for r in group_recs
                              if r['category'] == cat and layer in r['delta']]
                if len(cat_deltas) >= 2:
                    arr = np.array(cat_deltas)
                    sim = cosine_similarity(arr)
                    upper = sim[np.triu_indices(len(cat_deltas), k=1)]
                    within_cat_results[(cat, layer)] = {
                        'mean': float(np.mean(upper)),
                        'std': float(np.std(upper)),
                        'n': len(cat_deltas),
                    }

            # (b) Sign-corrected group consistency: opposite-category deltas
            # are negated so all vectors point in the canonical direction.
            all_deltas = []
            for r in group_recs:
                if layer not in r['delta']:
                    continue
                d = r['delta'][layer]
                if r['category'] == opposite:
                    d = -d # flip to align with canonical direction
                all_deltas.append(d)

            if len(all_deltas) >= 2:
                arr = np.array(all_deltas)
                sim = cosine_similarity(arr)
                upper = sim[np.triu_indices(len(all_deltas), k=1)]
                sign_corrected_results[(group, layer)] = {
                    'mean': float(np.mean(upper)),
                    'std': float(np.std(upper)),
                    'n': len(all_deltas),
                }

    return within_cat_results, sign_corrected_results


# Fix 7: Delta-based similarity matrix

def compute_delta_similarity_matrix(records: List[dict], layer: int) -> Optional[pd.DataFrame]:
    """Compute 6x6 cosine similarity using mean delta per 
category."""
    # Average the per-sample delta vectors within each category, then compare
    # category means pairwise with cosine similarity.
    cat_deltas = {}
    for cat in CATEGORY_ORDER:
        deltas = [r['delta'][layer] for r in records if r['category'] == cat and layer in r['delta']]
        if deltas:
            cat_deltas[cat] = np.mean(deltas, axis=0)

    available = [c for c in CATEGORY_ORDER if c in cat_deltas]
    if len(available) < 2:
        return None

    vectors = np.array([cat_deltas[c] for c in available])
    sim = cosine_similarity(vectors)
    return pd.DataFrame(sim, index=available, columns=available)


# ============================================================
# CHANGE 1: Add this function right after compute_delta_similarity_matrix()
# (around line ~820, after the "Fix 7: Delta-based similarity matrix" block)
# ============================================================

def compute_delta_norm_per_category(records: List[dict], layer: int) -> Optional[pd.DataFrame]:
    """Compute mean delta vector norm per category at a given layer.

    Returns a single-column DataFrame with index = category label, column = 'norm'.
    Format matches what summarize_metrics_updated.py expects:
        delta_norm_{scale}_L{layer}_all_pairs.csv
        ,norm
        left,12.34
        right,11.89
        above,9.45
        under,9.12
        far,7.23
        close,7.58

    Returns None if no records have delta vectors at this layer. 

    """
    rows = {}
    for cat in CATEGORY_ORDER:
        deltas = [r['delta'][layer] for r in records
                  if r['category'] == cat and layer in r['delta']]
        if deltas:
            # Mean L2 norm of the per-sample delta vectors in this category.
            norms = [float(np.linalg.norm(d)) for d in deltas]
            rows[cat] = float(np.mean(norms))

    if not rows:
        return None

    df = pd.DataFrame.from_dict(rows, orient='index', columns=['norm'])
    # Reorder to canonical category order (skip any that are missing)
    ordered = [c for c in CATEGORY_ORDER if c in df.index]
    return df.loc[ordered]


# Fix 8: Both-correct filtering

def filter_both_correct(records: List[dict]) -> List[dict]:
    """Filter to pairs where both orig and swap predictions are correct."""
    return [r for r in records if r['is_correct_orig'] and r['is_correct_swap']]


# Fix 8: Category validity check

def check_category_validity(records: List[dict], scale: str) -> Dict[str, dict]:
    """Check per-category accuracy and flag unreliable categories."""
    validity = {}
    for cat in CATEGORY_ORDER:
        cat_recs = [r for r in records if r['category'] == cat]
        n = len(cat_recs)
        if n == 0:
            validity[cat] = {'n': 0, 'acc_orig': 0, 'acc_swap': 0, 'reliable': False}
            continue
        acc_orig = sum(1 for r in cat_recs if r['is_correct_orig']) / n
        acc_swap = sum(1 for r in cat_recs if r['is_correct_swap']) / n
        # Both directions must be at or above chance-ish 50% to be trusted.
        reliable = acc_orig >= 0.5 and acc_swap >= 0.5
        validity[cat] = {
            'n': n, 'acc_orig': acc_orig, 'acc_swap': acc_swap,
            'reliable': reliable,
        }
        if not reliable:
            logger.warning(f" [!] 
Category '{cat}' unreliable at scale={scale}: "
                           f"acc_orig={acc_orig:.1%}, acc_swap={acc_swap:.1%}")
    return validity


def compute_cross_group_alignment(quad_records: List[dict], target_layers: List[int]) -> dict:
    # For each layer: cosine alignment between vertical-swap and distance-swap
    # delta vectors, per sample and for the group means, plus a permutation
    # baseline (shuffled pairings, 100 rounds, fixed seed).
    results = {}
    for layer in target_layers:
        per_sample = []
        delta_verts, delta_dists = [], []

        for rec in quad_records:
            if layer in rec['delta_vert'] and layer in rec['delta_dist']:
                dv = rec['delta_vert'][layer]
                dd = rec['delta_dist'][layer]
                norm_v, norm_d = np.linalg.norm(dv), np.linalg.norm(dd)
                if norm_v > 1e-10 and norm_d > 1e-10:
                    per_sample.append(float(np.dot(dv, dd) / (norm_v * norm_d)))
                    delta_verts.append(dv)
                    delta_dists.append(dd)

        if not per_sample:
            continue

        mean_dv = np.mean(delta_verts, axis=0)
        mean_dd = np.mean(delta_dists, axis=0)
        norm_mv, norm_md = np.linalg.norm(mean_dv), np.linalg.norm(mean_dd)
        mean_alignment = float(np.dot(mean_dv, mean_dd) / (norm_mv * norm_md + 1e-10))

        rng = np.random.RandomState(42)
        perm_alignments = []
        for _ in range(100):
            shuffled_dd = [delta_dists[i] for i in rng.permutation(len(delta_dists))]
            perm_cos = []
            for dv, dd in zip(delta_verts, shuffled_dd):
                nv, nd = np.linalg.norm(dv), np.linalg.norm(dd)
                if nv > 1e-10 and nd > 1e-10:
                    perm_cos.append(np.dot(dv, dd) / (nv * nd))
            perm_alignments.append(np.mean(perm_cos))

        results[layer] = {
            'per_sample_mean': float(np.mean(per_sample)),
            'per_sample_std': float(np.std(per_sample)),
            'mean_delta_alignment': mean_alignment,
            'permutation_mean': float(np.mean(perm_alignments)),
            'permutation_std': float(np.std(perm_alignments)),
            'n_samples': len(per_sample),
        }
    return results


def compute_prediction_stats(records: List[dict], scale: str) -> dict:
    # Per-group and overall accuracy (orig / swap / both-correct).
    stats = {'scale': scale}
    total_correct_orig, total_correct_swap, total_both, total_n = 0, 0, 0, 0

    for group in GROUP_ORDER:
        group_recs = [r for r in records if r['group'] == group]
        n = len(group_recs)
        c_orig = sum(1 for r in group_recs 
if r['is_correct_orig'])
        c_swap = sum(1 for r in group_recs if r['is_correct_swap'])
        c_both = sum(1 for r in group_recs if r['is_correct_orig'] and r['is_correct_swap'])
        stats[f'{group}_n'] = n
        stats[f'{group}_acc_orig'] = c_orig / n if n > 0 else 0
        stats[f'{group}_acc_swap'] = c_swap / n if n > 0 else 0
        stats[f'{group}_acc_both'] = c_both / n if n > 0 else 0
        total_correct_orig += c_orig
        total_correct_swap += c_swap
        total_both += c_both
        total_n += n

    stats['overall_acc_orig'] = total_correct_orig / total_n if total_n > 0 else 0
    stats['overall_acc_swap'] = total_correct_swap / total_n if total_n > 0 else 0
    stats['overall_acc_both'] = total_both / total_n if total_n > 0 else 0
    stats['overall_n'] = total_n
    return stats


# ============================================================================
# Saving & Loading
# ============================================================================

def get_representative_layers(all_layers, n=5):
    # Evenly spaced subset of n layers (all of them if fewer than n).
    if len(all_layers) <= n:
        return list(all_layers)
    indices = np.linspace(0, len(all_layers) - 1, n, dtype=int)
    return [all_layers[i] for i in indices]


def save_scale_results(
    scale, swap_records, quad_records,
    within_cat_consistency, sign_corrected_consistency,
    cross_alignment, pred_stats, target_layers,
    category_validity, delta_heatmaps,
    output_dir, both_correct_tag="all_pairs",
    save_alignment=True,
    delta_norms=None, # <-- NEW: {layer: pd.DataFrame | None}
):
    """Save all per-scale results to disk.

    Args:
        save_alignment: If False, skip writing cross_alignment_{scale}.json.
                        Set False during Phase A save; call save_cross_alignment()
                        separately after Phase B completes.
    """
    csv_dir = os.path.join(output_dir, 'csv')
    json_dir = os.path.join(output_dir, 'json')
    os.makedirs(csv_dir, exist_ok=True)
    os.makedirs(json_dir, exist_ok=True)

    # 1. 
Predictions CSV (tagged so all_pairs and both_correct don't overwrite each other)
    pred_rows = []
    for r in swap_records:
        pred_rows.append({
            'index': r['index'], 'group': r['group'], 'category': r['category'],
            'pred_orig': r['pred_orig'], 'pred_swap': r['pred_swap'],
            'is_correct_orig': r['is_correct_orig'], 'is_correct_swap': r['is_correct_swap'],
        })
    pd.DataFrame(pred_rows).to_csv(
        os.path.join(csv_dir, f'predictions_{scale}_{both_correct_tag}.csv'), index=False)

    # 2. Within-category consistency JSON
    # Keys flattened to '{cat}_L{layer}' strings for JSON serialization.
    wc_data = {}
    for (cat, layer), vals in within_cat_consistency.items():
        wc_data[f'{cat}_L{layer}'] = vals
    with open(os.path.join(json_dir, f'within_cat_consistency_{scale}_{both_correct_tag}.json'), 'w') as f:
        json.dump(wc_data, f, indent=2)

    # 3. Sign-corrected consistency JSON
    sc_data = {}
    for (group, layer), vals in sign_corrected_consistency.items():
        sc_data[f'{group}_L{layer}'] = vals
    with open(os.path.join(json_dir, f'sign_corrected_consistency_{scale}_{both_correct_tag}.json'), 'w') as f:
        json.dump(sc_data, f, indent=2)

    # 4. Cross-group alignment JSON (only when save_alignment=True, i.e. after Phase B)
    if save_alignment:
        alignment_data = {}
        for layer, vals in cross_alignment.items():
            alignment_data[f'L{layer}'] = vals
        with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f:
            json.dump(alignment_data, f, indent=2)

    # 5. Prediction stats JSON
    with open(os.path.join(json_dir, f'pred_stats_{scale}.json'), 'w') as f:
        json.dump(pred_stats, f, indent=2)

    # 6. Category validity JSON (Fix 8)
    with open(os.path.join(json_dir, f'category_validity_{scale}.json'), 'w') as f:
        json.dump(category_validity, f, indent=2)

    # 7. Delta heatmap CSVs (Fix 7)
    for layer, df in delta_heatmaps.items():
        if df is not None:
            df.to_csv(os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{both_correct_tag}.csv'))

    # 8. 
Delta norm CSVs (mean L2 norm of delta vectors per category per layer)
    if delta_norms:
        for layer, df in delta_norms.items():
            if df is not None:
                df.to_csv(os.path.join(csv_dir, f'delta_norm_{scale}_L{layer}_{both_correct_tag}.csv'))


    logger.info(f"Saved results for scale={scale} ({both_correct_tag}) to {output_dir}")


def save_vectors_npz(scale, swap_records, target_layers, output_dir):
    """Save swap-pair vectors with correctness metadata to NPZ (Phase A result).

    This enables post-hoc filtering (both_correct, all_with_validity) from saved data.
    Cross-group vectors are saved separately by save_cross_group_npz() after Phase B.
    """
    rep_layers = list(target_layers) # save ALL layers (not just 5 representative)
    delta_data = {}
    for layer in rep_layers:
        # Parallel per-layer arrays: delta vectors + metadata, and separately
        # the raw orig/swap hidden states with their category labels.
        groups_list, categories_list, vectors = [], [], []
        orig_vecs, swap_vecs, labels = [], [], []
        correct_orig_list, correct_swap_list, indices_list = [], [], []
        for r in swap_records:
            if layer in r['delta']:
                groups_list.append(r['group'])
                categories_list.append(r['category'])
                vectors.append(r['delta'][layer])
                correct_orig_list.append(r['is_correct_orig'])
                correct_swap_list.append(r['is_correct_swap'])
                indices_list.append(r['index'])
            if layer in r['hs_orig'] and layer in r['hs_swap']:
                orig_vecs.append(r['hs_orig'][layer])
                swap_vecs.append(r['hs_swap'][layer])
                labels.append(r['category'])
        if vectors:
            delta_data[f'delta_L{layer}'] = np.array(vectors)
            delta_data[f'groups_L{layer}'] = np.array(groups_list)
            delta_data[f'categories_L{layer}'] = np.array(categories_list)
            delta_data[f'is_correct_orig_L{layer}'] = np.array(correct_orig_list)
            delta_data[f'is_correct_swap_L{layer}'] = np.array(correct_swap_list)
            delta_data[f'indices_L{layer}'] = np.array(indices_list)
        if orig_vecs:
            delta_data[f'orig_L{layer}'] = np.array(orig_vecs)
            delta_data[f'swap_L{layer}'] = np.array(swap_vecs)
            delta_data[f'labels_L{layer}'] = np.array(labels)

    npz_dir = 
os.path.join(output_dir, 'npz') + os.makedirs(npz_dir, exist_ok=True) + np.savez_compressed(os.path.join(npz_dir, f'vectors_{scale}.npz'), **delta_data) + logger.info(f"Saved vectors NPZ with correctness metadata for scale={scale}") + + +def save_cross_group_npz(scale, quad_records, target_layers, output_dir): + """Save cross-group delta vectors to NPZ (Phase B result).""" + if not quad_records: + return + rep_layers = list(target_layers) + cg_data = {} + for layer in rep_layers: + dverts, ddists = [], [] + for rec in quad_records: + if layer in rec['delta_vert'] and layer in rec['delta_dist']: + dverts.append(rec['delta_vert'][layer]) + ddists.append(rec['delta_dist'][layer]) + if dverts: + cg_data[f'delta_vert_L{layer}'] = np.array(dverts) + cg_data[f'delta_dist_L{layer}'] = np.array(ddists) + npz_dir = os.path.join(output_dir, 'npz') + os.makedirs(npz_dir, exist_ok=True) + np.savez_compressed(os.path.join(npz_dir, f'cross_group_vectors_{scale}.npz'), **cg_data) + logger.info(f"Saved cross-group vectors NPZ for scale={scale}") + + +def save_cross_alignment(scale, cross_alignment, output_dir): + """Save cross-group alignment data to JSON (Phase B result).""" + json_dir = os.path.join(output_dir, 'json') + os.makedirs(json_dir, exist_ok=True) + alignment_data = {f'L{layer}': vals for layer, vals in cross_alignment.items()} + with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f: + json.dump(alignment_data, f, indent=2) + logger.info(f"Saved cross-alignment JSON for scale={scale}") + + +def load_scale_consistency(output_dir, scale, tag='all_pairs'): + """Load sign-corrected consistency.""" + path = os.path.join(output_dir, 'json', f'sign_corrected_consistency_{scale}_{tag}.json') + if not os.path.exists(path): + return {} + with open(path) as f: + raw = json.load(f) + result = {} + for key, vals in raw.items(): + parts = key.rsplit('_L', 1) + if len(parts) == 2: + result[(parts[0], int(parts[1]))] = vals + return result + + +def 
load_within_cat_consistency(output_dir, scale, tag='all_pairs'):
    """Load within-category consistency as {(category, layer): vals}."""
    path = os.path.join(output_dir, 'json', f'within_cat_consistency_{scale}_{tag}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        raw = json.load(f)
    result = {}
    for key, vals in raw.items():
        # Keys are '{category}_L{layer}'; split on the last '_L'.
        parts = key.rsplit('_L', 1)
        if len(parts) == 2:
            result[(parts[0], int(parts[1]))] = vals
    return result


def load_scale_alignment(output_dir, scale):
    """Load cross-group alignment JSON as {layer: vals}; empty dict if absent."""
    path = os.path.join(output_dir, 'json', f'cross_alignment_{scale}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        raw = json.load(f)
    result = {}
    for key, vals in raw.items():
        # Keys are 'L{layer}'.
        result[int(key.replace('L', ''))] = vals
    return result


def load_delta_heatmaps(output_dir, scale, tag='all_pairs'):
    """Load per-layer delta-similarity CSVs as {layer: DataFrame}."""
    import glob as glob_mod
    pattern = os.path.join(output_dir, 'csv', f'delta_similarity_{scale}_L*_{tag}.csv')
    files = glob_mod.glob(pattern)
    result = {}
    for fpath in files:
        basename = os.path.basename(fpath)
        # delta_similarity_{scale}_L{layer}_{tag}.csv
        part = basename.replace(f'delta_similarity_{scale}_L', '').replace(f'_{tag}.csv', '')
        try:
            layer = int(part)
        except ValueError:
            # Skip files whose layer segment is not an integer (foreign files).
            continue
        result[layer] = pd.read_csv(fpath, index_col=0)
    return result


# ============================================================================
# Visualization
# ============================================================================

def plot_within_cat_consistency_trajectory(within_cat, scale, model_type, save_path):
    """Plot within-category delta consistency across layers."""
    fig, ax = plt.subplots(figsize=(12, 6))
    cat_colors = CAT_COLORS
    for cat in CATEGORY_ORDER:
        layers, vals = [], []
        # Sort by layer index so the line is drawn left-to-right.
        for (c, l), v in sorted(within_cat.items(), key=lambda x: x[0][1]):
            if c == cat:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=cat_colors[cat], label=cat, linewidth=2, markersize=3)
    ax.set_xlabel('Layer Index')
    
ax.set_ylabel('Within-Category Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency', fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_sign_corrected_consistency_trajectory(sign_corrected, scale, model_type, save_path):
    """Plot sign-corrected group consistency across layers."""
    fig, ax = plt.subplots(figsize=(12, 6))
    colors = GROUP_COLORS
    for group in GROUP_ORDER:
        layers, vals = [], []
        # Sort by layer index so the trajectory is monotone in x.
        for (g, l), v in sorted(sign_corrected.items(), key=lambda x: x[0][1]):
            if g == group:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=colors[group], label=group, linewidth=2, markersize=3)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Sign-Corrected Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency', fontweight='bold')
    ax.legend(fontsize=11)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_cross_group_alignment_trajectory(cross_alignment, scale, model_type, save_path):
    """Plot per-layer cross-group alignment vs. its permutation control band."""
    fig, ax = plt.subplots(figsize=(12, 6))
    layers = sorted(cross_alignment.keys())
    actual = [cross_alignment[l]['per_sample_mean'] for l in layers]
    mean_delta = [cross_alignment[l]['mean_delta_alignment'] for l in layers]
    perm_mean = [cross_alignment[l]['permutation_mean'] for l in layers]
    perm_std = [cross_alignment[l]['permutation_std'] for l in layers]

    ax.plot(layers, actual, '-o', color='#d62728', label='cos(d_vert, d_dist) per-sample mean',
            linewidth=2.5, markersize=3)
    ax.plot(layers, mean_delta, '--s', color='#e377c2', label='cos(mean_d_vert, mean_d_dist)',
            linewidth=1.5, markersize=3)
    ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', 
linewidth=1.5) + ax.fill_between(layers, + [m - 2*s for m, s in zip(perm_mean, perm_std)], + [m + 2*s for m, s in zip(perm_mean, perm_std)], + alpha=0.2, color='gray') + ax.set_xlabel('Layer Index') + ax.set_ylabel('Cosine Alignment') + ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)', fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +# Fix 7: Delta heatmap visualization + +def plot_delta_heatmap(sim_df, title, save_path): + """Plot delta-based similarity heatmap.""" + plt.figure(figsize=(10, 8)) + available_order = [c for c in CATEGORY_ORDER if c in sim_df.index] + sim_df_ordered = sim_df.loc[available_order, available_order] + + annot = sim_df_ordered.round(4).astype(str) + sns.heatmap(sim_df_ordered, annot=annot, fmt='', cmap='RdBu_r', + center=0, vmin=-1, vmax=1, square=True, linewidths=0.5, + cbar_kws={'label': 'Cosine Similarity'}) + plt.title(title, fontsize=14, fontweight='bold') + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved delta heatmap: {save_path}") + + +# Fix 6: Prediction stats visualization + +def plot_pred_stats_bars(all_pred_stats, model_type, save_path): + """Bar chart: per-group accuracy (orig/swap/both) across scales.""" + fig, axes = plt.subplots(1, len(GROUP_ORDER), figsize=(7 * len(GROUP_ORDER), 6)) + if len(GROUP_ORDER) == 1: + axes = [axes] + + available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)] + if not available: + # Fallback: use whatever scales are present (preserves insertion order) + seen = [] + for d in all_pred_stats: + if d['scale'] not in seen: + seen.append(d['scale']) + available = seen + + for idx, group in enumerate(GROUP_ORDER): + ax = axes[idx] + x = np.arange(3) # orig, swap, both + width = 0.8 / len(available) + for i, scale in 
enumerate(available):
            entry = next((d for d in all_pred_stats if d['scale'] == scale), None)
            if entry is None:
                continue
            vals = [entry.get(f'{group}_acc_orig', 0),
                    entry.get(f'{group}_acc_swap', 0),
                    entry.get(f'{group}_acc_both', 0)]
            # Center the grouped bars around each x tick.
            offset = (i - len(available) / 2 + 0.5) * width
            color = SCALE_COLORS.get(scale, 'gray')
            ax.bar(x + offset, vals, width, label=scale, color=color)
        ax.set_xticks(x)
        ax.set_xticklabels(['orig', 'swap', 'both'])
        ax.set_ylabel('Accuracy')
        ax.set_title(group, fontweight='bold')
        ax.legend(fontsize=7)
        ax.set_ylim(0, 1.1)
        # Chance level for the binary spatial questions.
        ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
        ax.grid(True, alpha=0.3, axis='y')

    fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_pred_stats_trajectory(all_pred_stats, model_type, save_path):
    """Line plot: acc_both trajectory across scales per group."""
    fig, ax = plt.subplots(figsize=(10, 6))
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)]
    if not available:
        # Fallback: preserve insertion order of whatever scales are present.
        seen = []
        for d in all_pred_stats:
            if d['scale'] not in seen:
                seen.append(d['scale'])
        available = seen
    colors = GROUP_COLORS

    for group in GROUP_ORDER:
        x_vals, y_vals = [], []
        for i, scale in enumerate(available):
            entry = next((d for d in all_pred_stats if d['scale'] == scale), None)
            if entry:
                x_vals.append(i)
                y_vals.append(entry.get(f'{group}_acc_both', 0))
        if x_vals:
            ax.plot(x_vals, y_vals, '-o', color=colors[group], label=group, linewidth=2.5, markersize=6)

    ax.set_xticks(range(len(available)))
    ax.set_xticklabels(available)
    ax.set_xlabel('Scale')
    ax.set_ylabel('Accuracy (both correct)')
    ax.set_title(f'{model_type.upper()} - Both-Correct Accuracy Across Scales', fontweight='bold')
    ax.legend(fontsize=10)
    ax.set_ylim(0, 1.05)
    
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_pca_embeddings(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
    """3-panel 2D PCA per layer: embeddings by category, deltas by group, deltas by category."""
    data = np.load(vectors_npz_path, allow_pickle=True)
    # NOTE(review): relies on NpzFile supporting Mapping-style .get() — confirm numpy version.
    layer_keys = [k for k in data.files if k.startswith('orig_L')]
    layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys])

    cat_colors = CAT_COLORS

    for layer in layers:
        orig = data.get(f'orig_L{layer}')
        swap = data.get(f'swap_L{layer}')
        labels = data.get(f'labels_L{layer}')
        deltas = data.get(f'delta_L{layer}')
        cats = data.get(f'categories_L{layer}')
        groups = data.get(f'groups_L{layer}')

        if bc_only and deltas is not None:
            co = data.get(f'is_correct_orig_L{layer}')
            cs = data.get(f'is_correct_swap_L{layer}')
            if co is not None and cs is not None:
                bc_mask = co.astype(bool) & cs.astype(bool)
                # Apply the mask only where lengths line up: orig/swap/labels can be
                # shorter than the delta arrays when hidden states were missing.
                if orig is not None and len(orig) == len(bc_mask):
                    orig = orig[bc_mask]
                    swap = swap[bc_mask]
                    labels = labels[bc_mask] if labels is not None else None
                if len(deltas) == len(bc_mask):
                    deltas = deltas[bc_mask]
                    cats = cats[bc_mask] if cats is not None else None
                    groups = groups[bc_mask] if groups is not None else None

        if orig is None or swap is None or len(orig) == 0:
            continue

        fig, axes = plt.subplots(1, 3, figsize=(24, 7))

        # Fit ONE PCA on orig+swap jointly so both clouds share a projection.
        pca = PCA(n_components=2)
        all_vecs = np.vstack([orig, swap])
        all_pca = pca.fit_transform(all_vecs)
        orig_pca = all_pca[:len(orig)]
        swap_pca = all_pca[len(orig):]

        ax = axes[0]
        for cat in CATEGORY_ORDER:
            mask = np.array([str(l) == cat for l in labels])
            if mask.any():
                ax.scatter(orig_pca[mask, 0], orig_pca[mask, 1],
                           c=cat_colors.get(cat, 'gray'), label=f'{cat} (orig)',
                           alpha=0.5, s=15, marker='o')
                ax.scatter(swap_pca[mask, 0], swap_pca[mask, 1],
                           c=cat_colors.get(cat, 'gray'),
                           alpha=0.5, s=15, marker='x')
        ax.set_title('Embeddings by Category\n(o=orig, x=swap)', fontsize=11)
        ax.legend(fontsize=7, ncol=2)
        ax.grid(True, alpha=0.2)

        ax = axes[1]
        if deltas is not None and cats is not None:
            pca_d = PCA(n_components=2)
            delta_pca = pca_d.fit_transform(deltas)
            group_colors = GROUP_COLORS
            if groups is not None:
                for group in GROUP_ORDER:
                    mask = np.array([str(g) == group for g in groups])
                    if mask.any():
                        ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
                                   c=group_colors.get(group, 'gray'), label=group, alpha=0.5, s=15)
            ax.set_title('Delta Vectors by Group', fontsize=11)
            ax.legend(fontsize=9)
            ax.grid(True, alpha=0.2)

        ax = axes[2]
        # Reuses delta_pca from panel 1 — same guard condition makes this safe.
        if deltas is not None and cats is not None:
            for cat in CATEGORY_ORDER:
                mask = np.array([str(c) == cat for c in cats])
                if mask.any():
                    ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
                               c=cat_colors.get(cat, 'gray'), label=cat, alpha=0.5, s=15)
            ax.set_title('Delta Vectors by Category', fontsize=11)
            ax.legend(fontsize=8, ncol=2)
            ax.grid(True, alpha=0.2)

        fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - PCA', fontweight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, bbox_inches='tight')
        plt.close()

    logger.info(f"Saved PCA plots to {save_dir}")


def plot_pca_3d(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
    """Generate single-panel 3D PCA figure (Delta Vectors by Category) per layer.

    Figure style follows pca_new.py: larger fonts, single panel, PC3 via fig.text.
    
+ """ + # ── Font / marker sizes (from pca_new.py) ───────────────────────────────── + _TITLE_FS = 22 + _AXIS_FS = 18 + _TICK_FS = 14 + _LEGEND_FS = 16 + _SUPTITLE_FS = 24 + _SCATTER_S = 30 + + def _normalise_label(raw): + return 'below' if str(raw) == 'under' else str(raw) + + def scatter3d(ax, xs, ys, zs, c, label, alpha=0.55, s=_SCATTER_S, marker='o'): + ax.scatter(xs, ys, zs, c=c, label=label, alpha=alpha, s=s, marker=marker) + + data = np.load(vectors_npz_path, allow_pickle=True) + layer_keys = [k for k in data.files if k.startswith('orig_L')] + layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys]) + + if not layers: + logger.info(f" [pca_3d] No orig_L* keys found in {vectors_npz_path}") + return + + os.makedirs(save_dir, exist_ok=True) + + for layer in layers: + deltas = data.get(f'delta_L{layer}') + cats = data.get(f'categories_L{layer}') + + # ── Both-correct filtering ──────────────────────────────────────────── + if bc_only and deltas is not None: + co = data.get(f'is_correct_orig_L{layer}') + cs = data.get(f'is_correct_swap_L{layer}') + if co is not None and cs is not None: + bc_mask = co.astype(bool) & cs.astype(bool) + if len(deltas) == len(bc_mask): + deltas = deltas[bc_mask] + cats = cats[bc_mask] if cats is not None else None + + has_delta = (deltas is not None and len(deltas) >= 3) + if not has_delta: + logger.info(f" [pca_3d] Layer {layer}: no delta vectors, skipping") + continue + + # ── PCA on delta vectors ────────────────────────────────────────────── + pca_d = PCA(n_components=3) + delta_proj = pca_d.fit_transform(deltas) + ev = pca_d.explained_variance_ratio_ + + # ── Figure ──────────────────────────────────────────────────────────── + fig = plt.figure(figsize=(13, 10)) + ax = fig.add_subplot(111, projection='3d') + + if cats is not None: + for cat in CATEGORY_ORDER: + mask = np.array([_normalise_label(c) == cat for c in cats]) + if not mask.any(): + continue + scatter3d(ax, + delta_proj[mask, 0], + delta_proj[mask, 1], + 
delta_proj[mask, 2],
                          c=CAT_COLORS.get(cat, 'gray'),
                          label=cat)

        ax.set_title('Delta Vectors by Category', fontsize=_TITLE_FS, pad=12)
        ax.set_xlabel(f'PC1 ({ev[0]:.1%})', fontsize=_AXIS_FS, labelpad=25)
        ax.set_ylabel(f'PC2 ({ev[1]:.1%})', fontsize=_AXIS_FS, labelpad=25)
        # PC3 label is placed manually via fig.text below (pca_new.py style).
        ax.set_zlabel('')
        ax.tick_params(axis='both', labelsize=_TICK_FS)
        ax.legend(fontsize=_LEGEND_FS, ncol=2, loc='upper right')

        # ── Draw to get accurate axes bbox, then place PC3 label ─────────────
        fig.canvas.draw()
        ax_pos = ax.get_position()

        pc3_x = ax_pos.x1 + 0.04
        fig.text(
            pc3_x,
            (ax_pos.y0 + ax_pos.y1) / 2,
            f'PC3 ({ev[2]:.1%})',
            fontsize=_AXIS_FS,
            va='center', ha='center',
            rotation=90,
        )

        # Center the suptitle over the 3D axes rather than the full figure.
        ax_cx = (ax_pos.x0 + ax_pos.x1) / 2
        fig.suptitle(
            f'{model_type.upper()} ({scale}) — L{layer}',
            fontsize=_SUPTITLE_FS, fontweight='bold',
            x=ax_cx, y=1.01,
        )

        out_path = os.path.join(save_dir, f'pca_{scale}_L{layer}.png')
        plt.savefig(out_path, dpi=200, bbox_inches='tight', pad_inches=0.5)
        plt.close()

    logger.info(f"Saved 3D PCA plots to {save_dir}")


# Cross-scale plots

def plot_cross_scale_consistency(all_consistency, model_type, save_path, title_prefix='Sign-Corrected'):
    """One subplot per group: consistency-vs-layer curves overlaid across scales."""
    fig, axes = plt.subplots(1, 3, figsize=(21, 6))

    for idx, group in enumerate(GROUP_ORDER):
        ax = axes[idx]
        for scale in SCALE_ORDER:
            if scale not in all_consistency:
                continue
            consistency = all_consistency[scale]
            layers, vals = [], []
            for (g, l), v in sorted(consistency.items(), key=lambda x: x[0][1]):
                if g == group:
                    layers.append(l)
                    vals.append(v['mean'])
            if layers:
                ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
                        label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
        ax.set_xlabel('Layer Index')
        ax.set_ylabel('Consistency')
        ax.set_title(group, fontweight='bold')
        ax.legend(fontsize=9)
        ax.grid(True, alpha=0.3)

    fig.suptitle(f'{model_type.upper()} - {title_prefix} Consistency Across Scales',
                 fontsize=15, 
fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_cross_scale_within_cat_consistency(all_within_cat, model_type, save_path):
    """Cross-scale within-category consistency."""
    # 2x3 grid: one subplot per category in CATEGORY_ORDER.
    fig, axes = plt.subplots(2, 3, figsize=(21, 12))

    for idx, cat in enumerate(CATEGORY_ORDER):
        ax = axes[idx // 3][idx % 3]
        for scale in SCALE_ORDER:
            if scale not in all_within_cat:
                continue
            wc = all_within_cat[scale]
            layers, vals = [], []
            for (c, l), v in sorted(wc.items(), key=lambda x: x[0][1]):
                if c == cat:
                    layers.append(l)
                    vals.append(v['mean'])
            if layers:
                ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
                        label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
        ax.set_xlabel('Layer Index')
        ax.set_ylabel('Consistency')
        ax.set_title(cat, fontweight='bold')
        ax.legend(fontsize=8)
        ax.grid(True, alpha=0.3)

    fig.suptitle(f'{model_type.upper()} - Within-Category Consistency Across Scales',
                 fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_cross_scale_alignment(all_alignment, model_type, save_path):
    """Overlay per-sample cross-group alignment curves for every scale."""
    fig, ax = plt.subplots(figsize=(12, 6))
    for scale in SCALE_ORDER:
        if scale not in all_alignment:
            continue
        alignment = all_alignment[scale]
        layers = sorted(alignment.keys())
        vals = [alignment[l]['per_sample_mean'] for l in layers]
        ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
                label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('cos(d_vert, d_dist)')
    ax.set_title(f'{model_type.upper()} - Cross-Group Alignment Across Scales\n'
                 f'(High=entangled, Low=disentangled)', fontweight='bold')
    ax.legend(fontsize=10)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
+ plt.close() + logger.info(f"Saved: {save_path}") + + +# Fix 7: Delta-based trajectory (cross-layer, per-scale) + +def plot_delta_trajectory(all_delta_heatmaps, model_type, save_path): + """Cross-layer trajectory of delta-based similarities for key pairs.""" + pairs = [ + ('above', 'far', 'above-far'), ('below', 'close', 'below-close'), + ('left', 'right', 'left-right'), + ] + fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6)) + if len(pairs) == 1: + axes = [axes] + + for idx, (cat1, cat2, label) in enumerate(pairs): + ax = axes[idx] + for scale in SCALE_ORDER: + if scale not in all_delta_heatmaps: + continue + hm = all_delta_heatmaps[scale] + layers = sorted(hm.keys()) + vals = [] + valid_layers = [] + for l in layers: + df = hm[l] + if df is not None and cat1 in df.index and cat2 in df.columns: + valid_layers.append(l) + vals.append(df.loc[cat1, cat2]) + if valid_layers: + ax.plot(valid_layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), + label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) + ax.set_xlabel('Layer Index') + ax.set_ylabel('Delta Cosine Similarity') + ax.set_title(label, fontweight='bold') + ax.legend(fontsize=9) + ax.grid(True, alpha=0.3) + ax.axhline(y=0, color='gray', linestyle='--', alpha=0.5) + + fig.suptitle(f'{model_type.upper()} - Delta-Based Similarity Trajectory', + fontsize=15, fontweight='bold', y=1.02) + plt.tight_layout() + plt.savefig(save_path, dpi=300, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {save_path}") + + +def plot_summary_barplot(all_consistency, all_alignment, model_type, save_path): + available_scales = [s for s in SCALE_ORDER if s in all_consistency] + if not available_scales: + return + + sample_cons = all_consistency[available_scales[0]] + max_layer = max(l for (_, l) in sample_cons.keys()) + + fig, axes = plt.subplots(1, 2, figsize=(16, 6)) + + ax = axes[0] + x = np.arange(len(GROUP_ORDER)) + width = 0.8 / len(available_scales) + for i, scale in enumerate(available_scales): 
        cons = all_consistency[scale]
        vals = [cons.get((g, max_layer), {}).get('mean', 0) for g in GROUP_ORDER]
        offset = (i - len(available_scales) / 2 + 0.5) * width
        ax.bar(x + offset, vals, width,
               label=SCALE_DISPLAY_NAMES.get(scale, scale),
               color=SCALE_COLORS.get(scale, 'gray'))
    ax.set_xticks(x)
    ax.set_xticklabels(GROUP_ORDER)
    ax.set_ylabel('Sign-Corrected Consistency')
    ax.set_title(f'Consistency at Layer {max_layer}', fontweight='bold')
    ax.legend(fontsize=8)
    ax.grid(True, alpha=0.3, axis='y')

    ax = axes[1]
    available_align = [s for s in available_scales if s in all_alignment]
    if available_align:
        vals = [all_alignment[s].get(max_layer, {}).get('per_sample_mean', 0) for s in available_align]
        colors = [SCALE_COLORS.get(s, 'gray') for s in available_align]
        ax.bar(range(len(vals)), vals, color=colors)
        ax.set_xticks(range(len(vals)))
        ax.set_xticklabels([SCALE_DISPLAY_NAMES.get(s, s) for s in available_align])
        ax.set_ylabel('cos(d_vert, d_dist)')
        ax.set_title(f'Cross-Group Alignment at L{max_layer}\n(Lower=disentangled)', fontweight='bold')
        ax.grid(True, alpha=0.3, axis='y')

    fig.suptitle(f'{model_type.upper()} - Summary at Deepest Layer', fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


# ============================================================================
# Main Pipeline
# ============================================================================

def process_scale(args, scale, swap_pairs, quads):
    """Run the full pipeline for one model scale.

    Phases: A extract swap-pair features -> C_A analyze -> D_A save -> E_A plot,
    then (unless --skip-phase-b) B extract cross-group features -> C_B/D_B/E_B.
    """
    # Resolve model path from the correct config dict
    if args.model_type in MODEL_CONFIGS_NEW:
        cls_name, model_path = MODEL_CONFIGS_NEW[args.model_type][scale]
    else:
        model_path = MODEL_CONFIGS[args.model_type][scale]
        cls_name = None

    logger.info(f"\n{'='*60}")
    logger.info(f"Processing {args.model_type} - {scale}" +
                (f" [{cls_name}]" if cls_name else ""))
    logger.info(f"Model path: {model_path}")
    logger.info(f"{'='*60}")

    extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device)
    target_layers = extractor.target_layers

    vlm_key = get_model_key(args.model_type, scale)
    output_dir = os.path.join(args.output_dir, vlm_key)
    plots_dir = os.path.join(output_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    # ── Phase A: Extract swap pair features ───────────────────────────────────
    logger.info("\n--- Phase A: Extracting swap pair features ---")
    swap_records = extract_swap_features(extractor, swap_pairs,
                                         max_samples_per_category=args.max_samples_per_category)

    # ── Phase C_A: Analysis of swap-pair data ─────────────────────────────────
    logger.info("\n--- Phase C_A: Analysis (swap pairs) ---")

    # Fix 8: Category validity check
    category_validity = check_category_validity(swap_records, scale)
    unreliable_cats = [c for c, v in category_validity.items() if not v['reliable']]
    if unreliable_cats:
        logger.warning(f"  Unreliable categories: {unreliable_cats}")

    # Fix 5: Two types of consistency (all pairs)
    within_cat_all, sign_corrected_all = compute_delta_consistency(swap_records, target_layers)

    # Fix 8: Both-correct filtered consistency
    both_correct_records = filter_both_correct(swap_records)
    logger.info(f"  Both-correct pairs: {len(both_correct_records)}/{len(swap_records)}")
    within_cat_bc, sign_corrected_bc = compute_delta_consistency(both_correct_records, target_layers)

    pred_stats = compute_prediction_stats(swap_records, scale)

    # Fix 7: Delta-based heatmaps (for all layers)
    delta_heatmaps_all = {}
    delta_heatmaps_bc = {}
    delta_norms_all = {}
    delta_norms_bc = {}
    for layer in target_layers:
        delta_heatmaps_all[layer] = compute_delta_similarity_matrix(swap_records, layer)
        delta_norms_all[layer] = compute_delta_norm_per_category(swap_records, layer)
        if both_correct_records:
            delta_heatmaps_bc[layer] = compute_delta_similarity_matrix(both_correct_records, layer)
            delta_norms_bc[layer] = compute_delta_norm_per_category(both_correct_records, layer)


    # Log Phase A key results
    max_layer = max(target_layers)
    for group in GROUP_ORDER:
        key = (group, max_layer)
        if key in sign_corrected_all:
            logger.info(f"  Sign-corrected [{group}, L{max_layer}]: "
                        f"{sign_corrected_all[key]['mean']:.4f} +/- {sign_corrected_all[key]['std']:.4f}")
    logger.info(f"  Accuracy orig={pred_stats['overall_acc_orig']:.1%}, "
                f"swap={pred_stats['overall_acc_swap']:.1%}, "
                f"both={pred_stats['overall_acc_both']:.1%}")

    # ── Phase D_A: Save Phase A results ───────────────────────────────────────
    logger.info("\n--- Phase D_A: Saving Phase A results ---")

    save_vectors_npz(scale, swap_records, target_layers, output_dir)

    # Saved twice: once untagged-filtered ('all_pairs'), once both-correct only.
    save_scale_results(
        scale, swap_records, [],
        within_cat_all, sign_corrected_all,
        {}, pred_stats, target_layers,
        category_validity, delta_heatmaps_all,
        output_dir, both_correct_tag='all_pairs',
        save_alignment=False,
        delta_norms=delta_norms_all,
    )
    if both_correct_records:
        save_scale_results(
            scale, both_correct_records, [],
            within_cat_bc, sign_corrected_bc,
            {}, pred_stats, target_layers,
            category_validity, delta_heatmaps_bc,
            output_dir, both_correct_tag='both_correct',
            save_alignment=False,
            delta_norms=delta_norms_bc,
        )

    # ── Phase E_A: Per-scale plots from Phase A data ──────────────────────────
    if args.phase1_only:
        logger.info("\n--- Phase E_A: Per-scale plots [SKIPPED: --phase1-only] ---")
    else:
        logger.info("\n--- Phase E_A: Per-scale plots (swap-pair data) ---")

        for condition, wc_data, sc_data in [
            ('all', within_cat_all, sign_corrected_all),
            ('both_correct', within_cat_bc, sign_corrected_bc),
        ]:
            if condition == 'both_correct' and not both_correct_records:
                continue

            cond_dir = os.path.join(plots_dir, condition)
            os.makedirs(cond_dir, exist_ok=True)

            wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
            sc_dir = os.path.join(cond_dir, 'sign_corrected')
            os.makedirs(wc_dir, exist_ok=True)
            os.makedirs(sc_dir, exist_ok=True)

            plot_within_cat_consistency_trajectory(
                wc_data, scale, args.model_type,
                os.path.join(wc_dir, f'within_cat_consistency_{scale}.png'))

            plot_sign_corrected_consistency_trajectory(
                sc_data, scale, args.model_type,
                os.path.join(sc_dir, f'sign_corrected_consistency_{scale}.png'))

        # PCA (from full NPZ) — 2D and 3D, all-pairs and both-correct
        npz_path = os.path.join(output_dir, 'npz', f'vectors_{scale}.npz')
        if os.path.exists(npz_path):
            pca_dir = os.path.join(plots_dir, 'all', 'pca')
            pca_3d_dir = os.path.join(plots_dir, 'all', 'pca_3d')
            bc_pca_dir = os.path.join(plots_dir, 'both_correct', 'pca')
            bc_pca_3d_dir = os.path.join(plots_dir, 'both_correct', 'pca_3d')
            for d in (pca_dir, pca_3d_dir, bc_pca_dir, bc_pca_3d_dir):
                os.makedirs(d, exist_ok=True)
            plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir)
            plot_pca_3d(npz_path, scale, args.model_type, pca_3d_dir)
            plot_pca_embeddings(npz_path, scale, args.model_type, bc_pca_dir, bc_only=True)
            plot_pca_3d(npz_path, scale, args.model_type, bc_pca_3d_dir, bc_only=True)

        if pred_stats:
            pred_plot_dir = os.path.join(plots_dir, 'all', 'pred_stats')
            os.makedirs(pred_plot_dir, exist_ok=True)
            plot_pred_stats_bars([pred_stats], args.model_type,
                                 os.path.join(pred_plot_dir, f'pred_stats_{scale}.png'))

        if pred_stats:
            acc_dir = os.path.join(output_dir, 'accuracy')
            logger.info(f"\n--- Accuracy Charts [{scale}] ---")
            run_accuracy_charts([pred_stats], {scale: category_validity}, args.model_type, acc_dir)

        logger.info(f"\n--- All-Layer Heatmaps [{scale}] ---")
        run_all_layer_heatmaps(output_dir, args.model_type, [scale])
        logger.info(f"\n--- All-Layer PCA [{scale}] ---")
        run_all_layer_pca(output_dir, args.model_type, [scale])

    # ── Phase B: Extract cross-group features ─────────────────────────────────
    skip_b = getattr(args, 'skip_phase_b', False)
    if skip_b or not quads:
        if skip_b:
            logger.info("\n--- Phase B: Cross-group extraction [SKIPPED: --skip-phase-b] ---")
        quad_records = []
        cross_alignment = {}
    else:
        logger.info("\n--- Phase B: Extracting cross-group features ---")
        quad_records = extract_cross_group_features(extractor, quads)

        # ── Phase C_B: Cross-group analysis ───────────────────────────────────
        logger.info("\n--- Phase C_B: Analysis (cross-group) ---")
        cross_alignment = compute_cross_group_alignment(quad_records, target_layers)

        if max_layer in cross_alignment:
            ca = cross_alignment[max_layer]
            logger.info(f"  Cross-group alignment L{max_layer}: "
                        f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})")

        # ── Phase D_B: Save Phase B results ───────────────────────────────────
        logger.info("\n--- Phase D_B: Saving Phase B results ---")
        save_cross_group_npz(scale, quad_records, target_layers, output_dir)
        save_cross_alignment(scale, cross_alignment, output_dir)

        # ── Phase E_B: Cross-alignment plots ──────────────────────────────────
        if args.phase1_only:
            logger.info("\n--- Phase E_B: Cross-alignment plots [SKIPPED: --phase1-only] ---")
        else:
            logger.info("\n--- Phase E_B: Per-scale plots (cross-group data) ---")
            for condition in ['all', 'both_correct']:
                if condition == 'both_correct' and not both_correct_records:
                    continue
                ca_dir = os.path.join(plots_dir, condition, 'cross_alignment')
                os.makedirs(ca_dir, exist_ok=True)
                plot_cross_group_alignment_trajectory(
                    cross_alignment, scale, args.model_type,
                    os.path.join(ca_dir, f'cross_alignment_{scale}.png'))

    # Cleanup: drop large feature lists before releasing the model.
    del swap_records, quad_records, both_correct_records
    extractor.cleanup()

    logger.info(f"\n  Scale {scale} complete.")


# ============================================================================
# Accuracy Chart (integrated from accuracy_chart.py)
# ============================================================================

def 
_acc_plot_group_bars(pred_stats, model_type, ax_list):
    """Grouped bars (orig/swap/both) per group axis, one bar set per scale."""
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    x = np.arange(3)
    # max(..., 1) guards the division when no scales are available.
    width = 0.8 / max(len(available), 1)
    for idx, group in enumerate(GROUP_ORDER):
        ax = ax_list[idx]
        for i, scale in enumerate(available):
            entry = next((d for d in pred_stats if d['scale'] == scale), None)
            if entry is None:
                continue
            vals = [entry.get(f'{group}_acc_orig', 0),
                    entry.get(f'{group}_acc_swap', 0),
                    entry.get(f'{group}_acc_both', 0)]
            offset = (i - len(available) / 2 + 0.5) * width
            ax.bar(x + offset, vals, width, label=scale,
                   color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85)
        ax.set_xticks(x)
        ax.set_xticklabels(['orig', 'swap', 'both'], fontsize=10)
        ax.set_ylabel('Accuracy', fontsize=9)
        ax.set_title(group.capitalize(), fontweight='bold', fontsize=11,
                     color=GROUP_COLORS.get(group, 'black'))
        ax.legend(fontsize=7, ncol=2)
        ax.set_ylim(0, 1.15)
        # Chance level for the binary questions.
        ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
        ax.grid(True, alpha=0.3, axis='y')


def _acc_plot_both_trajectory(pred_stats, model_type, ax):
    """Per-group both-correct accuracy across scales, plus an overall line."""
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    x_ticks = range(len(available))
    for group in GROUP_ORDER:
        y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(f'{group}_acc_both', 0)
                  for s in available]
        ax.plot(x_ticks, y_vals, '-o', color=GROUP_COLORS.get(group, 'gray'),
                label=group, linewidth=2.5, markersize=7)
    y_overall = [next((d for d in pred_stats if d['scale'] == s), {}).get('overall_acc_both', 0)
                 for s in available]
    ax.plot(x_ticks, y_overall, '--s', color='black', label='overall',
            linewidth=2, markersize=6, alpha=0.7)
    ax.set_xticks(list(x_ticks))
    ax.set_xticklabels(available, fontsize=9)
    ax.set_xlabel('Scale', fontsize=9)
    ax.set_ylabel('Accuracy (both correct)', fontsize=9)
    ax.set_title('Both-Correct Accuracy Trajectory', fontweight='bold', fontsize=11)
    ax.legend(fontsize=9)
    ax.set_ylim(0, 1.05)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
    ax.grid(True, alpha=0.3)


def _acc_plot_overall_trajectory(pred_stats, model_type, ax):
    """Overall orig/swap/both accuracy across scales on one axis."""
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    x_ticks = range(len(available))
    for metric, label, ls in [
        ('overall_acc_orig', 'orig', '-o'),
        ('overall_acc_swap', 'swap', '-s'),
        ('overall_acc_both', 'both', '-^'),
    ]:
        y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(metric, 0)
                  for s in available]
        ax.plot(x_ticks, y_vals, ls, label=label, linewidth=2.2, markersize=6)
    ax.set_xticks(list(x_ticks))
    ax.set_xticklabels(available, fontsize=9)
    ax.set_xlabel('Scale', fontsize=9)
    ax.set_ylabel('Overall Accuracy', fontsize=9)
    ax.set_title('Overall Accuracy Trajectory', fontweight='bold', fontsize=11)
    ax.legend(fontsize=9)
    ax.set_ylim(0, 1.05)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
    ax.grid(True, alpha=0.3)


def _acc_plot_category_accuracy(cat_validity, model_type, ax_orig, ax_swap, pred_stats=None):
    """Per-category accuracy bars (orig and swap axes) with an 'overall' column."""
    available = [s for s in SCALE_ORDER if s in cat_validity]
    cats_with_overall = CATEGORY_ORDER + ['overall']
    x = np.arange(len(cats_with_overall))
    width = 0.8 / max(len(available), 1)
    # Maps per-category metric name to the matching overall key in pred_stats.
    overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'}
    for ax, metric, title in [
        (ax_orig, 'acc_orig', 'Per-Category Accuracy (orig)'),
        (ax_swap, 'acc_swap', 'Per-Category Accuracy (swap)'),
    ]:
        for i, scale in enumerate(available):
            cv = cat_validity[scale]
            vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER]
            if pred_stats is not None:
                entry = next((d for d in pred_stats if d['scale'] == scale), None)
                vals.append(entry.get(overall_key[metric], 0) if entry else 0)
            else:
                vals.append(0)
            offset = (i - len(available) / 2 + 0.5) * width
            ax.bar(x + offset, vals, width, label=scale,
                   
color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85) + for j, cat in enumerate(CATEGORY_ORDER): + ax.axvspan(j - 0.45, j + 0.45, color=CAT_COLORS.get(cat, 'gray'), alpha=0.06, linewidth=0) + ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', linewidth=1.2, linestyle=':', alpha=0.6) + ax.set_xticks(x) + ax.set_xticklabels(cats_with_overall, fontsize=9, rotation=15) + ax.set_ylabel('Accuracy', fontsize=9) + ax.set_title(title, fontweight='bold', fontsize=11) + ax.legend(fontsize=7, ncol=2) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) + ax.grid(True, alpha=0.3, axis='y') + if available: + last_cv = cat_validity[available[-1]] + for j, cat in enumerate(CATEGORY_ORDER): + if not last_cv.get(cat, {}).get('reliable', True): + ax.text(j, 1.08, '✗', ha='center', va='center', + fontsize=9, color='red', fontweight='bold') + + +def _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=None): + cats_with_overall = CATEGORY_ORDER + ['overall'] + overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'} + for scale in sorted(cat_validity.keys(), + key=lambda s: SCALE_ORDER.index(s) if s in SCALE_ORDER else 99): + cv = cat_validity[scale] + ps_entry = next((d for d in pred_stats if d['scale'] == scale), None) if pred_stats else None + fig, axes = plt.subplots(1, 2, figsize=(16, 5)) + x = np.arange(len(cats_with_overall)) + width = 0.55 + for ax, metric, title in [ + (axes[0], 'acc_orig', f'acc_orig ({scale})'), + (axes[1], 'acc_swap', f'acc_swap ({scale})'), + ]: + vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER] + overall_val = ps_entry.get(overall_key[metric], 0) if ps_entry else 0 + vals.append(overall_val) + colors = [CAT_COLORS.get(cat, 'gray') for cat in CATEGORY_ORDER] + ['#333333'] + bars = ax.bar(x, vals, width, color=colors, alpha=0.85, edgecolor='white') + ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', + linewidth=1.2, linestyle=':', alpha=0.6) + 
ax.set_xticks(x) + ax.set_xticklabels(cats_with_overall, fontsize=10) + ax.set_ylabel('Accuracy', fontsize=10) + ax.set_title(title, fontweight='bold', fontsize=12) + ax.set_ylim(0, 1.15) + ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) + ax.grid(True, alpha=0.3, axis='y') + for bar, cat in zip(bars, cats_with_overall): + reliable = cv.get(cat, {}).get('reliable', True) if cat != 'overall' else True + h = bar.get_height() + ax.text(bar.get_x() + bar.get_width() / 2, h + 0.02, + f'{h:.2f}' + ('' if reliable else ' ✗'), + ha='center', va='bottom', fontsize=8, + color='red' if not reliable else 'black') + fig.suptitle(f'{model_type.upper()} - Category Accuracy ({scale})', + fontsize=13, fontweight='bold') + plt.tight_layout() + out = os.path.join(save_dir, f'category_accuracy_{scale}.png') + plt.savefig(out, dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {out}") + + +def run_accuracy_charts(pred_stats, cat_validity, model_type, save_dir): + """Generate all accuracy chart plots into save_dir.""" + os.makedirs(save_dir, exist_ok=True) + + # Group bars + fig, axes = plt.subplots(1, 3, figsize=(21, 6)) + _acc_plot_group_bars(pred_stats, model_type, axes) + fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', + fontsize=15, fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, 'accuracy_group_bars.png'), dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_group_bars.png')}") + + # Trajectory + fig, axes = plt.subplots(1, 2, figsize=(16, 6)) + _acc_plot_both_trajectory(pred_stats, model_type, axes[0]) + _acc_plot_overall_trajectory(pred_stats, model_type, axes[1]) + fig.suptitle(f'{model_type.upper()} - Accuracy Trajectory Across Scales', + fontsize=14, fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, 'accuracy_trajectory.png'), dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {os.path.join(save_dir, 
'accuracy_trajectory.png')}") + + if cat_validity: + # Category bars (all scales overlay) + fig, axes = plt.subplots(1, 2, figsize=(20, 6)) + _acc_plot_category_accuracy(cat_validity, model_type, axes[0], axes[1], + pred_stats=pred_stats) + fig.suptitle(f'{model_type.upper()} - Per-Category Accuracy Across Scales', + fontsize=14, fontweight='bold') + plt.tight_layout() + plt.savefig(os.path.join(save_dir, 'accuracy_category.png'), dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_category.png')}") + + # Per-scale category bars + _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=pred_stats) + + # Combined accuracy_chart.png + fig = plt.figure(figsize=(24, 14)) + ax_h = fig.add_subplot(3, 3, 1) + ax_v = fig.add_subplot(3, 3, 2) + ax_d = fig.add_subplot(3, 3, 3) + _acc_plot_group_bars(pred_stats, model_type, [ax_h, ax_v, ax_d]) + ax_tb = fig.add_subplot(3, 3, 4) + ax_to = fig.add_subplot(3, 3, 5) + _acc_plot_both_trajectory(pred_stats, model_type, ax_tb) + _acc_plot_overall_trajectory(pred_stats, model_type, ax_to) + ax_note = fig.add_subplot(3, 3, 6) + ax_note.axis('off') + available_scales = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)] + ax_note.text(0.1, 0.6, + f'Scales: {", ".join(available_scales)}\n\n✗ = unreliable category\n-- = 0.5 chance level', + transform=ax_note.transAxes, fontsize=11, va='top', family='monospace') + if cat_validity: + ax_co = fig.add_subplot(3, 2, 5) + ax_cs = fig.add_subplot(3, 2, 6) + _acc_plot_category_accuracy(cat_validity, model_type, ax_co, ax_cs, pred_stats=pred_stats) + fig.suptitle(f'{model_type.upper()} — Accuracy Summary', + fontsize=17, fontweight='bold', y=1.01) + plt.tight_layout() + plt.savefig(os.path.join(save_dir, 'accuracy_chart.png'), dpi=200, bbox_inches='tight') + plt.close() + logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_chart.png')}") + + +# 
# Unify Consistency Y-axis (integrated from unify_consistency_ylim.py)
# ============================================================================

def _ylim_compute(all_vals, margin_ratio=0.08):
    """Return a (ymin, ymax) pair covering *all_vals* with a proportional margin.

    Empty input falls back to (-1, 1). Fix: when all values are identical the
    span is 0 and the original margin (span * ratio) collapsed the axis to a
    zero-height range; use a magnitude-scaled fallback margin instead so the
    plot remains visible.
    """
    if not all_vals:
        return -1, 1
    ymin, ymax = min(all_vals), max(all_vals)
    span = ymax - ymin
    if span == 0:
        # Flat data: scale the margin by the value's magnitude (at least 1.0).
        margin = max(abs(ymax), 1.0) * margin_ratio
    else:
        margin = span * margin_ratio
    return ymin - margin, ymax + margin


def _ylim_load_keyed_json(path):
    """Load a '{key}_L{layer}' keyed JSON into {(key, layer): vals}, or None."""
    if not os.path.exists(path):
        return None
    with open(path) as f:
        raw = json.load(f)
    if not raw:
        return None
    result = {}
    for key, vals in raw.items():
        parts = key.rsplit('_L', 1)
        if len(parts) == 2:
            result[(parts[0], int(parts[1]))] = vals
    return result if result else None


def _ylim_load_alignment_json(path):
    """Load a cross-alignment JSON keyed 'L{layer}' into {layer: vals}, or None."""
    if not os.path.exists(path):
        return None
    with open(path) as f:
        raw = json.load(f)
    if not raw:
        return None
    result = {int(k[1:]): v for k, v in raw.items() if k.startswith('L')}
    return result if result else None


def _ylim_plot_sign_corrected(data, scale, model_type, save_path, ylim):
    """Re-draw the sign-corrected consistency trajectory with a fixed y-range."""
    fig, ax = plt.subplots(figsize=(12, 6))
    for group in GROUP_ORDER:
        layers, vals = [], []
        for (g, l), v in sorted(data.items(), key=lambda x: x[0][1]):
            if g == group:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=GROUP_COLORS[group],
                    label=group, linewidth=2, markersize=3)
    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Sign-Corrected Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency',
                 fontweight='bold')
    ax.legend(fontsize=11)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_plot_within_cat(data, scale, model_type, save_path, ylim):
    """Re-draw the within-category consistency trajectory with a fixed y-range."""
    fig, ax = plt.subplots(figsize=(12, 6))
    for cat in CATEGORY_ORDER:
        layers, vals = [], []
        for (c, l), v in sorted(data.items(), key=lambda x: x[0][1]):
            if c == cat:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=CAT_COLORS[cat],
                    label=cat, linewidth=2, markersize=3)
    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Within-Category Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency',
                 fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_plot_cross_alignment(data, scale, model_type, save_path, ylim):
    """Re-draw the cross-group alignment plot (with permutation band) at a fixed y-range."""
    fig, ax = plt.subplots(figsize=(12, 6))
    layers = sorted(data.keys())
    ax.plot(layers, [data[l]['per_sample_mean'] for l in layers], '-o', color='#d62728',
            label='cos(d_vert, d_dist) per-sample mean', linewidth=2.5, markersize=3)
    ax.plot(layers, [data[l]['mean_delta_alignment'] for l in layers], '--s', color='#e377c2',
            label='cos(mean_d_vert, mean_d_dist)', linewidth=1.5, markersize=3)
    perm_mean = [data[l]['permutation_mean'] for l in layers]
    perm_std = [data[l]['permutation_std'] for l in layers]
    ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5)
    # ±2σ band around the permutation control.
    ax.fill_between(layers,
                    [m - 2*s for m, s in zip(perm_mean, perm_std)],
                    [m + 2*s for m, s in zip(perm_mean, perm_std)],
                    alpha=0.2, color='gray')
    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Cosine Alignment')
    ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)',
                 fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_process_plot_type(scale_dir_map, plots_dir, conditions, model_type,
                            plot_name, json_pattern, loader, val_gatherer, plotter,
                            subfolder=None):
    """Re-plot one plot type across all conditions with a unified y-axis.

    scale_dir_map: {scale: path_to_saved_data/vlm_key} (ordered dict)
    """
    logger.info(f"  [unify ylim] {plot_name}")
    for condition, condition_tag in conditions:
        cond_plot_dir = os.path.join(plots_dir, condition)
        if not os.path.isdir(cond_plot_dir):
            continue
        save_dir = os.path.join(cond_plot_dir, subfolder) if subfolder else cond_plot_dir
        os.makedirs(save_dir, exist_ok=True)
        all_data = {}
        for scale, scale_dir in scale_dir_map.items():
            path = os.path.join(scale_dir, 'json',
                                json_pattern.format(scale=scale, tag=condition_tag))
            loaded = loader(path)
            if loaded:
                all_data[scale] = loaded
        if not all_data:
            continue
        # One shared y-range computed over every scale's values.
        all_vals = val_gatherer(all_data)
        ylim = _ylim_compute(all_vals)
        for scale, data in all_data.items():
            save_path = os.path.join(save_dir, f'{plot_name}_{scale}.png')
            plotter(data, scale, model_type, save_path, ylim)
        logger.info(f"    {condition}: y=[{ylim[0]:.4f}, {ylim[1]:.4f}], {len(all_data)} scales")
def run_unify_ylim(scale_dir_map: dict, plots_dir: str, model_type: str):
    """Unify y-axis for sign_corrected, within_cat, and cross_alignment plots.

    scale_dir_map: {scale: path_to_saved_data/vlm_key}
    plots_dir: compare/{group_name}/plots/ (where unified plots are written)
    """
    # (plot subdirectory, json filename tag) pairs to process.
    conditions = [
        ('all', 'all_pairs'),
        ('both_correct', 'both_correct'),
    ]

    def gather_keyed(all_data):
        # Flatten {scale: {(key, layer): {'mean': ...}}} into a flat list of means.
        return [v['mean'] for data in all_data.values() for v in data.values()]

    def gather_alignment(all_data):
        # Alignment plots draw four series; collect the extremes of all of them.
        vals = []
        for data in all_data.values():
            for v in data.values():
                vals += [v['per_sample_mean'], v['mean_delta_alignment'],
                         v['permutation_mean'] + 2 * v['permutation_std'],
                         v['permutation_mean'] - 2 * v['permutation_std']]
        return vals

    # One spec per plot family: (name, json pattern, loader, gatherer, plotter, subfolder).
    plot_specs = [
        ('sign_corrected_consistency',
         'sign_corrected_consistency_{scale}_{tag}.json',
         _ylim_load_keyed_json, gather_keyed, _ylim_plot_sign_corrected,
         'sign_corrected'),
        ('within_cat_consistency',
         'within_cat_consistency_{scale}_{tag}.json',
         _ylim_load_keyed_json, gather_keyed, _ylim_plot_within_cat,
         'within_cat_consistency'),
        ('cross_alignment',
         'cross_alignment_{scale}.json',
         _ylim_load_alignment_json, gather_alignment, _ylim_plot_cross_alignment,
         'cross_alignment'),
    ]
    for name, pattern, loader, gatherer, plotter, sub in plot_specs:
        _ylim_process_plot_type(
            scale_dir_map, plots_dir, conditions, model_type,
            plot_name=name, json_pattern=pattern, loader=loader,
            val_gatherer=gatherer, plotter=plotter, subfolder=sub)


def _has_phase_b_data(scale_dir: str, scale: str) -> bool:
    """Return True if cross_alignment_{scale}.json exists and is non-empty.

    Used in merge to auto-detect whether Phase B was run for a given scale.
    """
    # EAFP: a missing file, unreadable file, or bad JSON all mean "no data".
    try:
        with open(os.path.join(scale_dir, 'json', f'cross_alignment_{scale}.json')) as fh:
            payload = json.load(fh)
    except Exception:
        return False
    return bool(payload)
+ """ + path = os.path.join(scale_dir, 'json', f'cross_alignment_{scale}.json') + if not os.path.exists(path): + return False + try: + with open(path) as f: + data = json.load(f) + return bool(data) + except Exception: + return False + + +def _check_merge_only_sources(output_dir: str, model_type: str) -> bool: + """Verify required source directories have data for a merge-only model_type. + + With the new per-scale directory layout, data lives at + {output_dir}/{model_type}_{scale}/ instead of {output_dir}/{model_type}/. + Returns True if all sources look healthy, False (with warnings) if not. + """ + mc = MERGE_ONLY_CONFIGS[model_type] + ok = True + for req_model_type in mc['required_dirs']: + # Look for any saved_data/{req_model_type}_{scale}/ directories + if not os.path.isdir(output_dir): + logger.warning( + f"[{model_type}] output_dir not found: {output_dir}\n" + f" → Run inference first: python swap_analysis.py --model_type {req_model_type}" + ) + ok = False + continue + matching = [ + d for d in os.listdir(output_dir) + if d.startswith(f'{req_model_type}_') + and os.path.isdir(os.path.join(output_dir, d)) + ] + if not matching: + logger.warning( + f"[{model_type}] No '{req_model_type}_*' directories found in {output_dir}\n" + f" → Run inference first: python swap_analysis.py --model_type {req_model_type}" + ) + ok = False + continue + any_data = False + for d in matching: + json_dir = os.path.join(output_dir, d, 'json') + if os.path.isdir(json_dir) and any( + f.startswith('pred_stats_') for f in os.listdir(json_dir) + ): + scale = d[len(req_model_type) + 1:] # strip "{req_model_type}_" prefix + logger.info(f" [{req_model_type}/{scale}] found data in {d}/") + any_data = True + if not any_data: + logger.warning( + f"[{model_type}] No pred_stats JSON found in any '{req_model_type}_*' directory.\n" + f" → Inference may not have completed for '{req_model_type}'." 
+ ) + ok = False + return ok + + +def _load_scale_data_multi(output_dir: str, model_type: str, scale: str, scale_sources: dict): + """Load per-scale data for one scale, looking in the correct source directory. + + With the new per-scale layout, data lives at {output_dir}/{src_model_type}_{scale}/. + Returns (sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc). + Any unavailable item is None / {}. + """ + src_model_type = scale_sources.get(scale, model_type) + src_dir = os.path.join(output_dir, get_model_key(src_model_type, scale)) + + sc = load_scale_consistency(src_dir, scale, 'all_pairs') + sc_bc = load_scale_consistency(src_dir, scale, 'both_correct') + wc = load_within_cat_consistency(src_dir, scale, 'all_pairs') + wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct') + align = load_scale_alignment(src_dir, scale) + + pred_stat = None + pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json') + if os.path.exists(pred_path): + with open(pred_path) as f: + pred_stat = json.load(f) + + cat_validity = None + cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json') + if os.path.exists(cv_path): + with open(cv_path) as f: + cat_validity = json.load(f) + + dh = load_delta_heatmaps(src_dir, scale, 'all_pairs') + dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct') + + return sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc + + +# --------------------------------------------------------------------------- +# All-layer heatmap + PCA helpers (called from run_merge / run_merge_extended) +# --------------------------------------------------------------------------- + +def _get_csv_layers(csv_dir: str, scale: str, tag: str) -> list: + """Return sorted list of layer indices that have a delta_similarity CSV.""" + import glob as _glob + pattern = os.path.join(csv_dir, f'delta_similarity_{scale}_L*_{tag}.csv') + layers = [] + for fpath in _glob.glob(pattern): + m = re.search( + 
def run_all_layer_heatmaps(model_dir: str, model_type: str, scales: list):
    """Generate delta-similarity heatmaps for ALL layers from pre-computed CSVs.

    Reads {model_dir}/csv/delta_similarity_{scale}_L{n}_{tag}.csv
    Writes {model_dir}/plots/all/heatmap/heatmap_{scale}_L{n}.png (all_pairs)
    {model_dir}/plots/both_correct/heatmap/heatmap_{scale}_L{n}.png (both_correct)

    Skips a scale if the NPZ is missing or any all_pairs CSV is absent
    (indicates inference was not fully completed for that scale).
    """
    tag_dirs = {
        'all_pairs': os.path.join(model_dir, 'plots', 'all', 'heatmap'),
        'both_correct': os.path.join(model_dir, 'plots', 'both_correct', 'heatmap'),
    }

    for scale in scales:
        npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
        csv_dir = os.path.join(model_dir, 'csv')

        if not os.path.exists(npz_path):
            logger.warning(f'  [{model_type}/{scale}] NPZ not found, skipping heatmaps.')
            continue

        # Layer indices come from the saved feature archive's orig_L{n} keys.
        data = np.load(npz_path, allow_pickle=True)
        npz_layers = sorted(
            int(k.replace('orig_L', ''))
            for k in data.files if k.startswith('orig_L')
        )
        data.close()

        if not npz_layers:
            logger.warning(f'  [{model_type}/{scale}] No orig_L* keys in NPZ, skipping heatmaps.')
            continue

        csv_layers = _get_csv_layers(csv_dir, scale, 'all_pairs')
        missing = set(npz_layers) - set(csv_layers)
        if missing:
            logger.warning(
                f'  [{model_type}/{scale}] {len(missing)} NPZ layers lack CSVs '
                f'(e.g. L{sorted(missing)[:5]}). Skipping all-layer heatmaps.')
            continue

        for out_dir in tag_dirs.values():
            os.makedirs(out_dir, exist_ok=True)

        logger.info(f'  [{model_type}/{scale}] Generating heatmaps for {len(npz_layers)} layers...')
        saved = 0
        for layer in npz_layers:
            for tag, out_dir in tag_dirs.items():
                csv_path = os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{tag}.csv')
                if not os.path.exists(csv_path):
                    continue  # both_correct CSV may be absent for some layers
                df = pd.read_csv(csv_path, index_col=0)
                available = [c for c in CATEGORY_ORDER if c in df.index]
                if not available:
                    continue
                df = df.loc[available, available]
                title = (
                    f'{model_type.upper()} ({scale}) \u2014 Delta Heatmap L{layer} '
                    f'({"both-correct" if tag == "both_correct" else "all pairs"})'
                )
                out_path = os.path.join(out_dir, f'heatmap_{scale}_L{layer}.png')
                plot_delta_heatmap(df, title, out_path)
                saved += 1
        logger.info(f'  [{model_type}/{scale}] Saved {saved} heatmaps')


def run_all_layer_pca(model_dir: str, model_type: str, scales: list):
    """Generate 2D and 3D PCA plots for ALL layers from saved NPZ files.

    Writes {model_dir}/plots/all/pca/pca_{scale}_L{n}.png (all pairs)
    {model_dir}/plots/all/pca_3d/pca_{scale}_L{n}.png
    {model_dir}/plots/both_correct/pca/pca_{scale}_L{n}.png (both-correct only)
    {model_dir}/plots/both_correct/pca_3d/pca_{scale}_L{n}.png
    """
    for scale in scales:
        npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
        if not os.path.exists(npz_path):
            logger.warning(f'  [{model_type}/{scale}] NPZ not found, skipping PCA.')
            continue

        # All-pairs PCA
        pca_2d_dir = os.path.join(model_dir, 'plots', 'all', 'pca')
        pca_3d_dir = os.path.join(model_dir, 'plots', 'all', 'pca_3d')
        os.makedirs(pca_2d_dir, exist_ok=True)
        os.makedirs(pca_3d_dir, exist_ok=True)
        logger.info(f'  [{model_type}/{scale}] Generating all-layer 2D PCA...')
        plot_pca_embeddings(npz_path, scale, model_type, pca_2d_dir)
        logger.info(f'  [{model_type}/{scale}] Generating all-layer 3D PCA...')
        plot_pca_3d(npz_path, scale, model_type, pca_3d_dir)

        # Both-correct PCA
        bc_pca_2d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca')
        bc_pca_3d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca_3d')
        os.makedirs(bc_pca_2d_dir, exist_ok=True)
        os.makedirs(bc_pca_3d_dir, exist_ok=True)
        logger.info(f'  [{model_type}/{scale}] Generating both-correct 2D PCA...')
        plot_pca_embeddings(npz_path, scale, model_type, bc_pca_2d_dir, bc_only=True)
        logger.info(f'  [{model_type}/{scale}] Generating both-correct 3D PCA...')
        plot_pca_3d(npz_path, scale, model_type, bc_pca_3d_dir, bc_only=True)


def run_merge(args):
    # Per-scale data lives in saved_data/{model_type}_{scale}/
    def _scale_dir(scale):
        return os.path.join(args.output_dir, get_model_key(args.model_type, scale))

    # Cross-scale (compare) output: {question_type}/compare/{group_name}/
    group_name = args.group_name or args.model_type
    if args.merge_output_dir:
        merge_out = args.merge_output_dir
    else:
        qt_root = os.path.dirname(args.output_dir.rstrip('/'))  # one level up from saved_data/
        merge_out = os.path.join(qt_root, 'compare', group_name)
    plots_dir = os.path.join(merge_out, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer',
                   'roborefer_depth', '10pct', '20pct', '30pct']
    available_scales = [s for s in scale_order if s in args.scales]

    # Load per-scale results
    all_sign_corrected = {}
    all_sign_corrected_bc = {}
    all_within_cat = {}
    all_within_cat_bc = {}
    all_alignment = {}
    all_pred_stats = []
    all_cat_validity = {}
    all_delta_heatmaps = {}
    all_delta_heatmaps_bc = {}

    for scale in available_scales:
        sd = _scale_dir(scale)
        sc = load_scale_consistency(sd, scale, 'all_pairs')
        if sc:
            all_sign_corrected[scale] = sc
        sc_bc = load_scale_consistency(sd, scale, 'both_correct')
        if sc_bc:
            all_sign_corrected_bc[scale] = sc_bc
        wc = load_within_cat_consistency(sd, scale, 'all_pairs')
        if wc:
            all_within_cat[scale] = wc
        wc_bc = load_within_cat_consistency(sd, scale, 'both_correct')
        if wc_bc:
            all_within_cat_bc[scale] = wc_bc
        align = load_scale_alignment(sd, scale)
        if align:
            all_alignment[scale] = align
        pred_path = os.path.join(sd, 'json', f'pred_stats_{scale}.json')
        if os.path.exists(pred_path):
            with open(pred_path) as f:
                all_pred_stats.append(json.load(f))
        cv_path = os.path.join(sd, 'json', f'category_validity_{scale}.json')
        if os.path.exists(cv_path):
            with open(cv_path) as f:
                all_cat_validity[scale] = json.load(f)
        dh = load_delta_heatmaps(sd, scale, 'all_pairs')
        if dh:
            all_delta_heatmaps[scale] = dh
        dh_bc = load_delta_heatmaps(sd, scale, 'both_correct')
        if dh_bc:
            all_delta_heatmaps_bc[scale] = dh_bc

        logger.info(f"  Loaded data for {scale}")

    # Generate cross-scale plots into condition subdirs
    for condition, sc_data, wc_data, dh_data, tag_label in [
        ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
        ('both_correct', all_sign_corrected_bc, all_within_cat_bc,
         all_delta_heatmaps_bc, 'both-correct'),
    ]:
        cond_dir = os.path.join(plots_dir, condition)
        sc_dir = os.path.join(cond_dir, 'sign_corrected')
        wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
        dt_dir = os.path.join(cond_dir, 'delta_trajectory')
        os.makedirs(sc_dir, exist_ok=True)
        os.makedirs(wc_dir, exist_ok=True)
        os.makedirs(dt_dir, exist_ok=True)

        if len(sc_data) > 1:
            plot_cross_scale_consistency(
                sc_data, args.model_type,
                os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
                title_prefix=f'Sign-Corrected ({tag_label})')

        if len(wc_data) > 1:
            plot_cross_scale_within_cat_consistency(
                wc_data, args.model_type,
                os.path.join(wc_dir, 'cross_scale_within_cat.png'))

        if dh_data:
            plot_delta_trajectory(dh_data, args.model_type,
                                  os.path.join(dt_dir, 'delta_trajectory.png'))

    # Cross-scale alignment + pred stats + summary (shared across conditions)
    all_cond_dir = os.path.join(plots_dir, 'all')
    ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
    pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats')
    summary_dir = os.path.join(all_cond_dir, 'summary')
    os.makedirs(ca_dir, exist_ok=True)
    os.makedirs(pred_stats_dir, exist_ok=True)
    os.makedirs(summary_dir, exist_ok=True)

    if len(all_alignment) > 1:
        plot_cross_scale_alignment(
            all_alignment, args.model_type,
            os.path.join(ca_dir, 'cross_scale_alignment.png'))

    # Prediction stats plots
    if all_pred_stats:
        plot_pred_stats_bars(all_pred_stats, args.model_type,
                             os.path.join(pred_stats_dir, 'pred_stats_bars.png'))
        plot_pred_stats_trajectory(all_pred_stats, args.model_type,
                                   os.path.join(pred_stats_dir, 'pred_stats_trajectory.png'))

    # Summary barplot
    if all_sign_corrected:
        plot_summary_barplot(
            all_sign_corrected, all_alignment, args.model_type,
            os.path.join(summary_dir, 'summary_barplot.png'))

    # Summary CSV
    summary_rows = []
    for scale in available_scales:
        ps = next((p for p in all_pred_stats if p.get('scale') == scale), None)
+ if ps is None: + continue + row = dict(ps) + if scale in all_alignment: + max_layer = max(all_alignment[scale].keys()) + row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean'] + row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean'] + summary_rows.append(row) + + if summary_rows: + csv_dir = os.path.join(merge_out, 'csv') + os.makedirs(csv_dir, exist_ok=True) + pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False) + + # Accuracy charts (cross-scale) + if all_pred_stats: + acc_dir = os.path.join(plots_dir, 'accuracy') + logger.info("\n--- Accuracy Charts ---") + run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir) + + # Unify y-axis across scales for per-scale trajectory plots + logger.info("\n--- Unifying Y-axis ---") + scale_dir_map = {s: _scale_dir(s) for s in available_scales} + run_unify_ylim(scale_dir_map, plots_dir, args.model_type) + + # All-layer heatmaps + PCA are now done per-scale in process_scale(); skip here. + + logger.info(f"\n=== Merge Complete ===\nResults in: {merge_out}") + + +def run_merge_extended(args): + """Generate cross-scale plots for new / merge-only model_types. + + - Runnable types (molmo_big, qwen_big, qwen_super, big_trio): + loads all data from results/{model_type}/ and saves plots there. + - Merge-only types (molmo_all, qwen_all): + loads per-scale data from the respective source directories, + saves all cross-scale plots to results/{model_type}/. 
+ """ + is_merge_only = args.model_type in MERGE_ONLY_CONFIGS + + # ── Determine scale order and data source strategy ──────────────────────── + if is_merge_only: + mc = MERGE_ONLY_CONFIGS[args.model_type] + scale_order = mc['scale_order'] + scale_sources = mc['scale_sources'] + + logger.info(f"\n=== MERGE-ONLY mode: {args.model_type} ===") + logger.info("Checking required source directories...") + sources_ok = _check_merge_only_sources(args.output_dir, args.model_type) + if not sources_ok: + logger.warning( + f"\n[WARNING] One or more source directories are missing or incomplete.\n" + f" Cross-scale plots for '{args.model_type}' may be partial.\n" + f" Run the missing model types first (see warnings above), then retry merge." + ) + else: + scale_order = SCALE_ORDERS_NEW.get( + args.model_type, list(MODEL_CONFIGS_NEW[args.model_type])) + scale_sources = None # all data lives in results/{model_type}/ + + available_scales = [s for s in scale_order if s in args.scales] + logger.info(f"Merging scales (in order): {available_scales}") + + # ── Determine output dir (compare/{group_name}/) ────────────────────────── + group_name = args.group_name or args.model_type + if args.merge_output_dir: + merge_out = args.merge_output_dir + else: + qt_root = os.path.dirname(args.output_dir.rstrip('/')) + merge_out = os.path.join(qt_root, 'compare', group_name) + plots_dir = os.path.join(merge_out, 'plots') + os.makedirs(plots_dir, exist_ok=True) + + # ── Per-scale data directory resolver ───────────────────────────────────── + def _scale_dir(scale): + if is_merge_only: + src_model_type = scale_sources[scale] + else: + src_model_type = args.model_type + return os.path.join(args.output_dir, get_model_key(src_model_type, scale)) + + # ── Load per-scale data ─────────────────────────────────────────────────── + all_sign_corrected = {} + all_sign_corrected_bc = {} + all_within_cat = {} + all_within_cat_bc = {} + all_alignment = {} + all_pred_stats = [] + all_cat_validity = {} + 
all_delta_heatmaps = {} + all_delta_heatmaps_bc = {} + + for scale in available_scales: + sd = _scale_dir(scale) + sc = load_scale_consistency(sd, scale, 'all_pairs') + sc_bc = load_scale_consistency(sd, scale, 'both_correct') + wc = load_within_cat_consistency(sd, scale, 'all_pairs') + wc_bc = load_within_cat_consistency(sd, scale, 'both_correct') + align = load_scale_alignment(sd, scale) + + pred_stat = None + pred_path = os.path.join(sd, 'json', f'pred_stats_{scale}.json') + if os.path.exists(pred_path): + with open(pred_path) as f: + pred_stat = json.load(f) + + cat_validity = None + cv_path = os.path.join(sd, 'json', f'category_validity_{scale}.json') + if os.path.exists(cv_path): + with open(cv_path) as f: + cat_validity = json.load(f) + + dh = load_delta_heatmaps(sd, scale, 'all_pairs') + dh_bc = load_delta_heatmaps(sd, scale, 'both_correct') + + if sc: + all_sign_corrected[scale] = sc + if sc_bc: + all_sign_corrected_bc[scale] = sc_bc + if wc: + all_within_cat[scale] = wc + if wc_bc: + all_within_cat_bc[scale] = wc_bc + if align: + all_alignment[scale] = align + if pred_stat is not None: + all_pred_stats.append(pred_stat) + if cat_validity is not None: + all_cat_validity[scale] = cat_validity + if dh: + all_delta_heatmaps[scale] = dh + if dh_bc: + all_delta_heatmaps_bc[scale] = dh_bc + + logger.info(f" Loaded data for '{scale}'" + + (f" (from '{scale_sources[scale]}')" if is_merge_only else "")) + + # ── Auto-detect Phase B data ────────────────────────────────────────────── + has_phase_b = all(_has_phase_b_data(_scale_dir(s), s) for s in available_scales) + if has_phase_b: + logger.info(" [Phase B] Cross-alignment data found for all scales → will include cross-alignment plots") + else: + missing_b = [s for s in available_scales if not _has_phase_b_data(_scale_dir(s), s)] + logger.info(f" [Phase B] Cross-alignment data missing for: {missing_b} → skipping cross-alignment plots") + + # ── Summary CSV (Phase 1 data — always saved) ───────────────────────────── 
+ summary_rows = [] + for scale in available_scales: + ps = next((p for p in all_pred_stats if p.get('scale') == scale), None) + if ps is None: + continue + row = dict(ps) + if scale in all_alignment: + max_layer = max(all_alignment[scale].keys()) + row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean'] + row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean'] + summary_rows.append(row) + if summary_rows: + csv_dir = os.path.join(merge_out, 'csv') + os.makedirs(csv_dir, exist_ok=True) + pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False) + + # ── Cross-scale plots (Phase 2 — skipped when --phase1-only) ───────────── + if args.phase1_only: + logger.info("\n--- Cross-scale plots [SKIPPED: --phase1-only] ---") + else: + for condition, sc_data, wc_data, dh_data, tag_label in [ + ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'), + ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'), + ]: + cond_dir = os.path.join(plots_dir, condition) + sc_dir = os.path.join(cond_dir, 'sign_corrected') + wc_dir = os.path.join(cond_dir, 'within_cat_consistency') + dt_dir = os.path.join(cond_dir, 'delta_trajectory') + os.makedirs(sc_dir, exist_ok=True) + os.makedirs(wc_dir, exist_ok=True) + os.makedirs(dt_dir, exist_ok=True) + + if len(sc_data) > 1: + plot_cross_scale_consistency( + sc_data, args.model_type, + os.path.join(sc_dir, 'cross_scale_sign_corrected.png'), + title_prefix=f'Sign-Corrected ({tag_label})') + + if len(wc_data) > 1: + plot_cross_scale_within_cat_consistency( + wc_data, args.model_type, + os.path.join(wc_dir, 'cross_scale_within_cat.png')) + + if dh_data: + plot_delta_trajectory( + dh_data, args.model_type, + os.path.join(dt_dir, 'delta_trajectory.png')) + + # ── Alignment and prediction stats ──────────────────────────────────── + all_cond_dir = os.path.join(plots_dir, 'all') + pred_stats_dir = os.path.join(all_cond_dir, 
'pred_stats') + summary_dir = os.path.join(all_cond_dir, 'summary') + os.makedirs(pred_stats_dir, exist_ok=True) + os.makedirs(summary_dir, exist_ok=True) + + if has_phase_b and len(all_alignment) > 1: + ca_dir = os.path.join(all_cond_dir, 'cross_alignment') + os.makedirs(ca_dir, exist_ok=True) + plot_cross_scale_alignment( + all_alignment, args.model_type, + os.path.join(ca_dir, 'cross_scale_alignment.png')) + + if all_pred_stats: + plot_pred_stats_bars( + all_pred_stats, args.model_type, + os.path.join(pred_stats_dir, 'pred_stats_bars.png')) + plot_pred_stats_trajectory( + all_pred_stats, args.model_type, + os.path.join(pred_stats_dir, 'pred_stats_trajectory.png')) + + if all_sign_corrected: + plot_summary_barplot( + all_sign_corrected, all_alignment, args.model_type, + os.path.join(summary_dir, 'summary_barplot.png')) + + # ── Accuracy charts ─────────────────────────────────────────────────── + if all_pred_stats: + acc_dir = os.path.join(plots_dir, 'accuracy') + logger.info("\n--- Accuracy Charts ---") + run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir) + + # ── Unify y-axis ────────────────────────────────────────────────────── + logger.info("\n--- Unifying Y-axis ---") + scale_dir_map = {s: _scale_dir(s) for s in available_scales} + run_unify_ylim(scale_dir_map, plots_dir, args.model_type) + + # All-layer heatmaps + PCA are now done per-scale in process_scale(); skip here. 
def main():
    """CLI entry point.

    Two modes:
      * --merge      : regenerate cross-scale plots from saved per-scale data.
      * default      : run inference + analysis for each requested scale.
    """
    # Default scales per legacy model_type (new types use their own defaults)
    _LEGACY_DEFAULT_SCALES = {
        'molmo': ['vanilla', '80k', '400k', '800k', '2m'],
        'nvila': ['vanilla', '80k', '400k', '800k', '2m'],
        'qwen': ['vanilla', '80k', '400k', '800k', '2m'],
        'nvila_synthetic': ['80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '400k-5pct'],
        'nvila_st': ['80k-st', '400k-st', '800k-st'],
    }

    parser = argparse.ArgumentParser(
        description='Swap Analysis — Spatial Representation Probing',
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--tsv_2d', type=str, default=CV_BENCH_2D_TSV,
                        help='Path to CV-Bench-2D-Relation-binary.tsv')
    parser.add_argument('--tsv_3d', type=str, default=CV_BENCH_3D_TSV,
                        help='Path to CV-Bench-3D.tsv')
    parser.add_argument('--model_type', type=str, required=True,
                        choices=ALL_MODEL_TYPES,
                        help=(
                            'Legacy: molmo | nvila | qwen\n'
                            'Synthetic: nvila_synthetic\n'
                            'New large: molmo_big | qwen_big | qwen_super | big_trio\n'
                            'Merge-only (--merge required): molmo_all | qwen_all'
                        ))
    parser.add_argument('--scales', type=str, nargs='+', default=None,
                        help='Scales to process (default: all for the given model_type).')
    parser.add_argument('--output_dir', type=str, default=None,
                        help='Root for saved_data/. Defaults to '
                             '{script_dir}/{question_type}/saved_data.')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--merge', action='store_true',
                        help='Merge mode: generate cross-scale plots from saved per-scale data.')
    parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir',
                        help='(Deprecated) Override output dir for cross-scale plots. '
                             'Use --group-name instead.')
    parser.add_argument('--group-name', type=str, default=None, dest='group_name',
                        help='Folder name under compare/ for merged output. '
                             'Defaults to model_type.')
    parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer',
                        help='Disable automatic inclusion of roborefer scale for nvila.')
    parser.add_argument('--skip-cross-group', action='store_true')
    parser.add_argument('--max-samples-per-category', type=int, default=200,
                        dest='max_samples_per_category')
    parser.add_argument('--question-type', type=str, default='short_answer',
                        choices=['short_answer', 'mcq'], dest='question_type',
                        help='short_answer (default): "Answer with only one word." format; '
                             'mcq: MCQ A/B format with letter answers.')
    parser.add_argument('--phase1-only', action='store_true', dest='phase1_only',
                        help='Skip all plot generation (per-scale and cross-scale). '
                             'Data (npz/csv/json) is still saved.')
    parser.add_argument('--skip-phase-b', action='store_true', dest='skip_phase_b',
                        help='Skip Phase B (cross-group feature extraction). '
                             'Phase A inference + analysis + plots still run normally. '
                             'Merge auto-detects whether Phase B data is available.')

    args = parser.parse_args()

    # ── Compute output_dir and log_dir from question_type ────────────────────
    _HERE_UPDATED = os.path.dirname(os.path.abspath(__file__))
    if args.output_dir is None:
        args.output_dir = os.path.join(_HERE_UPDATED, args.question_type, 'saved_data')
    log_dir = os.path.join(_HERE_UPDATED, args.question_type, 'logs')

    # ── Validate: merge-only types require --merge ───────────────────────────
    if args.model_type in MERGE_ONLY_CONFIGS and not args.merge:
        parser.error(
            f"'{args.model_type}' is a merge-only type. Add --merge to run it.\n"
            f"  Example: python swap_analysis.py --model_type {args.model_type} --merge"
        )

    # ── Default scales ───────────────────────────────────────────────────────
    # Fix: copy the lists out of the config dicts. Previously args.scales
    # aliased the stored list, so the roborefer auto-append below mutated the
    # shared module-level defaults in place (mutable-aliasing bug).
    if args.scales is None:
        if args.model_type in MERGE_ONLY_CONFIGS:
            args.scales = list(MERGE_ONLY_CONFIGS[args.model_type]['scale_order'])
        elif args.model_type in MODEL_CONFIGS_NEW:
            args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys())
        else:
            args.scales = list(_LEGACY_DEFAULT_SCALES.get(
                args.model_type, ['vanilla', '80k', '400k', '800k', '2m']))

    # Legacy nvila: auto-include roborefer
    if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer:
        args.scales.append('roborefer')

    # Seed every RNG used downstream for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    # ── Merge mode ───────────────────────────────────────────────────────────
    if args.merge:
        group_name = args.group_name or args.model_type
        log_path = _setup_file_logging(group_name, log_dir)
        logger.info(f"Logging to: {log_path}")
        logger.info("\n=== MERGE MODE ===")
        if args.model_type in MODEL_CONFIGS_NEW or args.model_type in MERGE_ONLY_CONFIGS:
            run_merge_extended(args)
        else:
            run_merge(args)
        return

    # ── Inference mode ───────────────────────────────────────────────────────
    logger.info("\n=== Loading & Creating CV-Bench Swap Pairs ===")
    swap_pairs = load_cv_bench_pairs(
        args.tsv_2d, args.tsv_3d,
        seed=args.seed,
        question_type=args.question_type,
    )

    quads = []
    # skip_phase_b always exists on args (dest declared above), so no getattr.
    if not args.skip_cross_group and not args.skip_phase_b:
        try:
            depth_pairs = [p for p in swap_pairs if p['group'] == 'distance']
            quads = create_cv_bench_cross_group_quads(depth_pairs)
        except Exception as e:
            # Best-effort: cross-group quads are optional; continue without them.
            logger.warning(f"Cross-group setup failed: {e}. Skipping.")
            quads = []

    # ── Resolve config for the chosen model_type ─────────────────────────────
    if args.model_type in MODEL_CONFIGS_NEW:
        model_configs = MODEL_CONFIGS_NEW[args.model_type]
    else:
        model_configs = MODEL_CONFIGS[args.model_type]

    for scale in args.scales:
        if scale not in model_configs:
            logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.")
            continue

        # Per-scale log file
        vlm_key = get_model_key(args.model_type, scale)
        log_path = _setup_file_logging(vlm_key, log_dir)
        logger.info(f"Logging to: {log_path}")

        # Validate model path exists (skip HF IDs that start with org/ prefix)
        if args.model_type in MODEL_CONFIGS_NEW:
            _, raw_path = model_configs[scale]
        else:
            raw_path = model_configs[scale]
        if not os.path.isabs(raw_path) and not raw_path.startswith(('Qwen/', 'allenai/')):
            if not os.path.exists(raw_path):
                logger.warning(f"Model path not found: {raw_path} (scale='{scale}'), skipping.")
                continue

        try:
            process_scale(args, scale, swap_pairs, quads)
        except Exception as e:
            # One failing scale must not abort the whole sweep.
            logger.error(f"Failed {args.model_type} - {scale}: {e}")
            import traceback
            traceback.print_exc()
            continue

    logger.info(f"\n{'='*60}")
    logger.info("=== All scales complete ===")
    logger.info(f"Results: {args.output_dir}")
    logger.info(f"{'='*60}")


if __name__ == '__main__':
    main()
# Fix: `set -e` (from the header) only checks the LAST command of a pipeline,
# so a failing `$PYTHON ... | tee log` merge step below would succeed as long
# as tee succeeds. pipefail makes the whole pipeline fail when python fails.
set -o pipefail

LOG_DIR_CMP="$SCRIPT_DIR/logs_counter_only/nvila_st_compare"
mkdir -p "$LOG_DIR" "$LOG_DIR_CMP"

# MCQ-synthetic-trained NVILA checkpoints (~8GB each)
#   GPU 2: 80k-st   GPU 3: 400k-st   GPU 4: 800k-st
SCALES=("80k-st" "400k-st" "800k-st")
GPUS=(2 3 4)

echo "========================================="
echo " NVILA-ST Synthetic (counter-only): Launching ${#SCALES[@]} scales in parallel"
echo "========================================="

PIDS=()
for i in "${!SCALES[@]}"; do
    scale="${SCALES[$i]}"
    gpu="${GPUS[$i]}"
    log="${LOG_DIR}/${scale}.log"

    echo "[GPU $gpu] $scale -> $log"
    CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \
        --model_type $MODEL \
        --scales $scale \
        --device cuda \
        --no-auto-roborefer \
        --counter-only \
        > "$log" 2>&1 &
    PIDS+=($!)
done

echo ""
echo "Waiting for all ${#PIDS[@]} processes..."
FAILED=0
for i in "${!PIDS[@]}"; do
    pid="${PIDS[$i]}"
    scale="${SCALES[$i]}"
    if wait $pid; then
        echo "[DONE] $scale (PID $pid) - SUCCESS"
    else
        rc=$?  # wait's exit status; capture before another command clobbers it
        echo "[FAIL] $scale (PID $pid) - EXIT CODE $rc"
        FAILED=$((FAILED + 1))
    fi
done

if [ $FAILED -gt 0 ]; then
    echo "WARNING: $FAILED scale(s) failed. Check logs in $LOG_DIR"
fi

# nvila_st_compare merges nvila baselines (vanilla/80k/400k/800k/2m)
# with nvila_st checkpoints (80k-st/400k-st/800k-st).
# Requires nvila counter-only baseline results (run run_nvila.sh --counter-only first).
+ +echo "=========================================" +echo " NVILA-ST Compare: Merge 1/2 (full, all categories)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type nvila_st_compare --merge \ + --counter-only \ + 2>&1 | tee "${LOG_DIR_CMP}/merge.log" + +echo "=========================================" +echo " NVILA-ST Compare: Merge 2/2 (VD-only)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type nvila_st_compare --merge \ + --counter-only --vd-only \ + 2>&1 | tee "${LOG_DIR_CMP}/merge_vd_only.log" + +echo "" +echo "ALL DONE: $MODEL (counter-only)" +echo "Inference results : $SCRIPT_DIR/results_counter_only/$MODEL/" +echo "Compare (full) : $SCRIPT_DIR/results_counter_only/nvila_st_compare/" +echo "Compare (vd-only) : $SCRIPT_DIR/results_counter_only/vd-only/nvila_st_compare/" diff --git a/swap_analysis_synthetic/run_qwen.sh b/swap_analysis_synthetic/run_qwen.sh new file mode 100644 index 0000000000000000000000000000000000000000..ff269749226a7d93b167ccd29fb18129ca159b03 --- /dev/null +++ b/swap_analysis_synthetic/run_qwen.sh @@ -0,0 +1,73 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis_synthetic.py" +PYTHON="/usr/bin/python3" +MODEL="qwen" +LOG_DIR="$SCRIPT_DIR/logs_counter_only/${MODEL}" +mkdir -p "$LOG_DIR" + +# Qwen2-VL-7B: ~20GB per scale → 5 scales fit on GPUs 0-4 (one per GPU) +# Adjust GPUS to match your available GPU layout. 
# Fix: `set -e` (from the header) only checks the LAST command of a pipeline,
# so a failing `$PYTHON ... | tee log` merge step below would succeed as long
# as tee succeeds. pipefail makes the whole pipeline fail when python fails.
set -o pipefail

SCALES=("vanilla" "80k" "400k" "800k" "2m")
GPUS=(0 1 2 3 4)

echo "========================================="
echo " Qwen Synthetic Swap Analysis (counter-only): Launching ${#SCALES[@]} scales in parallel"
echo "========================================="

PIDS=()
for i in "${!SCALES[@]}"; do
    scale="${SCALES[$i]}"
    gpu="${GPUS[$i]}"
    log="${LOG_DIR}/${scale}.log"

    echo "[GPU $gpu] $scale -> $log"
    CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \
        --model_type $MODEL \
        --scales $scale \
        --device cuda \
        --no-auto-roborefer \
        --counter-only \
        > "$log" 2>&1 &
    PIDS+=($!)
done

echo ""
echo "Waiting for all ${#PIDS[@]} processes..."
FAILED=0
for i in "${!PIDS[@]}"; do
    pid="${PIDS[$i]}"
    scale="${SCALES[$i]}"
    if wait $pid; then
        echo "[DONE] $scale (PID $pid) - SUCCESS"
    else
        rc=$?  # wait's exit status; capture before another command clobbers it
        echo "[FAIL] $scale (PID $pid) - EXIT CODE $rc"
        FAILED=$((FAILED + 1))
    fi
done

if [ $FAILED -gt 0 ]; then
    echo "WARNING: $FAILED scale(s) failed. Check logs in $LOG_DIR"
fi

echo "========================================="
echo " Qwen: Merge 1/2 (full, all categories)"
echo "========================================="
$PYTHON "$SCRIPT" --model_type $MODEL --merge \
    --scales vanilla 80k 400k 800k 2m \
    --counter-only \
    2>&1 | tee "${LOG_DIR}/merge.log"

echo "========================================="
echo " Qwen: Merge 2/2 (VD-only)"
echo "========================================="
$PYTHON "$SCRIPT" --model_type $MODEL --merge \
    --scales vanilla 80k 400k 800k 2m \
    --counter-only --vd-only \
    2>&1 | tee "${LOG_DIR}/merge_vd_only.log"

echo "ALL DONE: $MODEL (counter-only)"
echo "Results (full)   : $SCRIPT_DIR/results_counter_only/$MODEL/"
echo "Results (vd-only): $SCRIPT_DIR/results_counter_only/vd-only/$MODEL/"
#!/bin/bash
set -e
# Fix: `set -e` only checks the LAST command of a pipeline, so the
# `$PYTHON ... | tee` runs below would succeed whenever tee succeeds even if
# python failed. pipefail makes the whole pipeline fail when python fails.
set -o pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SCRIPT="$SCRIPT_DIR/swap_analysis_synthetic.py"
PYTHON="$(which python)"
MODEL="qwen_big"
LOG_DIR="$SCRIPT_DIR/logs_counter_only/${MODEL}"
LOG_DIR_CMP="$SCRIPT_DIR/logs_counter_only/qwen_all"
mkdir -p "$LOG_DIR" "$LOG_DIR_CMP"

# Qwen3-VL-32B: ~65GB with device_map='auto' → needs multiple GPUs
SCALE="qwen3_32b"

echo "========================================="
echo " Qwen-Big Synthetic Swap Analysis (counter-only): $SCALE"
echo "========================================="

log="${LOG_DIR}/${SCALE}.log"
echo "Scale: $SCALE -> $log"

CUDA_VISIBLE_DEVICES=4,5,6,7 $PYTHON "$SCRIPT" \
    --model_type $MODEL \
    --scales $SCALE \
    --device cuda \
    --counter-only \
    2>&1 | tee "$log"

# qwen_all merges qwen baselines (vanilla/80k/400k/800k/2m) with qwen3_32b.
# Requires qwen baseline results to already exist (run run_qwen.sh first).
+ +echo "=========================================" +echo " Qwen-All: Merge 1/2 (full, all categories)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type qwen_all --merge \ + --counter-only \ + 2>&1 | tee "${LOG_DIR_CMP}/merge.log" + +echo "=========================================" +echo " Qwen-All: Merge 2/2 (VD-only)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type qwen_all --merge \ + --counter-only --vd-only \ + 2>&1 | tee "${LOG_DIR_CMP}/merge_vd_only.log" + +echo "ALL DONE: $MODEL (counter-only)" +echo "Inference results : $SCRIPT_DIR/results_counter_only/$MODEL/" +echo "Compare (full) : $SCRIPT_DIR/results_counter_only/qwen_all/" +echo "Compare (vd-only) : $SCRIPT_DIR/results_counter_only/vd-only/qwen_all/" diff --git a/swap_analysis_synthetic/swap_analysis_synthetic.py b/swap_analysis_synthetic/swap_analysis_synthetic.py new file mode 100644 index 0000000000000000000000000000000000000000..a5cae18b39b2fc1b8f9b26fd76785e5572df314b --- /dev/null +++ b/swap_analysis_synthetic/swap_analysis_synthetic.py @@ -0,0 +1,841 @@ +""" +swap_analysis_synthetic.py — Swap analysis using synthetic 2-body rendered data. + +Same experimental pipeline as swap_analysis.py, but uses locally-generated +synthetic images instead of EmbSpatial-Bench. 
+ +Data layout (per category folder): + /data/shared/Qwen/synthetic/2body/{above,below,left,right,close,far}/ + 0000.png … 0199.png (512×512 rendered scenes) + vqa.json (200 entries; each has image path, answer, obj1, obj2) + +vqa.json entry format: + { + "image": "output/2body/{category}/{XXXX}.png", + "question": "...", + "answer": "above" | "below" | "left" | "right" | "closer" | "farther", + "obj1": {"color": str, "shape": str, "bbox": [x_min, y_min, x_max, y_max]}, + "obj2": {"color": str, "shape": str, "bbox": [x_min, y_min, x_max, y_max]} + } + +Key differences from EmbSpatial-Bench: + - Images loaded from disk PNG files, not base64-in-TSV + - Object descriptions built from color+shape (e.g. "yellow cube") + - Cross-group quads computed directly from stored bounding boxes (no HF cache) + - MCQ questions with alternating A/B order (same as swap_analysis.py) + - "farther"/"closer" added as synonyms for "far"/"close" in answer matching + +Usage: + # Single model, single scale + python swap_analysis_synthetic.py --model_type qwen --scales vanilla + python swap_analysis_synthetic.py --model_type qwen_super --scales qwen3_235b + + # All scales for a model + python swap_analysis_synthetic.py --model_type qwen + + # Merge mode (regenerate cross-scale plots from saved per-scale CSVs) + python swap_analysis_synthetic.py --model_type qwen --merge + + # Skip cross-group quads + python swap_analysis_synthetic.py --model_type qwen --skip-cross-group +""" + +import argparse +import base64 +import json +import logging +import os +import random +import sys +from collections import defaultdict +from typing import List + +import numpy as np +import torch + +# ── Import pipeline from swap_analysis (sibling directory) ────────────────── +_HERE = os.path.dirname(os.path.abspath(__file__)) +_SA_DIR = os.path.realpath(os.path.join(_HERE, '..', 'swap_analysis')) +if _SA_DIR not in sys.path: + sys.path.insert(0, _SA_DIR) + +import swap_analysis as sa +from swap_analysis import ( + 
CATEGORY_ORDER, OPPOSITE_MAP, GROUP_MAP, + MCQ_TEMPLATES, MCQ_LETTER, + SHORT_TEMPLATES, SHORT_OPPOSITE_MAP, + MODEL_CONFIGS, MODEL_CONFIGS_NEW, MERGE_ONLY_CONFIGS, ALL_MODEL_TYPES, + process_scale, run_merge, run_merge_extended, + logger, +) + +# ── Patch SYNONYMS so "farther"/"closer" are accepted in answer matching ───── +# Synthetic vqa.json labels are "farther"/"closer" but our generated questions +# ask "far or close", so the model may answer with either form. +if 'farther' not in sa.SYNONYMS.get('far', []): + sa.SYNONYMS.setdefault('far', []).append('farther') +if 'closer' not in sa.SYNONYMS.get('close', []): + sa.SYNONYMS.setdefault('close', []).append('closer') + +# ── Constants ──────────────────────────────────────────────────────────────── + +SYNTHETIC_DATA_DIR = '/data/shared/Qwen/synthetic/2body' +COUNTER_DATA_DIR = '/data/shared/Qwen/synthetic/2body_bias' +DEFAULT_OUTPUT_DIR = os.path.join(_HERE, 'results') +DEFAULT_OUTPUT_DIR_COUNTER = os.path.join(_HERE, 'results_counter_only') + +# Maps folder name → canonical category (keeps CATEGORY_ORDER / OPPOSITE_MAP intact) +FOLDER_TO_CATEGORY = { + 'above': 'above', + 'below': 'below', + 'left': 'left', + 'right': 'right', + 'close': 'close', + 'far': 'far', +} + +# Counter-only: folders are prefixed with "counter_" inside 2body_bias/ +COUNTER_FOLDER_TO_CATEGORY = { + 'counter_above': 'above', + 'counter_below': 'below', + 'counter_left': 'left', + 'counter_right': 'right', + 'counter_close': 'close', + 'counter_far': 'far', +} + +# Consistent-only: folders are prefixed with "consistent_" inside 2body_bias/ +CONSISTENT_FOLDER_TO_CATEGORY = { + 'consistent_above': 'above', + 'consistent_below': 'below', + 'consistent_left': 'left', + 'consistent_right': 'right', + 'consistent_close': 'close', + 'consistent_far': 'far', +} + +# VD-only: vertical (above/below) + distance (far/close) categories only +VD_CATEGORIES = {'above', 'below', 'far', 'close'} + +# Image height for vertical-ambiguity threshold in 
# Image height for vertical-ambiguity threshold in cross-group quads
SYNTHETIC_IMAGE_HEIGHT = 512


# ── Logging setup ────────────────────────────────────────────────────────────

def _setup_file_logging_synthetic(
    model_type: str,
    counter_only: bool = False,
    consistent_ratio: float = None,
    question_type: str = 'short_answer',
) -> str:
    """Attach a file handler writing to
    logs[_counter_only|_consistent_ratio_X][_short]/{model_type}_synthetic.log
    and return the log file path.

    Args:
        model_type: Used as the log file stem.
        counter_only: Select the logs_counter_only/ sub-directory.
        consistent_ratio: If given, select logs_consistent_ratio_{ratio}/
            (takes precedence over counter_only).
        question_type: 'short_answer' appends the '_short' suffix to the dir.
    """
    if consistent_ratio is not None:
        ratio_str = f'{consistent_ratio:g}'
        log_subdir = f'logs_consistent_ratio_{ratio_str}'
    elif counter_only:
        log_subdir = 'logs_counter_only'
    else:
        log_subdir = 'logs'
    if question_type == 'short_answer':
        log_subdir += '_short'
    log_dir = os.path.join(_HERE, log_subdir)
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, f'{model_type}_synthetic.log')

    root = logging.getLogger()
    # Fix: previously every call unconditionally added a new FileHandler to the
    # root logger, so repeated calls duplicated every subsequent log line.
    # Only attach if no FileHandler for this exact file is present yet.
    target = os.path.abspath(log_path)
    already_attached = any(
        isinstance(h, logging.FileHandler)
        and getattr(h, 'baseFilename', None) == target
        for h in root.handlers
    )
    if not already_attached:
        fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
        fh.setLevel(logging.INFO)
        fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        root.addHandler(fh)
    return log_path
def _build_question_pair(group: str, obj1_desc: str, obj2_desc: str,
                         pair_index: int, question_type: str) -> tuple:
    """Return (orig_q, swap_q, mcq_map) for a swap pair.

    For 'short_answer', mcq_map is None. For 'mcq', mcq_map is the
    letter→answer map for the option order chosen for this pair.
    (Docstring fix: the function returns a 3-tuple; answer keys are derived
    by the caller, not returned here.)

    Args:
        group: 'horizontal' | 'vertical' | 'distance'.
        obj1_desc, obj2_desc: Object descriptions (e.g. "yellow cube").
        pair_index: Running pair count; parity alternates the MCQ option
            order to cancel A/B positional bias.
        question_type: 'short_answer' or 'mcq'.
    """
    if question_type == 'short_answer':
        tmpl = SHORT_TEMPLATES[group]
        if group == 'distance':
            orig_q = tmpl.format(subj=obj1_desc, ref=obj2_desc)
            swap_q = tmpl.format(subj=obj2_desc, ref=obj1_desc)
        else:
            # horizontal and vertical templates share the obj1/obj2 kwargs
            orig_q = tmpl.format(obj1=obj1_desc, obj2=obj2_desc)
            swap_q = tmpl.format(obj1=obj2_desc, obj2=obj1_desc)
        return orig_q, swap_q, None

    # mcq: alternate option order per pair (even index → first variant)
    _VARIANTS = {
        'horizontal': ('left_first', 'right_first'),
        'vertical': ('above_first', 'below_first'),
        'distance': ('far_first', 'close_first'),
    }
    even, odd = _VARIANTS[group]
    variant = even if pair_index % 2 == 0 else odd
    tmpl = MCQ_TEMPLATES[group][variant]
    if group == 'distance':
        orig_q = tmpl.format(subj=obj1_desc, ref=obj2_desc)
        swap_q = tmpl.format(subj=obj2_desc, ref=obj1_desc)
    else:
        orig_q = tmpl.format(obj1=obj1_desc, obj2=obj2_desc)
        swap_q = tmpl.format(obj1=obj2_desc, obj2=obj1_desc)
    return orig_q, swap_q, MCQ_LETTER[group][variant]
def load_swap_pairs_synthetic(
    data_dir: str,
    seed: int = 42,
    folder_to_category: dict = None,
    question_type: str = 'short_answer',
) -> List[dict]:
    """Load synthetic 2-body data and build swap pairs.

    Returns a list of dicts with the same schema as swap_analysis.load_swap_pairs(),
    plus extra keys (obj1_bbox, obj2_bbox, obj1_desc, obj2_desc) consumed by
    create_cross_group_quads_synthetic() and ignored by the rest of the pipeline.

    Args:
        data_dir: Root directory containing category sub-folders.
        seed: Random seed (currently unused; reserved for future subsampling).
        folder_to_category: Mapping of folder_name → canonical category.
            Defaults to FOLDER_TO_CATEGORY (standard 2body layout).
            Pass COUNTER_FOLDER_TO_CATEGORY for counter-only (2body_bias) layout.
        question_type: 'short_answer' (default) or 'mcq'.
            (Docstring fix: previously documented 'mcq' as the default,
            contradicting the signature.)
    """
    if folder_to_category is None:
        folder_to_category = FOLDER_TO_CATEGORY

    pairs = []
    stats = defaultdict(lambda: {'total': 0, 'success': 0})

    for folder_name, category in folder_to_category.items():
        folder_path = os.path.join(data_dir, folder_name)
        json_path = os.path.join(folder_path, 'vqa.json')

        if not os.path.exists(json_path):
            logger.warning(f'[synthetic] Missing vqa.json: {json_path}')
            continue

        with open(json_path, encoding='utf-8') as f:
            entries = json.load(f)

        group = GROUP_MAP[category]

        for i, entry in enumerate(entries):
            stats[category]['total'] += 1

            obj1 = entry['obj1']
            obj2 = entry['obj2']
            obj1_desc = f"{obj1['color']} {obj1['shape']}"
            obj2_desc = f"{obj2['color']} {obj2['shape']}"

            # Load PNG from disk → base64 (same format as EmbSpatialBench TSV)
            img_filename = os.path.basename(entry['image'])  # e.g. "0000.png"
            img_path = os.path.join(folder_path, img_filename)
            if not os.path.exists(img_path):
                logger.warning(f'[synthetic] Image not found: {img_path}')
                continue
            with open(img_path, 'rb') as fimg:
                img_b64 = base64.b64encode(fimg.read()).decode('utf-8')

            orig_q, swap_q, mcq_map = _build_question_pair(
                group, obj1_desc, obj2_desc, len(pairs), question_type)

            orig_ans = category
            swap_ans = SHORT_OPPOSITE_MAP[category] if question_type == 'short_answer' else OPPOSITE_MAP[category]

            pair = {
                # ── Standard swap pair fields (same schema as load_swap_pairs) ──
                'index': f'{folder_name}_{i:04d}',
                'question_id': f'{folder_name}_{i:04d}',
                'image_base64': img_b64,
                'original_question': orig_q,
                'swapped_question': swap_q,
                'original_answer': orig_ans,
                'swapped_answer': swap_ans,
                'group': group,
                'category': category,
                'mcq_map': mcq_map,
                # ── Extra fields for cross-group quad creation ─────────────────
                'obj1_bbox': obj1['bbox'],  # [x_min, y_min, x_max, y_max]
                'obj2_bbox': obj2['bbox'],
                'obj1_desc': obj1_desc,
                'obj2_desc': obj2_desc,
            }
            pairs.append(pair)
            stats[category]['success'] += 1

    logger.info('Synthetic swap pair creation stats:')
    # Fix: also report categories missing from CATEGORY_ORDER (e.g. 'below'
    # when the canonical order uses 'under') — previously they were silently
    # omitted from this summary even though their pairs were loaded.
    for cat in list(CATEGORY_ORDER) + sorted(set(stats) - set(CATEGORY_ORDER)):
        s = stats[cat]
        if s['total']:
            logger.info(f'  {cat}: {s["success"]}/{s["total"]}')
    logger.info(f'  Total pairs: {len(pairs)}')
    return pairs
def load_swap_pairs_mixed(
    data_dir: str,
    consistent_ratio: float,
    n_per_category: int = 200,
    seed: int = 42,
    question_type: str = 'short_answer',
) -> List[dict]:
    """Load a mix of consistent and counter samples from 2body_bias.

    For each canonical category, samples
        n_consistent = round(n_per_category * consistent_ratio)  from consistent_{cat}/
        n_counter    = n_per_category - n_consistent             from counter_{cat}/

    The two sets are combined per category and returned as swap pairs using the
    same schema as load_swap_pairs_synthetic().

    Args:
        data_dir: Root directory (typically COUNTER_DATA_DIR / 2body_bias).
        consistent_ratio: Fraction of consistent samples (0.0 – 1.0).
        n_per_category: Target total samples per category (default 200).
        seed: Random seed for reproducible subsampling.
        question_type: 'short_answer' (default) or 'mcq'.
            (Docstring fix: previously documented 'mcq' as the default,
            contradicting the signature.)
    """
    rng = random.Random(seed)
    n_consistent = round(n_per_category * consistent_ratio)
    n_counter = n_per_category - n_consistent

    logger.info(
        f'[mixed] consistent_ratio={consistent_ratio} '
        f'→ {n_consistent} consistent + {n_counter} counter per category '
        f'(total {n_per_category})'
    )

    # Helper: load all entries from a single category folder and subsample.
    # (Annotation fix: returns a list of (folder_path, folder_name, category,
    # entry) tuples, not a list of dicts.)
    def _load_folder(folder_name: str, category: str, n: int) -> List[tuple]:
        if n <= 0:
            return []
        folder_path = os.path.join(data_dir, folder_name)
        json_path = os.path.join(folder_path, 'vqa.json')
        if not os.path.exists(json_path):
            logger.warning(f'[mixed] Missing vqa.json: {json_path}')
            return []
        with open(json_path, encoding='utf-8') as f:
            entries = json.load(f)
        if len(entries) > n:
            entries = rng.sample(entries, n)
        elif len(entries) < n:
            # Robustness: surface shortfalls instead of silently under-sampling.
            logger.warning(
                f'[mixed] {folder_name}: only {len(entries)} entries available '
                f'(requested {n})')
        return [(folder_path, folder_name, category, entry) for entry in entries]

    CATEGORIES = list(FOLDER_TO_CATEGORY.values())  # canonical order

    pairs = []
    stats = defaultdict(lambda: {'total': 0, 'success': 0})

    for category in CATEGORIES:
        consistent_items = _load_folder(f'consistent_{category}', category, n_consistent)
        counter_items = _load_folder(f'counter_{category}', category, n_counter)
        all_items = consistent_items + counter_items
        rng.shuffle(all_items)  # mix the two sources

        group = GROUP_MAP[category]

        for idx, (folder_path, folder_name, cat, entry) in enumerate(all_items):
            stats[cat]['total'] += 1

            obj1 = entry['obj1']
            obj2 = entry['obj2']
            obj1_desc = f"{obj1['color']} {obj1['shape']}"
            obj2_desc = f"{obj2['color']} {obj2['shape']}"

            img_filename = os.path.basename(entry['image'])
            img_path = os.path.join(folder_path, img_filename)
            if not os.path.exists(img_path):
                logger.warning(f'[mixed] Image not found: {img_path}')
                continue
            with open(img_path, 'rb') as fimg:
                img_b64 = base64.b64encode(fimg.read()).decode('utf-8')

            orig_q, swap_q, mcq_map = _build_question_pair(
                group, obj1_desc, obj2_desc, len(pairs), question_type)

            orig_ans = cat
            swap_ans = SHORT_OPPOSITE_MAP[cat] if question_type == 'short_answer' else OPPOSITE_MAP[cat]

            pair = {
                'index': f'{folder_name}_{idx:04d}',
                'question_id': f'{folder_name}_{idx:04d}',
                'image_base64': img_b64,
                'original_question': orig_q,
                'swapped_question': swap_q,
                'original_answer': orig_ans,
                'swapped_answer': swap_ans,
                'group': group,
                'category': cat,
                'mcq_map': mcq_map,
                'obj1_bbox': obj1['bbox'],
                'obj2_bbox': obj2['bbox'],
                'obj1_desc': obj1_desc,
                'obj2_desc': obj2_desc,
            }
            pairs.append(pair)
            stats[cat]['success'] += 1

    logger.info('Mixed swap pair creation stats:')
    # Fix: also report categories missing from CATEGORY_ORDER (e.g. 'below'
    # when the canonical order uses 'under') — previously silently omitted.
    for cat in list(CATEGORY_ORDER) + sorted(set(stats) - set(CATEGORY_ORDER)):
        s = stats[cat]
        if s['total']:
            logger.info(f'  {cat}: {s["success"]}/{s["total"]}')
    logger.info(f'  Total pairs: {len(pairs)}')
    return pairs
+ """ + threshold = SYNTHETIC_IMAGE_HEIGHT * threshold_ratio + quads = [] + stats = {'total': 0, 'matched': 0, 'ambiguous': 0} + + distance_pairs = [p for p in swap_pairs if p['group'] == 'distance'] + + for pair in distance_pairs: + stats['total'] += 1 + + bbox1 = pair['obj1_bbox'] # [x_min, y_min, x_max, y_max] + bbox2 = pair['obj2_bbox'] + + cy1 = (bbox1[1] + bbox1[3]) / 2.0 # center Y of obj1 + cy2 = (bbox2[1] + bbox2[3]) / 2.0 # center Y of obj2 + + y_diff = cy1 - cy2 + if abs(y_diff) < threshold: + stats['ambiguous'] += 1 + continue + + # In image coordinates: smaller Y = higher in frame = "above" + vert_orig_answer = 'above' if cy1 < cy2 else 'below' + + if question_type == 'short_answer': + vert_orig_q = SHORT_TEMPLATES['vertical'].format( + obj1=pair['obj1_desc'], obj2=pair['obj2_desc']) + vert_swap_q = SHORT_TEMPLATES['vertical'].format( + obj1=pair['obj2_desc'], obj2=pair['obj1_desc']) + vert_mcq_map = None + else: + # MCQ vert question — alternate option order to cancel A/B positional bias + vert_variant = 'above_first' if len(quads) % 2 == 0 else 'below_first' + vert_orig_q = MCQ_TEMPLATES['vertical'][vert_variant].format( + obj1=pair['obj1_desc'], obj2=pair['obj2_desc']) + vert_swap_q = MCQ_TEMPLATES['vertical'][vert_variant].format( + obj1=pair['obj2_desc'], obj2=pair['obj1_desc']) + vert_mcq_map = MCQ_LETTER['vertical'][vert_variant] + + quad = { + 'index': pair['index'], + 'image_base64': pair['image_base64'], + 'dist_original_q': pair['original_question'], + 'dist_swapped_q': pair['swapped_question'], + 'dist_original_answer': pair['original_answer'], + 'dist_swapped_answer': pair['swapped_answer'], + 'dist_mcq_map': pair['mcq_map'], + 'vert_original_q': vert_orig_q, + 'vert_swapped_q': vert_swap_q, + 'vert_original_answer': vert_orig_answer, + 'vert_swapped_answer': OPPOSITE_MAP[vert_orig_answer], + 'vert_mcq_map': vert_mcq_map, + 'target_object': pair['obj1_desc'], + 'reference_object': pair['obj2_desc'], + 'target_bbox_y': cy1, + 'ref_bbox_y': 
cy2, + 'y_diff': y_diff, + 'data_source': 'synthetic', + } + quads.append(quad) + stats['matched'] += 1 + + logger.info(f"Synthetic cross-group quads: {stats['matched']}/{stats['total']} " + f"(ambiguous={stats['ambiguous']})") + return quads + + +# ── VD-only merge helpers ──────────────────────────────────────────────────── + +def _filter_scale_data_vd(data): + """Filter {(category, layer): values} dict to VD categories only.""" + return {k: v for k, v in data.items() if k[0] in VD_CATEGORIES} + + +def _filter_heatmap_vd(heatmap_dict): + """Filter per-layer delta-heatmap DataFrames to VD categories only.""" + vd_order = [c for c in sa.CATEGORY_ORDER if c in VD_CATEGORIES] + result = {} + for layer, df in heatmap_dict.items(): + idx = [c for c in vd_order if c in df.index] + col = [c for c in vd_order if c in df.columns] + result[layer] = df.loc[idx, col] + return result + + +def run_merge_vd_only_synthetic(args): + """Merge in VD-only mode. + + Reads per-scale JSON/CSV data from the *full* results directory + (all 6 categories), filters to VD categories (above/below/far/close), + and writes cross-scale comparison plots to results/vd-only/{model_type}/. + + args.output_dir : write destination (e.g. 
                       results/vd-only/ or
                       results_counter_only/vd-only/)
    args.counter_only: selects read base (results vs results_counter_only)
    """
    # READ base: per-scale data is always loaded from the full (6-category)
    # results tree; only the WRITE destination is VD-specific.
    if getattr(args, 'consistent_ratio', None) is not None:
        ratio_str = f'{args.consistent_ratio:g}'
        base_full = os.path.join(_HERE, f'results_consistent_ratio_{ratio_str}')
    elif args.counter_only:
        base_full = DEFAULT_OUTPUT_DIR_COUNTER
    else:
        base_full = DEFAULT_OUTPUT_DIR

    # Merge-only types stitch together results produced by other model_types,
    # so each scale may read from a different source directory.
    is_merge_only = args.model_type in sa.MERGE_ONLY_CONFIGS
    if is_merge_only:
        mc = sa.MERGE_ONLY_CONFIGS[args.model_type]
        scale_order = mc['scale_order']
        scale_sources = mc['scale_sources']
    elif args.model_type in sa.MODEL_CONFIGS_NEW:
        scale_order = list(sa.MODEL_CONFIGS_NEW[args.model_type].keys())
        scale_sources = None
    else:
        scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer',
                       '10pct', '20pct', '30pct']
        scale_sources = None

    # Keep canonical ordering, restricted to the scales the caller requested.
    available_scales = [s for s in scale_order if s in args.scales]

    default_write = os.path.join(args.output_dir, args.model_type)
    write_dir = getattr(args, 'merge_output_dir', None) or default_write
    plots_dir = os.path.join(write_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    logger.info(f'[VD-only merge] Read base : {base_full}')
    logger.info(f'[VD-only merge] Write dir : {write_dir}')
    logger.info(f'[VD-only merge] Scales    : {available_scales}')

    # Per-scale accumulators, keyed by scale name:
    #   sc = sign-corrected consistency, wc = within-category consistency,
    #   align = cross-group alignment, dh = delta heatmaps; *_bc = both-correct.
    all_sc = {}
    all_sc_bc = {}
    all_wc = {}
    all_wc_bc = {}
    all_align = {}
    all_dh = {}
    all_dh_bc = {}

    phase_b_dirs = {}

    for scale in available_scales:
        if is_merge_only:
            src_model = scale_sources[scale]
            read_dir = os.path.join(base_full, src_model)
        else:
            read_dir = os.path.join(base_full, args.model_type)

        phase_b_dirs[scale] = read_dir

        sc = sa.load_scale_consistency(read_dir, scale, 'all_pairs')
        sc_bc = sa.load_scale_consistency(read_dir, scale, 'both_correct')
        wc = sa.load_within_cat_consistency(read_dir, scale, 'all_pairs')
        wc_bc = sa.load_within_cat_consistency(read_dir, scale, 'both_correct')
        align = sa.load_scale_alignment(read_dir, scale)
        dh = sa.load_delta_heatmaps(read_dir, scale, 'all_pairs')
        dh_bc = sa.load_delta_heatmaps(read_dir, scale, 'both_correct')

        # Only store what actually loaded; missing data for a scale simply
        # drops it from the corresponding cross-scale plot.
        if sc: all_sc[scale] = _filter_scale_data_vd(sc)
        if sc_bc: all_sc_bc[scale] = _filter_scale_data_vd(sc_bc)
        if wc: all_wc[scale] = _filter_scale_data_vd(wc)
        if wc_bc: all_wc_bc[scale] = _filter_scale_data_vd(wc_bc)
        if align: all_align[scale] = align  # no category dim
        if dh: all_dh[scale] = _filter_heatmap_vd(dh)
        if dh_bc: all_dh_bc[scale] = _filter_heatmap_vd(dh_bc)

        logger.info(f'  Loaded + VD-filtered: {scale}')

    # ── Cross-scale trajectory plots ──────────────────────────────────────────
    for condition, sc_data, wc_data, dh_data, tag_label in [
        ('all', all_sc, all_wc, all_dh, 'all pairs'),
        ('both_correct', all_sc_bc, all_wc_bc, all_dh_bc, 'both-correct'),
    ]:
        cond_dir = os.path.join(plots_dir, condition)
        sc_dir = os.path.join(cond_dir, 'sign_corrected')
        wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
        dt_dir = os.path.join(cond_dir, 'delta_trajectory')
        os.makedirs(sc_dir, exist_ok=True)
        os.makedirs(wc_dir, exist_ok=True)
        os.makedirs(dt_dir, exist_ok=True)

        # Cross-scale plots need at least two scales to compare.
        if len(sc_data) > 1:
            sa.plot_cross_scale_consistency(
                sc_data, args.model_type,
                os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
                title_prefix=f'Sign-Corrected ({tag_label}) [VD-only]')

        if len(wc_data) > 1:
            sa.plot_cross_scale_within_cat_consistency(
                wc_data, args.model_type,
                os.path.join(wc_dir, 'cross_scale_within_cat.png'))

        if dh_data:
            sa.plot_delta_trajectory(
                dh_data, args.model_type,
                os.path.join(dt_dir, 'delta_trajectory.png'))

    # ── Cross-scale alignment ─────────────────────────────────────────────────
    all_cond_dir = os.path.join(plots_dir, 'all')
    ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
    os.makedirs(ca_dir, exist_ok=True)

    # Alignment plot is only meaningful when every scale ran Phase B.
    has_phase_b = all(
        sa._has_phase_b_data(phase_b_dirs[s], s) for s in available_scales)
    if has_phase_b and len(all_align) > 1:
        sa.plot_cross_scale_alignment(
            all_align, args.model_type,
            os.path.join(ca_dir, 'cross_scale_alignment.png'))

    logger.info(f'\n=== VD-only Merge Complete ===\nResults in: {write_dir}')


# ── Main ─────────────────────────────────────────────────────────────────────

def main():
    # CLI entry point: parses arguments, resolves data/output paths, then
    # dispatches to merge mode or per-scale inference.
    _LEGACY_DEFAULT_SCALES = {
        'molmo': ['vanilla', '80k', '400k', '800k', '2m'],
        'nvila': ['vanilla', '80k', '400k', '800k', '2m'],
        'qwen': ['vanilla', '80k', '400k', '800k', '2m'],
        'nvila_synthetic': ['80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '400k-5pct'],
        'nvila_st': ['80k-st', '400k-st', '800k-st'],
    }

    parser = argparse.ArgumentParser(
        description='Swap Analysis — Synthetic 2-body data',
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--data_path', type=str, default=None,
                        help='Root directory containing category folders. '
                             'Defaults to SYNTHETIC_DATA_DIR (or COUNTER_DATA_DIR with --counter-only).')
    parser.add_argument('--counter-only', action='store_true', dest='counter_only',
                        help='Use counter-only data from 2body_bias/ '
                             '(folders: counter_above, counter_below, …). '
                             'Overrides --data_path default to COUNTER_DATA_DIR.')
    parser.add_argument('--consistent_ratio', type=float, default=None,
                        dest='consistent_ratio',
                        help='Mix consistent and counter samples from 2body_bias/. '
                             'Value in [0, 1]: fraction of consistent samples per category. '
                             'E.g. 0.8 → 160 consistent + 40 counter out of 200 total. 
'
                             'Results saved under results_consistent_ratio_{value}/ and '
                             'logs_consistent_ratio_{value}/.')
    parser.add_argument('--model_type', type=str, required=True,
                        choices=ALL_MODEL_TYPES,
                        help=(
                            'Legacy: molmo | nvila | qwen\n'
                            'New large: molmo_big | qwen_big | qwen_super | big_trio\n'
                            'Merge-only (--merge required): molmo_all | qwen_all'
                        ))
    parser.add_argument('--scales', type=str, nargs='+', default=None,
                        help='Scales to process (default: all for the given model_type).')
    parser.add_argument('--output_dir', type=str, default=None,
                        help='Root output directory. Defaults to results/ (or results/vd-only/ '
                             'with --vd-only).')
    parser.add_argument('--vd-only', action='store_true', dest='vd_only',
                        help='Run only vertical+distance categories (above/below/far/close). '
                             'Produces a 4×4 delta-similarity heatmap, 2D PCA only (no 3D). '
                             'Results saved under results/vd-only/ by default.')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--merge', action='store_true',
                        help='Merge mode: regenerate cross-scale plots from saved per-scale data.')
    parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir',
                        help='Override output dir for cross-scale plots.')
    parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer',
                        help='Disable automatic inclusion of roborefer scale for nvila.')
    parser.add_argument('--skip-cross-group', action='store_true',
                        help='Skip cross-group quad extraction.')
    parser.add_argument('--skip-phase-b', action='store_true', dest='skip_phase_b',
                        help='Skip Phase B (cross-group feature extraction). 
'
                             'Phase A inference + analysis + plots still run normally.')
    parser.add_argument('--max-samples-per-category', type=int, default=200,
                        dest='max_samples_per_category')
    parser.add_argument('--question-type', type=str, default='short_answer',
                        choices=['mcq', 'short_answer'], dest='question_type',
                        help='Question format: short_answer (default) or mcq.')

    args = parser.parse_args()

    # ── Validate mutually exclusive modes ────────────────────────────────────
    if args.consistent_ratio is not None and args.counter_only:
        parser.error('--consistent_ratio and --counter-only are mutually exclusive.')
    if args.consistent_ratio is not None and not (0.0 <= args.consistent_ratio <= 1.0):
        parser.error('--consistent_ratio must be in [0.0, 1.0].')

    # ── Resolve data path and folder mapping ──────────────────────────────────
    if args.consistent_ratio is not None:
        # Mixed mode: always read from COUNTER_DATA_DIR (contains both consistent_* and counter_*)
        folder_map = None  # handled by load_swap_pairs_mixed; not used for standard loading
        if args.data_path is None:
            args.data_path = COUNTER_DATA_DIR
    elif args.counter_only:
        folder_map = COUNTER_FOLDER_TO_CATEGORY
        if args.data_path is None:
            args.data_path = COUNTER_DATA_DIR
    else:
        folder_map = FOLDER_TO_CATEGORY
        if args.data_path is None:
            args.data_path = SYNTHETIC_DATA_DIR

    # --vd-only: filter folder_map to vertical+distance categories only
    if args.vd_only and folder_map is not None:
        folder_map = {k: v for k, v in folder_map.items() if v in VD_CATEGORIES}

    # ── Resolve output directory ───────────────────────────────────────────────
    # Only applied when the user did not pass --output_dir explicitly.
    if args.output_dir is None:
        if args.consistent_ratio is not None:
            ratio_str = f'{args.consistent_ratio:g}'
            base_dir = os.path.join(_HERE, f'results_consistent_ratio_{ratio_str}')
        elif args.counter_only:
            base_dir = DEFAULT_OUTPUT_DIR_COUNTER
        else:
            base_dir = DEFAULT_OUTPUT_DIR
        # Short-answer runs get a separate results tree (suffix '_short').
        if args.question_type == 'short_answer':
            base_dir += 
'_short'
        if args.vd_only:
            args.output_dir = os.path.join(base_dir, 'vd-only')
        else:
            args.output_dir = base_dir

    # ── Logging ──────────────────────────────────────────────────────────────
    log_path = _setup_file_logging_synthetic(
        args.model_type,
        counter_only=args.counter_only,
        consistent_ratio=args.consistent_ratio,
        question_type=args.question_type,
    )
    logger.info(f'[synthetic] Logging to       : {log_path}')
    logger.info(f'[synthetic] Counter-only     : {args.counter_only}')
    logger.info(f'[synthetic] Consistent ratio : {args.consistent_ratio}')
    logger.info(f'[synthetic] Question type    : {args.question_type}')
    logger.info(f'[synthetic] VD-only          : {args.vd_only}')
    logger.info(f'[synthetic] Data dir         : {args.data_path}')
    logger.info(f'[synthetic] Output dir       : {args.output_dir}')

    # ── VD-only: disable 3D PCA (monkey-patch sa module before process_scale) ─
    if args.vd_only:
        sa.plot_pca_3d = lambda *_a, **_kw: None

    # ── Validate merge-only types ───────────────────────────────────────────────
    if args.model_type in MERGE_ONLY_CONFIGS and not args.merge:
        parser.error(
            f"'{args.model_type}' is a merge-only type. 
Add --merge to run it.\n"
            f"  Example: python swap_analysis_synthetic.py "
            f"--model_type {args.model_type} --merge"
        )

    # ── Default scales ──────────────────────────────────────────────────────────
    if args.scales is None:
        if args.model_type in MERGE_ONLY_CONFIGS:
            args.scales = MERGE_ONLY_CONFIGS[args.model_type]['scale_order']
        elif args.model_type in MODEL_CONFIGS_NEW:
            args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys())
        else:
            args.scales = _LEGACY_DEFAULT_SCALES.get(
                args.model_type, ['vanilla', '80k', '400k', '800k', '2m'])

    # nvila auto-includes the roborefer checkpoint unless explicitly disabled.
    if (args.model_type == 'nvila'
            and 'roborefer' not in args.scales
            and not args.no_auto_roborefer):
        args.scales.append('roborefer')

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    # ── Merge mode ──────────────────────────────────────────────────────────────
    if args.merge:
        logger.info('\n=== MERGE MODE (synthetic) ===')
        if args.vd_only:
            # Read from full results, filter to VD categories, write to vd-only/
            run_merge_vd_only_synthetic(args)
        elif args.model_type in MODEL_CONFIGS_NEW or args.model_type in MERGE_ONLY_CONFIGS:
            run_merge_extended(args)
        else:
            run_merge(args)
        return

    # ── Inference mode ──────────────────────────────────────────────────────────
    logger.info('\n=== Loading Synthetic Swap Pairs ===')
    if args.consistent_ratio is not None:
        swap_pairs = load_swap_pairs_mixed(
            args.data_path,
            consistent_ratio=args.consistent_ratio,
            n_per_category=args.max_samples_per_category,
            seed=args.seed,
            question_type=args.question_type,
        )
    else:
        swap_pairs = load_swap_pairs_synthetic(
            args.data_path, args.seed, folder_map,
            question_type=args.question_type,
        )

    # Cross-group quads are only needed for Phase B feature extraction.
    quads = []
    if not args.skip_cross_group and not getattr(args, 'skip_phase_b', False):
        quads = create_cross_group_quads_synthetic(swap_pairs, question_type=args.question_type)

    if args.model_type in MODEL_CONFIGS_NEW:
        model_configs = 
MODEL_CONFIGS_NEW[args.model_type] + else: + model_configs = MODEL_CONFIGS[args.model_type] + + for scale in args.scales: + if scale not in model_configs: + logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.") + continue + + # Validate model path (HF IDs like "Qwen/…" or "allenai/…" are always remote) + if args.model_type in MODEL_CONFIGS_NEW: + _, raw_path = model_configs[scale] + else: + raw_path = model_configs[scale] + if not os.path.isabs(raw_path) and not raw_path.startswith(('Qwen/', 'allenai/')): + if not os.path.exists(raw_path): + logger.warning(f'Model path not found: {raw_path} (scale={scale!r}), skipping.') + continue + + try: + process_scale(args, scale, swap_pairs, quads) + except Exception as e: + logger.error(f'Failed {args.model_type} - {scale}: {e}') + import traceback + traceback.print_exc() + continue + + logger.info(f"\n{'='*60}") + logger.info('=== All scales complete (synthetic) ===') + logger.info(f"Results: {os.path.join(args.output_dir, args.model_type)}") + logger.info(f"{'='*60}") + + +if __name__ == '__main__': + main() diff --git a/swap_analysis_updated/run_molmo.sh b/swap_analysis_updated/run_molmo.sh new file mode 100644 index 0000000000000000000000000000000000000000..8a0f42702382a1765fff70474c03f40141125955 --- /dev/null +++ b/swap_analysis_updated/run_molmo.sh @@ -0,0 +1,70 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n molmo python" +MODEL="molmo" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# GPU plan: Molmo ~25GB each +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(0 1 2 3 4) + +echo "=========================================" +echo " Molmo Swap Analysis: Launching ${#SCALES[@]} scales in 
parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + --skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " Molmo Swap Analysis: Running merge" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name molmo \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/molmo_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/molmo_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/molmo/" diff --git a/swap_analysis_updated/run_nvila.sh b/swap_analysis_updated/run_nvila.sh new file mode 100644 index 0000000000000000000000000000000000000000..497326c362bae340ee8f38bd4ef3f892c2b72cdd --- /dev/null +++ b/swap_analysis_updated/run_nvila.sh @@ -0,0 +1,81 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +MODEL="nvila" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → 
{SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +# Shell stdout (conda messages, early crashes) goes here: +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# GPU plan: NVILA ~8GB each +SCALES=("vanilla" "80k" "400k" "800k" "2m" "roborefer") +GPUS=(2 3 4 5 6 7) + +echo "=========================================" +echo " NVILA Swap Analysis: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + --skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. 
Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " NVILA Swap Analysis: Merge 1/2 (without roborefer)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name nvila \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/nvila_merge_stdout.log" + +echo "=========================================" +echo " NVILA Swap Analysis: Merge 2/2 (with roborefer)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m roborefer \ + --merge --group-name nvila_with_roborefer \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/nvila_with_roborefer_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/nvila_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/nvila/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/nvila_with_roborefer/" diff --git a/swap_analysis_updated/run_nvila_st.sh b/swap_analysis_updated/run_nvila_st.sh new file mode 100644 index 0000000000000000000000000000000000000000..5de69aa94dd9cabd4341fc0a155d65383578aadc --- /dev/null +++ b/swap_analysis_updated/run_nvila_st.sh @@ -0,0 +1,80 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# 3 checkpoints from MCQ-synthetic-trained NVILA run: +# 80k-st → checkpoint-1250 (80k training steps) +# 400k-st → checkpoint-6250 (400k training steps) +# 800k-st → checkpoint-12500 (800k training steps) +# +# All from: NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354 +# 
+# GPU assignment (NVILA ~8GB each): +# GPU 0: nvila_st/80k-st +# GPU 1: nvila_st/400k-st +# GPU 2: nvila_st/800k-st + +declare -a MODEL_TYPES=("nvila_st" "nvila_st" "nvila_st") +declare -a SCALES=( "80k-st" "400k-st" "800k-st") +declare -a GPUS=( 0 1 2) + +echo "=========================================" +echo " NVILA-ST: Launching 3 checkpoints in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + mtype="${MODEL_TYPES[$i]}" + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${mtype}_${scale}_stdout.log" + + echo "[GPU $gpu] ${mtype}/${scale} -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $mtype \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + --skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + label="${MODEL_TYPES[$i]}/${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $label (PID $pid) - SUCCESS" + else + echo "[FAIL] $label (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED process(es) failed. 
Check logs in $STDOUT_LOG_DIR" +fi + +echo "" +echo "ALL DONE (inference only)" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/nvila_st_*/" +echo "" +echo "To merge with nvila baseline (vanilla/80k/400k/800k/2m), run:" +echo " conda run --no-capture-output -n vila python $SCRIPT \\" +echo " --model_type nvila_st_compare --merge --group-name nvila_st_compare \\" +echo " --question-type $QUESTION_TYPE" +echo "" +echo " (Cross-alignment plots auto-included if all scales have Phase B data.)" +echo " (Run Phase B later by re-running without --skip-phase-b.)" diff --git a/swap_analysis_updated/run_nvila_synthetic_mix.sh b/swap_analysis_updated/run_nvila_synthetic_mix.sh new file mode 100644 index 0000000000000000000000000000000000000000..9a5b36217dacb27fb26c45f8663219ba0c6d8fd2 --- /dev/null +++ b/swap_analysis_updated/run_nvila_synthetic_mix.sh @@ -0,0 +1,78 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="conda run --no-capture-output -n vila python" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# 6 models to run for nvila_synth_compare: +# vanilla / 80k / 400k → model_type=nvila (fine-tuned baselines) +# 80k-5pct / 80k-10pct / 400k-5pct → model_type=nvila_synthetic (synthetic-mix models) +# +# GPU assignment (NVILA ~8GB each): +# GPU 0: nvila/vanilla GPU 1: nvila/80k +# GPU 2: nvila_synthetic/80k-5pct GPU 3: nvila_synthetic/80k-10pct +# GPU 4: nvila/400k GPU 5: nvila_synthetic/400k-5pct + +declare -a MODEL_TYPES=("nvila" "nvila" "nvila_synthetic" "nvila_synthetic" "nvila" "nvila_synthetic" "nvila" "nvila_synthetic") +declare -a SCALES=( "vanilla" "80k" "80k-5pct" "80k-10pct" "400k" "400k-5pct" "800k" "800k-5pct") +declare -a GPUS=( 0 1 2 3 4 5 6 7) + +echo 
"=========================================" +echo " NVILA-Synthetic Mix: Launching models in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + mtype="${MODEL_TYPES[$i]}" + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${mtype}_${scale}_stdout.log" + + echo "[GPU $gpu] ${mtype}/${scale} -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $mtype \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + --skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + label="${MODEL_TYPES[$i]}/${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $label (PID $pid) - SUCCESS" + else + echo "[FAIL] $label (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED process(es) failed. Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " NVILA-Synthetic Mix: Merge (vanilla / 80k / 80k-5pct / 80k-10pct / 400k / 400k-5pct / 800k / 800k-5pct)" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type nvila_synth_compare \ + --merge --group-name nvila_synth_compare \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/nvila_synth_compare_merge_stdout.log" + +echo "" +echo "ALL DONE" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/{nvila,nvila_synthetic}_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/nvila_synth_compare/" diff --git a/swap_analysis_updated/run_qwen.sh b/swap_analysis_updated/run_qwen.sh new file mode 100644 index 0000000000000000000000000000000000000000..6997eb8b5199a49a766fab8d4e8318c3076a96ac --- /dev/null +++ b/swap_analysis_updated/run_qwen.sh @@ -0,0 +1,71 @@ +#!/bin/bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 
+SCRIPT="$SCRIPT_DIR/swap_analysis.py" +PYTHON="/usr/bin/python3" +MODEL="qwen" +QUESTION_TYPE="short_answer" # change to mcq for MCQ A/B format + +# Logs managed by swap_analysis.py → {SCRIPT_DIR}/{QUESTION_TYPE}/logs/{vlm_key}.log +STDOUT_LOG_DIR="$SCRIPT_DIR/$QUESTION_TYPE/logs" +mkdir -p "$STDOUT_LOG_DIR" + +# GPU plan: Qwen ~10GB each +# GPU 5: vanilla GPU 6: 80k + 400k GPU 7: 800k + 2m +SCALES=("vanilla" "80k" "400k" "800k" "2m") +GPUS=(5 6 6 7 7) + +echo "=========================================" +echo " Qwen Swap Analysis: Launching ${#SCALES[@]} scales in parallel" +echo "=========================================" + +PIDS=() +for i in "${!SCALES[@]}"; do + scale="${SCALES[$i]}" + gpu="${GPUS[$i]}" + log="${STDOUT_LOG_DIR}/${MODEL}_${scale}_stdout.log" + + echo "[GPU $gpu] $MODEL/$scale -> $log" + CUDA_VISIBLE_DEVICES=$gpu $PYTHON "$SCRIPT" \ + --model_type $MODEL \ + --scales $scale \ + --device cuda \ + --no-auto-roborefer \ + --question-type $QUESTION_TYPE \ + --skip-phase-b \ + > "$log" 2>&1 & + PIDS+=($!) +done + +echo "" +echo "Waiting for all ${#PIDS[@]} processes..." +FAILED=0 +for i in "${!PIDS[@]}"; do + pid="${PIDS[$i]}" + scale="${SCALES[$i]}" + if wait $pid; then + echo "[DONE] $MODEL/$scale (PID $pid) - SUCCESS" + else + echo "[FAIL] $MODEL/$scale (PID $pid) - EXIT CODE $?" + FAILED=$((FAILED + 1)) + fi +done + +if [ $FAILED -gt 0 ]; then + echo "WARNING: $FAILED scale(s) failed. 
Check logs in $STDOUT_LOG_DIR" +fi + +echo "=========================================" +echo " Qwen Swap Analysis: Running merge" +echo "=========================================" +$PYTHON "$SCRIPT" --model_type $MODEL \ + --scales vanilla 80k 400k 800k 2m \ + --merge --group-name qwen \ + --question-type $QUESTION_TYPE \ + 2>&1 | tee "${STDOUT_LOG_DIR}/qwen_merge_stdout.log" + +echo "" +echo "ALL DONE: $MODEL" +echo "Results: $SCRIPT_DIR/$QUESTION_TYPE/saved_data/qwen_*/" +echo "Compare: $SCRIPT_DIR/$QUESTION_TYPE/compare/qwen/" diff --git a/swap_analysis_updated/swap_analysis.py b/swap_analysis_updated/swap_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..d7c62161dc5539e1ab5d6731cdd3e1058ad933ae --- /dev/null +++ b/swap_analysis_updated/swap_analysis.py @@ -0,0 +1,3783 @@ +#!/usr/bin/env python3 +""" +Swap Analysis: Minimal Pair Probing for Spatial Representations + +Creates minimal pairs by swapping obj1<->obj2 in spatial questions: + Original: "Is A to the left or right of B?" -> left + Swapped: "Is B to the left or right of A?" 
-> right + +Supported model types +--------------------- + Legacy (Qwen2.5-VL-3B scale experiments): + molmo | nvila | qwen + Synthetic-MCQ-trained NVILA: + nvila_st : NVILA trained with MCQ synthetic mix (80k-st / 400k-st / 800k-st) + New large models: + molmo_big : Molmo2-8B + qwen_big : Qwen3-VL-32B-Instruct + qwen_super : Qwen3-VL-235B-A22B-Instruct + big_trio : Molmo2-8B + RoboRefer + Qwen3-VL-32B + Merge-only (--merge required): + molmo_all : molmo (vanilla→2m) + molmo_big (molmo2) + qwen_all : qwen (vanilla→2m) + qwen_big (qwen3_32b) + nvila_st_compare : nvila (vanilla→2m) + nvila_st (80k-st / 400k-st / 800k-st) + +Usage examples +-------------- + # Legacy model (Qwen2.5-VL-3B scale) + python swap_analysis.py --model_type qwen + + # New large model (Qwen3-VL-32B) + conda run -n qwen3 python swap_analysis.py --model_type qwen_big + + # Cross-family merge (combine qwen + qwen_big results) + conda run -n qwen3 python swap_analysis.py --model_type qwen_all --merge + +Analyses: + 1. Difference vectors: delta = feature(swapped) - feature(original) + 2. Within-category delta consistency (do all left->right swaps point same direction?) + 3. Sign-corrected group consistency (align opposite categories by flipping) + 4. Cross-group delta alignment (delta_vertical vs delta_distance) for perspective bias + 5. Delta-based 6x6 similarity heatmap (mean delta per category as representation) + 6. Prediction stats visualization (bar chart + cross-scale trajectory) + 7. Both-correct filtering for delta analysis + 8. PCA visualization of per-sample embeddings + 9. Scaling effects on all of the above + +Fixes applied: + Fix 1: "Answer with only one word." 
appended to all prompts
+    Fix 2: Synonym handling (under/beneath->below, near/nearby->close, distant->far)
+    Fix 4: Cross-group quads index matching via string normalization
+    Fix 5: Within-category + sign-corrected delta consistency (replaces wrong group-level)
+    Fix 6: Prediction stats bar chart + cross-scale line plot
+    Fix 7: Delta-based 6x6 heatmap and trajectory
+    Fix 8: Category validity check + both-correct delta filtering
+"""
+
+import os
+import sys
+import json
+import argparse
+import base64
+import logging
+import random
+import re
+from io import BytesIO
+from collections import defaultdict
+from typing import Dict, List, Tuple, Optional, Any
+from abc import ABC, abstractmethod
+
+import torch
+import numpy as np
+import pandas as pd
+from PIL import Image
+from tqdm import tqdm
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
+import seaborn as sns
+from sklearn.metrics.pairwise import cosine_similarity
+from sklearn.decomposition import PCA
+
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+logger = logging.getLogger(__name__)
+
+_HERE = os.path.dirname(os.path.abspath(__file__))
+
+# ── Local HuggingFace cache helpers ──────────────────────────────────────────
+
+HF_HUB_DIR = '/data/shared/Qwen/mydisk/huggingface/hub'
+
+
+def resolve_local_path(model_path: str) -> str:
+    """Return local snapshot path for a HF model ID if cached, else return the ID unchanged."""
+    if os.path.isabs(model_path):
+        return model_path
+    cache_name = 'models--' + model_path.replace('/', '--')
+    snapshots_dir = os.path.join(HF_HUB_DIR, cache_name, 'snapshots')
+    if os.path.isdir(snapshots_dir):
+        snapshots = sorted(os.listdir(snapshots_dir))
+        if snapshots:
+            local_path = os.path.join(snapshots_dir, snapshots[-1])
+            logger.info(f"Local cache found: {model_path} → {local_path}")
+            return local_path
+    logger.warning(
+        f"Model not found in local
cache: '{model_path}'\n"
+        f"  Expected at: {snapshots_dir}\n"
+        f"  Will fall back to online HuggingFace Hub download.\n"
+        f"  To cache locally first: python -c \"from huggingface_hub import snapshot_download; "
+        f"snapshot_download('{model_path}', cache_dir='{HF_HUB_DIR}')\""
+    )
+    return model_path
+
+
+def _setup_file_logging(name: str, log_dir: str) -> str:
+    """Attach a named FileHandler to the root logger.
+
+    Writes to {log_dir}/{name}.log (append mode).
+    Returns the log file path.
+    """
+    os.makedirs(log_dir, exist_ok=True)
+    log_path = os.path.join(log_dir, f'{name}.log')
+    fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
+    fh.setLevel(logging.INFO)
+    fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+    logging.getLogger().addHandler(fh)
+    return log_path
+
+
+def get_model_key(model_type: str, scale: str) -> str:
+    """Return VLM key for (model_type, scale). E.g. nvila_80k, nvila_synthetic_80k-5pct."""
+    return f"{model_type}_{scale}"
+
+
+# ============================================================================
+# Constants
+# ============================================================================
+
+CATEGORY_ORDER = ['left', 'right', 'above', 'below', 'far', 'close']
+
+OPPOSITE_MAP = {
+    'left': 'right', 'right': 'left',
+    'above': 'below', 'below': 'above',
+    'under': 'above',  # tolerates the raw TSV 'under' label (load_swap_pairs normalizes it to 'below')
+    'far': 'close', 'close': 'far',
+}
+
+# Opposite map for short-answer mode ('under' is normalized to 'below' before
+# lookup, so only the canonical 'above'/'below' pair is needed here)
+SHORT_OPPOSITE_MAP = {
+    'left': 'right', 'right': 'left',
+    'above': 'below', 'below': 'above',
+    'far': 'close', 'close': 'far',
+}
+
+GROUP_MAP = {
+    'left': 'horizontal', 'right': 'horizontal',
+    'above': 'vertical', 'below': 'vertical',
+    'far': 'distance', 'close': 'distance',
+}
+
+GROUP_ORDER = ['horizontal', 'vertical', 'distance']
+
+# Fix 5: Canonical categories for sign-corrected consistency
+CANONICAL_CATEGORIES = {
+    'horizontal': 'left',
+    'vertical':
'above', + 'distance': 'far', +} + +# Fix 2: Synonyms for answer matching +# 'below' is now primary; 'under'/'beneath' recognized as synonyms +SYNONYMS = { + 'below': ['under', 'beneath'], + 'close': ['near', 'nearby'], + 'far': ['distant'], +} + +# ── MCQ question templates (option order alternated per pair for A/B bias control) ── +_Q_TAIL_MCQ = "Answer with a single letter A or B." +MCQ_TEMPLATES = { + 'horizontal': { + 'left_first': "Is the {obj1} to the left or right of the {obj2}? (A) left (B) right " + _Q_TAIL_MCQ, + 'right_first': "Is the {obj1} to the left or right of the {obj2}? (A) right (B) left " + _Q_TAIL_MCQ, + }, + 'vertical': { + 'above_first': "Is the {obj1} above or below the {obj2}? (A) above (B) below " + _Q_TAIL_MCQ, + 'below_first': "Is the {obj1} above or below the {obj2}? (A) below (B) above " + _Q_TAIL_MCQ, + }, + 'distance': { + 'far_first': "Compared to {ref}, is {subj} far or close from you? (A) far (B) close " + _Q_TAIL_MCQ, + 'close_first': "Compared to {ref}, is {subj} far or close from you? 
(A) close (B) far " + _Q_TAIL_MCQ, + }, +} +MCQ_LETTER = { + 'horizontal': { + 'left_first': {'left': 'a', 'right': 'b'}, + 'right_first': {'left': 'b', 'right': 'a'}, + }, + 'vertical': { + 'above_first': {'above': 'a', 'below': 'b'}, + 'below_first': {'above': 'b', 'below': 'a'}, + }, + 'distance': { + 'far_first': {'far': 'a', 'close': 'b'}, + 'close_first': {'far': 'b', 'close': 'a'}, + }, +} + +SCALE_COLORS = { + 'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c', + '800k': '#d62728', '2m': '#9467bd', 'roborefer':'#8c564b', + # New large models + 'molmo2': '#17becf', # cyan + 'qwen3_32b': '#bcbd22', # yellow-green + 'qwen3_235b': '#e377c2', # pink + # Synthetic-mix NVILA at 80k scale (shades of teal, light→dark by mix ratio) + '80k-5pct': '#b2dfdb', # very light teal + '80k-10pct': '#00b894', # teal + '80k-20pct': '#00897b', # darker teal + '80k-30pct': '#004d40', # deep teal + # Synthetic-mix NVILA at 400k scale + '400k-5pct': '#66bb6a', # light green (near 400k's #2ca02c) + # Synthetic-mix NVILA at 800k scale + '800k-5pct': '#ef9a9a', # light red (near 800k's #d62728) + # MCQ-synthetic-trained NVILA (darker shade of the matching base scale) + '80k-st': '#b85a00', # dark orange (darker than 80k #ff7f0e) + '400k-st': '#1a6b1a', # dark green (darker than 400k #2ca02c) + '800k-st': '#911b1b', # dark red (darker than 800k #d62728) +} + +# Canonical scale ordering used by accuracy/ylim plots (add new scales here to control x-axis) +SCALE_ORDER = [ + 'vanilla', '80k', '80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '80k-st', + '400k', '400k-5pct', '400k-st', '800k', '800k-5pct', '800k-st', '2m', 'roborefer', + 'molmo2', 'qwen3_32b', 'qwen3_235b', +] + +# Human-readable legend labels (only entries that differ from the key are needed) +SCALE_DISPLAY_NAMES = { + '80k-5pct': '80k 5%', + '80k-10pct': '80k 10%', + '80k-20pct': '80k 20%', + '80k-30pct': '80k 30%', + '400k-5pct': '400k 5%', + '800k-5pct': '800k 5%', + '80k-st': '80k ST', + '400k-st': '400k ST', + 
'800k-st': '800k ST', +} +# Category colors aligned with group: horizontal=orange, vertical=green, distance=purple +CAT_COLORS = { + 'left': '#ff7f0e', 'right': '#ffbb78', # horizontal → orange + 'above': '#2ca02c', 'below': '#98df8a', # vertical → green + 'far': '#9467bd', 'close': '#c5b0d5', # distance → purple +} +GROUP_COLORS = { + 'horizontal': '#ff7f0e', + 'vertical': '#2ca02c', + 'distance': '#9467bd', +} + +# Short-answer (non-MCQ) question templates +SHORT_TEMPLATES = { + 'horizontal': "Is the {obj1} to the left or right of the {obj2}? Answer with only one word.", + 'vertical': "Is the {obj1} above or below the {obj2}? Answer with only one word.", + 'distance': "Compared to {ref}, is {subj} far or close from you? Answer with only one word.", +} + +MODEL_CONFIGS = { + 'molmo': { + 'vanilla': 'allenai/Molmo-7B-O-0924', + '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_80k/unshared', + '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_400k/unshared', + '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_800k/unshared', + '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_2m/unshared', + }, + 'nvila': { + 'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B', + '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221', + '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221', + '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221', + '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632', + # '80k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-1250', + # '400k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-6250', + # '800k': 
'/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-12500', + # '2m': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-31250', + 'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model', + 'roborefer_depth': '/data/shared/Qwen/mydisk/RoboRefer_depth_model', # fill in actual path + }, + 'qwen': { + 'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct', + '80k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221', + '400k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221', + '800k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221', + '2m': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517', + }, + # NVILA trained with MCQ synthetic data mix (checkpoints at 80k/400k/800k steps) + 'nvila_st': { + '80k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-1250', + '400k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-6250', + '800k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-12500', + }, + # NVILA trained with synthetic data mixed in at different ratios + 'nvila_synthetic': { + '80k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-1250', + '80k-10pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_10PCT_80K-20260224_234537', + '80k-20pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_20PCT_80K-20260224_232347', + '80k-30pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_30PCT_80K-20260224_232347', + 
'400k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-6250', + '800k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-12500' + }, +} + +# ── New large / cross-family models ────────────────────────────────────────── +# Each scale maps to (ExtractorClassName, HF-model-ID-or-absolute-path). +# resolve_local_path() converts HF IDs to local snapshot dirs when cached. +MODEL_CONFIGS_NEW = { + 'molmo_big': { + 'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'), + }, + 'qwen_big': { + 'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'), + }, + 'qwen_super': { + 'qwen3_235b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-235B-A22B-Instruct'), + }, + 'big_trio': { + 'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'), + 'roborefer': ('RoboReferExtractor', '/data/shared/Qwen/mydisk/RoboRefer_model'), + 'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'), + }, +} + +# ── Merge-only: combine existing per-scale data from multiple source dirs ───── +MERGE_ONLY_CONFIGS = { + 'molmo_all': { + 'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'molmo2'], + 'scale_sources': { + 'vanilla': 'molmo', '80k': 'molmo', '400k': 'molmo', + '800k': 'molmo', '2m': 'molmo', 'molmo2': 'molmo_big', + }, + 'required_dirs': ['molmo', 'molmo_big'], + }, + 'qwen_all': { + 'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'qwen3_32b'], + 'scale_sources': { + 'vanilla': 'qwen', '80k': 'qwen', '400k': 'qwen', + '800k': 'qwen', '2m': 'qwen', 'qwen3_32b': 'qwen_big', + }, + 'required_dirs': ['qwen', 'qwen_big'], + }, + # Compare NVILA baselines against MCQ-synthetic-trained checkpoints + 'nvila_st_compare': { + 'scale_order': ['vanilla', '80k', '80k-st', '400k', '400k-st', '800k', '800k-st', '2m'], + 'scale_sources': { + 'vanilla': 'nvila', + '80k': 'nvila', '80k-st': 'nvila_st', + '400k': 'nvila', '400k-st': 'nvila_st', + '800k': 'nvila', '800k-st': 
'nvila_st',
            '2m': 'nvila',
        },
        'required_dirs': ['nvila', 'nvila_st'],
    },
    # Compare NVILA baselines against synthetic-mix checkpoints
    'nvila_synth_compare': {
        'scale_order': ['vanilla', '80k', '80k-5pct', '80k-10pct', '400k', '400k-5pct', '800k', '800k-5pct'],
        'scale_sources': {
            'vanilla': 'nvila',
            '80k': 'nvila',
            '80k-5pct': 'nvila_synthetic',
            '80k-10pct': 'nvila_synthetic',
            '400k': 'nvila',
            '400k-5pct': 'nvila_synthetic',
            '800k': 'nvila',
            '800k-5pct': 'nvila_synthetic'
        },
        'required_dirs': ['nvila', 'nvila_synthetic'],
    },
}

# Default scale run order for new runnable types
SCALE_ORDERS_NEW = {
    'molmo_big': ['molmo2'],
    'qwen_big': ['qwen3_32b'],
    'qwen_super': ['qwen3_235b'],
    'big_trio': ['molmo2', 'roborefer', 'qwen3_32b'],
}

# Every value accepted by --model_type: runnable configs plus merge-only groups.
ALL_MODEL_TYPES = (
    list(MODEL_CONFIGS.keys())
    + list(MODEL_CONFIGS_NEW.keys())
    + list(MERGE_ONLY_CONFIGS.keys())
)


# ============================================================================
# Data Loading & Swap Pair Creation
# ============================================================================

# Regexes that pull the two object mentions out of a benchmark question.
# The groups are non-greedy, so they stop at the first ' and ' / keyword —
# NOTE(review): an object name that itself contains ' and ' would split
# incorrectly; confirm the benchmark phrasing never does this.
OBJECT_PATTERNS = [
    re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
    re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE),
    re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE),
    re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE) if False else re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
]


def extract_objects(question: str) -> Tuple[str, str]:
    """Return (obj1, obj2) extracted from a spatial question.

    Tries each OBJECT_PATTERNS regex in order; the first match wins.

    Raises:
        ValueError: if no pattern matches the question.
    """
    for pattern in OBJECT_PATTERNS:
        m = pattern.search(question)
        if m:
            return m.group(1).strip(), m.group(2).strip()
    raise ValueError(f"Could not extract objects from: {question}")


def decode_base64_image(base64_str: str) -> Image.Image:
    """Decode a base64-encoded image payload into an RGB PIL image."""
    image_data = base64.b64decode(base64_str)
    return Image.open(BytesIO(image_data)).convert('RGB')


# ============================================================================
# Answer Matching (Fix 2: synonym support)
# ============================================================================

def find_earliest_position(text: str, word: str) -> int:
    """Return the earliest index of ``word`` or any of its SYNONYMS in ``text``.

    Returns -1 when neither the word nor a synonym occurs.
    """
    positions = []
    pos = text.find(word)
    if pos != -1:
        positions.append(pos)
    for syn in SYNONYMS.get(word, []):
        pos = text.find(syn)
        if pos != -1:
            positions.append(pos)
    return min(positions) if positions else -1


def check_answer(generated_text: str, expected_category: str, mcq_map: dict = None) -> bool:
    """Decide whether a generated answer matches the expected category.

    Decision order:
      1. Empty/whitespace output -> False.
      2. With ``mcq_map``: a standalone letter response ("a", "a.", "a)", "a,")
         is matched directly against the expected/opposite option letters.
      3. Otherwise the expected and opposite words (plus their SYNONYMS, and
         the inline "(a)"/"(b)" pattern when ``mcq_map`` is given) are searched
         in the text; whichever occurs earliest wins.

    Args:
        generated_text: Raw model output.
        expected_category: Ground-truth category (e.g. 'left', 'far').
        mcq_map: Optional mapping category -> option letter for MCQ prompts.

    Returns:
        True when the expected answer appears before its opposite.
    """
    if not generated_text or not generated_text.strip():
        return False
    text = generated_text.strip().lower()
    expected = expected_category.lower()
    opposite = OPPOSITE_MAP[expected]

    if mcq_map:
        exp_letter = mcq_map.get(expected)
        opp_letter = mcq_map.get(opposite)
        # Standalone letter response (e.g. "A", "A.", "A)", "B")
        if exp_letter and text in (exp_letter, exp_letter + '.', exp_letter + ')', exp_letter + ','):
            return True
        if opp_letter and text in (opp_letter, opp_letter + '.', opp_letter + ')', opp_letter + ','):
            return False
    else:
        exp_letter = opp_letter = None

    # MCQ inline pattern "(a)"/"(b)" — variant-aware
    mcq_exp = f'({exp_letter})' if exp_letter else None
    mcq_opp = f'({opp_letter})' if opp_letter else None

    def earliest_with_mcq(word, mcq_pat=None):
        # Delegate the word/synonym scan to find_earliest_position instead of
        # duplicating it, then additionally consider the "(letter)" pattern.
        positions = []
        pos = find_earliest_position(text, word)
        if pos != -1:
            positions.append(pos)
        if mcq_pat:
            pos = text.find(mcq_pat)
            if pos != -1:
                positions.append(pos)
        return min(positions) if positions else -1

    pos_exp = earliest_with_mcq(expected, mcq_exp)
    pos_opp = earliest_with_mcq(opposite, mcq_opp)
    if pos_exp == -1:
        return False
    if pos_opp == -1:
        return True
    return pos_exp < pos_opp


# ============================================================================
# Swap Pair Loading (Fix 1: prompt
suffix)
+# ============================================================================
+
+def load_swap_pairs(tsv_path: str, seed: int = 42, filter_unknown: bool = True,
+                    question_type: str = 'mcq') -> List[dict]:
+    """Load EmbSpatialBench TSV and create swap pairs for all samples.
+
+    Args:
+        filter_unknown: If True (default), skip far/close pairs where target_object
+            is Unknown/empty, and remove Unknown/empty values from reference_object
+            candidates before sampling. Pairs with no valid candidates are dropped.
+            Use --no-filtering to disable.
+        question_type: 'short_answer' uses the original "Answer with only one word." format;
+            'mcq' (default) uses MCQ A/B templates with letter answers.
+    """
+    rng = random.Random(seed)
+    df = pd.read_csv(tsv_path, sep='\t')
+
+    pairs = []
+    stats = defaultdict(lambda: {'total': 0, 'success': 0})
+
+    def _valid_obj(v):
+        return bool(v) and str(v).strip().lower() not in ('unknown', 'n/a', '')
+
+    for _, row in df.iterrows():
+        category = row['category']
+        stats[category]['total'] += 1
+
+        try:
+            if category in ['left', 'right', 'above', 'under', 'below']:
+                obj1, obj2 = extract_objects(row['question'])
+                if category in ['left', 'right']:
+                    grp = 'horizontal'
+                else:
+                    grp = 'vertical'
+
+                if question_type == 'short_answer':
+                    # Single-word format; normalize 'under' → 'below'
+                    if category == 'under':
+                        category = 'below'
+                    tmpl = SHORT_TEMPLATES[grp]
+                    pair = {
+                        'index': row['index'],
+                        'question_id': str(row['question_id']),
+                        'image_base64': row['image'],
+                        'original_question': tmpl.format(obj1=obj1, obj2=obj2),
+                        'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
+                        'original_answer': category,
+                        'swapped_answer': SHORT_OPPOSITE_MAP[category],
+                        'group': grp,
+                        'category': category,
+                        'obj1': obj1, 'obj2': obj2,
+                        'mcq_map': None,
+                    }
+                else:
+                    # MCQ format; normalize 'under' → 'below'
+                    if category == 'under':
+                        category = 'below'
+                    variant = ('left_first' if grp == 'horizontal' else 'above_first') \
+                        if len(pairs) %
2 == 0 else \ + ('right_first' if grp == 'horizontal' else 'below_first') + tmpl = MCQ_TEMPLATES[grp][variant] + mcq_map = MCQ_LETTER[grp][variant] + pair = { + 'index': row['index'], + 'question_id': str(row['question_id']), + 'image_base64': row['image'], + 'original_question': tmpl.format(obj1=obj1, obj2=obj2), + 'swapped_question': tmpl.format(obj1=obj2, obj2=obj1), + 'original_answer': category, + 'swapped_answer': OPPOSITE_MAP[category], + 'group': GROUP_MAP[category], + 'category': category, + 'obj1': obj1, 'obj2': obj2, + 'mcq_map': mcq_map, + } + + elif category in ['far', 'close']: + answer_key = row['answer'] + options = {k: row[k] for k in ['A', 'B', 'C', 'D']} + target_object = options[answer_key] + candidates = [v for k, v in options.items() if k != answer_key] + + if filter_unknown: + if not _valid_obj(target_object): + continue + candidates = [v for v in candidates if _valid_obj(v)] + if not candidates: + continue + + reference_object = rng.choice(candidates) + + if question_type == 'short_answer': + tmpl = SHORT_TEMPLATES['distance'] + pair = { + 'index': row['index'], + 'question_id': str(row['question_id']), + 'image_base64': row['image'], + 'original_question': tmpl.format(ref=reference_object, subj=target_object), + 'swapped_question': tmpl.format(ref=target_object, subj=reference_object), + 'original_answer': category, + 'swapped_answer': OPPOSITE_MAP[category], + 'group': 'distance', + 'category': category, + 'target_object': target_object, + 'reference_object': reference_object, + 'mcq_map': None, + } + else: + variant = 'far_first' if len(pairs) % 2 == 0 else 'close_first' + tmpl = MCQ_TEMPLATES['distance'][variant] + mcq_map = MCQ_LETTER['distance'][variant] + pair = { + 'index': row['index'], + 'question_id': str(row['question_id']), + 'image_base64': row['image'], + 'original_question': tmpl.format(ref=reference_object, subj=target_object), + 'swapped_question': tmpl.format(ref=target_object, subj=reference_object), + 'original_answer': 
category, + 'swapped_answer': OPPOSITE_MAP[category], + 'group': 'distance', + 'category': category, + 'target_object': target_object, + 'reference_object': reference_object, + 'mcq_map': mcq_map, + } + else: + continue + + pairs.append(pair) + stats[category]['success'] += 1 + + except Exception as e: + logger.warning(f"Failed to create swap pair for index {row['index']}: {e}") + continue + + logger.info("Swap pair creation stats:") + for cat in CATEGORY_ORDER: + s = stats[cat] + logger.info(f" {cat}: {s['success']}/{s['total']}") + logger.info(f" Total pairs: {len(pairs)}") + + return pairs + + +# ============================================================================ +# HF Bbox Cache (Fix 4: string-normalized keys) +# ============================================================================ + +def build_hf_bbox_cache(hf_dataset_name: str = 'FlagEval/EmbSpatial-Bench') -> Dict[str, dict]: + """Load HF dataset and build bbox lookup cache keyed by string-normalized question_id.""" + from datasets import load_dataset + logger.info(f"Loading HF dataset: {hf_dataset_name}") + ds = load_dataset(hf_dataset_name, split='test') + + cache = {} + for item in ds: + # Fix 4: Normalize key to string for consistent matching + qid = str(item['question_id']) + cache[qid] = { + 'objects': item['objects'], + 'relation': item['relation'], + 'data_source': item['data_source'], + 'answer': item['answer'], + 'answer_options': item['answer_options'], + } + + # Fix 4: Log sample keys for debugging + sample_keys = list(cache.keys())[:5] + logger.info(f"Built bbox cache: {len(cache)} entries (sample keys: {sample_keys})") + return cache + + +def get_bbox_center_y(bbox: list) -> float: + return bbox[1] + bbox[3] / 2 + + +def create_cross_group_quads( + swap_pairs: List[dict], + hf_cache: Dict[str, dict], + threshold_ratio: float = 0.05, + question_type: str = 'mcq', +) -> List[dict]: + """For far/close swap pairs, create additional vertical queries using bbox.""" + IMAGE_HEIGHTS = 
{'ai2thor': 300, 'mp3d': 480, 'scannet': 968} + + quads = [] + stats = {'total': 0, 'matched': 0, 'ambiguous': 0, 'no_bbox': 0} + + distance_pairs = [p for p in swap_pairs if p['group'] == 'distance'] + + # Fix 4: Use question_id (e.g. 'mp3d_0') to match HF dataset, not integer index + n_matched_keys = sum(1 for p in distance_pairs if p['question_id'] in hf_cache) + logger.info(f"Matched {n_matched_keys}/{len(distance_pairs)} question_ids between TSV and HF dataset") + + for pair in distance_pairs: + stats['total'] += 1 + qid = pair['question_id'] + + if qid not in hf_cache: + stats['no_bbox'] += 1 + continue + + hf_item = hf_cache[qid] + names = hf_item['objects']['name'] + bboxes = hf_item['objects']['bbox'] + + target = pair['target_object'] + reference = pair['reference_object'] + + target_bbox_y, ref_bbox_y = None, None + for name, bbox in zip(names, bboxes): + if name == target: + target_bbox_y = get_bbox_center_y(bbox) + if name == reference: + ref_bbox_y = get_bbox_center_y(bbox) + + if target_bbox_y is None or ref_bbox_y is None: + stats['no_bbox'] += 1 + continue + + image_height = IMAGE_HEIGHTS.get(hf_item['data_source'], 480) + threshold = image_height * threshold_ratio + y_diff = target_bbox_y - ref_bbox_y + + if abs(y_diff) < threshold: + stats['ambiguous'] += 1 + continue + + if target_bbox_y < ref_bbox_y: + vert_original_answer = 'above' + else: + vert_original_answer = 'below' + + if question_type == 'short_answer': + vert_tmpl = SHORT_TEMPLATES['vertical'] + vert_mcq_map = None + vert_original_q = vert_tmpl.format(obj1=target, obj2=reference) + vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target) + vert_swapped_answer = SHORT_OPPOSITE_MAP[vert_original_answer] + else: + vert_variant = 'above_first' if len(quads) % 2 == 0 else 'below_first' + vert_tmpl = MCQ_TEMPLATES['vertical'][vert_variant] + vert_mcq_map = MCQ_LETTER['vertical'][vert_variant] + vert_original_q = vert_tmpl.format(obj1=target, obj2=reference) + vert_swapped_q = 
vert_tmpl.format(obj1=reference, obj2=target) + vert_swapped_answer = OPPOSITE_MAP[vert_original_answer] + + quad = { + 'index': pair['index'], + 'image_base64': pair['image_base64'], + 'dist_original_q': pair['original_question'], + 'dist_swapped_q': pair['swapped_question'], + 'dist_original_answer': pair['original_answer'], + 'dist_swapped_answer': pair['swapped_answer'], + 'dist_mcq_map': pair['mcq_map'], + 'vert_original_q': vert_original_q, + 'vert_swapped_q': vert_swapped_q, + 'vert_original_answer': vert_original_answer, + 'vert_swapped_answer': vert_swapped_answer, + 'vert_mcq_map': vert_mcq_map, + 'target_object': target, + 'reference_object': reference, + 'target_bbox_y': target_bbox_y, + 'ref_bbox_y': ref_bbox_y, + 'y_diff': y_diff, + 'data_source': hf_item['data_source'], + } + quads.append(quad) + stats['matched'] += 1 + + logger.info(f"Cross-group quads: {stats['matched']}/{stats['total']} " + f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})") + return quads + + +# ============================================================================ +# Base Extractor +# ============================================================================ + +class BaseHiddenStateExtractor(ABC): + def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None): + self.model_path = model_path + self.device = device + self.hidden_states = {} + self.hooks = [] + self._load_model() + num_layers = self._get_num_layers() + if target_layers is None: + self.target_layers = list(range(num_layers)) + logger.info(f"Model has {num_layers} layers. 
Extracting ALL.") + else: + self.target_layers = target_layers + self._register_hooks() + + def _register_hooks(self): + for layer_idx in self.target_layers: + module = self._get_layer_module(layer_idx) + if module is not None: + hook = module.register_forward_hook(self._make_hook(layer_idx)) + self.hooks.append(hook) + + def _make_hook(self, layer_idx: int): + def hook_fn(module, input, output): + if isinstance(output, tuple): + hidden = output[0] + else: + hidden = output + if hidden.shape[1] > 1: # prefill only + last_token = hidden[:, -1, :].detach().cpu().float() + self.hidden_states[layer_idx] = last_token.squeeze(0) + return hook_fn + + @abstractmethod + def _load_model(self): pass + @abstractmethod + def _get_num_layers(self) -> int: pass + @abstractmethod + def _get_layer_module(self, layer_idx: int): pass + @abstractmethod + def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass + + def cleanup(self): + for hook in self.hooks: + hook.remove() + self.hooks = [] + if hasattr(self, 'model'): + del self.model + if hasattr(self, 'processor'): + del self.processor + torch.cuda.empty_cache() + + +# ============================================================================ +# Molmo Extractor +# ============================================================================ + +class MolmoExtractor(BaseHiddenStateExtractor): + def _load_model(self): + config_path = os.path.join(self.model_path, "config.yaml") + checkpoint_path = os.path.join(self.model_path, "model.pt") + if os.path.exists(config_path) and os.path.exists(checkpoint_path): + self._load_native_model() + self.is_native = True + else: + self._load_hf_model() + self.is_native = False + + def _load_native_model(self): + from olmo.config import ModelConfig + from olmo.model import Molmo as NativeMolmoModel + from olmo.data.model_preprocessor import MultiModalPreprocessor + from olmo.data.data_formatter import DataFormatter + + _original_load = 
torch.load + def _unsafe_load_wrapper(*args, **kwargs): + if 'weights_only' not in kwargs: + kwargs['weights_only'] = False + return _original_load(*args, **kwargs) + torch.load = _unsafe_load_wrapper + + cfg = ModelConfig.load( + os.path.join(self.model_path, "config.yaml"), + key="model", validate_paths=False + ) + cfg.init_device = "cpu" + self.model = NativeMolmoModel(cfg) + state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu") + self.model.load_state_dict(state_dict) + self.model = self.model.to(self.device, dtype=torch.bfloat16).eval() + self.tokenizer = cfg.get_tokenizer() + + v_cfg = cfg.vision_backbone + h, w = cfg.llm_patches_per_crop() + image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None) + + class SafeDataFormatter(DataFormatter): + def get_system_prompt(self, style, for_inference, messages, rng=None): + if style is None: + style = "User" + return super().get_system_prompt(style, for_inference, messages, rng) + + self.formatter = SafeDataFormatter( + prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting, + system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space, + default_inference_len=cfg.default_inference_len + ) + self.preprocessor = MultiModalPreprocessor( + tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type), + crop_mode=cfg.crop_mode, max_crops=cfg.max_crops, + overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode, + use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size, + image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h, + image_token_length_w=w, image_token_length_h=h, + image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask, + pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting, + ) + logger.info(f"Loaded native Molmo from {self.model_path}") + + def _load_hf_model(self): + from transformers 
import AutoModelForCausalLM, AutoProcessor + self.model = AutoModelForCausalLM.from_pretrained( + self.model_path, torch_dtype=torch.bfloat16, + trust_remote_code=True, device_map=self.device + ).eval() + self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) + logger.info(f"Loaded HF Molmo from {self.model_path}") + + def _get_num_layers(self) -> int: + if self.is_native: + return len(self.model.transformer.blocks) + if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'): + return len(self.model.model.transformer.blocks) + return 32 + + def _get_layer_module(self, layer_idx: int): + if self.is_native: + return self.model.transformer.blocks[layer_idx] + return self.model.model.transformer.blocks[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + if self.is_native: + example = {"messages": [question], "image": image} + messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random) + batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True) + if 'input_ids' not in batch and 'input_tokens' in batch: + batch['input_ids'] = batch['input_tokens'] + + def to_t(x): + return torch.from_numpy(x) if isinstance(x, np.ndarray) else x + + input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long() + images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16) + image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16) + image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device) + + with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): + gen = self.model.generate( + input_ids=input_ids, images=images_t, + image_masks=image_masks, image_input_idx=image_input_idx, + max_steps=20, beam_size=1, + ) + generated_ids = gen.token_ids[0, 0] + answer = self.tokenizer.decode(generated_ids.tolist()).strip() + for eos in 
['<|endoftext|>', '', '<|end|>']: + answer = answer.replace(eos, '').strip() + else: + from transformers import GenerationConfig + inputs = self.processor.process(images=[image], text=question) + processed = {} + for k, v in inputs.items(): + v = v.to(self.device).unsqueeze(0) + if v.dtype == torch.float32: + v = v.to(dtype=torch.bfloat16) + processed[k] = v + with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16): + output = self.model.generate_from_batch( + processed, + GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"), + tokenizer=self.processor.tokenizer, + ) + input_len = processed['input_ids'].shape[1] + answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip() + + return self.hidden_states.copy(), answer + + +# ============================================================================ +# NVILA Extractor +# ============================================================================ + +class NVILAExtractor(BaseHiddenStateExtractor): + def _load_model(self): + original_sys_path = sys.path.copy() + sys.path = [p for p in sys.path if 'RoboRefer' not in p] + modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] + removed = {m: sys.modules.pop(m) for m in modules_to_remove} + try: + import llava + from llava.media import Image as LLaVAImage + from llava import conversation as clib + except Exception as err: + sys.path = original_sys_path + for m, mod in removed.items(): + sys.modules[m] = mod + raise RuntimeError(f"Failed to import llava: {err}") + sys.path = original_sys_path + self.LLaVAImage = LLaVAImage + self.clib = clib + self.model = llava.load(self.model_path, model_base=None) + self._find_llm_backbone() + logger.info(f"Loaded NVILA from {self.model_path}") + + def _find_llm_backbone(self): + candidates = [] + if hasattr(self.model, 'llm'): + if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'): + 
candidates.append(self.model.llm.model.layers) + if hasattr(self.model.llm, 'layers'): + candidates.append(self.model.llm.layers) + if hasattr(self.model, 'model'): + if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'): + candidates.append(self.model.model.model.layers) + if hasattr(self.model.model, 'layers'): + candidates.append(self.model.model.layers) + for name, module in self.model.named_modules(): + if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0: + candidates.append(module) + if candidates: + self.llm_backbone = candidates[0] + else: + raise ValueError("Could not locate transformer layers in NVILA model") + + def _get_num_layers(self) -> int: + return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24 + + def _get_layer_module(self, layer_idx: int): + return self.llm_backbone[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + import tempfile + with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f: + temp_path = f.name + image.save(temp_path) + try: + prompt = [self.LLaVAImage(temp_path), question] + from transformers import GenerationConfig + response = self.model.generate_content( + prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False) + ) + finally: + os.unlink(temp_path) + answer = str(response[0] if isinstance(response, list) else response).strip() + return self.hidden_states.copy(), answer + + +class RoboReferExtractor(NVILAExtractor): + ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer' + + def _load_model(self): + original_sys_path = sys.path.copy() + if self.ROBOREFER_PATH not in sys.path: + sys.path.insert(0, self.ROBOREFER_PATH) + modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] + removed = {m: sys.modules.pop(m) for m in modules_to_remove} + try: + import llava + from llava.media import Image as LLaVAImage + from llava import conversation as clib + except Exception 
as err: + sys.path = original_sys_path + for m, mod in removed.items(): + sys.modules[m] = mod + raise RuntimeError(f"Failed to import RoboRefer llava: {err}") + sys.path = original_sys_path + self.LLaVAImage = LLaVAImage + self.clib = clib + self.model = llava.load(self.model_path, model_base=None) + self._find_llm_backbone() + logger.info(f"Loaded RoboRefer from {self.model_path}") + + +class RoboReferDepthExtractor(RoboReferExtractor): + """RoboRefer with depth-image input instead of RGB. + + Usage: pass the depth PIL image to extract_and_predict() instead of the RGB image. + In practice this means loading depth images in load_swap_pairs() / extract_swap_features() + rather than changing anything here. If the depth image is stored as a separate column in + the dataset, add a 'depth_image_base64' key to the pair dict and decode it before calling + run_single_query(). + + TODO: confirm depth image path / format with the actual dataset layout. + """ + # Inherits extract_and_predict() from NVILAExtractor (via RoboReferExtractor) unchanged. + # The caller is responsible for passing the correct (depth) PIL Image. 


# ============================================================================
# Qwen2.5-VL Extractor
# ============================================================================

class Qwen25VLExtractor(BaseHiddenStateExtractor):
    # Processor fallback for local fine-tuned checkpoints (see _load_model).
    BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct"

    def _load_model(self):
        """Load Qwen2.5-VL weights and processor.

        device_map placement is attempted first; on ImportError (presumably
        accelerate missing — TODO confirm) falls back to a plain .to(device).
        Local checkpoints (absolute paths) reuse the base model's processor.
        """
        from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
        try:
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path, torch_dtype=torch.bfloat16, device_map=self.device
            )
        except ImportError:
            self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
                self.model_path, torch_dtype=torch.bfloat16
            ).to(self.device)
        self.model.eval()
        if self.model_path.startswith('/'):
            # Local fine-tuned checkpoint: load the base processor instead.
            self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL)
        else:
            self.processor = AutoProcessor.from_pretrained(self.model_path)
        logger.info(f"Loaded Qwen2.5-VL from {self.model_path}")

    def _get_num_layers(self) -> int:
        return len(self.model.model.layers)

    def _get_layer_module(self, layer_idx: int):
        return self.model.model.layers[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one chat-formatted query; hooks fill self.hidden_states during prefill."""
        self.hidden_states = {}
        messages = [{"role": "user", "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": question}
        ]}]
        text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        from qwen_vl_utils import process_vision_info
        image_inputs, video_inputs = process_vision_info(messages)
        inputs = self.processor(
            text=[text], images=image_inputs, videos=video_inputs,
            padding=True, return_tensors="pt"
        ).to(self.device)
        with torch.no_grad():
            output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
        # Decode only the newly generated tokens (strip the prompt prefix).
        input_len = inputs['input_ids'].shape[1]
        answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip()
        return self.hidden_states.copy(), answer


# 
============================================================================ +# New Extractors: Molmo2-8B and Qwen3-VL family +# ============================================================================ + +class Molmo2Extractor(BaseHiddenStateExtractor): + """Extractor for allenai/Molmo2-8B (AutoModelForImageTextToText, messages-dict input).""" + + def _load_model(self): + from transformers import AutoProcessor, AutoModelForImageTextToText + self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) + self.model = AutoModelForImageTextToText.from_pretrained( + self.model_path, trust_remote_code=True, torch_dtype='auto', device_map='auto', + ).eval() + self._find_llm_layers() + logger.info(f"Loaded Molmo2 from {self.model_path}") + + def _find_llm_layers(self): + candidates = [ + ['model', 'layers'], + ['language_model', 'model', 'layers'], + ['model', 'model', 'layers'], + ] + for path in candidates: + obj = self.model + for attr in path: + obj = getattr(obj, attr, None) + if obj is None: + break + if obj is not None and hasattr(obj, '__len__') and len(obj) > 0: + self.llm_layers = obj + logger.info(f"Molmo2: layers at '{'.'.join(path)}', count={len(obj)}") + return + best, best_len = None, 0 + for name, module in self.model.named_modules(): + if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len: + best, best_len = module, len(module) + logger.info(f"Molmo2: layers via scan at '{name}', count={best_len}") + if best is not None: + self.llm_layers = best + return + raise ValueError("Could not find transformer layers in Molmo2 model") + + def _get_num_layers(self) -> int: + return len(self.llm_layers) + + def _get_layer_module(self, layer_idx: int): + return self.llm_layers[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + messages = [{"role": "user", "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": question}, + ]}] + inputs = 
self.processor.apply_chat_template( + messages, tokenize=True, add_generation_prompt=True, + return_tensors="pt", return_dict=True, + ) + inputs = {k: v.to(self.model.device) for k, v in inputs.items()} + with torch.inference_mode(): + generated_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) + input_len = inputs['input_ids'].shape[1] + answer = self.processor.tokenizer.decode( + generated_ids[0, input_len:], skip_special_tokens=True).strip() + return self.hidden_states.copy(), answer + + +class Qwen3VLExtractor(BaseHiddenStateExtractor): + """Extractor for Qwen3-VL family (32B dense, 235B MoE). + + Key differences from Qwen25VLExtractor: + - AutoModelForImageTextToText + trust_remote_code=True + - process_vision_info requires image_patch_size=16 + - processor call requires do_resize=False + - 32×32 px patches → different min/max_pixels + """ + + MIN_PIXELS = 256 * 32 * 32 # 262,144 (mp3d/scannet → natural res; ai2thor → ~256 tokens) + MAX_PIXELS = 16384 * 32 * 32 # 16,777,216 + + def _load_model(self): + from transformers import AutoProcessor, AutoModelForImageTextToText + self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) + self.model = AutoModelForImageTextToText.from_pretrained( + self.model_path, trust_remote_code=True, torch_dtype='auto', + device_map='auto', attn_implementation='flash_attention_2', + ).eval() + self._find_llm_layers() + logger.info(f"Loaded Qwen3-VL from {self.model_path}") + + def _find_llm_layers(self): + candidates = [ + ['model', 'language_model', 'model', 'layers'], # Qwen3-VL expected + ['language_model', 'model', 'layers'], + ['model', 'model', 'layers'], + ['model', 'layers'], + ] + for path in candidates: + obj = self.model + for attr in path: + obj = getattr(obj, attr, None) + if obj is None: + break + if obj is not None and hasattr(obj, '__len__') and len(obj) > 0: + self.llm_layers = obj + logger.info(f"Qwen3-VL: layers at '{'.'.join(path)}', count={len(obj)}") + return 
+ best, best_len = None, 0 + for name, module in self.model.named_modules(): + if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len: + best, best_len = module, len(module) + logger.info(f"Qwen3-VL: layers via scan at '{name}', count={best_len}") + if best is not None: + self.llm_layers = best + return + raise ValueError("Could not find transformer layers in Qwen3-VL model") + + def _get_num_layers(self) -> int: + return len(self.llm_layers) + + def _get_layer_module(self, layer_idx: int): + return self.llm_layers[layer_idx] + + def extract_and_predict(self, image, question): + self.hidden_states = {} + messages = [{"role": "user", "content": [ + {"type": "image", "image": image, + "min_pixels": self.MIN_PIXELS, "max_pixels": self.MAX_PIXELS}, + {"type": "text", "text": question}, + ]}] + text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + from qwen_vl_utils import process_vision_info + images, videos, _ = process_vision_info( + messages, image_patch_size=16, return_video_kwargs=True, return_video_metadata=True, + ) + inputs = self.processor( + text=text, images=images, videos=videos, do_resize=False, return_tensors="pt", + ).to(self.model.device) + with torch.no_grad(): + output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) + input_len = inputs['input_ids'].shape[1] + answer = self.processor.tokenizer.decode( + output_ids[0, input_len:], skip_special_tokens=True).strip() + return self.hidden_states.copy(), answer + + +EXTRACTOR_CLASSES = { + 'MolmoExtractor': MolmoExtractor, + 'NVILAExtractor': NVILAExtractor, + 'RoboReferExtractor': RoboReferExtractor, + 'Qwen25VLExtractor': Qwen25VLExtractor, + 'Molmo2Extractor': Molmo2Extractor, + 'Qwen3VLExtractor': Qwen3VLExtractor, +} + + +def get_extractor(model_type: str, model_path: str = None, scale: str = None, **kwargs): + """Create an extractor for any model_type (legacy or new-large).""" + # New large models: 
(ExtractorClass, path) tuples in MODEL_CONFIGS_NEW
    if model_type in MODEL_CONFIGS_NEW:
        cls_name, raw_path = MODEL_CONFIGS_NEW[model_type][scale]
        resolved = resolve_local_path(raw_path)
        logger.info(f"Creating {cls_name} for scale='{scale}' from {resolved}")
        return EXTRACTOR_CLASSES[cls_name](resolved, **kwargs)
    # Legacy models
    if model_type == 'nvila' and scale == 'roborefer':
        return RoboReferExtractor(model_path, **kwargs)
    if model_type == 'nvila' and scale == 'roborefer_depth':
        return RoboReferDepthExtractor(model_path, **kwargs)
    # Remaining legacy types map 1:1 onto extractor classes; an unknown
    # model_type raises KeyError here.
    legacy = {
        'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor,
        'nvila_synthetic': NVILAExtractor, 'nvila_st': NVILAExtractor,
    }
    return legacy[model_type](model_path, **kwargs)


# ============================================================================
# Feature Extraction Pipeline
# ============================================================================

def run_single_query(extractor, image, question):
    """Run one (image, question) pass through the extractor.

    Returns:
        result: {layer_idx: flat float numpy array} for target layers that
                produced a non-empty hidden state.
        predicted: the model's decoded answer string.
    """
    hidden_states, predicted = extractor.extract_and_predict(image, question)
    result = {}
    for layer_idx in extractor.target_layers:
        if layer_idx in hidden_states:
            state = hidden_states[layer_idx].numpy().flatten()
            if state.size > 0:
                result[layer_idx] = state
    return result, predicted


def extract_swap_features(
    extractor: BaseHiddenStateExtractor,
    swap_pairs: List[dict],
    max_samples_per_category: int = 0,
) -> List[dict]:
    """Extract features for all swap pairs.

    When max_samples_per_category > 0, each category is down-sampled to that
    many pairs with a fixed seed (42) for reproducibility.
    """
    rng = random.Random(42)

    if max_samples_per_category > 0:
        grouped = defaultdict(list)
        for p in swap_pairs:
            grouped[p['category']].append(p)
        limited = []
        for cat in CATEGORY_ORDER:
            samples = grouped[cat]
            if len(samples) > max_samples_per_category:
                samples = rng.sample(samples, max_samples_per_category)
            limited.extend(samples)
        swap_pairs = limited

    records = []
    for pair in tqdm(swap_pairs, desc="Swap pairs"):
        try:
            image = 
decode_base64_image(pair['image_base64']) + hs_orig, pred_orig = run_single_query(extractor, image, pair['original_question']) + hs_swap, pred_swap = run_single_query(extractor, image, pair['swapped_question']) + + is_correct_orig = check_answer(pred_orig, pair['original_answer'], pair['mcq_map']) + is_correct_swap = check_answer(pred_swap, pair['swapped_answer'], pair['mcq_map']) + + delta = {} + for layer_idx in extractor.target_layers: + if layer_idx in hs_orig and layer_idx in hs_swap: + delta[layer_idx] = hs_swap[layer_idx] - hs_orig[layer_idx] + + record = { + 'index': pair['index'], + 'group': pair['group'], + 'category': pair['category'], + 'original_answer': pair['original_answer'], + 'swapped_answer': pair['swapped_answer'], + 'pred_orig': pred_orig, + 'pred_swap': pred_swap, + 'is_correct_orig': is_correct_orig, + 'is_correct_swap': is_correct_swap, + 'hs_orig': hs_orig, + 'hs_swap': hs_swap, + 'delta': delta, + } + records.append(record) + + mark_o = "O" if is_correct_orig else "X" + mark_s = "O" if is_correct_swap else "X" + logger.info(f" #{pair['index']:<6} {pair['category']:<6} " + f"orig[{mark_o}]=\"{pred_orig[:40]}\" swap[{mark_s}]=\"{pred_swap[:40]}\"" + + (f" [{len(records)}/{len(swap_pairs)}]" if len(records) % 50 == 0 else "")) + + except Exception as e: + logger.warning(f"Error on index {pair['index']}: {e}") + continue + + logger.info(f"Extracted {len(records)} swap pair records") + + # Fix 8: Per-category accuracy logging + for cat in CATEGORY_ORDER: + cat_recs = [r for r in records if r['category'] == cat] + n = len(cat_recs) + if n == 0: + continue + c_orig = sum(1 for r in cat_recs if r['is_correct_orig']) + c_swap = sum(1 for r in cat_recs if r['is_correct_swap']) + c_both = sum(1 for r in cat_recs if r['is_correct_orig'] and r['is_correct_swap']) + logger.info(f" {cat:>6s} (n={n}): acc_orig={c_orig/n:.1%}, acc_swap={c_swap/n:.1%}, " + f"acc_both={c_both/n:.1%}") + + return records + + +def extract_cross_group_features( + extractor: 
BaseHiddenStateExtractor, + quads: List[dict], +) -> List[dict]: + """Extract features for cross-group quads (4 forward passes each).""" + records = [] + for quad in tqdm(quads, desc="Cross-group quads"): + try: + image = decode_base64_image(quad['image_base64']) + hs_d_orig, pred_d_orig = run_single_query(extractor, image, quad['dist_original_q']) + hs_d_swap, pred_d_swap = run_single_query(extractor, image, quad['dist_swapped_q']) + hs_v_orig, pred_v_orig = run_single_query(extractor, image, quad['vert_original_q']) + hs_v_swap, pred_v_swap = run_single_query(extractor, image, quad['vert_swapped_q']) + + delta_dist, delta_vert = {}, {} + for layer_idx in extractor.target_layers: + if layer_idx in hs_d_orig and layer_idx in hs_d_swap: + delta_dist[layer_idx] = hs_d_swap[layer_idx] - hs_d_orig[layer_idx] + if layer_idx in hs_v_orig and layer_idx in hs_v_swap: + delta_vert[layer_idx] = hs_v_swap[layer_idx] - hs_v_orig[layer_idx] + + record = { + 'index': quad['index'], + 'delta_dist': delta_dist, + 'delta_vert': delta_vert, + 'pred_d_orig': pred_d_orig, 'pred_d_swap': pred_d_swap, + 'pred_v_orig': pred_v_orig, 'pred_v_swap': pred_v_swap, + 'is_correct_d_orig': check_answer(pred_d_orig, quad['dist_original_answer'], quad['dist_mcq_map']), + 'is_correct_d_swap': check_answer(pred_d_swap, quad['dist_swapped_answer'], quad['dist_mcq_map']), + 'is_correct_v_orig': check_answer(pred_v_orig, quad['vert_original_answer'], quad['vert_mcq_map']), + 'is_correct_v_swap': check_answer(pred_v_swap, quad['vert_swapped_answer'], quad['vert_mcq_map']), + 'data_source': quad['data_source'], + } + records.append(record) + + tqdm.write(f" #{quad['index']:<6} dist=[{pred_d_orig[:20]}/{pred_d_swap[:20]}] " + f"vert=[{pred_v_orig[:20]}/{pred_v_swap[:20]}]") + + except Exception as e: + logger.warning(f"Error on cross-group index {quad['index']}: {e}") + continue + + logger.info(f"Extracted {len(records)} cross-group quad records") + return records + + +# 
============================================================================
# Analysis Functions
# ============================================================================

# Fix 5: Within-category + sign-corrected delta consistency

def compute_delta_consistency(records: List[dict], target_layers: List[int]):
    """Compute TWO types of delta consistency.

    (a) within-category: pairwise cosine similarity of delta vectors among
        samples of the same category.
    (b) sign-corrected: per group, deltas of the opposite category are negated
        so both directions align with the canonical one before comparison.

    Returns:
        within_cat_results: {(category, layer) -> {mean, std, n}}
        sign_corrected_results: {(group, layer) -> {mean, std, n}}
    """
    within_cat_results = {}
    sign_corrected_results = {}

    for group in GROUP_ORDER:
        canonical = CANONICAL_CATEGORIES[group]
        opposite = OPPOSITE_MAP[canonical]
        group_recs = [r for r in records if r['group'] == group]

        for layer in target_layers:
            # (a) Within-category consistency
            for cat in [canonical, opposite]:
                cat_deltas = [r['delta'][layer] for r in group_recs
                              if r['category'] == cat and layer in r['delta']]
                if len(cat_deltas) >= 2:
                    arr = np.array(cat_deltas)
                    sim = cosine_similarity(arr)
                    # Mean/std over the strict upper triangle (unique pairs).
                    upper = sim[np.triu_indices(len(cat_deltas), k=1)]
                    within_cat_results[(cat, layer)] = {
                        'mean': float(np.mean(upper)),
                        'std': float(np.std(upper)),
                        'n': len(cat_deltas),
                    }

            # (b) Sign-corrected group consistency
            all_deltas = []
            for r in group_recs:
                if layer not in r['delta']:
                    continue
                d = r['delta'][layer]
                if r['category'] == opposite:
                    d = -d  # flip to align with canonical direction
                all_deltas.append(d)

            if len(all_deltas) >= 2:
                arr = np.array(all_deltas)
                sim = cosine_similarity(arr)
                upper = sim[np.triu_indices(len(all_deltas), k=1)]
                sign_corrected_results[(group, layer)] = {
                    'mean': float(np.mean(upper)),
                    'std': float(np.std(upper)),
                    'n': len(all_deltas),
                }

    return within_cat_results, sign_corrected_results


# Fix 7: Delta-based similarity matrix

def compute_delta_similarity_matrix(records: List[dict], layer: int) -> Optional[pd.DataFrame]:
    """Compute 6x6 cosine similarity using mean delta per 
category."""
    # Mean delta vector per category at this layer.
    cat_deltas = {}
    for cat in CATEGORY_ORDER:
        deltas = [r['delta'][layer] for r in records if r['category'] == cat and layer in r['delta']]
        if deltas:
            cat_deltas[cat] = np.mean(deltas, axis=0)

    available = [c for c in CATEGORY_ORDER if c in cat_deltas]
    if len(available) < 2:
        return None

    vectors = np.array([cat_deltas[c] for c in available])
    sim = cosine_similarity(vectors)
    return pd.DataFrame(sim, index=available, columns=available)


# ============================================================
# CHANGE 1: Add this function right after compute_delta_similarity_matrix()
# (around line ~820, after the "Fix 7: Delta-based similarity matrix" block)
# ============================================================

def compute_delta_norm_per_category(records: List[dict], layer: int) -> Optional[pd.DataFrame]:
    """Compute mean delta vector norm per category at a given layer.

    Returns a single-column DataFrame with index = category label, column = 'norm'.
    Format matches what summarize_metrics_updated.py expects:
        delta_norm_{scale}_L{layer}_all_pairs.csv
        ,norm
        left,12.34
        right,11.89
        above,9.45
        under,9.12
        far,7.23
        close,7.58

    Returns None if no records have delta vectors at this layer.
    """
    rows = {}
    for cat in CATEGORY_ORDER:
        deltas = [r['delta'][layer] for r in records
                  if r['category'] == cat and layer in r['delta']]
        if deltas:
            # Average the L2 norms of individual deltas (not the norm of the mean).
            norms = [float(np.linalg.norm(d)) for d in deltas]
            rows[cat] = float(np.mean(norms))

    if not rows:
        return None

    df = pd.DataFrame.from_dict(rows, orient='index', columns=['norm'])
    # Reorder to canonical category order (skip any that are missing)
    ordered = [c for c in CATEGORY_ORDER if c in df.index]
    return df.loc[ordered]


# Fix 8: Both-correct filtering

def filter_both_correct(records: List[dict]) -> List[dict]:
    """Filter to pairs where both orig and swap predictions are correct."""
    return [r for r in records if r['is_correct_orig'] and r['is_correct_swap']]


# Fix 8: Category validity check

def check_category_validity(records: List[dict], scale: str) -> Dict[str, dict]:
    """Check per-category accuracy and flag unreliable categories.

    A category is 'reliable' when both orig and swap accuracy are >= 50%.
    """
    validity = {}
    for cat in CATEGORY_ORDER:
        cat_recs = [r for r in records if r['category'] == cat]
        n = len(cat_recs)
        if n == 0:
            validity[cat] = {'n': 0, 'acc_orig': 0, 'acc_swap': 0, 'reliable': False}
            continue
        acc_orig = sum(1 for r in cat_recs if r['is_correct_orig']) / n
        acc_swap = sum(1 for r in cat_recs if r['is_correct_swap']) / n
        reliable = acc_orig >= 0.5 and acc_swap >= 0.5
        validity[cat] = {
            'n': n, 'acc_orig': acc_orig, 'acc_swap': acc_swap,
            'reliable': reliable,
        }
        if not reliable:
            logger.warning(f" [!] 
Category '{cat}' unreliable at scale={scale}: "
                           f"acc_orig={acc_orig:.1%}, acc_swap={acc_swap:.1%}")
    return validity


def compute_cross_group_alignment(quad_records: List[dict], target_layers: List[int]) -> dict:
    """Per-layer cosine alignment between vertical-swap and distance-swap deltas.

    For each layer, computes per-sample cosine(delta_vert, delta_dist), the
    alignment of the mean delta vectors, and a permutation baseline (100
    shuffles of the distance deltas, seeded with 42).
    """
    results = {}
    for layer in target_layers:
        per_sample = []
        delta_verts, delta_dists = [], []

        for rec in quad_records:
            if layer in rec['delta_vert'] and layer in rec['delta_dist']:
                dv = rec['delta_vert'][layer]
                dd = rec['delta_dist'][layer]
                norm_v, norm_d = np.linalg.norm(dv), np.linalg.norm(dd)
                # Skip near-zero vectors to avoid dividing by ~0.
                if norm_v > 1e-10 and norm_d > 1e-10:
                    per_sample.append(float(np.dot(dv, dd) / (norm_v * norm_d)))
                    delta_verts.append(dv)
                    delta_dists.append(dd)

        if not per_sample:
            continue

        mean_dv = np.mean(delta_verts, axis=0)
        mean_dd = np.mean(delta_dists, axis=0)
        norm_mv, norm_md = np.linalg.norm(mean_dv), np.linalg.norm(mean_dd)
        mean_alignment = float(np.dot(mean_dv, mean_dd) / (norm_mv * norm_md + 1e-10))

        # Permutation baseline: shuffle dist deltas against vert deltas.
        rng = np.random.RandomState(42)
        perm_alignments = []
        for _ in range(100):
            shuffled_dd = [delta_dists[i] for i in rng.permutation(len(delta_dists))]
            perm_cos = []
            for dv, dd in zip(delta_verts, shuffled_dd):
                nv, nd = np.linalg.norm(dv), np.linalg.norm(dd)
                if nv > 1e-10 and nd > 1e-10:
                    perm_cos.append(np.dot(dv, dd) / (nv * nd))
            perm_alignments.append(np.mean(perm_cos))

        results[layer] = {
            'per_sample_mean': float(np.mean(per_sample)),
            'per_sample_std': float(np.std(per_sample)),
            'mean_delta_alignment': mean_alignment,
            'permutation_mean': float(np.mean(perm_alignments)),
            'permutation_std': float(np.std(perm_alignments)),
            'n_samples': len(per_sample),
        }
    return results


def compute_prediction_stats(records: List[dict], scale: str) -> dict:
    """Per-group and overall accuracy (orig / swap / both-correct) for one scale."""
    stats = {'scale': scale}
    total_correct_orig, total_correct_swap, total_both, total_n = 0, 0, 0, 0

    for group in GROUP_ORDER:
        group_recs = [r for r in records if r['group'] == group]
        n = len(group_recs)
        c_orig = sum(1 for r in group_recs 
if r['is_correct_orig']) + c_swap = sum(1 for r in group_recs if r['is_correct_swap']) + c_both = sum(1 for r in group_recs if r['is_correct_orig'] and r['is_correct_swap']) + stats[f'{group}_n'] = n + stats[f'{group}_acc_orig'] = c_orig / n if n > 0 else 0 + stats[f'{group}_acc_swap'] = c_swap / n if n > 0 else 0 + stats[f'{group}_acc_both'] = c_both / n if n > 0 else 0 + total_correct_orig += c_orig + total_correct_swap += c_swap + total_both += c_both + total_n += n + + stats['overall_acc_orig'] = total_correct_orig / total_n if total_n > 0 else 0 + stats['overall_acc_swap'] = total_correct_swap / total_n if total_n > 0 else 0 + stats['overall_acc_both'] = total_both / total_n if total_n > 0 else 0 + stats['overall_n'] = total_n + return stats + + +# ============================================================================ +# Saving & Loading +# ============================================================================ + +def get_representative_layers(all_layers, n=5): + if len(all_layers) <= n: + return list(all_layers) + indices = np.linspace(0, len(all_layers) - 1, n, dtype=int) + return [all_layers[i] for i in indices] + + +def save_scale_results( + scale, swap_records, quad_records, + within_cat_consistency, sign_corrected_consistency, + cross_alignment, pred_stats, target_layers, + category_validity, delta_heatmaps, + output_dir, both_correct_tag="all_pairs", + save_alignment=True, + delta_norms=None, # <-- NEW: {layer: pd.DataFrame | None} +): + """Save all per-scale results to disk. + + Args: + save_alignment: If False, skip writing cross_alignment_{scale}.json. + Set False during Phase A save; call save_cross_alignment() + separately after Phase B completes. + """ + csv_dir = os.path.join(output_dir, 'csv') + json_dir = os.path.join(output_dir, 'json') + os.makedirs(csv_dir, exist_ok=True) + os.makedirs(json_dir, exist_ok=True) + + # 1. 
    # 1. Predictions CSV (tagged so all_pairs and both_correct don't overwrite each other)
    pred_rows = []
    for r in swap_records:
        pred_rows.append({
            'index': r['index'], 'group': r['group'], 'category': r['category'],
            'pred_orig': r['pred_orig'], 'pred_swap': r['pred_swap'],
            'is_correct_orig': r['is_correct_orig'], 'is_correct_swap': r['is_correct_swap'],
        })
    pd.DataFrame(pred_rows).to_csv(
        os.path.join(csv_dir, f'predictions_{scale}_{both_correct_tag}.csv'), index=False)

    # 2. Within-category consistency JSON
    # Tuple keys (cat, layer) are flattened to '{cat}_L{layer}' strings for JSON;
    # load_within_cat_consistency() reverses this with rsplit('_L', 1).
    wc_data = {}
    for (cat, layer), vals in within_cat_consistency.items():
        wc_data[f'{cat}_L{layer}'] = vals
    with open(os.path.join(json_dir, f'within_cat_consistency_{scale}_{both_correct_tag}.json'), 'w') as f:
        json.dump(wc_data, f, indent=2)

    # 3. Sign-corrected consistency JSON (same key-flattening scheme as above)
    sc_data = {}
    for (group, layer), vals in sign_corrected_consistency.items():
        sc_data[f'{group}_L{layer}'] = vals
    with open(os.path.join(json_dir, f'sign_corrected_consistency_{scale}_{both_correct_tag}.json'), 'w') as f:
        json.dump(sc_data, f, indent=2)

    # 4. Cross-group alignment JSON (only when save_alignment=True, i.e. after Phase B)
    # Note: this file is NOT tagged with both_correct_tag, so all_pairs/both_correct
    # calls would overwrite it — guarded here by save_alignment.
    if save_alignment:
        alignment_data = {}
        for layer, vals in cross_alignment.items():
            alignment_data[f'L{layer}'] = vals
        with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f:
            json.dump(alignment_data, f, indent=2)

    # 5. Prediction stats JSON
    with open(os.path.join(json_dir, f'pred_stats_{scale}.json'), 'w') as f:
        json.dump(pred_stats, f, indent=2)

    # 6. Category validity JSON (Fix 8)
    with open(os.path.join(json_dir, f'category_validity_{scale}.json'), 'w') as f:
        json.dump(category_validity, f, indent=2)

    # 7. Delta heatmap CSVs (Fix 7); None entries mean no data for that layer
    for layer, df in delta_heatmaps.items():
        if df is not None:
            df.to_csv(os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{both_correct_tag}.csv'))

    # 8. Delta norm CSVs (mean L2 norm of delta vectors per category per layer)
    if delta_norms:
        for layer, df in delta_norms.items():
            if df is not None:
                df.to_csv(os.path.join(csv_dir, f'delta_norm_{scale}_L{layer}_{both_correct_tag}.csv'))

    logger.info(f"Saved results for scale={scale} ({both_correct_tag}) to {output_dir}")


def save_vectors_npz(scale, swap_records, target_layers, output_dir):
    """Save swap-pair vectors with correctness metadata to NPZ (Phase A result).

    This enables post-hoc filtering (both_correct, all_with_validity) from saved data.
    Cross-group vectors are saved separately by save_cross_group_npz() after Phase B.

    Keys written per layer L:
      delta_L{L}, groups_L{L}, categories_L{L}, is_correct_orig_L{L},
      is_correct_swap_L{L}, indices_L{L}, and (when hidden states are present)
      orig_L{L}, swap_L{L}, labels_L{L}.
    """
    rep_layers = list(target_layers)  # save ALL layers (not just 5 representative)
    delta_data = {}
    for layer in rep_layers:
        groups_list, categories_list, vectors = [], [], []
        orig_vecs, swap_vecs, labels = [], [], []
        correct_orig_list, correct_swap_list, indices_list = [], [], []
        for r in swap_records:
            if layer in r['delta']:
                groups_list.append(r['group'])
                categories_list.append(r['category'])
                vectors.append(r['delta'][layer])
                correct_orig_list.append(r['is_correct_orig'])
                correct_swap_list.append(r['is_correct_swap'])
                indices_list.append(r['index'])
                # Raw orig/swap hidden states are optional per record; only kept
                # when both sides exist for this layer.
                if layer in r['hs_orig'] and layer in r['hs_swap']:
                    orig_vecs.append(r['hs_orig'][layer])
                    swap_vecs.append(r['hs_swap'][layer])
                    labels.append(r['category'])
        if vectors:
            delta_data[f'delta_L{layer}'] = np.array(vectors)
            delta_data[f'groups_L{layer}'] = np.array(groups_list)
            delta_data[f'categories_L{layer}'] = np.array(categories_list)
            delta_data[f'is_correct_orig_L{layer}'] = np.array(correct_orig_list)
            delta_data[f'is_correct_swap_L{layer}'] = np.array(correct_swap_list)
            delta_data[f'indices_L{layer}'] = np.array(indices_list)
        if orig_vecs:
            delta_data[f'orig_L{layer}'] = np.array(orig_vecs)
            delta_data[f'swap_L{layer}'] = np.array(swap_vecs)
            delta_data[f'labels_L{layer}'] = np.array(labels)

    npz_dir = os.path.join(output_dir, 'npz')
    os.makedirs(npz_dir, exist_ok=True)
    np.savez_compressed(os.path.join(npz_dir, f'vectors_{scale}.npz'), **delta_data)
    logger.info(f"Saved vectors NPZ with correctness metadata for scale={scale}")


def save_cross_group_npz(scale, quad_records, target_layers, output_dir):
    """Save cross-group delta vectors to NPZ (Phase B result).

    Per layer L, writes delta_vert_L{L} / delta_dist_L{L} — only for records
    that carry BOTH deltas at that layer, so the two arrays stay row-aligned.
    """
    if not quad_records:
        return
    rep_layers = list(target_layers)
    cg_data = {}
    for layer in rep_layers:
        dverts, ddists = [], []
        for rec in quad_records:
            if layer in rec['delta_vert'] and layer in rec['delta_dist']:
                dverts.append(rec['delta_vert'][layer])
                ddists.append(rec['delta_dist'][layer])
        if dverts:
            cg_data[f'delta_vert_L{layer}'] = np.array(dverts)
            cg_data[f'delta_dist_L{layer}'] = np.array(ddists)
    npz_dir = os.path.join(output_dir, 'npz')
    os.makedirs(npz_dir, exist_ok=True)
    np.savez_compressed(os.path.join(npz_dir, f'cross_group_vectors_{scale}.npz'), **cg_data)
    logger.info(f"Saved cross-group vectors NPZ for scale={scale}")


def save_cross_alignment(scale, cross_alignment, output_dir):
    """Save cross-group alignment data to JSON (Phase B result).

    Layer keys become 'L{layer}' strings; load_scale_alignment() reverses this.
    """
    json_dir = os.path.join(output_dir, 'json')
    os.makedirs(json_dir, exist_ok=True)
    alignment_data = {f'L{layer}': vals for layer, vals in cross_alignment.items()}
    with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f:
        json.dump(alignment_data, f, indent=2)
    logger.info(f"Saved cross-alignment JSON for scale={scale}")


def load_scale_consistency(output_dir, scale, tag='all_pairs'):
    """Load sign-corrected consistency.

    Returns {} when the file is missing; otherwise a dict keyed by
    (group, layer) tuples, un-flattening the '{group}_L{layer}' JSON keys.
    rsplit('_L', 1) is used so group names containing '_L' still parse.
    """
    path = os.path.join(output_dir, 'json', f'sign_corrected_consistency_{scale}_{tag}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        raw = json.load(f)
    result = {}
    for key, vals in raw.items():
        parts = key.rsplit('_L', 1)
        if len(parts) == 2:
            result[(parts[0], int(parts[1]))] = vals
    return result
def load_within_cat_consistency(output_dir, scale, tag='all_pairs'):
    """Load within-category consistency JSON back into a (category, layer)-keyed dict.

    Mirrors load_scale_consistency(); returns {} when the file is missing.
    """
    path = os.path.join(output_dir, 'json', f'within_cat_consistency_{scale}_{tag}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        raw = json.load(f)
    result = {}
    for key, vals in raw.items():
        parts = key.rsplit('_L', 1)
        if len(parts) == 2:
            result[(parts[0], int(parts[1]))] = vals
    return result


def load_scale_alignment(output_dir, scale):
    """Load cross-group alignment JSON into an int-layer-keyed dict ({} if absent)."""
    path = os.path.join(output_dir, 'json', f'cross_alignment_{scale}.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        raw = json.load(f)
    result = {}
    for key, vals in raw.items():
        result[int(key.replace('L', ''))] = vals
    return result


def load_delta_heatmaps(output_dir, scale, tag='all_pairs'):
    """Load all per-layer delta-similarity CSVs for a scale into {layer: DataFrame}.

    Layer numbers are parsed out of the filename pattern
    delta_similarity_{scale}_L{layer}_{tag}.csv; unparseable files are skipped.
    """
    import glob as glob_mod
    pattern = os.path.join(output_dir, 'csv', f'delta_similarity_{scale}_L*_{tag}.csv')
    files = glob_mod.glob(pattern)
    result = {}
    for fpath in files:
        basename = os.path.basename(fpath)
        # delta_similarity_{scale}_L{layer}_{tag}.csv
        part = basename.replace(f'delta_similarity_{scale}_L', '').replace(f'_{tag}.csv', '')
        try:
            layer = int(part)
        except ValueError:
            continue
        result[layer] = pd.read_csv(fpath, index_col=0)
    return result


# ============================================================================
# Visualization
# ============================================================================

def plot_within_cat_consistency_trajectory(within_cat, scale, model_type, save_path):
    """Plot within-category delta consistency across layers.

    One line per category in CATEGORY_ORDER; x = layer index, y = per-layer mean.
    """
    fig, ax = plt.subplots(figsize=(12, 6))
    cat_colors = CAT_COLORS
    for cat in CATEGORY_ORDER:
        layers, vals = [], []
        # Sort by layer index so each line is drawn left-to-right.
        for (c, l), v in sorted(within_cat.items(), key=lambda x: x[0][1]):
            if c == cat:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=cat_colors[cat], label=cat, linewidth=2, markersize=3)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Within-Category Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency', fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_sign_corrected_consistency_trajectory(sign_corrected, scale, model_type, save_path):
    """Plot sign-corrected group consistency across layers (one line per group)."""
    fig, ax = plt.subplots(figsize=(12, 6))
    colors = GROUP_COLORS
    for group in GROUP_ORDER:
        layers, vals = [], []
        for (g, l), v in sorted(sign_corrected.items(), key=lambda x: x[0][1]):
            if g == group:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=colors[group], label=group, linewidth=2, markersize=3)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Sign-Corrected Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency', fontweight='bold')
    ax.legend(fontsize=11)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_cross_group_alignment_trajectory(cross_alignment, scale, model_type, save_path):
    """Plot per-layer cross-group alignment against its permutation control.

    Shaded band = permutation mean ± 2 std; values outside it are unlikely
    to arise by chance under the permutation null.
    """
    fig, ax = plt.subplots(figsize=(12, 6))
    layers = sorted(cross_alignment.keys())
    actual = [cross_alignment[l]['per_sample_mean'] for l in layers]
    mean_delta = [cross_alignment[l]['mean_delta_alignment'] for l in layers]
    perm_mean = [cross_alignment[l]['permutation_mean'] for l in layers]
    perm_std = [cross_alignment[l]['permutation_std'] for l in layers]

    ax.plot(layers, actual, '-o', color='#d62728', label='cos(d_vert, d_dist) per-sample mean',
            linewidth=2.5, markersize=3)
    ax.plot(layers, mean_delta, '--s', color='#e377c2', label='cos(mean_d_vert, mean_d_dist)',
            linewidth=1.5, markersize=3)
    ax.plot(layers, perm_mean, ':', color='gray', label='permutation control',
            linewidth=1.5)
    ax.fill_between(layers,
                    [m - 2*s for m, s in zip(perm_mean, perm_std)],
                    [m + 2*s for m, s in zip(perm_mean, perm_std)],
                    alpha=0.2, color='gray')
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Cosine Alignment')
    ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)', fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


# Fix 7: Delta heatmap visualization

def plot_delta_heatmap(sim_df, title, save_path):
    """Plot delta-based similarity heatmap.

    Rows/cols are reordered to CATEGORY_ORDER (restricted to categories present);
    cells are annotated with 4-decimal values on a symmetric [-1, 1] color scale.
    """
    plt.figure(figsize=(10, 8))
    available_order = [c for c in CATEGORY_ORDER if c in sim_df.index]
    sim_df_ordered = sim_df.loc[available_order, available_order]

    annot = sim_df_ordered.round(4).astype(str)
    sns.heatmap(sim_df_ordered, annot=annot, fmt='', cmap='RdBu_r',
                center=0, vmin=-1, vmax=1, square=True, linewidths=0.5,
                cbar_kws={'label': 'Cosine Similarity'})
    plt.title(title, fontsize=14, fontweight='bold')
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved delta heatmap: {save_path}")


# Fix 6: Prediction stats visualization

def plot_pred_stats_bars(all_pred_stats, model_type, save_path):
    """Bar chart: per-group accuracy (orig/swap/both) across scales."""
    fig, axes = plt.subplots(1, len(GROUP_ORDER), figsize=(7 * len(GROUP_ORDER), 6))
    if len(GROUP_ORDER) == 1:
        axes = [axes]  # single subplot returns a bare Axes, not an array

    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)]
    if not available:
        # Fallback: use whatever scales are present (preserves insertion order)
        seen = []
        for d in all_pred_stats:
            if d['scale'] not in seen:
                seen.append(d['scale'])
        available = seen

    for idx, group in enumerate(GROUP_ORDER):
        ax = axes[idx]
        x = np.arange(3)  # orig, swap, both
        width = 0.8 / len(available)
        # One bar cluster per x position, one bar per scale, centered around x.
        for i, scale in enumerate(available):
            entry = next((d for d in all_pred_stats if d['scale'] == scale), None)
            if entry is None:
                continue
            vals = [entry.get(f'{group}_acc_orig', 0),
                    entry.get(f'{group}_acc_swap', 0),
                    entry.get(f'{group}_acc_both', 0)]
            offset = (i - len(available) / 2 + 0.5) * width
            color = SCALE_COLORS.get(scale, 'gray')
            ax.bar(x + offset, vals, width, label=scale, color=color)
        ax.set_xticks(x)
        ax.set_xticklabels(['orig', 'swap', 'both'])
        ax.set_ylabel('Accuracy')
        ax.set_title(group, fontweight='bold')
        ax.legend(fontsize=7)
        ax.set_ylim(0, 1.1)
        ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)  # chance level
        ax.grid(True, alpha=0.3, axis='y')

    fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_pred_stats_trajectory(all_pred_stats, model_type, save_path):
    """Line plot: acc_both trajectory across scales per group."""
    fig, ax = plt.subplots(figsize=(10, 6))
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)]
    if not available:
        # Fallback ordering when no scale matches SCALE_ORDER (insertion order).
        seen = []
        for d in all_pred_stats:
            if d['scale'] not in seen:
                seen.append(d['scale'])
        available = seen
    colors = GROUP_COLORS

    for group in GROUP_ORDER:
        x_vals, y_vals = [], []
        for i, scale in enumerate(available):
            entry = next((d for d in all_pred_stats if d['scale'] == scale), None)
            if entry:
                x_vals.append(i)
                y_vals.append(entry.get(f'{group}_acc_both', 0))
        if x_vals:
            ax.plot(x_vals, y_vals, '-o', color=colors[group], label=group, linewidth=2.5, markersize=6)

    ax.set_xticks(range(len(available)))
    ax.set_xticklabels(available)
    ax.set_xlabel('Scale')
    ax.set_ylabel('Accuracy (both correct)')
    ax.set_title(f'{model_type.upper()} - Both-Correct Accuracy Across Scales', fontweight='bold')
    ax.legend(fontsize=10)
    ax.set_ylim(0, 1.05)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)  # chance level
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_pca_embeddings(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
    """Per-layer 3-panel 2D PCA: orig/swap embeddings, deltas by group, deltas by category.

    bc_only=True restricts all panels to both-correct pairs using the
    is_correct_* arrays stored by save_vectors_npz().
    """
    # NOTE(review): .get() on the loaded NpzFile relies on its mapping interface —
    # confirm the installed numpy version supports NpzFile.get().
    data = np.load(vectors_npz_path, allow_pickle=True)
    layer_keys = [k for k in data.files if k.startswith('orig_L')]
    layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys])

    cat_colors = CAT_COLORS

    for layer in layers:
        orig = data.get(f'orig_L{layer}')
        swap = data.get(f'swap_L{layer}')
        labels = data.get(f'labels_L{layer}')
        deltas = data.get(f'delta_L{layer}')
        cats = data.get(f'categories_L{layer}')
        groups = data.get(f'groups_L{layer}')

        if bc_only and deltas is not None:
            co = data.get(f'is_correct_orig_L{layer}')
            cs = data.get(f'is_correct_swap_L{layer}')
            if co is not None and cs is not None:
                bc_mask = co.astype(bool) & cs.astype(bool)
                # Only apply the mask to arrays whose length matches it;
                # orig/swap may be a subset of the delta rows.
                if orig is not None and len(orig) == len(bc_mask):
                    orig = orig[bc_mask]
                    swap = swap[bc_mask]
                    labels = labels[bc_mask] if labels is not None else None
                if len(deltas) == len(bc_mask):
                    deltas = deltas[bc_mask]
                    cats = cats[bc_mask] if cats is not None else None
                    groups = groups[bc_mask] if groups is not None else None

        if orig is None or swap is None or len(orig) == 0:
            continue

        fig, axes = plt.subplots(1, 3, figsize=(24, 7))

        # Fit a single PCA on orig+swap stacked so both share one projection.
        pca = PCA(n_components=2)
        all_vecs = np.vstack([orig, swap])
        all_pca = pca.fit_transform(all_vecs)
        orig_pca = all_pca[:len(orig)]
        swap_pca = all_pca[len(orig):]

        ax = axes[0]
        for cat in CATEGORY_ORDER:
            mask = np.array([str(l) == cat for l in labels])
            if mask.any():
                ax.scatter(orig_pca[mask, 0], orig_pca[mask, 1],
                           c=cat_colors.get(cat, 'gray'), label=f'{cat} (orig)',
                           alpha=0.5, s=15, marker='o')
                ax.scatter(swap_pca[mask, 0], swap_pca[mask, 1],
                           c=cat_colors.get(cat, 'gray'),
                           alpha=0.5, s=15, marker='x')
        ax.set_title('Embeddings by Category\n(o=orig, x=swap)', fontsize=11)
        ax.legend(fontsize=7, ncol=2)
        ax.grid(True, alpha=0.2)

        ax = axes[1]
        if deltas is not None and cats is not None:
            # Separate PCA for delta vectors; delta_pca is reused by panel 3
            # below (same guarding condition, so it is always defined there).
            pca_d = PCA(n_components=2)
            delta_pca = pca_d.fit_transform(deltas)
            group_colors = GROUP_COLORS
            if groups is not None:
                for group in GROUP_ORDER:
                    mask = np.array([str(g) == group for g in groups])
                    if mask.any():
                        ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
                                   c=group_colors.get(group, 'gray'), label=group, alpha=0.5, s=15)
            ax.set_title('Delta Vectors by Group', fontsize=11)
            ax.legend(fontsize=9)
            ax.grid(True, alpha=0.2)

        ax = axes[2]
        if deltas is not None and cats is not None:
            for cat in CATEGORY_ORDER:
                mask = np.array([str(c) == cat for c in cats])
                if mask.any():
                    ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
                               c=cat_colors.get(cat, 'gray'), label=cat, alpha=0.5, s=15)
            ax.set_title('Delta Vectors by Category', fontsize=11)
            ax.legend(fontsize=8, ncol=2)
            ax.grid(True, alpha=0.2)

        fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - PCA', fontweight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, bbox_inches='tight')
        plt.close()

    logger.info(f"Saved PCA plots to {save_dir}")


def plot_pca_3d(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
    """Generate single-panel 3D PCA figure (Delta Vectors by Category) per layer.

    Figure style follows pca_new.py: larger fonts, single panel, PC3 via fig.text.
    """
    # ── Font / marker sizes (from pca_new.py) ─────────────────────────────────
    _TITLE_FS = 22
    _AXIS_FS = 18
    _TICK_FS = 14
    _LEGEND_FS = 16
    _SUPTITLE_FS = 24
    _SCATTER_S = 30

    # NOTE(review): this maps 'under' -> 'below' before matching against
    # CATEGORY_ORDER, which (per the constants at the top of this file)
    # appears to contain 'under' — if so, 'under' samples would silently get
    # no 3D scatter. Confirm CATEGORY_ORDER actually contains 'below' here.
    def _normalise_label(raw):
        return 'below' if str(raw) == 'under' else str(raw)

    def scatter3d(ax, xs, ys, zs, c, label, alpha=0.55, s=_SCATTER_S, marker='o'):
        ax.scatter(xs, ys, zs, c=c, label=label, alpha=alpha, s=s, marker=marker)

    data = np.load(vectors_npz_path, allow_pickle=True)
    layer_keys = [k for k in data.files if k.startswith('orig_L')]
    layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys])

    if not layers:
        logger.info(f"  [pca_3d] No orig_L* keys found in {vectors_npz_path}")
        return

    os.makedirs(save_dir, exist_ok=True)

    for layer in layers:
        deltas = data.get(f'delta_L{layer}')
        cats = data.get(f'categories_L{layer}')

        # ── Both-correct filtering ────────────────────────────────────────────
        if bc_only and deltas is not None:
            co = data.get(f'is_correct_orig_L{layer}')
            cs = data.get(f'is_correct_swap_L{layer}')
            if co is not None and cs is not None:
                bc_mask = co.astype(bool) & cs.astype(bool)
                if len(deltas) == len(bc_mask):
                    deltas = deltas[bc_mask]
                    cats = cats[bc_mask] if cats is not None else None

        # Need at least 3 samples for a 3-component PCA.
        has_delta = (deltas is not None and len(deltas) >= 3)
        if not has_delta:
            logger.info(f"  [pca_3d] Layer {layer}: no delta vectors, skipping")
            continue

        # ── PCA on delta vectors ──────────────────────────────────────────────
        pca_d = PCA(n_components=3)
        delta_proj = pca_d.fit_transform(deltas)
        ev = pca_d.explained_variance_ratio_

        # ── Figure ────────────────────────────────────────────────────────────
        fig = plt.figure(figsize=(13, 10))
        ax = fig.add_subplot(111, projection='3d')

        if cats is not None:
            for cat in CATEGORY_ORDER:
                mask = np.array([_normalise_label(c) == cat for c in cats])
                if not mask.any():
                    continue
                scatter3d(ax,
                          delta_proj[mask, 0],
                          delta_proj[mask, 1],
                          delta_proj[mask, 2],
                          c=CAT_COLORS.get(cat, 'gray'),
                          label=cat)

        ax.set_title('Delta Vectors by Category', fontsize=_TITLE_FS, pad=12)
        ax.set_xlabel(f'PC1 ({ev[0]:.1%})', fontsize=_AXIS_FS, labelpad=25)
        ax.set_ylabel(f'PC2 ({ev[1]:.1%})', fontsize=_AXIS_FS, labelpad=25)
        ax.set_zlabel('')  # PC3 label is drawn manually via fig.text below
        ax.tick_params(axis='both', labelsize=_TICK_FS)
        ax.legend(fontsize=_LEGEND_FS, ncol=2, loc='upper right')

        # ── Draw to get accurate axes bbox, then place PC3 label ─────────────
        fig.canvas.draw()
        ax_pos = ax.get_position()

        pc3_x = ax_pos.x1 + 0.04
        fig.text(
            pc3_x,
            (ax_pos.y0 + ax_pos.y1) / 2,
            f'PC3 ({ev[2]:.1%})',
            fontsize=_AXIS_FS,
            va='center', ha='center',
            rotation=90,
        )

        # Center the suptitle over the 3D axes rather than the full figure.
        ax_cx = (ax_pos.x0 + ax_pos.x1) / 2
        fig.suptitle(
            f'{model_type.upper()} ({scale}) — L{layer}',
            fontsize=_SUPTITLE_FS, fontweight='bold',
            x=ax_cx, y=1.01,
        )

        out_path = os.path.join(save_dir, f'pca_{scale}_L{layer}.png')
        plt.savefig(out_path, dpi=200, bbox_inches='tight', pad_inches=0.5)
        plt.close()

    logger.info(f"Saved 3D PCA plots to {save_dir}")


# Cross-scale plots

def plot_cross_scale_consistency(all_consistency, model_type, save_path, title_prefix='Sign-Corrected'):
    """One subplot per group; within each, one consistency-vs-layer line per scale."""
    fig, axes = plt.subplots(1, 3, figsize=(21, 6))

    for idx, group in enumerate(GROUP_ORDER):
        ax = axes[idx]
        for scale in SCALE_ORDER:
            if scale not in all_consistency:
                continue
            consistency = all_consistency[scale]
            layers, vals = [], []
            for (g, l), v in sorted(consistency.items(), key=lambda x: x[0][1]):
                if g == group:
                    layers.append(l)
                    vals.append(v['mean'])
            if layers:
                ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
                        label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
        ax.set_xlabel('Layer Index')
        ax.set_ylabel('Consistency')
        ax.set_title(group, fontweight='bold')
        ax.legend(fontsize=9)
        ax.grid(True, alpha=0.3)
    fig.suptitle(f'{model_type.upper()} - {title_prefix} Consistency Across Scales',
                 fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_cross_scale_within_cat_consistency(all_within_cat, model_type, save_path):
    """Cross-scale within-category consistency (2x3 grid, one subplot per category)."""
    fig, axes = plt.subplots(2, 3, figsize=(21, 12))

    for idx, cat in enumerate(CATEGORY_ORDER):
        ax = axes[idx // 3][idx % 3]
        for scale in SCALE_ORDER:
            if scale not in all_within_cat:
                continue
            wc = all_within_cat[scale]
            layers, vals = [], []
            for (c, l), v in sorted(wc.items(), key=lambda x: x[0][1]):
                if c == cat:
                    layers.append(l)
                    vals.append(v['mean'])
            if layers:
                ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
                        label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
        ax.set_xlabel('Layer Index')
        ax.set_ylabel('Consistency')
        ax.set_title(cat, fontweight='bold')
        ax.legend(fontsize=8)
        ax.grid(True, alpha=0.3)

    fig.suptitle(f'{model_type.upper()} - Within-Category Consistency Across Scales',
                 fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_cross_scale_alignment(all_alignment, model_type, save_path):
    """One per-sample-mean alignment-vs-layer line per scale, on a single axis."""
    fig, ax = plt.subplots(figsize=(12, 6))
    for scale in SCALE_ORDER:
        if scale not in all_alignment:
            continue
        alignment = all_alignment[scale]
        layers = sorted(alignment.keys())
        vals = [alignment[l]['per_sample_mean'] for l in layers]
        ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
                label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('cos(d_vert, d_dist)')
    ax.set_title(f'{model_type.upper()} - Cross-Group Alignment Across Scales\n'
                 f'(High=entangled, Low=disentangled)', fontweight='bold')
    ax.legend(fontsize=10)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


# Fix 7: Delta-based trajectory (cross-layer, per-scale)

def plot_delta_trajectory(all_delta_heatmaps, model_type, save_path):
    """Cross-layer trajectory of delta-based similarities for key pairs."""
    # NOTE(review): these lookups use 'below'/'close' row labels — confirm the
    # delta-similarity DataFrames are indexed by those names rather than the
    # 'under' synonym used elsewhere, or those panels stay empty.
    pairs = [
        ('above', 'far', 'above-far'), ('below', 'close', 'below-close'),
        ('left', 'right', 'left-right'),
    ]
    fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6))
    if len(pairs) == 1:
        axes = [axes]

    for idx, (cat1, cat2, label) in enumerate(pairs):
        ax = axes[idx]
        for scale in SCALE_ORDER:
            if scale not in all_delta_heatmaps:
                continue
            hm = all_delta_heatmaps[scale]
            layers = sorted(hm.keys())
            vals = []
            valid_layers = []
            for l in layers:
                df = hm[l]
                if df is not None and cat1 in df.index and cat2 in df.columns:
                    valid_layers.append(l)
                    vals.append(df.loc[cat1, cat2])
            if valid_layers:
                ax.plot(valid_layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
                        label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
        ax.set_xlabel('Layer Index')
        ax.set_ylabel('Delta Cosine Similarity')
        ax.set_title(label, fontweight='bold')
        ax.legend(fontsize=9)
        ax.grid(True, alpha=0.3)
        ax.axhline(y=0, color='gray', linestyle='--', alpha=0.5)

    fig.suptitle(f'{model_type.upper()} - Delta-Based Similarity Trajectory',
                 fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


def plot_summary_barplot(all_consistency, all_alignment, model_type, save_path):
    """Two-panel summary at the deepest layer: consistency per group, alignment per scale."""
    available_scales = [s for s in SCALE_ORDER if s in all_consistency]
    if not available_scales:
        return

    # Deepest layer is inferred from the first available scale's keys.
    sample_cons = all_consistency[available_scales[0]]
    max_layer = max(l for (_, l) in sample_cons.keys())

    fig, axes = plt.subplots(1, 2, figsize=(16, 6))

    ax = axes[0]
    x = np.arange(len(GROUP_ORDER))
    width = 0.8 / len(available_scales)
    for i, scale in enumerate(available_scales):
        cons = all_consistency[scale]
        vals = [cons.get((g, max_layer), {}).get('mean', 0) for g in GROUP_ORDER]
        offset = (i - len(available_scales) / 2 + 0.5) * width
        ax.bar(x + offset, vals, width,
               label=SCALE_DISPLAY_NAMES.get(scale, scale),
               color=SCALE_COLORS.get(scale, 'gray'))
    ax.set_xticks(x)
    ax.set_xticklabels(GROUP_ORDER)
    ax.set_ylabel('Sign-Corrected Consistency')
    ax.set_title(f'Consistency at Layer {max_layer}', fontweight='bold')
    ax.legend(fontsize=8)
    ax.grid(True, alpha=0.3, axis='y')

    ax = axes[1]
    available_align = [s for s in available_scales if s in all_alignment]
    if available_align:
        vals = [all_alignment[s].get(max_layer, {}).get('per_sample_mean', 0) for s in available_align]
        colors = [SCALE_COLORS.get(s, 'gray') for s in available_align]
        ax.bar(range(len(vals)), vals, color=colors)
        ax.set_xticks(range(len(vals)))
        ax.set_xticklabels([SCALE_DISPLAY_NAMES.get(s, s) for s in available_align])
        ax.set_ylabel('cos(d_vert, d_dist)')
        ax.set_title(f'Cross-Group Alignment at L{max_layer}\n(Lower=disentangled)', fontweight='bold')
        ax.grid(True, alpha=0.3, axis='y')

    fig.suptitle(f'{model_type.upper()} - Summary at Deepest Layer', fontsize=15, fontweight='bold', y=1.02)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {save_path}")


# ============================================================================
# Main Pipeline
# ============================================================================

def process_scale(args, scale, swap_pairs, quads):
    """Run the full per-scale pipeline: extract, analyze, save, and plot."""
    # Resolve model path from the correct config dict
    if args.model_type in MODEL_CONFIGS_NEW:
        cls_name, model_path = MODEL_CONFIGS_NEW[args.model_type][scale]
    else:
        model_path = MODEL_CONFIGS[args.model_type][scale]
        cls_name = None

    logger.info(f"\n{'='*60}")
    logger.info(f"Processing {args.model_type} - {scale}" +
                (f" [{cls_name}]" if cls_name else ""))
    logger.info(f"Model path: {model_path}")
    logger.info(f"{'='*60}")

    extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device)
    target_layers = extractor.target_layers

    vlm_key = get_model_key(args.model_type, scale)
    output_dir = os.path.join(args.output_dir, vlm_key)
    plots_dir = os.path.join(output_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    # ── Phase A: Extract swap pair features ───────────────────────────────────
    logger.info("\n--- Phase A: Extracting swap pair features ---")
    swap_records = extract_swap_features(extractor, swap_pairs,
                                         max_samples_per_category=args.max_samples_per_category)

    # ── Phase C_A: Analysis of swap-pair data ─────────────────────────────────
    logger.info("\n--- Phase C_A: Analysis (swap pairs) ---")

    # Fix 8: Category validity check
    category_validity = check_category_validity(swap_records, scale)
    unreliable_cats = [c for c, v in category_validity.items() if not v['reliable']]
    if unreliable_cats:
        logger.warning(f" Unreliable categories: {unreliable_cats}")

    # Fix 5: Two types of consistency (all pairs)
    within_cat_all, sign_corrected_all = compute_delta_consistency(swap_records, target_layers)

    # Fix 8: Both-correct filtered consistency
    both_correct_records = filter_both_correct(swap_records)
    logger.info(f" Both-correct pairs: {len(both_correct_records)}/{len(swap_records)}")
    within_cat_bc, sign_corrected_bc = compute_delta_consistency(both_correct_records, target_layers)

    pred_stats = compute_prediction_stats(swap_records, scale)

    # Fix 7: Delta-based heatmaps (for all layers)
    delta_heatmaps_all = {}
    delta_heatmaps_bc = {}
    delta_norms_all = {}
    delta_norms_bc = {}
    for layer in target_layers:
        delta_heatmaps_all[layer] = compute_delta_similarity_matrix(swap_records, layer)
        delta_norms_all[layer] = compute_delta_norm_per_category(swap_records, layer)
        if both_correct_records:
            delta_heatmaps_bc[layer] = compute_delta_similarity_matrix(both_correct_records, layer)
            delta_norms_bc[layer] = compute_delta_norm_per_category(both_correct_records, layer)

    # Log Phase A key results
    max_layer = max(target_layers)
    for group in GROUP_ORDER:
        key = (group, max_layer)
        if key in sign_corrected_all:
            logger.info(f" Sign-corrected [{group}, L{max_layer}]: "
                        f"{sign_corrected_all[key]['mean']:.4f} +/- {sign_corrected_all[key]['std']:.4f}")
    logger.info(f" Accuracy orig={pred_stats['overall_acc_orig']:.1%}, "
                f"swap={pred_stats['overall_acc_swap']:.1%}, "
                f"both={pred_stats['overall_acc_both']:.1%}")

    # ── Phase D_A: Save Phase A results ───────────────────────────────────────
    logger.info("\n--- Phase D_A: Saving Phase A results ---")

    save_vectors_npz(scale, swap_records, target_layers, output_dir)

    # Save twice: untagged-filter ('all_pairs') and both-correct-only results.
    # Cross-alignment is empty here; it is saved separately after Phase B.
    save_scale_results(
        scale, swap_records, [],
        within_cat_all, sign_corrected_all,
        {}, pred_stats, target_layers,
        category_validity, delta_heatmaps_all,
        output_dir, both_correct_tag='all_pairs',
        save_alignment=False,
        delta_norms=delta_norms_all,
    )
    if both_correct_records:
        save_scale_results(
            scale, both_correct_records, [],
            within_cat_bc, sign_corrected_bc,
            {}, pred_stats, target_layers,
            category_validity, delta_heatmaps_bc,
            output_dir, both_correct_tag='both_correct',
            save_alignment=False,
            delta_norms=delta_norms_bc,
        )

    # ── Phase E_A: Per-scale plots from Phase A data ──────────────────────────
    if args.phase1_only:
        logger.info("\n--- Phase E_A: Per-scale plots [SKIPPED: --phase1-only] ---")
    else:
        logger.info("\n--- Phase E_A: Per-scale plots (swap-pair data) ---")

        for condition, wc_data, sc_data in [
            ('all', within_cat_all, sign_corrected_all),
            ('both_correct', within_cat_bc, sign_corrected_bc),
        ]:
            if condition == 'both_correct' and not both_correct_records:
                continue

            cond_dir = os.path.join(plots_dir, condition)
            os.makedirs(cond_dir, exist_ok=True)

            wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
            sc_dir = os.path.join(cond_dir, 'sign_corrected')
            os.makedirs(wc_dir, exist_ok=True)
            os.makedirs(sc_dir, exist_ok=True)

            plot_within_cat_consistency_trajectory(
                wc_data, scale, args.model_type,
                os.path.join(wc_dir, f'within_cat_consistency_{scale}.png'))

            plot_sign_corrected_consistency_trajectory(
                sc_data, scale, args.model_type,
                os.path.join(sc_dir, f'sign_corrected_consistency_{scale}.png'))

        # PCA (from full NPZ) — 2D and 3D, all-pairs and both-correct
        npz_path = os.path.join(output_dir, 'npz', f'vectors_{scale}.npz')
        if os.path.exists(npz_path):
            pca_dir = os.path.join(plots_dir, 'all', 'pca')
            pca_3d_dir = os.path.join(plots_dir, 'all', 'pca_3d')
            bc_pca_dir = os.path.join(plots_dir, 'both_correct', 'pca')
            bc_pca_3d_dir = os.path.join(plots_dir, 'both_correct', 'pca_3d')
            for d in (pca_dir, pca_3d_dir, bc_pca_dir, bc_pca_3d_dir):
                os.makedirs(d, exist_ok=True)
            plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir)
            plot_pca_3d(npz_path, scale, args.model_type, pca_3d_dir)
            plot_pca_embeddings(npz_path, scale, args.model_type, bc_pca_dir, bc_only=True)
            plot_pca_3d(npz_path, scale, args.model_type, bc_pca_3d_dir, bc_only=True)

        if pred_stats:
            pred_plot_dir = os.path.join(plots_dir, 'all', 'pred_stats')
            os.makedirs(pred_plot_dir, exist_ok=True)
            plot_pred_stats_bars([pred_stats], args.model_type,
                                 os.path.join(pred_plot_dir, f'pred_stats_{scale}.png'))

        if pred_stats:
            acc_dir = os.path.join(output_dir, 'accuracy')
            logger.info(f"\n--- Accuracy Charts [{scale}] ---")
            run_accuracy_charts([pred_stats], {scale: category_validity}, args.model_type, acc_dir)

        logger.info(f"\n--- All-Layer Heatmaps [{scale}] ---")
        run_all_layer_heatmaps(output_dir, args.model_type, [scale])
        logger.info(f"\n--- All-Layer PCA [{scale}] ---")
        run_all_layer_pca(output_dir, args.model_type, [scale])

    # ── Phase B: Extract cross-group features ─────────────────────────────────
    skip_b = getattr(args, 'skip_phase_b', False)
    if skip_b or not quads:
        if skip_b:
            logger.info("\n--- Phase B: Cross-group extraction [SKIPPED: --skip-phase-b] ---")
        quad_records = []
        cross_alignment = {}
    else:
        logger.info("\n--- Phase B: Extracting cross-group features ---")
        quad_records = extract_cross_group_features(extractor, quads)

        # ── Phase C_B: Cross-group analysis ───────────────────────────────────
        logger.info("\n--- Phase C_B: Analysis (cross-group) ---")
        cross_alignment = compute_cross_group_alignment(quad_records, target_layers)

        if max_layer in cross_alignment:
            ca = cross_alignment[max_layer]
            logger.info(f" Cross-group alignment L{max_layer}: "
                        f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})")

        # ── Phase D_B: Save Phase B results ───────────────────────────────────
        logger.info("\n--- Phase D_B: Saving Phase B results ---")
        save_cross_group_npz(scale, quad_records, target_layers, output_dir)
        save_cross_alignment(scale, cross_alignment, output_dir)

        # ── Phase E_B: Cross-alignment plots ──────────────────────────────────
        if args.phase1_only:
            logger.info("\n--- Phase E_B: Cross-alignment plots [SKIPPED: --phase1-only] ---")
        else:
            logger.info("\n--- Phase E_B: Per-scale plots (cross-group data) ---")
            # The same alignment data is plotted under both condition dirs;
            # cross-group alignment itself is not correctness-filtered.
            for condition in ['all', 'both_correct']:
                if condition == 'both_correct' and not both_correct_records:
                    continue
                ca_dir = os.path.join(plots_dir, condition, 'cross_alignment')
                os.makedirs(ca_dir, exist_ok=True)
                plot_cross_group_alignment_trajectory(
                    cross_alignment, scale, args.model_type,
                    os.path.join(ca_dir, f'cross_alignment_{scale}.png'))

    # Cleanup: drop large record lists before releasing the model.
    del swap_records, quad_records, both_correct_records
    extractor.cleanup()

    logger.info(f"\n Scale {scale} complete.")


# ============================================================================
# Accuracy Chart (integrated from accuracy_chart.py)
# ============================================================================
_acc_plot_group_bars(pred_stats, model_type, ax_list):
    """Draw grouped accuracy bars (orig / swap / both) per question group.

    pred_stats: list of per-scale dicts; keys read here are 'scale' and
        '{group}_acc_orig' / '{group}_acc_swap' / '{group}_acc_both'.
    ax_list: one matplotlib axes per entry in GROUP_ORDER.
    """
    # Keep only scales that actually have a pred_stats entry, in SCALE_ORDER.
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    x = np.arange(3)
    # max(..., 1) guards against division by zero when no scales are available.
    width = 0.8 / max(len(available), 1)
    for idx, group in enumerate(GROUP_ORDER):
        ax = ax_list[idx]
        for i, scale in enumerate(available):
            entry = next((d for d in pred_stats if d['scale'] == scale), None)
            if entry is None:
                continue
            vals = [entry.get(f'{group}_acc_orig', 0),
                    entry.get(f'{group}_acc_swap', 0),
                    entry.get(f'{group}_acc_both', 0)]
            # Center the per-scale bar cluster around each x tick.
            offset = (i - len(available) / 2 + 0.5) * width
            ax.bar(x + offset, vals, width, label=scale,
                   color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85)
        ax.set_xticks(x)
        ax.set_xticklabels(['orig', 'swap', 'both'], fontsize=10)
        ax.set_ylabel('Accuracy', fontsize=9)
        ax.set_title(group.capitalize(), fontweight='bold', fontsize=11,
                     color=GROUP_COLORS.get(group, 'black'))
        ax.legend(fontsize=7, ncol=2)
        ax.set_ylim(0, 1.15)
        # Dashed 0.5 line marks binary chance level.
        ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
        ax.grid(True, alpha=0.3, axis='y')


def _acc_plot_both_trajectory(pred_stats, model_type, ax):
    """Line plot of both-correct accuracy across scales, per group + overall."""
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    x_ticks = range(len(available))
    for group in GROUP_ORDER:
        # Missing scales fall back to {} -> 0 so line lengths stay aligned.
        y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(f'{group}_acc_both', 0)
                  for s in available]
        ax.plot(x_ticks, y_vals, '-o', color=GROUP_COLORS.get(group, 'gray'),
                label=group, linewidth=2.5, markersize=7)
    y_overall = [next((d for d in pred_stats if d['scale'] == s), {}).get('overall_acc_both', 0)
                 for s in available]
    ax.plot(x_ticks, y_overall, '--s', color='black', label='overall',
            linewidth=2, markersize=6, alpha=0.7)
    ax.set_xticks(list(x_ticks))
    ax.set_xticklabels(available, fontsize=9)
    ax.set_xlabel('Scale', fontsize=9)
    ax.set_ylabel('Accuracy (both correct)', fontsize=9)
    ax.set_title('Both-Correct Accuracy Trajectory', fontweight='bold', fontsize=11)
    ax.legend(fontsize=9)
    ax.set_ylim(0, 1.05)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
    ax.grid(True, alpha=0.3)


def _acc_plot_overall_trajectory(pred_stats, model_type, ax):
    """Line plot of overall accuracy (orig / swap / both) across scales."""
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    x_ticks = range(len(available))
    for metric, label, ls in [
        ('overall_acc_orig', 'orig', '-o'),
        ('overall_acc_swap', 'swap', '-s'),
        ('overall_acc_both', 'both', '-^'),
    ]:
        y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(metric, 0)
                  for s in available]
        ax.plot(x_ticks, y_vals, ls, label=label, linewidth=2.2, markersize=6)
    ax.set_xticks(list(x_ticks))
    ax.set_xticklabels(available, fontsize=9)
    ax.set_xlabel('Scale', fontsize=9)
    ax.set_ylabel('Overall Accuracy', fontsize=9)
    ax.set_title('Overall Accuracy Trajectory', fontweight='bold', fontsize=11)
    ax.legend(fontsize=9)
    ax.set_ylim(0, 1.05)
    ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
    ax.grid(True, alpha=0.3)


def _acc_plot_category_accuracy(cat_validity, model_type, ax_orig, ax_swap, pred_stats=None):
    """Grouped bars of per-category accuracy (plus an 'overall' bar) for orig and swap.

    cat_validity: {scale: {category: {'acc_orig', 'acc_swap', 'reliable', ...}}}.
    Overall values come from pred_stats when supplied, otherwise 0 is plotted.
    """
    available = [s for s in SCALE_ORDER if s in cat_validity]
    cats_with_overall = CATEGORY_ORDER + ['overall']
    x = np.arange(len(cats_with_overall))
    width = 0.8 / max(len(available), 1)
    # Map the per-category metric name to the corresponding overall key.
    overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'}
    for ax, metric, title in [
        (ax_orig, 'acc_orig', 'Per-Category Accuracy (orig)'),
        (ax_swap, 'acc_swap', 'Per-Category Accuracy (swap)'),
    ]:
        for i, scale in enumerate(available):
            cv = cat_validity[scale]
            vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER]
            if pred_stats is not None:
                entry = next((d for d in pred_stats if d['scale'] == scale), None)
                vals.append(entry.get(overall_key[metric], 0) if entry else 0)
            else:
                vals.append(0)
            offset = (i - len(available) / 2 + 0.5) * width
            ax.bar(x + offset, vals, width, label=scale,
                   color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85)
        # Faint background band per category for visual grouping.
        for j, cat in enumerate(CATEGORY_ORDER):
            ax.axvspan(j - 0.45, j + 0.45, color=CAT_COLORS.get(cat, 'gray'), alpha=0.06, linewidth=0)
        # Dotted separator between the categories and the 'overall' bar.
        ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', linewidth=1.2, linestyle=':', alpha=0.6)
        ax.set_xticks(x)
        ax.set_xticklabels(cats_with_overall, fontsize=9, rotation=15)
        ax.set_ylabel('Accuracy', fontsize=9)
        ax.set_title(title, fontweight='bold', fontsize=11)
        ax.legend(fontsize=7, ncol=2)
        ax.set_ylim(0, 1.15)
        ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
        ax.grid(True, alpha=0.3, axis='y')
        if available:
            # Reliability markers reflect only the LAST available scale.
            last_cv = cat_validity[available[-1]]
            for j, cat in enumerate(CATEGORY_ORDER):
                if not last_cv.get(cat, {}).get('reliable', True):
                    ax.text(j, 1.08, '✗', ha='center', va='center',
                            fontsize=9, color='red', fontweight='bold')


def _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=None):
    """Write one category_accuracy_{scale}.png (orig|swap panels) per scale."""
    cats_with_overall = CATEGORY_ORDER + ['overall']
    overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'}
    # Unknown scales sort last (index 99) so known scales keep SCALE_ORDER.
    for scale in sorted(cat_validity.keys(),
                        key=lambda s: SCALE_ORDER.index(s) if s in SCALE_ORDER else 99):
        cv = cat_validity[scale]
        ps_entry = next((d for d in pred_stats if d['scale'] == scale), None) if pred_stats else None
        fig, axes = plt.subplots(1, 2, figsize=(16, 5))
        x = np.arange(len(cats_with_overall))
        width = 0.55
        for ax, metric, title in [
            (axes[0], 'acc_orig', f'acc_orig ({scale})'),
            (axes[1], 'acc_swap', f'acc_swap ({scale})'),
        ]:
            vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER]
            overall_val = ps_entry.get(overall_key[metric], 0) if ps_entry else 0
            vals.append(overall_val)
            colors = [CAT_COLORS.get(cat, 'gray') for cat in CATEGORY_ORDER] + ['#333333']
            bars = ax.bar(x, vals, width, color=colors, alpha=0.85, edgecolor='white')
            ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black',
                       linewidth=1.2, linestyle=':', alpha=0.6)
            ax.set_xticks(x)
            ax.set_xticklabels(cats_with_overall, fontsize=10)
            ax.set_ylabel('Accuracy', fontsize=10)
            ax.set_title(title, fontweight='bold', fontsize=12)
            ax.set_ylim(0, 1.15)
            ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
            ax.grid(True, alpha=0.3, axis='y')
            # Annotate each bar with its value; flag unreliable categories.
            for bar, cat in zip(bars, cats_with_overall):
                reliable = cv.get(cat, {}).get('reliable', True) if cat != 'overall' else True
                h = bar.get_height()
                ax.text(bar.get_x() + bar.get_width() / 2, h + 0.02,
                        f'{h:.2f}' + ('' if reliable else ' ✗'),
                        ha='center', va='bottom', fontsize=8,
                        color='red' if not reliable else 'black')
        fig.suptitle(f'{model_type.upper()} - Category Accuracy ({scale})',
                     fontsize=13, fontweight='bold')
        plt.tight_layout()
        out = os.path.join(save_dir, f'category_accuracy_{scale}.png')
        plt.savefig(out, dpi=200, bbox_inches='tight')
        plt.close()
        logger.info(f"Saved: {out}")


def run_accuracy_charts(pred_stats, cat_validity, model_type, save_dir):
    """Generate all accuracy chart plots into save_dir."""
    os.makedirs(save_dir, exist_ok=True)

    # Group bars
    fig, axes = plt.subplots(1, 3, figsize=(21, 6))
    _acc_plot_group_bars(pred_stats, model_type, axes)
    fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group',
                 fontsize=15, fontweight='bold')
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'accuracy_group_bars.png'), dpi=200, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_group_bars.png')}")

    # Trajectory
    fig, axes = plt.subplots(1, 2, figsize=(16, 6))
    _acc_plot_both_trajectory(pred_stats, model_type, axes[0])
    _acc_plot_overall_trajectory(pred_stats, model_type, axes[1])
    fig.suptitle(f'{model_type.upper()} - Accuracy Trajectory Across Scales',
                 fontsize=14, fontweight='bold')
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'accuracy_trajectory.png'), dpi=200, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_trajectory.png')}")

    if cat_validity:
        # Category bars (all scales overlay)
        fig, axes = plt.subplots(1, 2, figsize=(20, 6))
        _acc_plot_category_accuracy(cat_validity, model_type, axes[0], axes[1],
                                    pred_stats=pred_stats)
        fig.suptitle(f'{model_type.upper()} - Per-Category Accuracy Across Scales',
                     fontsize=14, fontweight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(save_dir, 'accuracy_category.png'), dpi=200, bbox_inches='tight')
        plt.close()
        logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_category.png')}")

        # Per-scale category bars
        _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=pred_stats)

    # Combined accuracy_chart.png — a single dashboard reusing the helpers above.
    # NOTE(review): 3x3 and 3x2 subplot grids are mixed on one figure; matplotlib
    # allows this, but the bottom row comes from the coarser 3x2 grid.
    fig = plt.figure(figsize=(24, 14))
    ax_h = fig.add_subplot(3, 3, 1)
    ax_v = fig.add_subplot(3, 3, 2)
    ax_d = fig.add_subplot(3, 3, 3)
    _acc_plot_group_bars(pred_stats, model_type, [ax_h, ax_v, ax_d])
    ax_tb = fig.add_subplot(3, 3, 4)
    ax_to = fig.add_subplot(3, 3, 5)
    _acc_plot_both_trajectory(pred_stats, model_type, ax_tb)
    _acc_plot_overall_trajectory(pred_stats, model_type, ax_to)
    ax_note = fig.add_subplot(3, 3, 6)
    ax_note.axis('off')
    available_scales = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
    ax_note.text(0.1, 0.6,
                 f'Scales: {", ".join(available_scales)}\n\n✗ = unreliable category\n-- = 0.5 chance level',
                 transform=ax_note.transAxes, fontsize=11, va='top', family='monospace')
    if cat_validity:
        ax_co = fig.add_subplot(3, 2, 5)
        ax_cs = fig.add_subplot(3, 2, 6)
        _acc_plot_category_accuracy(cat_validity, model_type, ax_co, ax_cs, pred_stats=pred_stats)
    fig.suptitle(f'{model_type.upper()} — Accuracy Summary',
                 fontsize=17, fontweight='bold', y=1.01)
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'accuracy_chart.png'), dpi=200, bbox_inches='tight')
    plt.close()
    logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_chart.png')}")


# 
============================================================================
# Unify Consistency Y-axis (integrated from unify_consistency_ylim.py)
# ============================================================================

def _ylim_compute(all_vals, margin_ratio=0.08):
    """Return (ymin, ymax) spanning all_vals with a proportional margin.

    Falls back to (-1, 1) for an empty list.
    """
    if not all_vals:
        return -1, 1
    ymin, ymax = min(all_vals), max(all_vals)
    margin = (ymax - ymin) * margin_ratio
    return ymin - margin, ymax + margin


def _ylim_load_keyed_json(path):
    """Load a '{name}_L{layer}' keyed JSON into {(name, layer): vals}.

    Returns None when the file is missing, empty, or yields no parsable keys.
    """
    if not os.path.exists(path):
        return None
    with open(path) as f:
        raw = json.load(f)
    if not raw:
        return None
    result = {}
    for key, vals in raw.items():
        # rsplit on the LAST '_L' so names containing '_L' still parse.
        parts = key.rsplit('_L', 1)
        if len(parts) == 2:
            result[(parts[0], int(parts[1]))] = vals
    return result if result else None


def _ylim_load_alignment_json(path):
    """Load a cross-alignment JSON keyed 'L{layer}' into {layer: vals}.

    Returns None when the file is missing, empty, or has no 'L*' keys.
    """
    if not os.path.exists(path):
        return None
    with open(path) as f:
        raw = json.load(f)
    if not raw:
        return None
    result = {int(k[1:]): v for k, v in raw.items() if k.startswith('L')}
    return result if result else None


def _ylim_plot_sign_corrected(data, scale, model_type, save_path, ylim):
    """Plot sign-corrected group consistency per layer with a fixed y-range."""
    fig, ax = plt.subplots(figsize=(12, 6))
    for group in GROUP_ORDER:
        layers, vals = [], []
        # Keys are (group, layer); sort by layer index for a clean trajectory.
        for (g, l), v in sorted(data.items(), key=lambda x: x[0][1]):
            if g == group:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=GROUP_COLORS[group],
                    label=group, linewidth=2, markersize=3)
    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Sign-Corrected Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency',
                 fontweight='bold')
    ax.legend(fontsize=11)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_plot_within_cat(data, scale, model_type, save_path, ylim):
    """Plot within-category delta consistency per layer with a fixed y-range."""
    fig, ax = plt.subplots(figsize=(12, 6))
    for cat in CATEGORY_ORDER:
        layers, vals = [], []
        for (c, l), v in sorted(data.items(), key=lambda x: x[0][1]):
            if c == cat:
                layers.append(l)
                vals.append(v['mean'])
        if layers:
            ax.plot(layers, vals, '-o', color=CAT_COLORS[cat],
                    label=cat, linewidth=2, markersize=3)
    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Within-Category Consistency')
    ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency',
                 fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_plot_cross_alignment(data, scale, model_type, save_path, ylim):
    """Plot cross-group alignment per layer (with permutation control band)."""
    fig, ax = plt.subplots(figsize=(12, 6))
    layers = sorted(data.keys())
    ax.plot(layers, [data[l]['per_sample_mean'] for l in layers], '-o', color='#d62728',
            label='cos(d_vert, d_dist) per-sample mean', linewidth=2.5, markersize=3)
    ax.plot(layers, [data[l]['mean_delta_alignment'] for l in layers], '--s', color='#e377c2',
            label='cos(mean_d_vert, mean_d_dist)', linewidth=1.5, markersize=3)
    perm_mean = [data[l]['permutation_mean'] for l in layers]
    perm_std = [data[l]['permutation_std'] for l in layers]
    ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5)
    # Shade mean ± 2 std of the permutation control.
    ax.fill_between(layers,
                    [m - 2*s for m, s in zip(perm_mean, perm_std)],
                    [m + 2*s for m, s in zip(perm_mean, perm_std)],
                    alpha=0.2, color='gray')
    ax.set_ylim(ylim)
    ax.set_xlabel('Layer Index')
    ax.set_ylabel('Cosine Alignment')
    ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)',
                 fontweight='bold')
    ax.legend(fontsize=9)
    ax.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()


def _ylim_process_plot_type(scale_dir_map, plots_dir, conditions, model_type,
                            plot_name, json_pattern, loader, val_gatherer, plotter,
                            subfolder=None):
    """Re-plot one plot type across all conditions with a unified y-axis.

    scale_dir_map: {scale: path_to_saved_data/vlm_key} (ordered dict)
    """
    logger.info(f" [unify ylim] {plot_name}")
    for condition, condition_tag in conditions:
        cond_plot_dir = os.path.join(plots_dir, condition)
        if not os.path.isdir(cond_plot_dir):
            continue
        save_dir = os.path.join(cond_plot_dir, subfolder) if subfolder else cond_plot_dir
        os.makedirs(save_dir, exist_ok=True)
        # Load the source JSON for every scale that has one.
        all_data = {}
        for scale, scale_dir in scale_dir_map.items():
            path = os.path.join(scale_dir, 'json',
                                json_pattern.format(scale=scale, tag=condition_tag))
            loaded = loader(path)
            if loaded:
                all_data[scale] = loaded
        if not all_data:
            continue
        # One shared y-range over every scale's values, then re-plot each scale.
        all_vals = val_gatherer(all_data)
        ylim = _ylim_compute(all_vals)
        for scale, data in all_data.items():
            save_path = os.path.join(save_dir, f'{plot_name}_{scale}.png')
            plotter(data, scale, model_type, save_path, ylim)
        logger.info(f" {condition}: y=[{ylim[0]:.4f}, {ylim[1]:.4f}], {len(all_data)} scales")


def run_unify_ylim(scale_dir_map: dict, plots_dir: str, model_type: str):
    """Unify y-axis for sign_corrected, within_cat, and cross_alignment plots.

    scale_dir_map: {scale: path_to_saved_data/vlm_key}
    plots_dir: compare/{group_name}/plots/ (where unified plots are written)
    """
    conditions = [
        ('all', 'all_pairs'),
        ('both_correct', 'both_correct'),
    ]

    def gather_keyed(all_data):
        # Flatten every per-(name, layer) mean across all scales.
        return [v['mean'] for data in all_data.values() for v in data.values()]

    def gather_alignment(all_data):
        # Include the ±2 std permutation band edges so the band fits in view.
        vals = []
        for data in all_data.values():
            for v in data.values():
                vals += [v['per_sample_mean'], v['mean_delta_alignment'],
                         v['permutation_mean'] + 2 * v['permutation_std'],
                         v['permutation_mean'] - 2 * v['permutation_std']]
        return vals

    _ylim_process_plot_type(
        scale_dir_map, plots_dir, conditions, model_type,
        plot_name='sign_corrected_consistency',
        json_pattern='sign_corrected_consistency_{scale}_{tag}.json',
        loader=_ylim_load_keyed_json,
        val_gatherer=gather_keyed,
        plotter=_ylim_plot_sign_corrected,
        subfolder='sign_corrected',
    )
    _ylim_process_plot_type(
        scale_dir_map, plots_dir, conditions, model_type,
        plot_name='within_cat_consistency',
        json_pattern='within_cat_consistency_{scale}_{tag}.json',
        loader=_ylim_load_keyed_json,
        val_gatherer=gather_keyed,
        plotter=_ylim_plot_within_cat,
        subfolder='within_cat_consistency',
    )
    _ylim_process_plot_type(
        scale_dir_map, plots_dir, conditions, model_type,
        plot_name='cross_alignment',
        json_pattern='cross_alignment_{scale}.json',
        loader=_ylim_load_alignment_json,
        val_gatherer=gather_alignment,
        plotter=_ylim_plot_cross_alignment,
        subfolder='cross_alignment',
    )


def _has_phase_b_data(scale_dir: str, scale: str) -> bool:
    """Return True if cross_alignment_{scale}.json exists and is non-empty.

    Used in merge to auto-detect whether Phase B was run for a given scale.
    """
    path = os.path.join(scale_dir, 'json', f'cross_alignment_{scale}.json')
    if not os.path.exists(path):
        return False
    try:
        with open(path) as f:
            data = json.load(f)
        return bool(data)
    # Broad except is deliberate: a corrupt JSON counts as "no Phase B data".
    except Exception:
        return False


def _check_merge_only_sources(output_dir: str, model_type: str) -> bool:
    """Verify required source directories have data for a merge-only model_type.

    With the new per-scale directory layout, data lives at
    {output_dir}/{model_type}_{scale}/ instead of {output_dir}/{model_type}/.
    Returns True if all sources look healthy, False (with warnings) if not.
    """
    mc = MERGE_ONLY_CONFIGS[model_type]
    ok = True
    for req_model_type in mc['required_dirs']:
        # Look for any saved_data/{req_model_type}_{scale}/ directories
        if not os.path.isdir(output_dir):
            logger.warning(
                f"[{model_type}] output_dir not found: {output_dir}\n"
                f" → Run inference first: python swap_analysis.py --model_type {req_model_type}"
            )
            ok = False
            continue
        matching = [
            d for d in os.listdir(output_dir)
            if d.startswith(f'{req_model_type}_')
            and os.path.isdir(os.path.join(output_dir, d))
        ]
        if not matching:
            logger.warning(
                f"[{model_type}] No '{req_model_type}_*' directories found in {output_dir}\n"
                f" → Run inference first: python swap_analysis.py --model_type {req_model_type}"
            )
            ok = False
            continue
        # A source is healthy if at least one of its dirs has pred_stats JSON.
        any_data = False
        for d in matching:
            json_dir = os.path.join(output_dir, d, 'json')
            if os.path.isdir(json_dir) and any(
                f.startswith('pred_stats_') for f in os.listdir(json_dir)
            ):
                scale = d[len(req_model_type) + 1:]  # strip "{req_model_type}_" prefix
                logger.info(f" [{req_model_type}/{scale}] found data in {d}/")
                any_data = True
        if not any_data:
            logger.warning(
                f"[{model_type}] No pred_stats JSON found in any '{req_model_type}_*' directory.\n"
                f" → Inference may not have completed for '{req_model_type}'."
            )
            ok = False
    return ok


def _load_scale_data_multi(output_dir: str, model_type: str, scale: str, scale_sources: dict):
    """Load per-scale data for one scale, looking in the correct source directory.

    With the new per-scale layout, data lives at {output_dir}/{src_model_type}_{scale}/.
    Returns (sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc).
    Any unavailable item is None / {}.
    """
    src_model_type = scale_sources.get(scale, model_type)
    src_dir = os.path.join(output_dir, get_model_key(src_model_type, scale))

    sc = load_scale_consistency(src_dir, scale, 'all_pairs')
    sc_bc = load_scale_consistency(src_dir, scale, 'both_correct')
    wc = load_within_cat_consistency(src_dir, scale, 'all_pairs')
    wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct')
    align = load_scale_alignment(src_dir, scale)

    pred_stat = None
    pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json')
    if os.path.exists(pred_path):
        with open(pred_path) as f:
            pred_stat = json.load(f)

    cat_validity = None
    cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json')
    if os.path.exists(cv_path):
        with open(cv_path) as f:
            cat_validity = json.load(f)

    dh = load_delta_heatmaps(src_dir, scale, 'all_pairs')
    dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct')

    return sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc


# ---------------------------------------------------------------------------
# All-layer heatmap + PCA helpers (called from run_merge / run_merge_extended)
# ---------------------------------------------------------------------------

def _get_csv_layers(csv_dir: str, scale: str, tag: str) -> list:
    """Return sorted list of layer indices that have a delta_similarity CSV."""
    import glob as _glob
    pattern = os.path.join(csv_dir, f'delta_similarity_{scale}_L*_{tag}.csv')
    layers = []
    for fpath in _glob.glob(pattern):
        # Anchor on the basename so scale/tag substrings are matched literally.
        m = re.search(
            rf'delta_similarity_{re.escape(scale)}_L(\d+)_{re.escape(tag)}\.csv$',
            os.path.basename(fpath))
        if m:
            layers.append(int(m.group(1)))
    return sorted(layers)


def run_all_layer_heatmaps(model_dir: str, model_type: str, scales: list):
    """Generate delta-similarity heatmaps for ALL layers from pre-computed CSVs.

    Reads {model_dir}/csv/delta_similarity_{scale}_L{n}_{tag}.csv
    Writes {model_dir}/plots/all/heatmap/heatmap_{scale}_L{n}.png (all_pairs)
    {model_dir}/plots/both_correct/heatmap/heatmap_{scale}_L{n}.png (both_correct)

    Skips a scale if the NPZ is missing or any all_pairs CSV is absent
    (indicates inference was not fully completed for that scale).
    """
    TAG_TO_DIR = {
        'all_pairs': os.path.join(model_dir, 'plots', 'all', 'heatmap'),
        'both_correct': os.path.join(model_dir, 'plots', 'both_correct', 'heatmap'),
    }

    for scale in scales:
        npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
        csv_dir = os.path.join(model_dir, 'csv')

        if not os.path.exists(npz_path):
            logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping heatmaps.')
            continue

        # The NPZ's orig_L* keys define the authoritative layer set.
        data = np.load(npz_path, allow_pickle=True)
        npz_layers = sorted(
            int(k.replace('orig_L', ''))
            for k in data.files if k.startswith('orig_L')
        )
        data.close()

        if not npz_layers:
            logger.warning(f' [{model_type}/{scale}] No orig_L* keys in NPZ, skipping heatmaps.')
            continue

        csv_layers = _get_csv_layers(csv_dir, scale, 'all_pairs')
        missing = set(npz_layers) - set(csv_layers)
        if missing:
            logger.warning(
                f' [{model_type}/{scale}] {len(missing)} NPZ layers lack CSVs '
                f'(e.g. L{sorted(missing)[:5]}). Skipping all-layer heatmaps.')
            continue

        for out_dir in TAG_TO_DIR.values():
            os.makedirs(out_dir, exist_ok=True)

        logger.info(f' [{model_type}/{scale}] Generating heatmaps for {len(npz_layers)} layers...')
        saved = 0
        for layer in npz_layers:
            for tag, out_dir in TAG_TO_DIR.items():
                csv_path = os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{tag}.csv')
                if not os.path.exists(csv_path):
                    continue  # both_correct CSV may be absent for some layers
                df = pd.read_csv(csv_path, index_col=0)
                # Restrict to known categories, preserving CATEGORY_ORDER.
                available = [c for c in CATEGORY_ORDER if c in df.index]
                if not available:
                    continue
                df = df.loc[available, available]
                title = (
                    f'{model_type.upper()} ({scale}) \u2014 Delta Heatmap L{layer} '
                    f'({"both-correct" if tag == "both_correct" else "all pairs"})'
                )
                out_path = os.path.join(out_dir, f'heatmap_{scale}_L{layer}.png')
                plot_delta_heatmap(df, title, out_path)
                saved += 1
        logger.info(f' [{model_type}/{scale}] Saved {saved} heatmaps')


def run_all_layer_pca(model_dir: str, model_type: str, scales: list):
    """Generate 2D and 3D PCA plots for ALL layers from saved NPZ files.

    Writes {model_dir}/plots/all/pca/pca_{scale}_L{n}.png (all pairs)
    {model_dir}/plots/all/pca_3d/pca_{scale}_L{n}.png
    {model_dir}/plots/both_correct/pca/pca_{scale}_L{n}.png (both-correct only)
    {model_dir}/plots/both_correct/pca_3d/pca_{scale}_L{n}.png
    """
    for scale in scales:
        npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
        if not os.path.exists(npz_path):
            logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping PCA.')
            continue

        # All-pairs PCA
        pca_2d_dir = os.path.join(model_dir, 'plots', 'all', 'pca')
        pca_3d_dir = os.path.join(model_dir, 'plots', 'all', 'pca_3d')
        os.makedirs(pca_2d_dir, exist_ok=True)
        os.makedirs(pca_3d_dir, exist_ok=True)
        logger.info(f' [{model_type}/{scale}] Generating all-layer 2D PCA...')
        plot_pca_embeddings(npz_path, scale, model_type, pca_2d_dir)
        logger.info(f' [{model_type}/{scale}] Generating all-layer 3D PCA...')
        plot_pca_3d(npz_path, scale, model_type, pca_3d_dir)

        # Both-correct PCA (same NPZ, filtered to both-correct samples)
        bc_pca_2d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca')
        bc_pca_3d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca_3d')
        os.makedirs(bc_pca_2d_dir, exist_ok=True)
        os.makedirs(bc_pca_3d_dir, exist_ok=True)
        logger.info(f' [{model_type}/{scale}] Generating both-correct 2D PCA...')
        plot_pca_embeddings(npz_path, scale, model_type, bc_pca_2d_dir, bc_only=True)
        logger.info(f' [{model_type}/{scale}] Generating both-correct 3D PCA...')
        plot_pca_3d(npz_path, scale, model_type, bc_pca_3d_dir, bc_only=True)


def run_merge(args):
    """Merge per-scale results into cross-scale comparison plots and a summary CSV."""
    # Per-scale data lives in saved_data/{model_type}_{scale}/
    def _scale_dir(scale):
        return os.path.join(args.output_dir, get_model_key(args.model_type, scale))

    # Cross-scale (compare) output: {question_type}/compare/{group_name}/
    group_name = args.group_name or args.model_type
    if args.merge_output_dir:
        merge_out = args.merge_output_dir
    else:
        qt_root = os.path.dirname(args.output_dir.rstrip('/'))  # one level up from 
saved_data/
        merge_out = os.path.join(qt_root, 'compare', group_name)
    plots_dir = os.path.join(merge_out, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    # Fixed display order; only scales also present in args.scales are merged.
    scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer',
                   'roborefer_depth', '10pct', '20pct', '30pct']
    available_scales = [s for s in scale_order if s in args.scales]

    # Load per-scale results
    all_sign_corrected = {}
    all_sign_corrected_bc = {}
    all_within_cat = {}
    all_within_cat_bc = {}
    all_alignment = {}
    all_pred_stats = []
    all_cat_validity = {}
    all_delta_heatmaps = {}
    all_delta_heatmaps_bc = {}

    for scale in available_scales:
        sd = _scale_dir(scale)
        # Each loader returns a falsy value when that artifact is absent,
        # so a partially-run scale still contributes what it has.
        sc = load_scale_consistency(sd, scale, 'all_pairs')
        if sc:
            all_sign_corrected[scale] = sc
        sc_bc = load_scale_consistency(sd, scale, 'both_correct')
        if sc_bc:
            all_sign_corrected_bc[scale] = sc_bc
        wc = load_within_cat_consistency(sd, scale, 'all_pairs')
        if wc:
            all_within_cat[scale] = wc
        wc_bc = load_within_cat_consistency(sd, scale, 'both_correct')
        if wc_bc:
            all_within_cat_bc[scale] = wc_bc
        align = load_scale_alignment(sd, scale)
        if align:
            all_alignment[scale] = align
        pred_path = os.path.join(sd, 'json', f'pred_stats_{scale}.json')
        if os.path.exists(pred_path):
            with open(pred_path) as f:
                all_pred_stats.append(json.load(f))
        cv_path = os.path.join(sd, 'json', f'category_validity_{scale}.json')
        if os.path.exists(cv_path):
            with open(cv_path) as f:
                all_cat_validity[scale] = json.load(f)
        dh = load_delta_heatmaps(sd, scale, 'all_pairs')
        if dh:
            all_delta_heatmaps[scale] = dh
        dh_bc = load_delta_heatmaps(sd, scale, 'both_correct')
        if dh_bc:
            all_delta_heatmaps_bc[scale] = dh_bc

        logger.info(f" Loaded data for {scale}")

    # Generate cross-scale plots into condition subdirs
    for condition, sc_data, wc_data, dh_data, tag_label in [
        ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
        ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'),
    ]:
        cond_dir = os.path.join(plots_dir, condition)
        sc_dir = os.path.join(cond_dir, 'sign_corrected')
        wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
        dt_dir = os.path.join(cond_dir, 'delta_trajectory')
        os.makedirs(sc_dir, exist_ok=True)
        os.makedirs(wc_dir, exist_ok=True)
        os.makedirs(dt_dir, exist_ok=True)

        # Cross-scale overlays only make sense with 2+ scales of data.
        if len(sc_data) > 1:
            plot_cross_scale_consistency(
                sc_data, args.model_type,
                os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
                title_prefix=f'Sign-Corrected ({tag_label})')

        if len(wc_data) > 1:
            plot_cross_scale_within_cat_consistency(
                wc_data, args.model_type,
                os.path.join(wc_dir, 'cross_scale_within_cat.png'))

        if dh_data:
            plot_delta_trajectory(dh_data, args.model_type,
                                  os.path.join(dt_dir, 'delta_trajectory.png'))

    # Cross-scale alignment + pred stats + summary (shared across conditions)
    all_cond_dir = os.path.join(plots_dir, 'all')
    ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
    pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats')
    summary_dir = os.path.join(all_cond_dir, 'summary')
    os.makedirs(ca_dir, exist_ok=True)
    os.makedirs(pred_stats_dir, exist_ok=True)
    os.makedirs(summary_dir, exist_ok=True)

    if len(all_alignment) > 1:
        plot_cross_scale_alignment(
            all_alignment, args.model_type,
            os.path.join(ca_dir, 'cross_scale_alignment.png'))

    # Prediction stats plots
    if all_pred_stats:
        plot_pred_stats_bars(all_pred_stats, args.model_type,
                             os.path.join(pred_stats_dir, 'pred_stats_bars.png'))
        plot_pred_stats_trajectory(all_pred_stats, args.model_type,
                                   os.path.join(pred_stats_dir, 'pred_stats_trajectory.png'))

    # Summary barplot
    if all_sign_corrected:
        plot_summary_barplot(
            all_sign_corrected, all_alignment, args.model_type,
            os.path.join(summary_dir, 'summary_barplot.png'))

    # Summary CSV: one row per scale = pred stats + deepest-layer alignment.
    summary_rows = []
    for scale in available_scales:
        ps = next((p for p in all_pred_stats if p.get('scale') == scale), None)
        if ps is None:
            continue
        row = dict(ps)
        if scale in all_alignment:
            max_layer = max(all_alignment[scale].keys())
            row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
            row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
        summary_rows.append(row)

    if summary_rows:
        csv_dir = os.path.join(merge_out, 'csv')
        os.makedirs(csv_dir, exist_ok=True)
        pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False)

    # Accuracy charts (cross-scale)
    if all_pred_stats:
        acc_dir = os.path.join(plots_dir, 'accuracy')
        logger.info("\n--- Accuracy Charts ---")
        run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir)

    # Unify y-axis across scales for per-scale trajectory plots
    logger.info("\n--- Unifying Y-axis ---")
    scale_dir_map = {s: _scale_dir(s) for s in available_scales}
    run_unify_ylim(scale_dir_map, plots_dir, args.model_type)

    # All-layer heatmaps + PCA are now done per-scale in process_scale(); skip here.

    logger.info(f"\n=== Merge Complete ===\nResults in: {merge_out}")


def run_merge_extended(args):
    """Generate cross-scale plots for new / merge-only model_types.

    - Runnable types (molmo_big, qwen_big, qwen_super, big_trio):
      loads all data from results/{model_type}/ and saves plots there.
    - Merge-only types (molmo_all, qwen_all):
      loads per-scale data from the respective source directories,
      saves all cross-scale plots to results/{model_type}/.
+ """ + is_merge_only = args.model_type in MERGE_ONLY_CONFIGS + + # ── Determine scale order and data source strategy ──────────────────────── + if is_merge_only: + mc = MERGE_ONLY_CONFIGS[args.model_type] + scale_order = mc['scale_order'] + scale_sources = mc['scale_sources'] + + logger.info(f"\n=== MERGE-ONLY mode: {args.model_type} ===") + logger.info("Checking required source directories...") + sources_ok = _check_merge_only_sources(args.output_dir, args.model_type) + if not sources_ok: + logger.warning( + f"\n[WARNING] One or more source directories are missing or incomplete.\n" + f" Cross-scale plots for '{args.model_type}' may be partial.\n" + f" Run the missing model types first (see warnings above), then retry merge." + ) + else: + scale_order = SCALE_ORDERS_NEW.get( + args.model_type, list(MODEL_CONFIGS_NEW[args.model_type])) + scale_sources = None # all data lives in results/{model_type}/ + + available_scales = [s for s in scale_order if s in args.scales] + logger.info(f"Merging scales (in order): {available_scales}") + + # ── Determine output dir (compare/{group_name}/) ────────────────────────── + group_name = args.group_name or args.model_type + if args.merge_output_dir: + merge_out = args.merge_output_dir + else: + qt_root = os.path.dirname(args.output_dir.rstrip('/')) + merge_out = os.path.join(qt_root, 'compare', group_name) + plots_dir = os.path.join(merge_out, 'plots') + os.makedirs(plots_dir, exist_ok=True) + + # ── Per-scale data directory resolver ───────────────────────────────────── + def _scale_dir(scale): + if is_merge_only: + src_model_type = scale_sources[scale] + else: + src_model_type = args.model_type + return os.path.join(args.output_dir, get_model_key(src_model_type, scale)) + + # ── Load per-scale data ─────────────────────────────────────────────────── + all_sign_corrected = {} + all_sign_corrected_bc = {} + all_within_cat = {} + all_within_cat_bc = {} + all_alignment = {} + all_pred_stats = [] + all_cat_validity = {} + 
all_delta_heatmaps = {} + all_delta_heatmaps_bc = {} + + for scale in available_scales: + sd = _scale_dir(scale) + sc = load_scale_consistency(sd, scale, 'all_pairs') + sc_bc = load_scale_consistency(sd, scale, 'both_correct') + wc = load_within_cat_consistency(sd, scale, 'all_pairs') + wc_bc = load_within_cat_consistency(sd, scale, 'both_correct') + align = load_scale_alignment(sd, scale) + + pred_stat = None + pred_path = os.path.join(sd, 'json', f'pred_stats_{scale}.json') + if os.path.exists(pred_path): + with open(pred_path) as f: + pred_stat = json.load(f) + + cat_validity = None + cv_path = os.path.join(sd, 'json', f'category_validity_{scale}.json') + if os.path.exists(cv_path): + with open(cv_path) as f: + cat_validity = json.load(f) + + dh = load_delta_heatmaps(sd, scale, 'all_pairs') + dh_bc = load_delta_heatmaps(sd, scale, 'both_correct') + + if sc: + all_sign_corrected[scale] = sc + if sc_bc: + all_sign_corrected_bc[scale] = sc_bc + if wc: + all_within_cat[scale] = wc + if wc_bc: + all_within_cat_bc[scale] = wc_bc + if align: + all_alignment[scale] = align + if pred_stat is not None: + all_pred_stats.append(pred_stat) + if cat_validity is not None: + all_cat_validity[scale] = cat_validity + if dh: + all_delta_heatmaps[scale] = dh + if dh_bc: + all_delta_heatmaps_bc[scale] = dh_bc + + logger.info(f" Loaded data for '{scale}'" + + (f" (from '{scale_sources[scale]}')" if is_merge_only else "")) + + # ── Auto-detect Phase B data ────────────────────────────────────────────── + has_phase_b = all(_has_phase_b_data(_scale_dir(s), s) for s in available_scales) + if has_phase_b: + logger.info(" [Phase B] Cross-alignment data found for all scales → will include cross-alignment plots") + else: + missing_b = [s for s in available_scales if not _has_phase_b_data(_scale_dir(s), s)] + logger.info(f" [Phase B] Cross-alignment data missing for: {missing_b} → skipping cross-alignment plots") + + # ── Summary CSV (Phase 1 data — always saved) ───────────────────────────── 
+ summary_rows = [] + for scale in available_scales: + ps = next((p for p in all_pred_stats if p.get('scale') == scale), None) + if ps is None: + continue + row = dict(ps) + if scale in all_alignment: + max_layer = max(all_alignment[scale].keys()) + row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean'] + row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean'] + summary_rows.append(row) + if summary_rows: + csv_dir = os.path.join(merge_out, 'csv') + os.makedirs(csv_dir, exist_ok=True) + pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False) + + # ── Cross-scale plots (Phase 2 — skipped when --phase1-only) ───────────── + if args.phase1_only: + logger.info("\n--- Cross-scale plots [SKIPPED: --phase1-only] ---") + else: + for condition, sc_data, wc_data, dh_data, tag_label in [ + ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'), + ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'), + ]: + cond_dir = os.path.join(plots_dir, condition) + sc_dir = os.path.join(cond_dir, 'sign_corrected') + wc_dir = os.path.join(cond_dir, 'within_cat_consistency') + dt_dir = os.path.join(cond_dir, 'delta_trajectory') + os.makedirs(sc_dir, exist_ok=True) + os.makedirs(wc_dir, exist_ok=True) + os.makedirs(dt_dir, exist_ok=True) + + if len(sc_data) > 1: + plot_cross_scale_consistency( + sc_data, args.model_type, + os.path.join(sc_dir, 'cross_scale_sign_corrected.png'), + title_prefix=f'Sign-Corrected ({tag_label})') + + if len(wc_data) > 1: + plot_cross_scale_within_cat_consistency( + wc_data, args.model_type, + os.path.join(wc_dir, 'cross_scale_within_cat.png')) + + if dh_data: + plot_delta_trajectory( + dh_data, args.model_type, + os.path.join(dt_dir, 'delta_trajectory.png')) + + # ── Alignment and prediction stats ──────────────────────────────────── + all_cond_dir = os.path.join(plots_dir, 'all') + pred_stats_dir = os.path.join(all_cond_dir, 
'pred_stats') + summary_dir = os.path.join(all_cond_dir, 'summary') + os.makedirs(pred_stats_dir, exist_ok=True) + os.makedirs(summary_dir, exist_ok=True) + + if has_phase_b and len(all_alignment) > 1: + ca_dir = os.path.join(all_cond_dir, 'cross_alignment') + os.makedirs(ca_dir, exist_ok=True) + plot_cross_scale_alignment( + all_alignment, args.model_type, + os.path.join(ca_dir, 'cross_scale_alignment.png')) + + if all_pred_stats: + plot_pred_stats_bars( + all_pred_stats, args.model_type, + os.path.join(pred_stats_dir, 'pred_stats_bars.png')) + plot_pred_stats_trajectory( + all_pred_stats, args.model_type, + os.path.join(pred_stats_dir, 'pred_stats_trajectory.png')) + + if all_sign_corrected: + plot_summary_barplot( + all_sign_corrected, all_alignment, args.model_type, + os.path.join(summary_dir, 'summary_barplot.png')) + + # ── Accuracy charts ─────────────────────────────────────────────────── + if all_pred_stats: + acc_dir = os.path.join(plots_dir, 'accuracy') + logger.info("\n--- Accuracy Charts ---") + run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir) + + # ── Unify y-axis ────────────────────────────────────────────────────── + logger.info("\n--- Unifying Y-axis ---") + scale_dir_map = {s: _scale_dir(s) for s in available_scales} + run_unify_ylim(scale_dir_map, plots_dir, args.model_type) + + # All-layer heatmaps + PCA are now done per-scale in process_scale(); skip here. 
+ + logger.info(f"\n=== Merge Complete ===\nResults saved to: {merge_out}") + + +def main(): + # Default scales per legacy model_type (new types use their own defaults) + _LEGACY_DEFAULT_SCALES = { + 'molmo': ['vanilla', '80k', '400k', '800k', '2m'], + 'nvila': ['vanilla', '80k', '400k', '800k', '2m'], + 'qwen': ['vanilla', '80k', '400k', '800k', '2m'], + 'nvila_synthetic': ['80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '400k-5pct'], + 'nvila_st': ['80k-st', '400k-st', '800k-st'], + } + + parser = argparse.ArgumentParser( + description='Swap Analysis — Spatial Representation Probing', + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument('--data_path', type=str, + default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv') + parser.add_argument('--model_type', type=str, required=True, + choices=ALL_MODEL_TYPES, + help=( + 'Legacy: molmo | nvila | qwen\n' + 'Synthetic: nvila_synthetic\n' + 'New large: molmo_big | qwen_big | qwen_super | big_trio\n' + 'Merge-only (--merge required): molmo_all | qwen_all' + )) + parser.add_argument('--scales', type=str, nargs='+', default=None, + help='Scales to process (default: all for the given model_type).') + parser.add_argument('--output_dir', type=str, default=None, + help='Root for saved_data/. Defaults to ' + '{script_dir}/{question_type}/saved_data.') + parser.add_argument('--device', type=str, default='cuda') + parser.add_argument('--seed', type=int, default=42) + parser.add_argument('--merge', action='store_true', + help='Merge mode: generate cross-scale plots from saved per-scale data.') + parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir', + help='(Deprecated) Override output dir for cross-scale plots. ' + 'Use --group-name instead.') + parser.add_argument('--group-name', type=str, default=None, dest='group_name', + help='Folder name under compare/ for merged output. 
' + 'Defaults to model_type.') + parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer', + help='Disable automatic inclusion of roborefer scale for nvila.') + parser.add_argument('--skip-cross-group', action='store_true') + parser.add_argument('--max-samples-per-category', type=int, default=200, + dest='max_samples_per_category') + parser.add_argument('--no-filtering', action='store_true', dest='no_filtering', + help='Disable Unknown/empty filtering for far/close reference objects.' + ' By default, Unknown candidates are removed before sampling.') + parser.add_argument('--question-type', type=str, default='short_answer', + choices=['short_answer', 'mcq'], dest='question_type', + help='short_answer (default): "Answer with only one word." format; ' + 'mcq: MCQ A/B format with letter answers.') + parser.add_argument('--phase1-only', action='store_true', dest='phase1_only', + help='Skip all plot generation (per-scale and cross-scale). ' + 'Data (npz/csv/json) is still saved.') + parser.add_argument('--skip-phase-b', action='store_true', dest='skip_phase_b', + help='Skip Phase B (cross-group feature extraction). ' + 'Phase A inference + analysis + plots still run normally. ' + 'Merge auto-detects whether Phase B data is available.') + + args = parser.parse_args() + + # ── Compute output_dir and log_dir from question_type ──────────────────── + _HERE_UPDATED = os.path.dirname(os.path.abspath(__file__)) + if args.output_dir is None: + args.output_dir = os.path.join(_HERE_UPDATED, args.question_type, 'saved_data') + log_dir = os.path.join(_HERE_UPDATED, args.question_type, 'logs') + + # ── Validate: merge-only types require --merge ──────────────────────────── + if args.model_type in MERGE_ONLY_CONFIGS and not args.merge: + parser.error( + f"'{args.model_type}' is a merge-only type. 
Add --merge to run it.\n" + f" Example: python swap_analysis.py --model_type {args.model_type} --merge" + ) + + # ── Default scales ──────────────────────────────────────────────────────── + if args.scales is None: + if args.model_type in MERGE_ONLY_CONFIGS: + args.scales = MERGE_ONLY_CONFIGS[args.model_type]['scale_order'] + elif args.model_type in MODEL_CONFIGS_NEW: + args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys()) + else: + args.scales = _LEGACY_DEFAULT_SCALES.get( + args.model_type, ['vanilla', '80k', '400k', '800k', '2m']) + + # Legacy nvila: auto-include roborefer + if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer: + args.scales.append('roborefer') + + np.random.seed(args.seed) + torch.manual_seed(args.seed) + random.seed(args.seed) + + # ── Merge mode ─────────────────────────────────────────────────────────── + if args.merge: + group_name = args.group_name or args.model_type + log_path = _setup_file_logging(group_name, log_dir) + logger.info(f"Logging to: {log_path}") + logger.info("\n=== MERGE MODE ===") + if args.model_type in MODEL_CONFIGS_NEW or args.model_type in MERGE_ONLY_CONFIGS: + run_merge_extended(args) + else: + run_merge(args) + return + + # ── Inference mode ──────────────────────────────────────────────────────── + logger.info("\n=== Loading & Creating Swap Pairs ===") + swap_pairs = load_swap_pairs(args.data_path, args.seed, + filter_unknown=not args.no_filtering, + question_type=args.question_type) + + quads = [] + if not args.skip_cross_group and not getattr(args, 'skip_phase_b', False): + try: + hf_cache = build_hf_bbox_cache() + quads = create_cross_group_quads(swap_pairs, hf_cache, + question_type=args.question_type) + except Exception as e: + logger.warning(f"Cross-group setup failed: {e}. 
Skipping.") + quads = [] + + # ── Resolve config for the chosen model_type ───────────────────────────── + if args.model_type in MODEL_CONFIGS_NEW: + model_configs = MODEL_CONFIGS_NEW[args.model_type] + else: + model_configs = MODEL_CONFIGS[args.model_type] + + for scale in args.scales: + if scale not in model_configs: + logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.") + continue + + # Per-scale log file + vlm_key = get_model_key(args.model_type, scale) + log_path = _setup_file_logging(vlm_key, log_dir) + logger.info(f"Logging to: {log_path}") + + # Validate model path exists (skip HF IDs that start with org/ prefix) + if args.model_type in MODEL_CONFIGS_NEW: + _, raw_path = model_configs[scale] + else: + raw_path = model_configs[scale] + if not os.path.isabs(raw_path) and not raw_path.startswith(('Qwen/', 'allenai/')): + if not os.path.exists(raw_path): + logger.warning(f"Model path not found: {raw_path} (scale='{scale}'), skipping.") + continue + + try: + process_scale(args, scale, swap_pairs, quads) + except Exception as e: + logger.error(f"Failed {args.model_type} - {scale}: {e}") + import traceback + traceback.print_exc() + continue + + logger.info(f"\n{'='*60}") + logger.info("=== All scales complete ===") + logger.info(f"Results: {args.output_dir}") + logger.info(f"{'='*60}") + + +if __name__ == '__main__': + main() \ No newline at end of file