| |
| """ |
| Swap Analysis: Minimal Pair Probing for Spatial Representations |
| |
| Creates minimal pairs by swapping obj1<->obj2 in spatial questions: |
| Original: "Is A to the left or right of B?" -> left |
| Swapped: "Is B to the left or right of A?" -> right |
| |
| Supported model types |
| --------------------- |
| Legacy (Qwen2.5-VL-3B scale experiments): |
| molmo | nvila | qwen |
| Synthetic-MCQ-trained NVILA: |
| nvila_st : NVILA trained with MCQ synthetic mix (80k-st / 400k-st / 800k-st) |
| Synthetic-mix-trained NVILA: |
| nvila_synthetic : NVILA trained with synthetic data mixes (80k-5/10/20/30pct, 400k-5pct, 800k-5pct) |
| New large models: |
| molmo_big : Molmo2-8B |
| qwen_big : Qwen3-VL-32B-Instruct |
| qwen_super : Qwen3-VL-235B-A22B-Instruct |
| big_trio : Molmo2-8B + RoboRefer + Qwen3-VL-32B |
| Merge-only (--merge required): |
| molmo_all : molmo (vanilla->2m) + molmo_big (molmo2) |
| qwen_all : qwen (vanilla->2m) + qwen_big (qwen3_32b) |
| nvila_st_compare : nvila (vanilla->2m) + nvila_st (80k-st / 400k-st / 800k-st) |
| nvila_synth_compare : nvila (vanilla->800k) + nvila_synthetic (5pct / 10pct mixes) |
| |
| Usage examples |
| -------------- |
| # Legacy model (Qwen2.5-VL-3B scale) |
| python swap_analysis.py --model_type qwen |
| |
| # New large model (Qwen3-VL-32B) |
| conda run -n qwen3 python swap_analysis.py --model_type qwen_big |
| |
| # Cross-family merge (combine qwen + qwen_big results) |
| conda run -n qwen3 python swap_analysis.py --model_type qwen_all --merge |
| |
| Analyses: |
| 1. Difference vectors: delta = feature(swapped) - feature(original) |
| 2. Within-category delta consistency (do all left->right swaps point same direction?) |
| 3. Sign-corrected group consistency (align opposite categories by flipping) |
| 4. Cross-group delta alignment (delta_vertical vs delta_distance) for perspective bias |
| 5. Delta-based 6x6 similarity heatmap (mean delta per category as representation) |
| 6. Prediction stats visualization (bar chart + cross-scale trajectory) |
| 7. Both-correct filtering for delta analysis |
| 8. PCA visualization of per-sample embeddings |
| 9. Scaling effects on all of the above |
| |
| Fixes applied: |
| Fix 1: "Answer with only one word." appended to all prompts |
| Fix 2: Synonym handling (under/beneath->below, near/nearby->close, distant->far) |
| Fix 4: Cross-group quads index matching via string normalization |
| Fix 5: Within-category + sign-corrected delta consistency (replaces wrong group-level) |
| Fix 6: Prediction stats bar chart + cross-scale line plot |
| Fix 7: Delta-based 6x6 heatmap and trajectory |
| Fix 8: Category validity check + both-correct delta filtering |
| """ |
|
|
| import os |
| import sys |
| import json |
| import argparse |
| import base64 |
| import logging |
| import random |
| import re |
| from io import BytesIO |
| from collections import defaultdict |
| from typing import Dict, List, Tuple, Optional, Any |
| from abc import ABC, abstractmethod |
|
|
| import torch |
| import numpy as np |
| import pandas as pd |
| from PIL import Image |
| from tqdm import tqdm |
| import matplotlib |
| matplotlib.use('Agg') |
| import matplotlib.pyplot as plt |
| from mpl_toolkits.mplot3d import Axes3D |
| import seaborn as sns |
| from sklearn.metrics.pairwise import cosine_similarity |
| from sklearn.decomposition import PCA |
|
|
| logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') |
| logger = logging.getLogger(__name__) |
|
|
| _HERE = os.path.dirname(os.path.abspath(__file__)) |
|
|
| |
|
|
| HF_HUB_DIR = '/data/shared/Qwen/mydisk/huggingface/hub' |
|
|
|
|
| def resolve_local_path(model_path: str) -> str: |
| """Return local snapshot path for a HF model ID if cached, else return the ID unchanged.""" |
| if os.path.isabs(model_path): |
| return model_path |
| cache_name = 'models--' + model_path.replace('/', '--') |
| snapshots_dir = os.path.join(HF_HUB_DIR, cache_name, 'snapshots') |
| if os.path.isdir(snapshots_dir): |
| snapshots = sorted(os.listdir(snapshots_dir)) |
| if snapshots: |
| local_path = os.path.join(snapshots_dir, snapshots[-1]) |
| logger.info(f"Local cache found: {model_path} β {local_path}") |
| return local_path |
| logger.warning( |
| f"Model not found in local cache: '{model_path}'\n" |
| f" Expected at: {snapshots_dir}\n" |
| f" Will fall back to online HuggingFace Hub download.\n" |
| f" To cache locally first: python -c \"from huggingface_hub import snapshot_download; " |
| f"snapshot_download('{model_path}', cache_dir='{HF_HUB_DIR}')\"" |
| ) |
| return model_path |
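
| # e.g. resolve_local_path('Qwen/Qwen2.5-VL-3B-Instruct') returns the lexicographically |
| # last cached snapshot dir under HF_HUB_DIR if present, else the HF ID unchanged. |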
|
|
|
|
| def _setup_file_logging(name: str, log_dir: str) -> str: |
| """Attach a named FileHandler to the root logger. |
| |
| Writes to {log_dir}/{name}.log (append mode). |
| Returns the log file path. |
| """ |
| os.makedirs(log_dir, exist_ok=True) |
| log_path = os.path.join(log_dir, f'{name}.log') |
| fh = logging.FileHandler(log_path, mode='a', encoding='utf-8') |
| fh.setLevel(logging.INFO) |
| fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) |
| logging.getLogger().addHandler(fh) |
| return log_path |
|
|
|
|
| def get_model_key(model_type: str, scale: str) -> str: |
| """Return VLM key for (model_type, scale). E.g. nvila_80k, nvila_synthetic_80k-5pct.""" |
| return f"{model_type}_{scale}" |
|
|
|
|
| |
| |
| |
|
|
| CATEGORY_ORDER = ['left', 'right', 'above', 'below', 'far', 'close'] |
|
|
| OPPOSITE_MAP = { |
| 'left': 'right', 'right': 'left', |
| 'above': 'below', 'below': 'above', |
| 'under': 'above', |
| 'far': 'close', 'close': 'far', |
| } |
|
|
| |
| SHORT_OPPOSITE_MAP = { |
| 'left': 'right', 'right': 'left', |
| 'above': 'below', 'below': 'above', |
| 'far': 'close', 'close': 'far', |
| } |
|
|
| GROUP_MAP = { |
| 'left': 'horizontal', 'right': 'horizontal', |
| 'above': 'vertical', 'below': 'vertical', |
| 'far': 'distance', 'close': 'distance', |
| } |
|
|
| GROUP_ORDER = ['horizontal', 'vertical', 'distance'] |
|
|
| |
| CANONICAL_CATEGORIES = { |
| 'horizontal': 'left', |
| 'vertical': 'above', |
| 'distance': 'far', |
| } |
|
|
| |
| |
| SYNONYMS = { |
| 'below': ['under', 'beneath'], |
| 'close': ['near', 'nearby'], |
| 'far': ['distant'], |
| } |
|
|
| |
| _Q_TAIL_MCQ = "Answer with a single letter A or B." |
| MCQ_TEMPLATES = { |
| 'horizontal': { |
| 'left_first': "Is the {obj1} to the left or right of the {obj2}? (A) left (B) right " + _Q_TAIL_MCQ, |
| 'right_first': "Is the {obj1} to the left or right of the {obj2}? (A) right (B) left " + _Q_TAIL_MCQ, |
| }, |
| 'vertical': { |
| 'above_first': "Is the {obj1} above or below the {obj2}? (A) above (B) below " + _Q_TAIL_MCQ, |
| 'below_first': "Is the {obj1} above or below the {obj2}? (A) below (B) above " + _Q_TAIL_MCQ, |
| }, |
| 'distance': { |
| 'far_first': "Compared to {ref}, is {subj} far or close from you? (A) far (B) close " + _Q_TAIL_MCQ, |
| 'close_first': "Compared to {ref}, is {subj} far or close from you? (A) close (B) far " + _Q_TAIL_MCQ, |
| }, |
| } |
| MCQ_LETTER = { |
| 'horizontal': { |
| 'left_first': {'left': 'a', 'right': 'b'}, |
| 'right_first': {'left': 'b', 'right': 'a'}, |
| }, |
| 'vertical': { |
| 'above_first': {'above': 'a', 'below': 'b'}, |
| 'below_first': {'above': 'b', 'below': 'a'}, |
| }, |
| 'distance': { |
| 'far_first': {'far': 'a', 'close': 'b'}, |
| 'close_first': {'far': 'b', 'close': 'a'}, |
| }, |
| } |
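
| # e.g. MCQ_TEMPLATES['horizontal']['right_first'] renders "... (A) right (B) left ...", |
| # and MCQ_LETTER['horizontal']['right_first'] = {'left': 'b', 'right': 'a'} gives the |
| # letter expected for each word when checking answers. |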
|
|
| SCALE_COLORS = { |
| 'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c', |
| '800k': '#d62728', '2m': '#9467bd', 'roborefer':'#8c564b', |
| |
| 'molmo2': '#17becf', |
| 'qwen3_32b': '#bcbd22', |
| 'qwen3_235b': '#e377c2', |
| |
| '80k-5pct': '#b2dfdb', |
| '80k-10pct': '#00b894', |
| '80k-20pct': '#00897b', |
| '80k-30pct': '#004d40', |
| |
| '400k-5pct': '#66bb6a', |
| |
| '800k-5pct': '#ef9a9a', |
| |
| '80k-st': '#b85a00', |
| '400k-st': '#1a6b1a', |
| '800k-st': '#911b1b', |
| } |
|
|
| |
| SCALE_ORDER = [ |
| 'vanilla', '80k', '80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '80k-st', |
| '400k', '400k-5pct', '400k-st', '800k', '800k-5pct', '800k-st', '2m', 'roborefer', |
| 'molmo2', 'qwen3_32b', 'qwen3_235b', |
| ] |
|
|
| |
| SCALE_DISPLAY_NAMES = { |
| '80k-5pct': '80k 5%', |
| '80k-10pct': '80k 10%', |
| '80k-20pct': '80k 20%', |
| '80k-30pct': '80k 30%', |
| '400k-5pct': '400k 5%', |
| '800k-5pct': '800k 5%', |
| '80k-st': '80k ST', |
| '400k-st': '400k ST', |
| '800k-st': '800k ST', |
| } |
| |
| CAT_COLORS = { |
| 'left': '#ff7f0e', 'right': '#ffbb78', |
| 'above': '#2ca02c', 'below': '#98df8a', |
| 'far': '#9467bd', 'close': '#c5b0d5', |
| } |
| GROUP_COLORS = { |
| 'horizontal': '#ff7f0e', |
| 'vertical': '#2ca02c', |
| 'distance': '#9467bd', |
| } |
|
|
| |
| SHORT_TEMPLATES = { |
| 'horizontal': "Is the {obj1} to the left or right of the {obj2}? Answer with only one word.", |
| 'vertical': "Is the {obj1} above or below the {obj2}? Answer with only one word.", |
| 'distance': "Compared to {ref}, is {subj} far or close from you? Answer with only one word.", |
| } |
|
|
| MODEL_CONFIGS = { |
| 'molmo': { |
| 'vanilla': 'allenai/Molmo-7B-O-0924', |
| '80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_80k/unshared', |
| '400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_400k/unshared', |
| '800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_800k/unshared', |
| '2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp/data_scale_exp_2m/unshared', |
| }, |
| 'nvila': { |
| 'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B', |
| '80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221', |
| '400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221', |
| '800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221', |
| '2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632', |
| |
| |
| |
| |
| 'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model', |
| 'roborefer_depth': '/data/shared/Qwen/mydisk/RoboRefer_depth_model', |
| }, |
| 'qwen': { |
| 'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct', |
| '80k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221', |
| '400k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221', |
| '800k': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221', |
| '2m': '/data/shared/Qwen/mydisk/output/Qwen/data_scale_exp/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517', |
| }, |
| |
| 'nvila_st': { |
| '80k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-1250', |
| '400k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-6250', |
| '800k-st': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_MCQ_5PCT_2M-20260302_030354/checkpoint-12500', |
| }, |
| |
| 'nvila_synthetic': { |
| '80k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-1250', |
| '80k-10pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_10PCT_80K-20260224_234537', |
| '80k-20pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_20PCT_80K-20260224_232347', |
| '80k-30pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_30PCT_80K-20260224_232347', |
| '400k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-6250', |
| '800k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-12500' |
| }, |
| } |
|
|
| |
| |
| |
| MODEL_CONFIGS_NEW = { |
| 'molmo_big': { |
| 'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'), |
| }, |
| 'qwen_big': { |
| 'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'), |
| }, |
| 'qwen_super': { |
| 'qwen3_235b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-235B-A22B-Instruct'), |
| }, |
| 'big_trio': { |
| 'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'), |
| 'roborefer': ('RoboReferExtractor', '/data/shared/Qwen/mydisk/RoboRefer_model'), |
| 'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'), |
| }, |
| } |
|
|
| |
| MERGE_ONLY_CONFIGS = { |
| 'molmo_all': { |
| 'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'molmo2'], |
| 'scale_sources': { |
| 'vanilla': 'molmo', '80k': 'molmo', '400k': 'molmo', |
| '800k': 'molmo', '2m': 'molmo', 'molmo2': 'molmo_big', |
| }, |
| 'required_dirs': ['molmo', 'molmo_big'], |
| }, |
| 'qwen_all': { |
| 'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'qwen3_32b'], |
| 'scale_sources': { |
| 'vanilla': 'qwen', '80k': 'qwen', '400k': 'qwen', |
| '800k': 'qwen', '2m': 'qwen', 'qwen3_32b': 'qwen_big', |
| }, |
| 'required_dirs': ['qwen', 'qwen_big'], |
| }, |
| |
| 'nvila_st_compare': { |
| 'scale_order': ['vanilla', '80k', '80k-st', '400k', '400k-st', '800k', '800k-st', '2m'], |
| 'scale_sources': { |
| 'vanilla': 'nvila', |
| '80k': 'nvila', '80k-st': 'nvila_st', |
| '400k': 'nvila', '400k-st': 'nvila_st', |
| '800k': 'nvila', '800k-st': 'nvila_st', |
| '2m': 'nvila', |
| }, |
| 'required_dirs': ['nvila', 'nvila_st'], |
| }, |
| |
| 'nvila_synth_compare': { |
| 'scale_order': ['vanilla', '80k', '80k-5pct', '80k-10pct', '400k', '400k-5pct', '800k', '800k-5pct'], |
| 'scale_sources': { |
| 'vanilla': 'nvila', |
| '80k': 'nvila', |
| '80k-5pct': 'nvila_synthetic', |
| '80k-10pct': 'nvila_synthetic', |
| '400k': 'nvila', |
| '400k-5pct': 'nvila_synthetic', |
| '800k': 'nvila', |
| '800k-5pct': 'nvila_synthetic' |
| }, |
| 'required_dirs': ['nvila', 'nvila_synthetic'], |
| }, |
| } |
|
|
| |
| SCALE_ORDERS_NEW = { |
| 'molmo_big': ['molmo2'], |
| 'qwen_big': ['qwen3_32b'], |
| 'qwen_super': ['qwen3_235b'], |
| 'big_trio': ['molmo2', 'roborefer', 'qwen3_32b'], |
| } |
|
|
| ALL_MODEL_TYPES = ( |
| list(MODEL_CONFIGS.keys()) + |
| list(MODEL_CONFIGS_NEW.keys()) + |
| list(MERGE_ONLY_CONFIGS.keys()) |
| ) |
|
|
|
|
| |
| |
| |
|
|
| OBJECT_PATTERNS = [ |
| re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), |
| re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), |
| re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE), |
| re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE), |
| re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE), |
| ] |
|
|
|
|
| def extract_objects(question: str) -> Tuple[str, str]: |
| for pattern in OBJECT_PATTERNS: |
| m = pattern.search(question) |
| if m: |
| return m.group(1).strip(), m.group(2).strip() |
| raise ValueError(f"Could not extract objects from: {question}") |
|
|
|
|
| def decode_base64_image(base64_str: str) -> Image.Image: |
| image_data = base64.b64decode(base64_str) |
| return Image.open(BytesIO(image_data)).convert('RGB') |
|
|
|
|
| |
| |
| |
|
|
| def find_earliest_position(text: str, word: str) -> int: |
| """Find earliest position of word or any of its synonyms in text.""" |
| positions = [] |
| pos = text.find(word) |
| if pos != -1: |
| positions.append(pos) |
| for syn in SYNONYMS.get(word, []): |
| pos = text.find(syn) |
| if pos != -1: |
| positions.append(pos) |
| return min(positions) if positions else -1 |
|
|
|
|
| def check_answer(generated_text: str, expected_category: str, mcq_map: dict = None) -> bool: |
| if not generated_text or not generated_text.strip(): |
| return False |
| text = generated_text.strip().lower() |
| expected = expected_category.lower() |
| opposite = OPPOSITE_MAP[expected] |
|
|
| if mcq_map: |
| exp_letter = mcq_map.get(expected) |
| opp_letter = mcq_map.get(opposite) |
| |
| if exp_letter and text in (exp_letter, exp_letter+'.', exp_letter+')', exp_letter+','): |
| return True |
| if opp_letter and text in (opp_letter, opp_letter+'.', opp_letter+')', opp_letter+','): |
| return False |
| else: |
| exp_letter = opp_letter = None |
|
|
| |
| mcq_exp = f'({exp_letter})' if exp_letter else None |
| mcq_opp = f'({opp_letter})' if opp_letter else None |
|
|
| def earliest_with_mcq(word, mcq_pat=None): |
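| # Like find_earliest_position above, but also matches the MCQ letter |
| # pattern such as '(a)' when one is supplied. |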
| positions = [] |
| pos = text.find(word) |
| if pos != -1: |
| positions.append(pos) |
| for syn in SYNONYMS.get(word, []): |
| pos = text.find(syn) |
| if pos != -1: |
| positions.append(pos) |
| if mcq_pat: |
| pos = text.find(mcq_pat) |
| if pos != -1: |
| positions.append(pos) |
| return min(positions) if positions else -1 |
|
|
| pos_exp = earliest_with_mcq(expected, mcq_exp) |
| pos_opp = earliest_with_mcq(opposite, mcq_opp) |
| if pos_exp == -1: |
| return False |
| if pos_opp == -1: |
| return True |
| return pos_exp < pos_opp |
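
| # Illustrative check_answer behavior (comments only, not executed): |
| #   check_answer("left", "left")                            -> True |
| #   check_answer("it is to the right, not left", "left")    -> False  (opposite found first) |
| #   check_answer("B.", "left", {'left': 'a', 'right': 'b'}) -> False  (exact opposite letter) |
| #   check_answer("under the table", "below")                -> True   (synonym of 'below') |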
|
|
|
|
| |
| |
| |
|
|
| def load_swap_pairs(tsv_path: str, seed: int = 42, filter_unknown: bool = True, |
| question_type: str = 'mcq') -> List[dict]: |
| """Load EmbSpatialBench TSV and create swap pairs for all samples. |
| |
| Args: |
| filter_unknown: If True (default), skip far/close pairs where target_object |
| is Unknown/empty, and remove Unknown/empty values from reference_object |
| candidates before sampling. Pairs with no valid candidates are dropped. |
| Use --no-filtering to disable. |
| question_type: 'mcq' (default) uses MCQ A/B templates with letter answers; |
| 'short_answer' uses the original "Answer with only one word." format. |
| """ |
| rng = random.Random(seed) |
| df = pd.read_csv(tsv_path, sep='\t') |
|
|
| pairs = [] |
| stats = defaultdict(lambda: {'total': 0, 'success': 0}) |
|
|
| def _valid_obj(v): |
| return bool(v) and str(v).strip().lower() not in ('unknown', 'n/a', '') |
|
|
| for _, row in df.iterrows(): |
| category = row['category'] |
| stats[category]['total'] += 1 |
|
|
| try: |
| if category in ['left', 'right', 'above', 'under', 'below']: |
| obj1, obj2 = extract_objects(row['question']) |
| if category in ['left', 'right']: |
| grp = 'horizontal' |
| else: |
| grp = 'vertical' |
|
|
| if question_type == 'short_answer': |
| |
| if category == 'under': |
| category = 'below' |
| tmpl = SHORT_TEMPLATES[grp] |
| pair = { |
| 'index': row['index'], |
| 'question_id': str(row['question_id']), |
| 'image_base64': row['image'], |
| 'original_question': tmpl.format(obj1=obj1, obj2=obj2), |
| 'swapped_question': tmpl.format(obj1=obj2, obj2=obj1), |
| 'original_answer': category, |
| 'swapped_answer': SHORT_OPPOSITE_MAP[category], |
| 'group': grp, |
| 'category': category, |
| 'obj1': obj1, 'obj2': obj2, |
| 'mcq_map': None, |
| } |
| else: |
| |
| if category == 'under': |
| category = 'below' |
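| # Alternate the option order across pairs so correct answers are |
| # balanced between letters A and B. |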
| variant = ('left_first' if grp == 'horizontal' else 'above_first') \ |
| if len(pairs) % 2 == 0 else \ |
| ('right_first' if grp == 'horizontal' else 'below_first') |
| tmpl = MCQ_TEMPLATES[grp][variant] |
| mcq_map = MCQ_LETTER[grp][variant] |
| pair = { |
| 'index': row['index'], |
| 'question_id': str(row['question_id']), |
| 'image_base64': row['image'], |
| 'original_question': tmpl.format(obj1=obj1, obj2=obj2), |
| 'swapped_question': tmpl.format(obj1=obj2, obj2=obj1), |
| 'original_answer': category, |
| 'swapped_answer': OPPOSITE_MAP[category], |
| 'group': GROUP_MAP[category], |
| 'category': category, |
| 'obj1': obj1, 'obj2': obj2, |
| 'mcq_map': mcq_map, |
| } |
|
|
| elif category in ['far', 'close']: |
| answer_key = row['answer'] |
| options = {k: row[k] for k in ['A', 'B', 'C', 'D']} |
| target_object = options[answer_key] |
| candidates = [v for k, v in options.items() if k != answer_key] |
|
|
| if filter_unknown: |
| if not _valid_obj(target_object): |
| continue |
| candidates = [v for v in candidates if _valid_obj(v)] |
| if not candidates: |
| continue |
|
|
| reference_object = rng.choice(candidates) |
|
|
| if question_type == 'short_answer': |
| tmpl = SHORT_TEMPLATES['distance'] |
| pair = { |
| 'index': row['index'], |
| 'question_id': str(row['question_id']), |
| 'image_base64': row['image'], |
| 'original_question': tmpl.format(ref=reference_object, subj=target_object), |
| 'swapped_question': tmpl.format(ref=target_object, subj=reference_object), |
| 'original_answer': category, |
| 'swapped_answer': OPPOSITE_MAP[category], |
| 'group': 'distance', |
| 'category': category, |
| 'target_object': target_object, |
| 'reference_object': reference_object, |
| 'mcq_map': None, |
| } |
| else: |
| variant = 'far_first' if len(pairs) % 2 == 0 else 'close_first' |
| tmpl = MCQ_TEMPLATES['distance'][variant] |
| mcq_map = MCQ_LETTER['distance'][variant] |
| pair = { |
| 'index': row['index'], |
| 'question_id': str(row['question_id']), |
| 'image_base64': row['image'], |
| 'original_question': tmpl.format(ref=reference_object, subj=target_object), |
| 'swapped_question': tmpl.format(ref=target_object, subj=reference_object), |
| 'original_answer': category, |
| 'swapped_answer': OPPOSITE_MAP[category], |
| 'group': 'distance', |
| 'category': category, |
| 'target_object': target_object, |
| 'reference_object': reference_object, |
| 'mcq_map': mcq_map, |
| } |
| else: |
| continue |
|
|
| pairs.append(pair) |
| stats[category]['success'] += 1 |
|
|
| except Exception as e: |
| logger.warning(f"Failed to create swap pair for index {row['index']}: {e}") |
| continue |
|
|
| logger.info("Swap pair creation stats:") |
| for cat in CATEGORY_ORDER: |
| s = stats[cat] |
| logger.info(f" {cat}: {s['success']}/{s['total']}") |
| logger.info(f" Total pairs: {len(pairs)}") |
|
|
| return pairs |
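
| # Minimal usage sketch (TSV path illustrative): |
| #   pairs = load_swap_pairs('/path/to/embspatial.tsv', question_type='mcq') |
| #   pairs[0]['original_question'] / pairs[0]['swapped_question'] form one minimal pair. |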
|
|
|
|
| |
| |
| |
|
|
| def build_hf_bbox_cache(hf_dataset_name: str = 'FlagEval/EmbSpatial-Bench') -> Dict[str, dict]: |
| """Load HF dataset and build bbox lookup cache keyed by string-normalized question_id.""" |
| from datasets import load_dataset |
| logger.info(f"Loading HF dataset: {hf_dataset_name}") |
| ds = load_dataset(hf_dataset_name, split='test') |
|
|
| cache = {} |
| for item in ds: |
| |
| qid = str(item['question_id']) |
| cache[qid] = { |
| 'objects': item['objects'], |
| 'relation': item['relation'], |
| 'data_source': item['data_source'], |
| 'answer': item['answer'], |
| 'answer_options': item['answer_options'], |
| } |
|
|
| |
| sample_keys = list(cache.keys())[:5] |
| logger.info(f"Built bbox cache: {len(cache)} entries (sample keys: {sample_keys})") |
| return cache |
|
|
|
|
| def get_bbox_center_y(bbox: list) -> float: |
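| # Assumes bbox = [x, y, w, h]; the box's vertical center is y + h/2. |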
| return bbox[1] + bbox[3] / 2 |
|
|
|
|
| def create_cross_group_quads( |
| swap_pairs: List[dict], |
| hf_cache: Dict[str, dict], |
| threshold_ratio: float = 0.05, |
| question_type: str = 'mcq', |
| ) -> List[dict]: |
| """For far/close swap pairs, create additional vertical queries using bbox.""" |
| IMAGE_HEIGHTS = {'ai2thor': 300, 'mp3d': 480, 'scannet': 968} |
|
|
| quads = [] |
| stats = {'total': 0, 'matched': 0, 'ambiguous': 0, 'no_bbox': 0} |
|
|
| distance_pairs = [p for p in swap_pairs if p['group'] == 'distance'] |
|
|
| |
| n_matched_keys = sum(1 for p in distance_pairs if p['question_id'] in hf_cache) |
| logger.info(f"Matched {n_matched_keys}/{len(distance_pairs)} question_ids between TSV and HF dataset") |
|
|
| for pair in distance_pairs: |
| stats['total'] += 1 |
| qid = pair['question_id'] |
|
|
| if qid not in hf_cache: |
| stats['no_bbox'] += 1 |
| continue |
|
|
| hf_item = hf_cache[qid] |
| names = hf_item['objects']['name'] |
| bboxes = hf_item['objects']['bbox'] |
|
|
| target = pair['target_object'] |
| reference = pair['reference_object'] |
|
|
| target_bbox_y, ref_bbox_y = None, None |
| for name, bbox in zip(names, bboxes): |
| if name == target: |
| target_bbox_y = get_bbox_center_y(bbox) |
| if name == reference: |
| ref_bbox_y = get_bbox_center_y(bbox) |
|
|
| if target_bbox_y is None or ref_bbox_y is None: |
| stats['no_bbox'] += 1 |
| continue |
|
|
| image_height = IMAGE_HEIGHTS.get(hf_item['data_source'], 480) |
| threshold = image_height * threshold_ratio |
| y_diff = target_bbox_y - ref_bbox_y |
|
|
| if abs(y_diff) < threshold: |
| stats['ambiguous'] += 1 |
| continue |
|
|
| if target_bbox_y < ref_bbox_y: |
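| # Image y-axis grows downward, so a smaller bbox center-y means 'above'. |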
| vert_original_answer = 'above' |
| else: |
| vert_original_answer = 'below' |
|
|
| if question_type == 'short_answer': |
| vert_tmpl = SHORT_TEMPLATES['vertical'] |
| vert_mcq_map = None |
| vert_original_q = vert_tmpl.format(obj1=target, obj2=reference) |
| vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target) |
| vert_swapped_answer = SHORT_OPPOSITE_MAP[vert_original_answer] |
| else: |
| vert_variant = 'above_first' if len(quads) % 2 == 0 else 'below_first' |
| vert_tmpl = MCQ_TEMPLATES['vertical'][vert_variant] |
| vert_mcq_map = MCQ_LETTER['vertical'][vert_variant] |
| vert_original_q = vert_tmpl.format(obj1=target, obj2=reference) |
| vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target) |
| vert_swapped_answer = OPPOSITE_MAP[vert_original_answer] |
|
|
| quad = { |
| 'index': pair['index'], |
| 'image_base64': pair['image_base64'], |
| 'dist_original_q': pair['original_question'], |
| 'dist_swapped_q': pair['swapped_question'], |
| 'dist_original_answer': pair['original_answer'], |
| 'dist_swapped_answer': pair['swapped_answer'], |
| 'dist_mcq_map': pair['mcq_map'], |
| 'vert_original_q': vert_original_q, |
| 'vert_swapped_q': vert_swapped_q, |
| 'vert_original_answer': vert_original_answer, |
| 'vert_swapped_answer': vert_swapped_answer, |
| 'vert_mcq_map': vert_mcq_map, |
| 'target_object': target, |
| 'reference_object': reference, |
| 'target_bbox_y': target_bbox_y, |
| 'ref_bbox_y': ref_bbox_y, |
| 'y_diff': y_diff, |
| 'data_source': hf_item['data_source'], |
| } |
| quads.append(quad) |
| stats['matched'] += 1 |
|
|
| logger.info(f"Cross-group quads: {stats['matched']}/{stats['total']} " |
| f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})") |
| return quads |
|
|
|
|
| |
| |
| |
|
|
| class BaseHiddenStateExtractor(ABC): |
| def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None): |
| self.model_path = model_path |
| self.device = device |
| self.hidden_states = {} |
| self.hooks = [] |
| self._load_model() |
| num_layers = self._get_num_layers() |
| if target_layers is None: |
| self.target_layers = list(range(num_layers)) |
| logger.info(f"Model has {num_layers} layers. Extracting ALL.") |
| else: |
| self.target_layers = target_layers |
| self._register_hooks() |
|
|
| def _register_hooks(self): |
| for layer_idx in self.target_layers: |
| module = self._get_layer_module(layer_idx) |
| if module is not None: |
| hook = module.register_forward_hook(self._make_hook(layer_idx)) |
| self.hooks.append(hook) |
|
|
| def _make_hook(self, layer_idx: int): |
| def hook_fn(module, input, output): |
| if isinstance(output, tuple): |
| hidden = output[0] |
| else: |
| hidden = output |
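| # Capture only on the prefill pass (seq_len > 1); single-token decode steps |
| # would otherwise overwrite the stored state with generated-token features. |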
| if hidden.shape[1] > 1: |
| last_token = hidden[:, -1, :].detach().cpu().float() |
| self.hidden_states[layer_idx] = last_token.squeeze(0) |
| return hook_fn |
|
|
| @abstractmethod |
| def _load_model(self): pass |
| @abstractmethod |
| def _get_num_layers(self) -> int: pass |
| @abstractmethod |
| def _get_layer_module(self, layer_idx: int): pass |
| @abstractmethod |
| def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass |
|
|
| def cleanup(self): |
| for hook in self.hooks: |
| hook.remove() |
| self.hooks = [] |
| if hasattr(self, 'model'): |
| del self.model |
| if hasattr(self, 'processor'): |
| del self.processor |
| torch.cuda.empty_cache() |
|
|
|
|
| |
| |
| |
|
|
| class MolmoExtractor(BaseHiddenStateExtractor): |
| def _load_model(self): |
| config_path = os.path.join(self.model_path, "config.yaml") |
| checkpoint_path = os.path.join(self.model_path, "model.pt") |
| if os.path.exists(config_path) and os.path.exists(checkpoint_path): |
| self._load_native_model() |
| self.is_native = True |
| else: |
| self._load_hf_model() |
| self.is_native = False |
|
|
| def _load_native_model(self): |
| from olmo.config import ModelConfig |
| from olmo.model import Molmo as NativeMolmoModel |
| from olmo.data.model_preprocessor import MultiModalPreprocessor |
| from olmo.data.data_formatter import DataFormatter |
|
|
| _original_load = torch.load |
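| # Newer PyTorch defaults to weights_only=True in torch.load, which can reject |
| # the pickled config objects in native Molmo checkpoints; force the old behavior. |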
| def _unsafe_load_wrapper(*args, **kwargs): |
| if 'weights_only' not in kwargs: |
| kwargs['weights_only'] = False |
| return _original_load(*args, **kwargs) |
| torch.load = _unsafe_load_wrapper |
|
|
| cfg = ModelConfig.load( |
| os.path.join(self.model_path, "config.yaml"), |
| key="model", validate_paths=False |
| ) |
| cfg.init_device = "cpu" |
| self.model = NativeMolmoModel(cfg) |
| state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu") |
| self.model.load_state_dict(state_dict) |
| self.model = self.model.to(self.device, dtype=torch.bfloat16).eval() |
| self.tokenizer = cfg.get_tokenizer() |
|
|
| v_cfg = cfg.vision_backbone |
| h, w = cfg.llm_patches_per_crop() |
| image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None) |
|
|
| class SafeDataFormatter(DataFormatter): |
| def get_system_prompt(self, style, for_inference, messages, rng=None): |
| if style is None: |
| style = "User" |
| return super().get_system_prompt(style, for_inference, messages, rng) |
|
|
| self.formatter = SafeDataFormatter( |
| prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting, |
| system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space, |
| default_inference_len=cfg.default_inference_len |
| ) |
| self.preprocessor = MultiModalPreprocessor( |
| tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type), |
| crop_mode=cfg.crop_mode, max_crops=cfg.max_crops, |
| overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode, |
| use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size, |
| image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h, |
| image_token_length_w=w, image_token_length_h=h, |
| image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask, |
| pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting, |
| ) |
| logger.info(f"Loaded native Molmo from {self.model_path}") |
|
|
| def _load_hf_model(self): |
| from transformers import AutoModelForCausalLM, AutoProcessor |
| self.model = AutoModelForCausalLM.from_pretrained( |
| self.model_path, torch_dtype=torch.bfloat16, |
| trust_remote_code=True, device_map=self.device |
| ).eval() |
| self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) |
| logger.info(f"Loaded HF Molmo from {self.model_path}") |
|
|
| def _get_num_layers(self) -> int: |
| if self.is_native: |
| return len(self.model.transformer.blocks) |
| if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'): |
| return len(self.model.model.transformer.blocks) |
| return 32 |
|
|
| def _get_layer_module(self, layer_idx: int): |
| if self.is_native: |
| return self.model.transformer.blocks[layer_idx] |
| return self.model.model.transformer.blocks[layer_idx] |
|
|
| def extract_and_predict(self, image, question): |
| self.hidden_states = {} |
| if self.is_native: |
| example = {"messages": [question], "image": image} |
| messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random) |
| batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True) |
| if 'input_ids' not in batch and 'input_tokens' in batch: |
| batch['input_ids'] = batch['input_tokens'] |
|
|
| def to_t(x): |
| return torch.from_numpy(x) if isinstance(x, np.ndarray) else x |
|
|
| input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long() |
| images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16) |
| image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16) |
| image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device) |
|
|
| with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): |
| gen = self.model.generate( |
| input_ids=input_ids, images=images_t, |
| image_masks=image_masks, image_input_idx=image_input_idx, |
| max_steps=20, beam_size=1, |
| ) |
| generated_ids = gen.token_ids[0, 0] |
| answer = self.tokenizer.decode(generated_ids.tolist()).strip() |
| for eos in ['<|endoftext|>', '</s>', '<|end|>']: |
| answer = answer.replace(eos, '').strip() |
| else: |
| from transformers import GenerationConfig |
| inputs = self.processor.process(images=[image], text=question) |
| processed = {} |
| for k, v in inputs.items(): |
| v = v.to(self.device).unsqueeze(0) |
| if v.dtype == torch.float32: |
| v = v.to(dtype=torch.bfloat16) |
| processed[k] = v |
| with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16): |
| output = self.model.generate_from_batch( |
| processed, |
| GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"), |
| tokenizer=self.processor.tokenizer, |
| ) |
| input_len = processed['input_ids'].shape[1] |
| answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip() |
|
|
| return self.hidden_states.copy(), answer |
|
|
|
|
| |
| |
| |
|
|
| class NVILAExtractor(BaseHiddenStateExtractor): |
| def _load_model(self): |
| original_sys_path = sys.path.copy() |
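| # Import the stock llava package: temporarily drop RoboRefer paths and purge any |
| # previously imported llava modules so the correct fork is loaded. |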
| sys.path = [p for p in sys.path if 'RoboRefer' not in p] |
| modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] |
| removed = {m: sys.modules.pop(m) for m in modules_to_remove} |
| try: |
| import llava |
| from llava.media import Image as LLaVAImage |
| from llava import conversation as clib |
| except Exception as err: |
| sys.path = original_sys_path |
| for m, mod in removed.items(): |
| sys.modules[m] = mod |
| raise RuntimeError(f"Failed to import llava: {err}") |
| sys.path = original_sys_path |
| self.LLaVAImage = LLaVAImage |
| self.clib = clib |
| self.model = llava.load(self.model_path, model_base=None) |
| self._find_llm_backbone() |
| logger.info(f"Loaded NVILA from {self.model_path}") |
|
|
| def _find_llm_backbone(self): |
| candidates = [] |
| if hasattr(self.model, 'llm'): |
| if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'): |
| candidates.append(self.model.llm.model.layers) |
| if hasattr(self.model.llm, 'layers'): |
| candidates.append(self.model.llm.layers) |
| if hasattr(self.model, 'model'): |
| if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'): |
| candidates.append(self.model.model.model.layers) |
| if hasattr(self.model.model, 'layers'): |
| candidates.append(self.model.model.layers) |
| for name, module in self.model.named_modules(): |
| if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0: |
| candidates.append(module) |
| if candidates: |
| self.llm_backbone = candidates[0] |
| else: |
| raise ValueError("Could not locate transformer layers in NVILA model") |
|
|
| def _get_num_layers(self) -> int: |
| return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24 |
|
|
| def _get_layer_module(self, layer_idx: int): |
| return self.llm_backbone[layer_idx] |
|
|
| def extract_and_predict(self, image, question): |
| self.hidden_states = {} |
| import tempfile |
| with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f: |
| temp_path = f.name |
| image.save(temp_path) |
| try: |
| prompt = [self.LLaVAImage(temp_path), question] |
| from transformers import GenerationConfig |
| response = self.model.generate_content( |
| prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False) |
| ) |
| finally: |
| os.unlink(temp_path) |
| answer = str(response[0] if isinstance(response, list) else response).strip() |
| return self.hidden_states.copy(), answer |
|
|
|
|
| class RoboReferExtractor(NVILAExtractor): |
| ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer' |
|
|
| def _load_model(self): |
| original_sys_path = sys.path.copy() |
| if self.ROBOREFER_PATH not in sys.path: |
| sys.path.insert(0, self.ROBOREFER_PATH) |
| modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()] |
| removed = {m: sys.modules.pop(m) for m in modules_to_remove} |
| try: |
| import llava |
| from llava.media import Image as LLaVAImage |
| from llava import conversation as clib |
| except Exception as err: |
| sys.path = original_sys_path |
| for m, mod in removed.items(): |
| sys.modules[m] = mod |
| raise RuntimeError(f"Failed to import RoboRefer llava: {err}") |
| sys.path = original_sys_path |
| self.LLaVAImage = LLaVAImage |
| self.clib = clib |
| self.model = llava.load(self.model_path, model_base=None) |
| self._find_llm_backbone() |
| logger.info(f"Loaded RoboRefer from {self.model_path}") |
|
|
|
|
| class RoboReferDepthExtractor(RoboReferExtractor): |
| """RoboRefer with depth-image input instead of RGB. |
| |
| Usage: pass the depth PIL image to extract_and_predict() instead of the RGB image. |
| In practice this means loading depth images in load_swap_pairs() / extract_swap_features() |
| rather than changing anything here. If the depth image is stored as a separate column in |
| the dataset, add a 'depth_image_base64' key to the pair dict and decode it before calling |
| run_single_query(). |
| |
| TODO: confirm depth image path / format with the actual dataset layout. |
| """ |
| |
| |
|
|
|
|
| |
| |
| |
|
|
| class Qwen25VLExtractor(BaseHiddenStateExtractor): |
| BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct" |
|
|
| def _load_model(self): |
| from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor |
| try: |
| self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( |
| self.model_path, torch_dtype=torch.bfloat16, device_map=self.device |
| ) |
| except ImportError: |
| self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( |
| self.model_path, torch_dtype=torch.bfloat16 |
| ).to(self.device) |
| self.model.eval() |
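| # Local fine-tuned checkpoints may not ship processor files, so fall back to |
| # the base model's processor for absolute paths. |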
| if self.model_path.startswith('/'): |
| self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL) |
| else: |
| self.processor = AutoProcessor.from_pretrained(self.model_path) |
| logger.info(f"Loaded Qwen2.5-VL from {self.model_path}") |
|
|
| def _get_num_layers(self) -> int: |
| return len(self.model.model.layers) |
|
|
| def _get_layer_module(self, layer_idx: int): |
| return self.model.model.layers[layer_idx] |
|
|
| def extract_and_predict(self, image, question): |
| self.hidden_states = {} |
| messages = [{"role": "user", "content": [ |
| {"type": "image", "image": image}, |
| {"type": "text", "text": question} |
| ]}] |
| text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) |
| from qwen_vl_utils import process_vision_info |
| image_inputs, video_inputs = process_vision_info(messages) |
| inputs = self.processor( |
| text=[text], images=image_inputs, videos=video_inputs, |
| padding=True, return_tensors="pt" |
| ).to(self.device) |
| with torch.no_grad(): |
| output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) |
| input_len = inputs['input_ids'].shape[1] |
| answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip() |
| return self.hidden_states.copy(), answer |
|
|
|
|
| |
| |
| |
|
|
| class Molmo2Extractor(BaseHiddenStateExtractor): |
| """Extractor for allenai/Molmo2-8B (AutoModelForImageTextToText, messages-dict input).""" |
|
|
| def _load_model(self): |
| from transformers import AutoProcessor, AutoModelForImageTextToText |
| self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) |
| self.model = AutoModelForImageTextToText.from_pretrained( |
| self.model_path, trust_remote_code=True, torch_dtype='auto', device_map='auto', |
| ).eval() |
| self._find_llm_layers() |
| logger.info(f"Loaded Molmo2 from {self.model_path}") |
|
|
| def _find_llm_layers(self): |
| candidates = [ |
| ['model', 'layers'], |
| ['language_model', 'model', 'layers'], |
| ['model', 'model', 'layers'], |
| ] |
| for path in candidates: |
| obj = self.model |
| for attr in path: |
| obj = getattr(obj, attr, None) |
| if obj is None: |
| break |
| if obj is not None and hasattr(obj, '__len__') and len(obj) > 0: |
| self.llm_layers = obj |
| logger.info(f"Molmo2: layers at '{'.'.join(path)}', count={len(obj)}") |
| return |
| best, best_name, best_len = None, None, 0 |
| for name, module in self.model.named_modules(): |
| if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len: |
| best, best_name, best_len = module, name, len(module) |
| if best is not None: |
| logger.info(f"Molmo2: layers via scan at '{best_name}', count={best_len}") |
| self.llm_layers = best |
| return |
| raise ValueError("Could not find transformer layers in Molmo2 model") |
|
|
| def _get_num_layers(self) -> int: |
| return len(self.llm_layers) |
|
|
| def _get_layer_module(self, layer_idx: int): |
| return self.llm_layers[layer_idx] |
|
|
| def extract_and_predict(self, image, question): |
| self.hidden_states = {} |
| messages = [{"role": "user", "content": [ |
| {"type": "image", "image": image}, |
| {"type": "text", "text": question}, |
| ]}] |
| inputs = self.processor.apply_chat_template( |
| messages, tokenize=True, add_generation_prompt=True, |
| return_tensors="pt", return_dict=True, |
| ) |
| inputs = {k: v.to(self.model.device) for k, v in inputs.items()} |
| with torch.inference_mode(): |
| generated_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) |
| input_len = inputs['input_ids'].shape[1] |
| answer = self.processor.tokenizer.decode( |
| generated_ids[0, input_len:], skip_special_tokens=True).strip() |
| return self.hidden_states.copy(), answer |
|
|
|
|
| class Qwen3VLExtractor(BaseHiddenStateExtractor): |
| """Extractor for Qwen3-VL family (32B dense, 235B MoE). |
| |
| Key differences from Qwen25VLExtractor: |
| - AutoModelForImageTextToText + trust_remote_code=True |
| - process_vision_info requires image_patch_size=16 |
| - processor call requires do_resize=False |
| - 32x32 px patches -> different min/max_pixels |
| """ |
|
|
| MIN_PIXELS = 256 * 32 * 32 |
| MAX_PIXELS = 16384 * 32 * 32 |
|
|
| def _load_model(self): |
| from transformers import AutoProcessor, AutoModelForImageTextToText |
| self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True) |
| self.model = AutoModelForImageTextToText.from_pretrained( |
| self.model_path, trust_remote_code=True, torch_dtype='auto', |
| device_map='auto', attn_implementation='flash_attention_2', |
| ).eval() |
| self._find_llm_layers() |
| logger.info(f"Loaded Qwen3-VL from {self.model_path}") |
|
|
| def _find_llm_layers(self): |
| candidates = [ |
| ['model', 'language_model', 'model', 'layers'], |
| ['language_model', 'model', 'layers'], |
| ['model', 'model', 'layers'], |
| ['model', 'layers'], |
| ] |
| for path in candidates: |
| obj = self.model |
| for attr in path: |
| obj = getattr(obj, attr, None) |
| if obj is None: |
| break |
| if obj is not None and hasattr(obj, '__len__') and len(obj) > 0: |
| self.llm_layers = obj |
| logger.info(f"Qwen3-VL: layers at '{'.'.join(path)}', count={len(obj)}") |
| return |
| best, best_name, best_len = None, None, 0 |
| for name, module in self.model.named_modules(): |
| if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len: |
| best, best_name, best_len = module, name, len(module) |
| if best is not None: |
| logger.info(f"Qwen3-VL: layers via scan at '{best_name}', count={best_len}") |
| self.llm_layers = best |
| return |
| raise ValueError("Could not find transformer layers in Qwen3-VL model") |
|
|
| def _get_num_layers(self) -> int: |
| return len(self.llm_layers) |
|
|
| def _get_layer_module(self, layer_idx: int): |
| return self.llm_layers[layer_idx] |
|
|
| def extract_and_predict(self, image, question): |
| self.hidden_states = {} |
| messages = [{"role": "user", "content": [ |
| {"type": "image", "image": image, |
| "min_pixels": self.MIN_PIXELS, "max_pixels": self.MAX_PIXELS}, |
| {"type": "text", "text": question}, |
| ]}] |
| text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) |
| from qwen_vl_utils import process_vision_info |
| images, videos, _ = process_vision_info( |
| messages, image_patch_size=16, return_video_kwargs=True, return_video_metadata=True, |
| ) |
| inputs = self.processor( |
| text=text, images=images, videos=videos, do_resize=False, return_tensors="pt", |
| ).to(self.model.device) |
| with torch.no_grad(): |
| output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) |
| input_len = inputs['input_ids'].shape[1] |
| answer = self.processor.tokenizer.decode( |
| output_ids[0, input_len:], skip_special_tokens=True).strip() |
| return self.hidden_states.copy(), answer |
|
|
|
|
| EXTRACTOR_CLASSES = { |
| 'MolmoExtractor': MolmoExtractor, |
| 'NVILAExtractor': NVILAExtractor, |
| 'RoboReferExtractor': RoboReferExtractor, |
| 'Qwen25VLExtractor': Qwen25VLExtractor, |
| 'Molmo2Extractor': Molmo2Extractor, |
| 'Qwen3VLExtractor': Qwen3VLExtractor, |
| } |
|
|
|
|
| def get_extractor(model_type: str, model_path: str = None, scale: str = None, **kwargs): |
| """Create an extractor for any model_type (legacy or new-large).""" |
| |
| if model_type in MODEL_CONFIGS_NEW: |
| cls_name, raw_path = MODEL_CONFIGS_NEW[model_type][scale] |
| resolved = resolve_local_path(raw_path) |
| logger.info(f"Creating {cls_name} for scale='{scale}' from {resolved}") |
| return EXTRACTOR_CLASSES[cls_name](resolved, **kwargs) |
| |
| if model_type == 'nvila' and scale == 'roborefer': |
| return RoboReferExtractor(model_path, **kwargs) |
| if model_type == 'nvila' and scale == 'roborefer_depth': |
| return RoboReferDepthExtractor(model_path, **kwargs) |
| legacy = { |
| 'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor, |
| 'nvila_synthetic': NVILAExtractor, 'nvila_st': NVILAExtractor, |
| } |
| return legacy[model_type](model_path, **kwargs) |
|
|
|
|
| |
| |
| |
|
|
| def run_single_query(extractor, image, question): |
| """One forward pass: return ({layer_idx: flattened hidden-state vector}, predicted text).""" |
| hidden_states, predicted = extractor.extract_and_predict(image, question) |
| result = {} |
| for layer_idx in extractor.target_layers: |
| if layer_idx in hidden_states: |
| state = hidden_states[layer_idx].numpy().flatten() |
| if state.size > 0: |
| result[layer_idx] = state |
| return result, predicted |
|
|
|
|
| def extract_swap_features( |
| extractor: BaseHiddenStateExtractor, |
| swap_pairs: List[dict], |
| max_samples_per_category: int = 0, |
| ) -> List[dict]: |
| """Extract features for all swap pairs.""" |
| rng = random.Random(42) |
|
|
| if max_samples_per_category > 0: |
| grouped = defaultdict(list) |
| for p in swap_pairs: |
| grouped[p['category']].append(p) |
| limited = [] |
| for cat in CATEGORY_ORDER: |
| samples = grouped[cat] |
| if len(samples) > max_samples_per_category: |
| samples = rng.sample(samples, max_samples_per_category) |
| limited.extend(samples) |
| swap_pairs = limited |
|
|
| records = [] |
| for pair in tqdm(swap_pairs, desc="Swap pairs"): |
| try: |
| image = decode_base64_image(pair['image_base64']) |
| hs_orig, pred_orig = run_single_query(extractor, image, pair['original_question']) |
| hs_swap, pred_swap = run_single_query(extractor, image, pair['swapped_question']) |
|
|
| is_correct_orig = check_answer(pred_orig, pair['original_answer'], pair['mcq_map']) |
| is_correct_swap = check_answer(pred_swap, pair['swapped_answer'], pair['mcq_map']) |
|
|
| delta = {} |
| for layer_idx in extractor.target_layers: |
| if layer_idx in hs_orig and layer_idx in hs_swap: |
| delta[layer_idx] = hs_swap[layer_idx] - hs_orig[layer_idx] |
|
|
| record = { |
| 'index': pair['index'], |
| 'group': pair['group'], |
| 'category': pair['category'], |
| 'original_answer': pair['original_answer'], |
| 'swapped_answer': pair['swapped_answer'], |
| 'pred_orig': pred_orig, |
| 'pred_swap': pred_swap, |
| 'is_correct_orig': is_correct_orig, |
| 'is_correct_swap': is_correct_swap, |
| 'hs_orig': hs_orig, |
| 'hs_swap': hs_swap, |
| 'delta': delta, |
| } |
| records.append(record) |
|
|
| mark_o = "O" if is_correct_orig else "X" |
| mark_s = "O" if is_correct_swap else "X" |
| logger.info(f" #{pair['index']:<6} {pair['category']:<6} " |
| f"orig[{mark_o}]=\"{pred_orig[:40]}\" swap[{mark_s}]=\"{pred_swap[:40]}\"" |
| + (f" [{len(records)}/{len(swap_pairs)}]" if len(records) % 50 == 0 else "")) |
|
|
| except Exception as e: |
| logger.warning(f"Error on index {pair['index']}: {e}") |
| continue |
|
|
| logger.info(f"Extracted {len(records)} swap pair records") |
|
|
| |
| for cat in CATEGORY_ORDER: |
| cat_recs = [r for r in records if r['category'] == cat] |
| n = len(cat_recs) |
| if n == 0: |
| continue |
| c_orig = sum(1 for r in cat_recs if r['is_correct_orig']) |
| c_swap = sum(1 for r in cat_recs if r['is_correct_swap']) |
| c_both = sum(1 for r in cat_recs if r['is_correct_orig'] and r['is_correct_swap']) |
| logger.info(f" {cat:>6s} (n={n}): acc_orig={c_orig/n:.1%}, acc_swap={c_swap/n:.1%}, " |
| f"acc_both={c_both/n:.1%}") |
|
|
| return records |
|
|
|
|
| def extract_cross_group_features( |
| extractor: BaseHiddenStateExtractor, |
| quads: List[dict], |
| ) -> List[dict]: |
| """Extract features for cross-group quads (4 forward passes each).""" |
| records = [] |
| for quad in tqdm(quads, desc="Cross-group quads"): |
| try: |
| image = decode_base64_image(quad['image_base64']) |
| hs_d_orig, pred_d_orig = run_single_query(extractor, image, quad['dist_original_q']) |
| hs_d_swap, pred_d_swap = run_single_query(extractor, image, quad['dist_swapped_q']) |
| hs_v_orig, pred_v_orig = run_single_query(extractor, image, quad['vert_original_q']) |
| hs_v_swap, pred_v_swap = run_single_query(extractor, image, quad['vert_swapped_q']) |
|
|
| delta_dist, delta_vert = {}, {} |
| for layer_idx in extractor.target_layers: |
| if layer_idx in hs_d_orig and layer_idx in hs_d_swap: |
| delta_dist[layer_idx] = hs_d_swap[layer_idx] - hs_d_orig[layer_idx] |
| if layer_idx in hs_v_orig and layer_idx in hs_v_swap: |
| delta_vert[layer_idx] = hs_v_swap[layer_idx] - hs_v_orig[layer_idx] |
|
|
| record = { |
| 'index': quad['index'], |
| 'delta_dist': delta_dist, |
| 'delta_vert': delta_vert, |
| 'pred_d_orig': pred_d_orig, 'pred_d_swap': pred_d_swap, |
| 'pred_v_orig': pred_v_orig, 'pred_v_swap': pred_v_swap, |
| 'is_correct_d_orig': check_answer(pred_d_orig, quad['dist_original_answer'], quad['dist_mcq_map']), |
| 'is_correct_d_swap': check_answer(pred_d_swap, quad['dist_swapped_answer'], quad['dist_mcq_map']), |
| 'is_correct_v_orig': check_answer(pred_v_orig, quad['vert_original_answer'], quad['vert_mcq_map']), |
| 'is_correct_v_swap': check_answer(pred_v_swap, quad['vert_swapped_answer'], quad['vert_mcq_map']), |
| 'data_source': quad['data_source'], |
| } |
| records.append(record) |
|
|
| tqdm.write(f" #{quad['index']:<6} dist=[{pred_d_orig[:20]}/{pred_d_swap[:20]}] " |
| f"vert=[{pred_v_orig[:20]}/{pred_v_swap[:20]}]") |
|
|
| except Exception as e: |
| logger.warning(f"Error on cross-group index {quad['index']}: {e}") |
| continue |
|
|
| logger.info(f"Extracted {len(records)} cross-group quad records") |
| return records |
|
|
|
|
| |
| |
| |
|
|
| |
|
|
| def compute_delta_consistency(records: List[dict], target_layers: List[int]): |
| """Compute TWO types of delta consistency. |
| |
| Returns: |
| within_cat_results: {(category, layer) -> {mean, std, n}} |
| sign_corrected_results: {(group, layer) -> {mean, std, n}} |
| """ |
| within_cat_results = {} |
| sign_corrected_results = {} |
|
|
| for group in GROUP_ORDER: |
| canonical = CANONICAL_CATEGORIES[group] |
| opposite = OPPOSITE_MAP[canonical] |
| group_recs = [r for r in records if r['group'] == group] |
|
|
| for layer in target_layers: |
| |
| for cat in [canonical, opposite]: |
| cat_deltas = [r['delta'][layer] for r in group_recs |
| if r['category'] == cat and layer in r['delta']] |
| if len(cat_deltas) >= 2: |
| arr = np.array(cat_deltas) |
| sim = cosine_similarity(arr) |
| upper = sim[np.triu_indices(len(cat_deltas), k=1)] |
| within_cat_results[(cat, layer)] = { |
| 'mean': float(np.mean(upper)), |
| 'std': float(np.std(upper)), |
| 'n': len(cat_deltas), |
| } |
|
|
| |
| all_deltas = [] |
| for r in group_recs: |
| if layer not in r['delta']: |
| continue |
| d = r['delta'][layer] |
| if r['category'] == opposite: |
| d = -d |
| all_deltas.append(d) |
|
|
| if len(all_deltas) >= 2: |
| arr = np.array(all_deltas) |
| sim = cosine_similarity(arr) |
| upper = sim[np.triu_indices(len(all_deltas), k=1)] |
| sign_corrected_results[(group, layer)] = { |
| 'mean': float(np.mean(upper)), |
| 'std': float(np.std(upper)), |
| 'n': len(all_deltas), |
| } |
|
|
| return within_cat_results, sign_corrected_results |
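
| # Metric sketch (comments only): for two deltas d1=[1,0], d2=[2,0], |
| # cosine_similarity([[1,0],[2,0]]) is a 2x2 matrix and np.triu_indices(2, k=1) |
| # picks its single off-diagonal entry (1.0); the mean/std of those upper-triangle |
| # entries is what gets stored per (category, layer) and per (group, layer). |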
|
|
|
|
| |
|
|
| def compute_delta_similarity_matrix(records: List[dict], layer: int) -> Optional[pd.DataFrame]: |
| """Compute 6x6 cosine similarity using mean delta per category.""" |
| cat_deltas = {} |
| for cat in CATEGORY_ORDER: |
| deltas = [r['delta'][layer] for r in records if r['category'] == cat and layer in r['delta']] |
| if deltas: |
| cat_deltas[cat] = np.mean(deltas, axis=0) |
|
|
| available = [c for c in CATEGORY_ORDER if c in cat_deltas] |
| if len(available) < 2: |
| return None |
|
|
| vectors = np.array([cat_deltas[c] for c in available]) |
| sim = cosine_similarity(vectors) |
| return pd.DataFrame(sim, index=available, columns=available) |
|
|
|
|
| |
| |
| |
| |
|
|
| def compute_delta_norm_per_category(records: List[dict], layer: int) -> Optional[pd.DataFrame]: |
| """Compute mean delta vector norm per category at a given layer. |
| |
| Returns a single-column DataFrame with index = category label, column = 'norm'. |
| Format matches what summarize_metrics_updated.py expects: |
| delta_norm_{scale}_L{layer}_all_pairs.csv |
| ,norm |
| left,12.34 |
| right,11.89 |
| above,9.45 |
| below,9.12 |
| far,7.23 |
| close,7.58 |
| |
| Returns None if no records have delta vectors at this layer. |
| """ |
| rows = {} |
| for cat in CATEGORY_ORDER: |
| deltas = [r['delta'][layer] for r in records |
| if r['category'] == cat and layer in r['delta']] |
| if deltas: |
| norms = [float(np.linalg.norm(d)) for d in deltas] |
| rows[cat] = float(np.mean(norms)) |
|
|
| if not rows: |
| return None |
|
|
| df = pd.DataFrame.from_dict(rows, orient='index', columns=['norm']) |
| |
| ordered = [c for c in CATEGORY_ORDER if c in df.index] |
| return df.loc[ordered] |
|
|
|
|
| |
|
|
| def filter_both_correct(records: List[dict]) -> List[dict]: |
| """Filter to pairs where both orig and swap predictions are correct.""" |
| return [r for r in records if r['is_correct_orig'] and r['is_correct_swap']] |
|
|
|
|
| |
|
|
| def check_category_validity(records: List[dict], scale: str) -> Dict[str, dict]: |
| """Check per-category accuracy and flag unreliable categories.""" |
| validity = {} |
| for cat in CATEGORY_ORDER: |
| cat_recs = [r for r in records if r['category'] == cat] |
| n = len(cat_recs) |
| if n == 0: |
| validity[cat] = {'n': 0, 'acc_orig': 0, 'acc_swap': 0, 'reliable': False} |
| continue |
| acc_orig = sum(1 for r in cat_recs if r['is_correct_orig']) / n |
| acc_swap = sum(1 for r in cat_recs if r['is_correct_swap']) / n |
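| # 0.5 is chance level on these two-option questions; require at least chance |
| # accuracy in both directions before treating the category as reliable. |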
| reliable = acc_orig >= 0.5 and acc_swap >= 0.5 |
| validity[cat] = { |
| 'n': n, 'acc_orig': acc_orig, 'acc_swap': acc_swap, |
| 'reliable': reliable, |
| } |
| if not reliable: |
| logger.warning(f" [!] Category '{cat}' unreliable at scale={scale}: " |
| f"acc_orig={acc_orig:.1%}, acc_swap={acc_swap:.1%}") |
| return validity |
|
|
|
|
| def compute_cross_group_alignment(quad_records: List[dict], target_layers: List[int]) -> dict: |
| """Per-layer cosine alignment between delta_vert and delta_dist, with a permutation baseline.""" |
| results = {} |
| for layer in target_layers: |
| per_sample = [] |
| delta_verts, delta_dists = [], [] |
|
|
| for rec in quad_records: |
| if layer in rec['delta_vert'] and layer in rec['delta_dist']: |
| dv = rec['delta_vert'][layer] |
| dd = rec['delta_dist'][layer] |
| norm_v, norm_d = np.linalg.norm(dv), np.linalg.norm(dd) |
| if norm_v > 1e-10 and norm_d > 1e-10: |
| per_sample.append(float(np.dot(dv, dd) / (norm_v * norm_d))) |
| delta_verts.append(dv) |
| delta_dists.append(dd) |
|
|
| if not per_sample: |
| continue |
|
|
| mean_dv = np.mean(delta_verts, axis=0) |
| mean_dd = np.mean(delta_dists, axis=0) |
| norm_mv, norm_md = np.linalg.norm(mean_dv), np.linalg.norm(mean_dd) |
| mean_alignment = float(np.dot(mean_dv, mean_dd) / (norm_mv * norm_md + 1e-10)) |
|
|
| rng = np.random.RandomState(42) |
| perm_alignments = [] |
| for _ in range(100): |
| shuffled_dd = [delta_dists[i] for i in rng.permutation(len(delta_dists))] |
| perm_cos = [] |
| for dv, dd in zip(delta_verts, shuffled_dd): |
| nv, nd = np.linalg.norm(dv), np.linalg.norm(dd) |
| if nv > 1e-10 and nd > 1e-10: |
| perm_cos.append(np.dot(dv, dd) / (nv * nd)) |
| perm_alignments.append(np.mean(perm_cos)) |
|
|
| results[layer] = { |
| 'per_sample_mean': float(np.mean(per_sample)), |
| 'per_sample_std': float(np.std(per_sample)), |
| 'mean_delta_alignment': mean_alignment, |
| 'permutation_mean': float(np.mean(perm_alignments)), |
| 'permutation_std': float(np.std(perm_alignments)), |
| 'n_samples': len(per_sample), |
| } |
| return results |
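
# Minimal worked example with 2-D toy vectors (an assumption for illustration,
# not real model deltas): two perfectly aligned vert/dist pairs give a
# per-sample mean of 1.0, while the permutation control drops toward chance
# (~0.5 here, since a 2-element shuffle is the identity half the time).
#
#   recs = [{'delta_vert': {0: np.array([1.0, 0.0])},
#            'delta_dist': {0: np.array([1.0, 0.0])}},
#           {'delta_vert': {0: np.array([0.0, 1.0])},
#            'delta_dist': {0: np.array([0.0, 1.0])}}]
#   out = compute_cross_group_alignment(recs, target_layers=[0])
#   out[0]['per_sample_mean']   # -> 1.0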
|
|
|
|
def compute_prediction_stats(records: List[dict], scale: str) -> dict:
    """Compute per-group and overall accuracy (orig / swap / both-correct)."""
    stats = {'scale': scale}
| total_correct_orig, total_correct_swap, total_both, total_n = 0, 0, 0, 0 |
|
|
| for group in GROUP_ORDER: |
| group_recs = [r for r in records if r['group'] == group] |
| n = len(group_recs) |
| c_orig = sum(1 for r in group_recs if r['is_correct_orig']) |
| c_swap = sum(1 for r in group_recs if r['is_correct_swap']) |
| c_both = sum(1 for r in group_recs if r['is_correct_orig'] and r['is_correct_swap']) |
| stats[f'{group}_n'] = n |
| stats[f'{group}_acc_orig'] = c_orig / n if n > 0 else 0 |
| stats[f'{group}_acc_swap'] = c_swap / n if n > 0 else 0 |
| stats[f'{group}_acc_both'] = c_both / n if n > 0 else 0 |
| total_correct_orig += c_orig |
| total_correct_swap += c_swap |
| total_both += c_both |
| total_n += n |
|
|
| stats['overall_acc_orig'] = total_correct_orig / total_n if total_n > 0 else 0 |
| stats['overall_acc_swap'] = total_correct_swap / total_n if total_n > 0 else 0 |
| stats['overall_acc_both'] = total_both / total_n if total_n > 0 else 0 |
| stats['overall_n'] = total_n |
| return stats |
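
# Shape of the returned stats dict (group names and numbers are illustrative;
# the real keys come from GROUP_ORDER):
#
#   {'scale': '7b',
#    'horizontal_n': 200, 'horizontal_acc_orig': 0.91,
#    'horizontal_acc_swap': 0.88, 'horizontal_acc_both': 0.85, ...,
#    'overall_acc_orig': 0.89, 'overall_acc_swap': 0.86,
#    'overall_acc_both': 0.82, 'overall_n': 600}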
|
|
|
|
| |
| |
| |
|
|
def get_representative_layers(all_layers, n=5):
    """Pick up to n evenly spaced layers from all_layers (endpoints included)."""
| if len(all_layers) <= n: |
| return list(all_layers) |
| indices = np.linspace(0, len(all_layers) - 1, n, dtype=int) |
| return [all_layers[i] for i in indices] |
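
# Example: get_representative_layers(list(range(32)), n=5) -> [0, 7, 15, 23, 31]
# (np.linspace(0, 31, 5) = [0, 7.75, 15.5, 23.25, 31], truncated to int).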
|
|
|
|
| def save_scale_results( |
| scale, swap_records, quad_records, |
| within_cat_consistency, sign_corrected_consistency, |
| cross_alignment, pred_stats, target_layers, |
| category_validity, delta_heatmaps, |
| output_dir, both_correct_tag="all_pairs", |
| save_alignment=True, |
| delta_norms=None, |
| ): |
| """Save all per-scale results to disk. |
| |
| Args: |
| save_alignment: If False, skip writing cross_alignment_{scale}.json. |
| Set False during Phase A save; call save_cross_alignment() |
| separately after Phase B completes. |
| """ |
| csv_dir = os.path.join(output_dir, 'csv') |
| json_dir = os.path.join(output_dir, 'json') |
| os.makedirs(csv_dir, exist_ok=True) |
| os.makedirs(json_dir, exist_ok=True) |
|
|
| |
| pred_rows = [] |
| for r in swap_records: |
| pred_rows.append({ |
| 'index': r['index'], 'group': r['group'], 'category': r['category'], |
| 'pred_orig': r['pred_orig'], 'pred_swap': r['pred_swap'], |
| 'is_correct_orig': r['is_correct_orig'], 'is_correct_swap': r['is_correct_swap'], |
| }) |
| pd.DataFrame(pred_rows).to_csv( |
| os.path.join(csv_dir, f'predictions_{scale}_{both_correct_tag}.csv'), index=False) |
|
|
| |
| wc_data = {} |
| for (cat, layer), vals in within_cat_consistency.items(): |
| wc_data[f'{cat}_L{layer}'] = vals |
| with open(os.path.join(json_dir, f'within_cat_consistency_{scale}_{both_correct_tag}.json'), 'w') as f: |
| json.dump(wc_data, f, indent=2) |
|
|
| |
| sc_data = {} |
| for (group, layer), vals in sign_corrected_consistency.items(): |
| sc_data[f'{group}_L{layer}'] = vals |
| with open(os.path.join(json_dir, f'sign_corrected_consistency_{scale}_{both_correct_tag}.json'), 'w') as f: |
| json.dump(sc_data, f, indent=2) |
|
|
| |
| if save_alignment: |
| alignment_data = {} |
| for layer, vals in cross_alignment.items(): |
| alignment_data[f'L{layer}'] = vals |
| with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f: |
| json.dump(alignment_data, f, indent=2) |
|
|
| |
| with open(os.path.join(json_dir, f'pred_stats_{scale}.json'), 'w') as f: |
| json.dump(pred_stats, f, indent=2) |
|
|
| |
| with open(os.path.join(json_dir, f'category_validity_{scale}.json'), 'w') as f: |
| json.dump(category_validity, f, indent=2) |
|
|
| |
| for layer, df in delta_heatmaps.items(): |
| if df is not None: |
| df.to_csv(os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{both_correct_tag}.csv')) |
|
|
| |
| if delta_norms: |
| for layer, df in delta_norms.items(): |
| if df is not None: |
| df.to_csv(os.path.join(csv_dir, f'delta_norm_{scale}_L{layer}_{both_correct_tag}.csv')) |
|
|
|
|
| logger.info(f"Saved results for scale={scale} ({both_correct_tag}) to {output_dir}") |
|
|
|
|
| def save_vectors_npz(scale, swap_records, target_layers, output_dir): |
| """Save swap-pair vectors with correctness metadata to NPZ (Phase A result). |
| |
| This enables post-hoc filtering (both_correct, all_with_validity) from saved data. |
| Cross-group vectors are saved separately by save_cross_group_npz() after Phase B. |
| """ |
| rep_layers = list(target_layers) |
| delta_data = {} |
| for layer in rep_layers: |
| groups_list, categories_list, vectors = [], [], [] |
| orig_vecs, swap_vecs, labels = [], [], [] |
| correct_orig_list, correct_swap_list, indices_list = [], [], [] |
| for r in swap_records: |
| if layer in r['delta']: |
| groups_list.append(r['group']) |
| categories_list.append(r['category']) |
| vectors.append(r['delta'][layer]) |
| correct_orig_list.append(r['is_correct_orig']) |
| correct_swap_list.append(r['is_correct_swap']) |
| indices_list.append(r['index']) |
| if layer in r['hs_orig'] and layer in r['hs_swap']: |
| orig_vecs.append(r['hs_orig'][layer]) |
| swap_vecs.append(r['hs_swap'][layer]) |
| labels.append(r['category']) |
| if vectors: |
| delta_data[f'delta_L{layer}'] = np.array(vectors) |
| delta_data[f'groups_L{layer}'] = np.array(groups_list) |
| delta_data[f'categories_L{layer}'] = np.array(categories_list) |
| delta_data[f'is_correct_orig_L{layer}'] = np.array(correct_orig_list) |
| delta_data[f'is_correct_swap_L{layer}'] = np.array(correct_swap_list) |
| delta_data[f'indices_L{layer}'] = np.array(indices_list) |
| if orig_vecs: |
| delta_data[f'orig_L{layer}'] = np.array(orig_vecs) |
| delta_data[f'swap_L{layer}'] = np.array(swap_vecs) |
| delta_data[f'labels_L{layer}'] = np.array(labels) |
|
|
| npz_dir = os.path.join(output_dir, 'npz') |
| os.makedirs(npz_dir, exist_ok=True) |
| np.savez_compressed(os.path.join(npz_dir, f'vectors_{scale}.npz'), **delta_data) |
| logger.info(f"Saved vectors NPZ with correctness metadata for scale={scale}") |
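
# Sketch of the post-hoc filtering this NPZ enables (path and layer are
# illustrative). The correctness masks are index-aligned with delta_L{layer}
# because both are built in a single pass over swap_records above:
#
#   data = np.load('saved_data/qwen_7b/npz/vectors_7b.npz', allow_pickle=True)
#   layer = 24
#   bc = (data[f'is_correct_orig_L{layer}'].astype(bool)
#         & data[f'is_correct_swap_L{layer}'].astype(bool))
#   bc_deltas = data[f'delta_L{layer}'][bc]        # both-correct pairs only
#   bc_cats = data[f'categories_L{layer}'][bc]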
|
|
|
|
| def save_cross_group_npz(scale, quad_records, target_layers, output_dir): |
| """Save cross-group delta vectors to NPZ (Phase B result).""" |
| if not quad_records: |
| return |
| rep_layers = list(target_layers) |
| cg_data = {} |
| for layer in rep_layers: |
| dverts, ddists = [], [] |
| for rec in quad_records: |
| if layer in rec['delta_vert'] and layer in rec['delta_dist']: |
| dverts.append(rec['delta_vert'][layer]) |
| ddists.append(rec['delta_dist'][layer]) |
| if dverts: |
| cg_data[f'delta_vert_L{layer}'] = np.array(dverts) |
| cg_data[f'delta_dist_L{layer}'] = np.array(ddists) |
| npz_dir = os.path.join(output_dir, 'npz') |
| os.makedirs(npz_dir, exist_ok=True) |
| np.savez_compressed(os.path.join(npz_dir, f'cross_group_vectors_{scale}.npz'), **cg_data) |
| logger.info(f"Saved cross-group vectors NPZ for scale={scale}") |
|
|
|
|
| def save_cross_alignment(scale, cross_alignment, output_dir): |
| """Save cross-group alignment data to JSON (Phase B result).""" |
| json_dir = os.path.join(output_dir, 'json') |
| os.makedirs(json_dir, exist_ok=True) |
| alignment_data = {f'L{layer}': vals for layer, vals in cross_alignment.items()} |
| with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f: |
| json.dump(alignment_data, f, indent=2) |
| logger.info(f"Saved cross-alignment JSON for scale={scale}") |
|
|
|
|
def load_scale_consistency(output_dir, scale, tag='all_pairs'):
    """Load sign-corrected consistency keyed by (group, layer) from saved JSON."""
| path = os.path.join(output_dir, 'json', f'sign_corrected_consistency_{scale}_{tag}.json') |
| if not os.path.exists(path): |
| return {} |
| with open(path) as f: |
| raw = json.load(f) |
| result = {} |
| for key, vals in raw.items(): |
| parts = key.rsplit('_L', 1) |
| if len(parts) == 2: |
| result[(parts[0], int(parts[1]))] = vals |
| return result |
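
# Key round-trip example: save_scale_results() writes '{group}_L{layer}' keys,
# and rsplit('_L', 1) recovers both parts even if the group name contains '_':
#   'vertical_L12'.rsplit('_L', 1) -> ['vertical', '12'] -> ('vertical', 12)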
|
|
|
|
def load_within_cat_consistency(output_dir, scale, tag='all_pairs'):
    """Load within-category consistency keyed by (category, layer) from saved JSON."""
    path = os.path.join(output_dir, 'json', f'within_cat_consistency_{scale}_{tag}.json')
| if not os.path.exists(path): |
| return {} |
| with open(path) as f: |
| raw = json.load(f) |
| result = {} |
| for key, vals in raw.items(): |
| parts = key.rsplit('_L', 1) |
| if len(parts) == 2: |
| result[(parts[0], int(parts[1]))] = vals |
| return result |
|
|
|
|
def load_scale_alignment(output_dir, scale):
    """Load cross-group alignment keyed by integer layer index from saved JSON."""
    path = os.path.join(output_dir, 'json', f'cross_alignment_{scale}.json')
| if not os.path.exists(path): |
| return {} |
| with open(path) as f: |
| raw = json.load(f) |
| result = {} |
| for key, vals in raw.items(): |
| result[int(key.replace('L', ''))] = vals |
| return result |
|
|
|
|
def load_delta_heatmaps(output_dir, scale, tag='all_pairs'):
    """Load per-layer delta-similarity DataFrames from saved CSVs, keyed by layer index."""
    import glob as glob_mod
    pattern = os.path.join(output_dir, 'csv', f'delta_similarity_{scale}_L*_{tag}.csv')
| files = glob_mod.glob(pattern) |
| result = {} |
| for fpath in files: |
| basename = os.path.basename(fpath) |
| |
| part = basename.replace(f'delta_similarity_{scale}_L', '').replace(f'_{tag}.csv', '') |
| try: |
| layer = int(part) |
| except ValueError: |
| continue |
| result[layer] = pd.read_csv(fpath, index_col=0) |
| return result |
|
|
|
|
| |
| |
| |
|
|
| def plot_within_cat_consistency_trajectory(within_cat, scale, model_type, save_path): |
| """Plot within-category delta consistency across layers.""" |
| fig, ax = plt.subplots(figsize=(12, 6)) |
| cat_colors = CAT_COLORS |
| for cat in CATEGORY_ORDER: |
| layers, vals = [], [] |
| for (c, l), v in sorted(within_cat.items(), key=lambda x: x[0][1]): |
| if c == cat: |
| layers.append(l) |
| vals.append(v['mean']) |
| if layers: |
| ax.plot(layers, vals, '-o', color=cat_colors[cat], label=cat, linewidth=2, markersize=3) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Within-Category Consistency') |
| ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency', fontweight='bold') |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| def plot_sign_corrected_consistency_trajectory(sign_corrected, scale, model_type, save_path): |
| """Plot sign-corrected group consistency across layers.""" |
| fig, ax = plt.subplots(figsize=(12, 6)) |
| colors = GROUP_COLORS |
| for group in GROUP_ORDER: |
| layers, vals = [], [] |
| for (g, l), v in sorted(sign_corrected.items(), key=lambda x: x[0][1]): |
| if g == group: |
| layers.append(l) |
| vals.append(v['mean']) |
| if layers: |
| ax.plot(layers, vals, '-o', color=colors[group], label=group, linewidth=2, markersize=3) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Sign-Corrected Consistency') |
| ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency', fontweight='bold') |
| ax.legend(fontsize=11) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
def plot_cross_group_alignment_trajectory(cross_alignment, scale, model_type, save_path):
    """Plot per-layer cross-group alignment against a +/-2-sigma permutation band."""
    fig, ax = plt.subplots(figsize=(12, 6))
| layers = sorted(cross_alignment.keys()) |
| actual = [cross_alignment[l]['per_sample_mean'] for l in layers] |
| mean_delta = [cross_alignment[l]['mean_delta_alignment'] for l in layers] |
| perm_mean = [cross_alignment[l]['permutation_mean'] for l in layers] |
| perm_std = [cross_alignment[l]['permutation_std'] for l in layers] |
|
|
| ax.plot(layers, actual, '-o', color='#d62728', label='cos(d_vert, d_dist) per-sample mean', |
| linewidth=2.5, markersize=3) |
| ax.plot(layers, mean_delta, '--s', color='#e377c2', label='cos(mean_d_vert, mean_d_dist)', |
| linewidth=1.5, markersize=3) |
| ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5) |
| ax.fill_between(layers, |
| [m - 2*s for m, s in zip(perm_mean, perm_std)], |
| [m + 2*s for m, s in zip(perm_mean, perm_std)], |
| alpha=0.2, color='gray') |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Cosine Alignment') |
| ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)', fontweight='bold') |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| |
|
|
| def plot_delta_heatmap(sim_df, title, save_path): |
| """Plot delta-based similarity heatmap.""" |
| plt.figure(figsize=(10, 8)) |
| available_order = [c for c in CATEGORY_ORDER if c in sim_df.index] |
| sim_df_ordered = sim_df.loc[available_order, available_order] |
|
|
| annot = sim_df_ordered.round(4).astype(str) |
| sns.heatmap(sim_df_ordered, annot=annot, fmt='', cmap='RdBu_r', |
| center=0, vmin=-1, vmax=1, square=True, linewidths=0.5, |
| cbar_kws={'label': 'Cosine Similarity'}) |
| plt.title(title, fontsize=14, fontweight='bold') |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved delta heatmap: {save_path}") |
|
|
|
|
| |
|
|
| def plot_pred_stats_bars(all_pred_stats, model_type, save_path): |
| """Bar chart: per-group accuracy (orig/swap/both) across scales.""" |
| fig, axes = plt.subplots(1, len(GROUP_ORDER), figsize=(7 * len(GROUP_ORDER), 6)) |
| if len(GROUP_ORDER) == 1: |
| axes = [axes] |
|
|
| available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)] |
    if not available:
        # Fall back to first-seen order when no scale appears in SCALE_ORDER.
        seen = []
| for d in all_pred_stats: |
| if d['scale'] not in seen: |
| seen.append(d['scale']) |
| available = seen |
|
|
| for idx, group in enumerate(GROUP_ORDER): |
| ax = axes[idx] |
| x = np.arange(3) |
| width = 0.8 / len(available) |
| for i, scale in enumerate(available): |
| entry = next((d for d in all_pred_stats if d['scale'] == scale), None) |
| if entry is None: |
| continue |
| vals = [entry.get(f'{group}_acc_orig', 0), |
| entry.get(f'{group}_acc_swap', 0), |
| entry.get(f'{group}_acc_both', 0)] |
| offset = (i - len(available) / 2 + 0.5) * width |
| color = SCALE_COLORS.get(scale, 'gray') |
| ax.bar(x + offset, vals, width, label=scale, color=color) |
| ax.set_xticks(x) |
| ax.set_xticklabels(['orig', 'swap', 'both']) |
| ax.set_ylabel('Accuracy') |
| ax.set_title(group, fontweight='bold') |
| ax.legend(fontsize=7) |
| ax.set_ylim(0, 1.1) |
| ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) |
| ax.grid(True, alpha=0.3, axis='y') |
|
|
| fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', fontsize=15, fontweight='bold', y=1.02) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| def plot_pred_stats_trajectory(all_pred_stats, model_type, save_path): |
| """Line plot: acc_both trajectory across scales per group.""" |
| fig, ax = plt.subplots(figsize=(10, 6)) |
| available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)] |
| if not available: |
| seen = [] |
| for d in all_pred_stats: |
| if d['scale'] not in seen: |
| seen.append(d['scale']) |
| available = seen |
| colors = GROUP_COLORS |
|
|
| for group in GROUP_ORDER: |
| x_vals, y_vals = [], [] |
| for i, scale in enumerate(available): |
| entry = next((d for d in all_pred_stats if d['scale'] == scale), None) |
| if entry: |
| x_vals.append(i) |
| y_vals.append(entry.get(f'{group}_acc_both', 0)) |
| if x_vals: |
| ax.plot(x_vals, y_vals, '-o', color=colors[group], label=group, linewidth=2.5, markersize=6) |
|
|
| ax.set_xticks(range(len(available))) |
| ax.set_xticklabels(available) |
| ax.set_xlabel('Scale') |
| ax.set_ylabel('Accuracy (both correct)') |
| ax.set_title(f'{model_type.upper()} - Both-Correct Accuracy Across Scales', fontweight='bold') |
| ax.legend(fontsize=10) |
| ax.set_ylim(0, 1.05) |
| ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
def plot_pca_embeddings(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
    """Per-layer 2D PCA panels: orig/swap embeddings by category, plus delta
    vectors colored by group and by category. With bc_only=True, restricts to
    pairs whose original and swapped predictions were both correct."""
    data = np.load(vectors_npz_path, allow_pickle=True)
| layer_keys = [k for k in data.files if k.startswith('orig_L')] |
| layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys]) |
|
|
| cat_colors = CAT_COLORS |
|
|
| for layer in layers: |
| orig = data.get(f'orig_L{layer}') |
| swap = data.get(f'swap_L{layer}') |
| labels = data.get(f'labels_L{layer}') |
| deltas = data.get(f'delta_L{layer}') |
| cats = data.get(f'categories_L{layer}') |
| groups = data.get(f'groups_L{layer}') |
|
|
| if bc_only and deltas is not None: |
| co = data.get(f'is_correct_orig_L{layer}') |
| cs = data.get(f'is_correct_swap_L{layer}') |
| if co is not None and cs is not None: |
| bc_mask = co.astype(bool) & cs.astype(bool) |
| if orig is not None and len(orig) == len(bc_mask): |
| orig = orig[bc_mask] |
| swap = swap[bc_mask] |
| labels = labels[bc_mask] if labels is not None else None |
| if len(deltas) == len(bc_mask): |
| deltas = deltas[bc_mask] |
| cats = cats[bc_mask] if cats is not None else None |
| groups = groups[bc_mask] if groups is not None else None |
|
|
| if orig is None or swap is None or len(orig) == 0: |
| continue |
|
|
| fig, axes = plt.subplots(1, 3, figsize=(24, 7)) |
|
|
| pca = PCA(n_components=2) |
| all_vecs = np.vstack([orig, swap]) |
| all_pca = pca.fit_transform(all_vecs) |
| orig_pca = all_pca[:len(orig)] |
| swap_pca = all_pca[len(orig):] |
|
|
| ax = axes[0] |
| for cat in CATEGORY_ORDER: |
| mask = np.array([str(l) == cat for l in labels]) |
| if mask.any(): |
| ax.scatter(orig_pca[mask, 0], orig_pca[mask, 1], |
| c=cat_colors.get(cat, 'gray'), label=f'{cat} (orig)', |
| alpha=0.5, s=15, marker='o') |
| ax.scatter(swap_pca[mask, 0], swap_pca[mask, 1], |
| c=cat_colors.get(cat, 'gray'), |
| alpha=0.5, s=15, marker='x') |
| ax.set_title('Embeddings by Category\n(o=orig, x=swap)', fontsize=11) |
| ax.legend(fontsize=7, ncol=2) |
| ax.grid(True, alpha=0.2) |
|
|
| ax = axes[1] |
| if deltas is not None and cats is not None: |
| pca_d = PCA(n_components=2) |
| delta_pca = pca_d.fit_transform(deltas) |
| group_colors = GROUP_COLORS |
| if groups is not None: |
| for group in GROUP_ORDER: |
| mask = np.array([str(g) == group for g in groups]) |
| if mask.any(): |
| ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1], |
| c=group_colors.get(group, 'gray'), label=group, alpha=0.5, s=15) |
| ax.set_title('Delta Vectors by Group', fontsize=11) |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.2) |
|
|
| ax = axes[2] |
| if deltas is not None and cats is not None: |
| for cat in CATEGORY_ORDER: |
| mask = np.array([str(c) == cat for c in cats]) |
| if mask.any(): |
| ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1], |
| c=cat_colors.get(cat, 'gray'), label=cat, alpha=0.5, s=15) |
| ax.set_title('Delta Vectors by Category', fontsize=11) |
| ax.legend(fontsize=8, ncol=2) |
| ax.grid(True, alpha=0.2) |
|
|
| fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - PCA', fontweight='bold') |
| plt.tight_layout() |
| plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, bbox_inches='tight') |
| plt.close() |
|
|
| logger.info(f"Saved PCA plots to {save_dir}") |
|
|
|
|
| def plot_pca_3d(vectors_npz_path, scale, model_type, save_dir, bc_only=False): |
| """Generate single-panel 3D PCA figure (Delta Vectors by Category) per layer. |
| |
| Figure style follows pca_new.py: larger fonts, single panel, PC3 via fig.text. |
| """ |
| |
| _TITLE_FS = 22 |
| _AXIS_FS = 18 |
| _TICK_FS = 14 |
| _LEGEND_FS = 16 |
| _SUPTITLE_FS = 24 |
| _SCATTER_S = 30 |
|
|
    def _normalise_label(raw):
        # Some saved runs store the category as 'under'; map it to 'below' so it
        # matches CATEGORY_ORDER and CAT_COLORS.
        return 'below' if str(raw) == 'under' else str(raw)
|
|
| def scatter3d(ax, xs, ys, zs, c, label, alpha=0.55, s=_SCATTER_S, marker='o'): |
| ax.scatter(xs, ys, zs, c=c, label=label, alpha=alpha, s=s, marker=marker) |
|
|
| data = np.load(vectors_npz_path, allow_pickle=True) |
| layer_keys = [k for k in data.files if k.startswith('orig_L')] |
| layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys]) |
|
|
| if not layers: |
| logger.info(f" [pca_3d] No orig_L* keys found in {vectors_npz_path}") |
| return |
|
|
| os.makedirs(save_dir, exist_ok=True) |
|
|
| for layer in layers: |
| deltas = data.get(f'delta_L{layer}') |
| cats = data.get(f'categories_L{layer}') |
|
|
| |
| if bc_only and deltas is not None: |
| co = data.get(f'is_correct_orig_L{layer}') |
| cs = data.get(f'is_correct_swap_L{layer}') |
| if co is not None and cs is not None: |
| bc_mask = co.astype(bool) & cs.astype(bool) |
| if len(deltas) == len(bc_mask): |
| deltas = deltas[bc_mask] |
| cats = cats[bc_mask] if cats is not None else None |
|
|
        has_delta = (deltas is not None and len(deltas) >= 3)
        if not has_delta:
            logger.info(f" [pca_3d] Layer {layer}: fewer than 3 delta vectors (PCA needs >= 3 samples), skipping")
| continue |
|
|
| |
| pca_d = PCA(n_components=3) |
| delta_proj = pca_d.fit_transform(deltas) |
| ev = pca_d.explained_variance_ratio_ |
|
|
| |
| fig = plt.figure(figsize=(13, 10)) |
| ax = fig.add_subplot(111, projection='3d') |
|
|
| if cats is not None: |
| for cat in CATEGORY_ORDER: |
| mask = np.array([_normalise_label(c) == cat for c in cats]) |
| if not mask.any(): |
| continue |
| scatter3d(ax, |
| delta_proj[mask, 0], |
| delta_proj[mask, 1], |
| delta_proj[mask, 2], |
| c=CAT_COLORS.get(cat, 'gray'), |
| label=cat) |
|
|
| ax.set_title('Delta Vectors by Category', fontsize=_TITLE_FS, pad=12) |
| ax.set_xlabel(f'PC1 ({ev[0]:.1%})', fontsize=_AXIS_FS, labelpad=25) |
| ax.set_ylabel(f'PC2 ({ev[1]:.1%})', fontsize=_AXIS_FS, labelpad=25) |
| ax.set_zlabel('') |
| ax.tick_params(axis='both', labelsize=_TICK_FS) |
| ax.legend(fontsize=_LEGEND_FS, ncol=2, loc='upper right') |
|
|
| |
| fig.canvas.draw() |
| ax_pos = ax.get_position() |
|
|
| pc3_x = ax_pos.x1 + 0.04 |
| fig.text( |
| pc3_x, |
| (ax_pos.y0 + ax_pos.y1) / 2, |
| f'PC3 ({ev[2]:.1%})', |
| fontsize=_AXIS_FS, |
| va='center', ha='center', |
| rotation=90, |
| ) |
|
|
| ax_cx = (ax_pos.x0 + ax_pos.x1) / 2 |
| fig.suptitle( |
            f'{model_type.upper()} ({scale}) - L{layer}',
| fontsize=_SUPTITLE_FS, fontweight='bold', |
| x=ax_cx, y=1.01, |
| ) |
|
|
| out_path = os.path.join(save_dir, f'pca_{scale}_L{layer}.png') |
| plt.savefig(out_path, dpi=200, bbox_inches='tight', pad_inches=0.5) |
| plt.close() |
|
|
| logger.info(f"Saved 3D PCA plots to {save_dir}") |
|
|
|
|
| |
|
|
def plot_cross_scale_consistency(all_consistency, model_type, save_path, title_prefix='Sign-Corrected'):
    """One panel per group: consistency-vs-layer curves overlaid across scales."""
    fig, axes = plt.subplots(1, 3, figsize=(21, 6))
|
|
| for idx, group in enumerate(GROUP_ORDER): |
| ax = axes[idx] |
| for scale in SCALE_ORDER: |
| if scale not in all_consistency: |
| continue |
| consistency = all_consistency[scale] |
| layers, vals = [], [] |
| for (g, l), v in sorted(consistency.items(), key=lambda x: x[0][1]): |
| if g == group: |
| layers.append(l) |
| vals.append(v['mean']) |
| if layers: |
| ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), |
| label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Consistency') |
| ax.set_title(group, fontweight='bold') |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.3) |
|
|
| fig.suptitle(f'{model_type.upper()} - {title_prefix} Consistency Across Scales', |
| fontsize=15, fontweight='bold', y=1.02) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| def plot_cross_scale_within_cat_consistency(all_within_cat, model_type, save_path): |
| """Cross-scale within-category consistency.""" |
| fig, axes = plt.subplots(2, 3, figsize=(21, 12)) |
|
|
| for idx, cat in enumerate(CATEGORY_ORDER): |
| ax = axes[idx // 3][idx % 3] |
| for scale in SCALE_ORDER: |
| if scale not in all_within_cat: |
| continue |
| wc = all_within_cat[scale] |
| layers, vals = [], [] |
| for (c, l), v in sorted(wc.items(), key=lambda x: x[0][1]): |
| if c == cat: |
| layers.append(l) |
| vals.append(v['mean']) |
| if layers: |
| ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), |
| label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Consistency') |
| ax.set_title(cat, fontweight='bold') |
| ax.legend(fontsize=8) |
| ax.grid(True, alpha=0.3) |
|
|
| fig.suptitle(f'{model_type.upper()} - Within-Category Consistency Across Scales', |
| fontsize=15, fontweight='bold', y=1.02) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
def plot_cross_scale_alignment(all_alignment, model_type, save_path):
    """Overlay per-sample cross-group alignment curves across scales on one axis."""
    fig, ax = plt.subplots(figsize=(12, 6))
| for scale in SCALE_ORDER: |
| if scale not in all_alignment: |
| continue |
| alignment = all_alignment[scale] |
| layers = sorted(alignment.keys()) |
| vals = [alignment[l]['per_sample_mean'] for l in layers] |
| ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), |
| label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('cos(d_vert, d_dist)') |
| ax.set_title(f'{model_type.upper()} - Cross-Group Alignment Across Scales\n' |
| f'(High=entangled, Low=disentangled)', fontweight='bold') |
| ax.legend(fontsize=10) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| |
|
|
| def plot_delta_trajectory(all_delta_heatmaps, model_type, save_path): |
| """Cross-layer trajectory of delta-based similarities for key pairs.""" |
| pairs = [ |
| ('above', 'far', 'above-far'), ('below', 'close', 'below-close'), |
| ('left', 'right', 'left-right'), |
| ] |
| fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6)) |
| if len(pairs) == 1: |
| axes = [axes] |
|
|
| for idx, (cat1, cat2, label) in enumerate(pairs): |
| ax = axes[idx] |
| for scale in SCALE_ORDER: |
| if scale not in all_delta_heatmaps: |
| continue |
| hm = all_delta_heatmaps[scale] |
| layers = sorted(hm.keys()) |
| vals = [] |
| valid_layers = [] |
| for l in layers: |
| df = hm[l] |
| if df is not None and cat1 in df.index and cat2 in df.columns: |
| valid_layers.append(l) |
| vals.append(df.loc[cat1, cat2]) |
| if valid_layers: |
| ax.plot(valid_layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'), |
| label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Delta Cosine Similarity') |
| ax.set_title(label, fontweight='bold') |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.3) |
| ax.axhline(y=0, color='gray', linestyle='--', alpha=0.5) |
|
|
| fig.suptitle(f'{model_type.upper()} - Delta-Based Similarity Trajectory', |
| fontsize=15, fontweight='bold', y=1.02) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
def plot_summary_barplot(all_consistency, all_alignment, model_type, save_path):
    """Deepest-layer summary: per-group consistency bars and cross-group alignment bars."""
    available_scales = [s for s in SCALE_ORDER if s in all_consistency]
| if not available_scales: |
| return |
|
|
| sample_cons = all_consistency[available_scales[0]] |
| max_layer = max(l for (_, l) in sample_cons.keys()) |
|
|
| fig, axes = plt.subplots(1, 2, figsize=(16, 6)) |
|
|
| ax = axes[0] |
| x = np.arange(len(GROUP_ORDER)) |
| width = 0.8 / len(available_scales) |
| for i, scale in enumerate(available_scales): |
| cons = all_consistency[scale] |
| vals = [cons.get((g, max_layer), {}).get('mean', 0) for g in GROUP_ORDER] |
| offset = (i - len(available_scales) / 2 + 0.5) * width |
| ax.bar(x + offset, vals, width, |
| label=SCALE_DISPLAY_NAMES.get(scale, scale), |
| color=SCALE_COLORS.get(scale, 'gray')) |
| ax.set_xticks(x) |
| ax.set_xticklabels(GROUP_ORDER) |
| ax.set_ylabel('Sign-Corrected Consistency') |
| ax.set_title(f'Consistency at Layer {max_layer}', fontweight='bold') |
| ax.legend(fontsize=8) |
| ax.grid(True, alpha=0.3, axis='y') |
|
|
| ax = axes[1] |
| available_align = [s for s in available_scales if s in all_alignment] |
| if available_align: |
| vals = [all_alignment[s].get(max_layer, {}).get('per_sample_mean', 0) for s in available_align] |
| colors = [SCALE_COLORS.get(s, 'gray') for s in available_align] |
| ax.bar(range(len(vals)), vals, color=colors) |
| ax.set_xticks(range(len(vals))) |
| ax.set_xticklabels([SCALE_DISPLAY_NAMES.get(s, s) for s in available_align]) |
| ax.set_ylabel('cos(d_vert, d_dist)') |
| ax.set_title(f'Cross-Group Alignment at L{max_layer}\n(Lower=disentangled)', fontweight='bold') |
| ax.grid(True, alpha=0.3, axis='y') |
|
|
| fig.suptitle(f'{model_type.upper()} - Summary at Deepest Layer', fontsize=15, fontweight='bold', y=1.02) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {save_path}") |
|
|
|
|
| |
| |
| |
|
|
def process_scale(args, scale, swap_pairs, quads):
    """Run the full pipeline for one scale: Phase A (swap-pair extraction,
    analysis, saving, plots), then Phase B (cross-group extraction and
    alignment), which is skipped when --skip-phase-b is set or quads is empty.
    """
    # Resolve the extractor class (new configs) and model path for this scale.
    if args.model_type in MODEL_CONFIGS_NEW:
        cls_name, model_path = MODEL_CONFIGS_NEW[args.model_type][scale]
    else:
        model_path = MODEL_CONFIGS[args.model_type][scale]
        cls_name = None
|
|
| logger.info(f"\n{'='*60}") |
| logger.info(f"Processing {args.model_type} - {scale}" |
| + (f" [{cls_name}]" if cls_name else "")) |
| logger.info(f"Model path: {model_path}") |
| logger.info(f"{'='*60}") |
|
|
| extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device) |
| target_layers = extractor.target_layers |
|
|
| vlm_key = get_model_key(args.model_type, scale) |
| output_dir = os.path.join(args.output_dir, vlm_key) |
| plots_dir = os.path.join(output_dir, 'plots') |
| os.makedirs(plots_dir, exist_ok=True) |
|
|
| |
| logger.info("\n--- Phase A: Extracting swap pair features ---") |
| swap_records = extract_swap_features(extractor, swap_pairs, |
| max_samples_per_category=args.max_samples_per_category) |
|
|
| |
| logger.info("\n--- Phase C_A: Analysis (swap pairs) ---") |
|
|
| |
| category_validity = check_category_validity(swap_records, scale) |
| unreliable_cats = [c for c, v in category_validity.items() if not v['reliable']] |
| if unreliable_cats: |
| logger.warning(f" Unreliable categories: {unreliable_cats}") |
|
|
| |
| within_cat_all, sign_corrected_all = compute_delta_consistency(swap_records, target_layers) |
|
|
| |
| both_correct_records = filter_both_correct(swap_records) |
| logger.info(f" Both-correct pairs: {len(both_correct_records)}/{len(swap_records)}") |
| within_cat_bc, sign_corrected_bc = compute_delta_consistency(both_correct_records, target_layers) |
|
|
| pred_stats = compute_prediction_stats(swap_records, scale) |
|
|
| |
| delta_heatmaps_all = {} |
| delta_heatmaps_bc = {} |
| delta_norms_all = {} |
| delta_norms_bc = {} |
| for layer in target_layers: |
| delta_heatmaps_all[layer] = compute_delta_similarity_matrix(swap_records, layer) |
| delta_norms_all[layer] = compute_delta_norm_per_category(swap_records, layer) |
| if both_correct_records: |
| delta_heatmaps_bc[layer] = compute_delta_similarity_matrix(both_correct_records, layer) |
| delta_norms_bc[layer] = compute_delta_norm_per_category(both_correct_records, layer) |
|
|
|
|
| |
| max_layer = max(target_layers) |
| for group in GROUP_ORDER: |
| key = (group, max_layer) |
| if key in sign_corrected_all: |
| logger.info(f" Sign-corrected [{group}, L{max_layer}]: " |
| f"{sign_corrected_all[key]['mean']:.4f} +/- {sign_corrected_all[key]['std']:.4f}") |
| logger.info(f" Accuracy orig={pred_stats['overall_acc_orig']:.1%}, " |
| f"swap={pred_stats['overall_acc_swap']:.1%}, " |
| f"both={pred_stats['overall_acc_both']:.1%}") |
|
|
| |
| logger.info("\n--- Phase D_A: Saving Phase A results ---") |
|
|
| save_vectors_npz(scale, swap_records, target_layers, output_dir) |
|
|
| save_scale_results( |
| scale, swap_records, [], |
| within_cat_all, sign_corrected_all, |
| {}, pred_stats, target_layers, |
| category_validity, delta_heatmaps_all, |
| output_dir, both_correct_tag='all_pairs', |
| save_alignment=False, |
| delta_norms=delta_norms_all, |
| ) |
| if both_correct_records: |
| save_scale_results( |
| scale, both_correct_records, [], |
| within_cat_bc, sign_corrected_bc, |
| {}, pred_stats, target_layers, |
| category_validity, delta_heatmaps_bc, |
| output_dir, both_correct_tag='both_correct', |
| save_alignment=False, |
| delta_norms=delta_norms_bc, |
| ) |
|
|
| |
| if args.phase1_only: |
| logger.info("\n--- Phase E_A: Per-scale plots [SKIPPED: --phase1-only] ---") |
| else: |
| logger.info("\n--- Phase E_A: Per-scale plots (swap-pair data) ---") |
|
|
| for condition, wc_data, sc_data in [ |
| ('all', within_cat_all, sign_corrected_all), |
| ('both_correct', within_cat_bc, sign_corrected_bc), |
| ]: |
| if condition == 'both_correct' and not both_correct_records: |
| continue |
|
|
| cond_dir = os.path.join(plots_dir, condition) |
| os.makedirs(cond_dir, exist_ok=True) |
|
|
| wc_dir = os.path.join(cond_dir, 'within_cat_consistency') |
| sc_dir = os.path.join(cond_dir, 'sign_corrected') |
| os.makedirs(wc_dir, exist_ok=True) |
| os.makedirs(sc_dir, exist_ok=True) |
|
|
| plot_within_cat_consistency_trajectory( |
| wc_data, scale, args.model_type, |
| os.path.join(wc_dir, f'within_cat_consistency_{scale}.png')) |
|
|
| plot_sign_corrected_consistency_trajectory( |
| sc_data, scale, args.model_type, |
| os.path.join(sc_dir, f'sign_corrected_consistency_{scale}.png')) |
|
|
| |
| npz_path = os.path.join(output_dir, 'npz', f'vectors_{scale}.npz') |
| if os.path.exists(npz_path): |
| pca_dir = os.path.join(plots_dir, 'all', 'pca') |
| pca_3d_dir = os.path.join(plots_dir, 'all', 'pca_3d') |
| bc_pca_dir = os.path.join(plots_dir, 'both_correct', 'pca') |
| bc_pca_3d_dir = os.path.join(plots_dir, 'both_correct', 'pca_3d') |
| for d in (pca_dir, pca_3d_dir, bc_pca_dir, bc_pca_3d_dir): |
| os.makedirs(d, exist_ok=True) |
| plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir) |
| plot_pca_3d(npz_path, scale, args.model_type, pca_3d_dir) |
| plot_pca_embeddings(npz_path, scale, args.model_type, bc_pca_dir, bc_only=True) |
| plot_pca_3d(npz_path, scale, args.model_type, bc_pca_3d_dir, bc_only=True) |
|
|
| if pred_stats: |
| pred_plot_dir = os.path.join(plots_dir, 'all', 'pred_stats') |
| os.makedirs(pred_plot_dir, exist_ok=True) |
| plot_pred_stats_bars([pred_stats], args.model_type, |
| os.path.join(pred_plot_dir, f'pred_stats_{scale}.png')) |
|
|
| if pred_stats: |
| acc_dir = os.path.join(output_dir, 'accuracy') |
| logger.info(f"\n--- Accuracy Charts [{scale}] ---") |
| run_accuracy_charts([pred_stats], {scale: category_validity}, args.model_type, acc_dir) |
|
|
| logger.info(f"\n--- All-Layer Heatmaps [{scale}] ---") |
| run_all_layer_heatmaps(output_dir, args.model_type, [scale]) |
| logger.info(f"\n--- All-Layer PCA [{scale}] ---") |
| run_all_layer_pca(output_dir, args.model_type, [scale]) |
|
|
| |
| skip_b = getattr(args, 'skip_phase_b', False) |
| if skip_b or not quads: |
| if skip_b: |
| logger.info("\n--- Phase B: Cross-group extraction [SKIPPED: --skip-phase-b] ---") |
| quad_records = [] |
| cross_alignment = {} |
| else: |
| logger.info("\n--- Phase B: Extracting cross-group features ---") |
| quad_records = extract_cross_group_features(extractor, quads) |
|
|
| |
| logger.info("\n--- Phase C_B: Analysis (cross-group) ---") |
| cross_alignment = compute_cross_group_alignment(quad_records, target_layers) |
|
|
| if max_layer in cross_alignment: |
| ca = cross_alignment[max_layer] |
| logger.info(f" Cross-group alignment L{max_layer}: " |
| f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})") |
|
|
| |
| logger.info("\n--- Phase D_B: Saving Phase B results ---") |
| save_cross_group_npz(scale, quad_records, target_layers, output_dir) |
| save_cross_alignment(scale, cross_alignment, output_dir) |
|
|
| |
| if args.phase1_only: |
| logger.info("\n--- Phase E_B: Cross-alignment plots [SKIPPED: --phase1-only] ---") |
| else: |
| logger.info("\n--- Phase E_B: Per-scale plots (cross-group data) ---") |
| for condition in ['all', 'both_correct']: |
| if condition == 'both_correct' and not both_correct_records: |
| continue |
| ca_dir = os.path.join(plots_dir, condition, 'cross_alignment') |
| os.makedirs(ca_dir, exist_ok=True) |
| plot_cross_group_alignment_trajectory( |
| cross_alignment, scale, args.model_type, |
| os.path.join(ca_dir, f'cross_alignment_{scale}.png')) |
|
|
| |
| del swap_records, quad_records, both_correct_records |
| extractor.cleanup() |
|
|
| logger.info(f"\n Scale {scale} complete.") |
|
|
|
|
| |
| |
| |
|
|
def _acc_plot_group_bars(pred_stats, model_type, ax_list):
    """Grouped bars of orig/swap/both accuracy per group, one bar cluster per scale."""
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
| x = np.arange(3) |
| width = 0.8 / max(len(available), 1) |
| for idx, group in enumerate(GROUP_ORDER): |
| ax = ax_list[idx] |
| for i, scale in enumerate(available): |
| entry = next((d for d in pred_stats if d['scale'] == scale), None) |
| if entry is None: |
| continue |
| vals = [entry.get(f'{group}_acc_orig', 0), |
| entry.get(f'{group}_acc_swap', 0), |
| entry.get(f'{group}_acc_both', 0)] |
| offset = (i - len(available) / 2 + 0.5) * width |
| ax.bar(x + offset, vals, width, label=scale, |
| color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85) |
| ax.set_xticks(x) |
| ax.set_xticklabels(['orig', 'swap', 'both'], fontsize=10) |
| ax.set_ylabel('Accuracy', fontsize=9) |
| ax.set_title(group.capitalize(), fontweight='bold', fontsize=11, |
| color=GROUP_COLORS.get(group, 'black')) |
| ax.legend(fontsize=7, ncol=2) |
| ax.set_ylim(0, 1.15) |
| ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) |
| ax.grid(True, alpha=0.3, axis='y') |
|
|
|
|
def _acc_plot_both_trajectory(pred_stats, model_type, ax):
    """Both-correct accuracy across scales: one line per group plus an overall line."""
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
| x_ticks = range(len(available)) |
| for group in GROUP_ORDER: |
| y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(f'{group}_acc_both', 0) |
| for s in available] |
| ax.plot(x_ticks, y_vals, '-o', color=GROUP_COLORS.get(group, 'gray'), |
| label=group, linewidth=2.5, markersize=7) |
| y_overall = [next((d for d in pred_stats if d['scale'] == s), {}).get('overall_acc_both', 0) |
| for s in available] |
| ax.plot(x_ticks, y_overall, '--s', color='black', label='overall', |
| linewidth=2, markersize=6, alpha=0.7) |
| ax.set_xticks(list(x_ticks)) |
| ax.set_xticklabels(available, fontsize=9) |
| ax.set_xlabel('Scale', fontsize=9) |
| ax.set_ylabel('Accuracy (both correct)', fontsize=9) |
| ax.set_title('Both-Correct Accuracy Trajectory', fontweight='bold', fontsize=11) |
| ax.legend(fontsize=9) |
| ax.set_ylim(0, 1.05) |
| ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) |
| ax.grid(True, alpha=0.3) |
|
|
|
|
def _acc_plot_overall_trajectory(pred_stats, model_type, ax):
    """Overall orig/swap/both accuracy trajectories across scales."""
    available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
| x_ticks = range(len(available)) |
| for metric, label, ls in [ |
| ('overall_acc_orig', 'orig', '-o'), |
| ('overall_acc_swap', 'swap', '-s'), |
| ('overall_acc_both', 'both', '-^'), |
| ]: |
| y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(metric, 0) |
| for s in available] |
| ax.plot(x_ticks, y_vals, ls, label=label, linewidth=2.2, markersize=6) |
| ax.set_xticks(list(x_ticks)) |
| ax.set_xticklabels(available, fontsize=9) |
| ax.set_xlabel('Scale', fontsize=9) |
| ax.set_ylabel('Overall Accuracy', fontsize=9) |
| ax.set_title('Overall Accuracy Trajectory', fontweight='bold', fontsize=11) |
| ax.legend(fontsize=9) |
| ax.set_ylim(0, 1.05) |
| ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) |
| ax.grid(True, alpha=0.3) |
|
|
|
|
def _acc_plot_category_accuracy(cat_validity, model_type, ax_orig, ax_swap, pred_stats=None):
    """Per-category accuracy bars (orig and swap panels) across scales, marking
    categories flagged unreliable at the largest available scale."""
    available = [s for s in SCALE_ORDER if s in cat_validity]
| cats_with_overall = CATEGORY_ORDER + ['overall'] |
| x = np.arange(len(cats_with_overall)) |
| width = 0.8 / max(len(available), 1) |
| overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'} |
| for ax, metric, title in [ |
| (ax_orig, 'acc_orig', 'Per-Category Accuracy (orig)'), |
| (ax_swap, 'acc_swap', 'Per-Category Accuracy (swap)'), |
| ]: |
| for i, scale in enumerate(available): |
| cv = cat_validity[scale] |
| vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER] |
| if pred_stats is not None: |
| entry = next((d for d in pred_stats if d['scale'] == scale), None) |
| vals.append(entry.get(overall_key[metric], 0) if entry else 0) |
| else: |
| vals.append(0) |
| offset = (i - len(available) / 2 + 0.5) * width |
| ax.bar(x + offset, vals, width, label=scale, |
| color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85) |
| for j, cat in enumerate(CATEGORY_ORDER): |
| ax.axvspan(j - 0.45, j + 0.45, color=CAT_COLORS.get(cat, 'gray'), alpha=0.06, linewidth=0) |
| ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', linewidth=1.2, linestyle=':', alpha=0.6) |
| ax.set_xticks(x) |
| ax.set_xticklabels(cats_with_overall, fontsize=9, rotation=15) |
| ax.set_ylabel('Accuracy', fontsize=9) |
| ax.set_title(title, fontweight='bold', fontsize=11) |
| ax.legend(fontsize=7, ncol=2) |
| ax.set_ylim(0, 1.15) |
| ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1) |
| ax.grid(True, alpha=0.3, axis='y') |
| if available: |
| last_cv = cat_validity[available[-1]] |
| for j, cat in enumerate(CATEGORY_ORDER): |
| if not last_cv.get(cat, {}).get('reliable', True): |
                ax.text(j, 1.08, '!', ha='center', va='center',
                        fontsize=9, color='red', fontweight='bold')
|
|
|
|
def _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=None):
    """Write one figure per scale: per-category orig/swap accuracy bars with value labels."""
    cats_with_overall = CATEGORY_ORDER + ['overall']
| overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'} |
| for scale in sorted(cat_validity.keys(), |
| key=lambda s: SCALE_ORDER.index(s) if s in SCALE_ORDER else 99): |
| cv = cat_validity[scale] |
| ps_entry = next((d for d in pred_stats if d['scale'] == scale), None) if pred_stats else None |
| fig, axes = plt.subplots(1, 2, figsize=(16, 5)) |
| x = np.arange(len(cats_with_overall)) |
| width = 0.55 |
| for ax, metric, title in [ |
| (axes[0], 'acc_orig', f'acc_orig ({scale})'), |
| (axes[1], 'acc_swap', f'acc_swap ({scale})'), |
| ]: |
| vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER] |
| overall_val = ps_entry.get(overall_key[metric], 0) if ps_entry else 0 |
| vals.append(overall_val) |
| colors = [CAT_COLORS.get(cat, 'gray') for cat in CATEGORY_ORDER] + ['#333333'] |
| bars = ax.bar(x, vals, width, color=colors, alpha=0.85, edgecolor='white') |
| ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', |
| linewidth=1.2, linestyle=':', alpha=0.6) |
| ax.set_xticks(x) |
| ax.set_xticklabels(cats_with_overall, fontsize=10) |
| ax.set_ylabel('Accuracy', fontsize=10) |
| ax.set_title(title, fontweight='bold', fontsize=12) |
| ax.set_ylim(0, 1.15) |
| ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5) |
| ax.grid(True, alpha=0.3, axis='y') |
| for bar, cat in zip(bars, cats_with_overall): |
| reliable = cv.get(cat, {}).get('reliable', True) if cat != 'overall' else True |
| h = bar.get_height() |
| ax.text(bar.get_x() + bar.get_width() / 2, h + 0.02, |
                        f'{h:.2f}' + ('' if reliable else ' !'),
| ha='center', va='bottom', fontsize=8, |
| color='red' if not reliable else 'black') |
| fig.suptitle(f'{model_type.upper()} - Category Accuracy ({scale})', |
| fontsize=13, fontweight='bold') |
| plt.tight_layout() |
| out = os.path.join(save_dir, f'category_accuracy_{scale}.png') |
| plt.savefig(out, dpi=200, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {out}") |
|
|
|
|
| def run_accuracy_charts(pred_stats, cat_validity, model_type, save_dir): |
| """Generate all accuracy chart plots into save_dir.""" |
| os.makedirs(save_dir, exist_ok=True) |
|
|
| |
| fig, axes = plt.subplots(1, 3, figsize=(21, 6)) |
| _acc_plot_group_bars(pred_stats, model_type, axes) |
| fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', |
| fontsize=15, fontweight='bold') |
| plt.tight_layout() |
| plt.savefig(os.path.join(save_dir, 'accuracy_group_bars.png'), dpi=200, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_group_bars.png')}") |
|
|
| |
| fig, axes = plt.subplots(1, 2, figsize=(16, 6)) |
| _acc_plot_both_trajectory(pred_stats, model_type, axes[0]) |
| _acc_plot_overall_trajectory(pred_stats, model_type, axes[1]) |
| fig.suptitle(f'{model_type.upper()} - Accuracy Trajectory Across Scales', |
| fontsize=14, fontweight='bold') |
| plt.tight_layout() |
| plt.savefig(os.path.join(save_dir, 'accuracy_trajectory.png'), dpi=200, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_trajectory.png')}") |
|
|
| if cat_validity: |
| |
| fig, axes = plt.subplots(1, 2, figsize=(20, 6)) |
| _acc_plot_category_accuracy(cat_validity, model_type, axes[0], axes[1], |
| pred_stats=pred_stats) |
| fig.suptitle(f'{model_type.upper()} - Per-Category Accuracy Across Scales', |
| fontsize=14, fontweight='bold') |
| plt.tight_layout() |
| plt.savefig(os.path.join(save_dir, 'accuracy_category.png'), dpi=200, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_category.png')}") |
|
|
| |
| _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=pred_stats) |
|
|
| |
| fig = plt.figure(figsize=(24, 14)) |
| ax_h = fig.add_subplot(3, 3, 1) |
| ax_v = fig.add_subplot(3, 3, 2) |
| ax_d = fig.add_subplot(3, 3, 3) |
| _acc_plot_group_bars(pred_stats, model_type, [ax_h, ax_v, ax_d]) |
| ax_tb = fig.add_subplot(3, 3, 4) |
| ax_to = fig.add_subplot(3, 3, 5) |
| _acc_plot_both_trajectory(pred_stats, model_type, ax_tb) |
| _acc_plot_overall_trajectory(pred_stats, model_type, ax_to) |
| ax_note = fig.add_subplot(3, 3, 6) |
| ax_note.axis('off') |
| available_scales = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)] |
| ax_note.text(0.1, 0.6, |
                 f'Scales: {", ".join(available_scales)}\n\n! = unreliable category\n-- = 0.5 chance level',
| transform=ax_note.transAxes, fontsize=11, va='top', family='monospace') |
| if cat_validity: |
| ax_co = fig.add_subplot(3, 2, 5) |
| ax_cs = fig.add_subplot(3, 2, 6) |
| _acc_plot_category_accuracy(cat_validity, model_type, ax_co, ax_cs, pred_stats=pred_stats) |
    fig.suptitle(f'{model_type.upper()} - Accuracy Summary',
| fontsize=17, fontweight='bold', y=1.01) |
| plt.tight_layout() |
| plt.savefig(os.path.join(save_dir, 'accuracy_chart.png'), dpi=200, bbox_inches='tight') |
| plt.close() |
| logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_chart.png')}") |
|
|
|
|
| |
| |
| |
|
|
def _ylim_compute(all_vals, margin_ratio=0.08):
    """Shared y-limits with a proportional margin; safe fallbacks for empty or flat input."""
    if not all_vals:
        return -1, 1
    ymin, ymax = min(all_vals), max(all_vals)
    if ymin == ymax:
        # Flat data: pad by a constant so matplotlib still gets a valid span.
        return ymin - 0.05, ymax + 0.05
    margin = (ymax - ymin) * margin_ratio
    return ymin - margin, ymax + margin
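
# Example: _ylim_compute([0.1, 0.9]) -> approximately (0.036, 0.964)
# (range 0.8, 8% margin = 0.064 padded on each side).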
|
|
|
|
| def _ylim_load_keyed_json(path): |
| if not os.path.exists(path): |
| return None |
| with open(path) as f: |
| raw = json.load(f) |
| if not raw: |
| return None |
| result = {} |
| for key, vals in raw.items(): |
| parts = key.rsplit('_L', 1) |
| if len(parts) == 2: |
| result[(parts[0], int(parts[1]))] = vals |
| return result if result else None |
|
|
|
|
| def _ylim_load_alignment_json(path): |
| if not os.path.exists(path): |
| return None |
| with open(path) as f: |
| raw = json.load(f) |
| if not raw: |
| return None |
| result = {int(k[1:]): v for k, v in raw.items() if k.startswith('L')} |
| return result if result else None |
|
|
|
|
def _ylim_plot_sign_corrected(data, scale, model_type, save_path, ylim):
    """Sign-corrected consistency plot rendered with a shared (unified) y-range."""
    fig, ax = plt.subplots(figsize=(12, 6))
| for group in GROUP_ORDER: |
| layers, vals = [], [] |
| for (g, l), v in sorted(data.items(), key=lambda x: x[0][1]): |
| if g == group: |
| layers.append(l) |
| vals.append(v['mean']) |
| if layers: |
| ax.plot(layers, vals, '-o', color=GROUP_COLORS[group], |
| label=group, linewidth=2, markersize=3) |
| ax.set_ylim(ylim) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Sign-Corrected Consistency') |
| ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency', |
| fontweight='bold') |
| ax.legend(fontsize=11) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
|
|
|
|
def _ylim_plot_within_cat(data, scale, model_type, save_path, ylim):
    """Within-category consistency plot rendered with a shared (unified) y-range."""
    fig, ax = plt.subplots(figsize=(12, 6))
| for cat in CATEGORY_ORDER: |
| layers, vals = [], [] |
| for (c, l), v in sorted(data.items(), key=lambda x: x[0][1]): |
| if c == cat: |
| layers.append(l) |
| vals.append(v['mean']) |
| if layers: |
| ax.plot(layers, vals, '-o', color=CAT_COLORS[cat], |
| label=cat, linewidth=2, markersize=3) |
| ax.set_ylim(ylim) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Within-Category Consistency') |
| ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency', |
| fontweight='bold') |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
|
|
|
|
def _ylim_plot_cross_alignment(data, scale, model_type, save_path, ylim):
    """Cross-group alignment plot rendered with a shared (unified) y-range."""
    fig, ax = plt.subplots(figsize=(12, 6))
| layers = sorted(data.keys()) |
| ax.plot(layers, [data[l]['per_sample_mean'] for l in layers], '-o', color='#d62728', |
| label='cos(d_vert, d_dist) per-sample mean', linewidth=2.5, markersize=3) |
| ax.plot(layers, [data[l]['mean_delta_alignment'] for l in layers], '--s', color='#e377c2', |
| label='cos(mean_d_vert, mean_d_dist)', linewidth=1.5, markersize=3) |
| perm_mean = [data[l]['permutation_mean'] for l in layers] |
| perm_std = [data[l]['permutation_std'] for l in layers] |
| ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5) |
| ax.fill_between(layers, |
| [m - 2*s for m, s in zip(perm_mean, perm_std)], |
| [m + 2*s for m, s in zip(perm_mean, perm_std)], |
| alpha=0.2, color='gray') |
| ax.set_ylim(ylim) |
| ax.set_xlabel('Layer Index') |
| ax.set_ylabel('Cosine Alignment') |
| ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)', |
| fontweight='bold') |
| ax.legend(fontsize=9) |
| ax.grid(True, alpha=0.3) |
| plt.tight_layout() |
| plt.savefig(save_path, dpi=300, bbox_inches='tight') |
| plt.close() |
|
|
|
|
| def _ylim_process_plot_type(scale_dir_map, plots_dir, conditions, model_type, |
| plot_name, json_pattern, loader, val_gatherer, plotter, |
| subfolder=None): |
| """Re-plot one plot type across all conditions with a unified y-axis. |
| |
| scale_dir_map: {scale: path_to_saved_data/vlm_key} (ordered dict) |
| """ |
| logger.info(f" [unify ylim] {plot_name}") |
| for condition, condition_tag in conditions: |
| cond_plot_dir = os.path.join(plots_dir, condition) |
| if not os.path.isdir(cond_plot_dir): |
| continue |
| save_dir = os.path.join(cond_plot_dir, subfolder) if subfolder else cond_plot_dir |
| os.makedirs(save_dir, exist_ok=True) |
| all_data = {} |
| for scale, scale_dir in scale_dir_map.items(): |
| path = os.path.join(scale_dir, 'json', |
| json_pattern.format(scale=scale, tag=condition_tag)) |
| loaded = loader(path) |
| if loaded: |
| all_data[scale] = loaded |
| if not all_data: |
| continue |
| all_vals = val_gatherer(all_data) |
| ylim = _ylim_compute(all_vals) |
| for scale, data in all_data.items(): |
| save_path = os.path.join(save_dir, f'{plot_name}_{scale}.png') |
| plotter(data, scale, model_type, save_path, ylim) |
| logger.info(f" {condition}: y=[{ylim[0]:.4f}, {ylim[1]:.4f}], {len(all_data)} scales") |
|
|
|
|
| def run_unify_ylim(scale_dir_map: dict, plots_dir: str, model_type: str): |
| """Unify y-axis for sign_corrected, within_cat, and cross_alignment plots. |
| |
| scale_dir_map: {scale: path_to_saved_data/vlm_key} |
| plots_dir: compare/{group_name}/plots/ (where unified plots are written) |
| """ |
| conditions = [ |
| ('all', 'all_pairs'), |
| ('both_correct', 'both_correct'), |
| ] |
|
|
| def gather_keyed(all_data): |
| return [v['mean'] for data in all_data.values() for v in data.values()] |
|
|
| def gather_alignment(all_data): |
| vals = [] |
| for data in all_data.values(): |
| for v in data.values(): |
| vals += [v['per_sample_mean'], v['mean_delta_alignment'], |
| v['permutation_mean'] + 2 * v['permutation_std'], |
| v['permutation_mean'] - 2 * v['permutation_std']] |
| return vals |
|
|
| _ylim_process_plot_type( |
| scale_dir_map, plots_dir, conditions, model_type, |
| plot_name='sign_corrected_consistency', |
| json_pattern='sign_corrected_consistency_{scale}_{tag}.json', |
| loader=_ylim_load_keyed_json, |
| val_gatherer=gather_keyed, |
| plotter=_ylim_plot_sign_corrected, |
| subfolder='sign_corrected', |
| ) |
| _ylim_process_plot_type( |
| scale_dir_map, plots_dir, conditions, model_type, |
| plot_name='within_cat_consistency', |
| json_pattern='within_cat_consistency_{scale}_{tag}.json', |
| loader=_ylim_load_keyed_json, |
| val_gatherer=gather_keyed, |
| plotter=_ylim_plot_within_cat, |
| subfolder='within_cat_consistency', |
| ) |
| _ylim_process_plot_type( |
| scale_dir_map, plots_dir, conditions, model_type, |
| plot_name='cross_alignment', |
| json_pattern='cross_alignment_{scale}.json', |
| loader=_ylim_load_alignment_json, |
| val_gatherer=gather_alignment, |
| plotter=_ylim_plot_cross_alignment, |
| subfolder='cross_alignment', |
| ) |
|
|
|
|
| def _has_phase_b_data(scale_dir: str, scale: str) -> bool: |
| """Return True if cross_alignment_{scale}.json exists and is non-empty. |
| |
| Used in merge to auto-detect whether Phase B was run for a given scale. |
| """ |
| path = os.path.join(scale_dir, 'json', f'cross_alignment_{scale}.json') |
| if not os.path.exists(path): |
| return False |
| try: |
| with open(path) as f: |
| data = json.load(f) |
| return bool(data) |
| except Exception: |
| return False |
|
|
|
|
| def _check_merge_only_sources(output_dir: str, model_type: str) -> bool: |
| """Verify required source directories have data for a merge-only model_type. |
| |
| With the new per-scale directory layout, data lives at |
| {output_dir}/{model_type}_{scale}/ instead of {output_dir}/{model_type}/. |
| Returns True if all sources look healthy, False (with warnings) if not. |
| """ |
| mc = MERGE_ONLY_CONFIGS[model_type] |
| ok = True |
| for req_model_type in mc['required_dirs']: |
| |
| if not os.path.isdir(output_dir): |
| logger.warning( |
| f"[{model_type}] output_dir not found: {output_dir}\n" |
| f" β Run inference first: python swap_analysis.py --model_type {req_model_type}" |
| ) |
| ok = False |
| continue |
| matching = [ |
| d for d in os.listdir(output_dir) |
| if d.startswith(f'{req_model_type}_') |
| and os.path.isdir(os.path.join(output_dir, d)) |
| ] |
| if not matching: |
| logger.warning( |
| f"[{model_type}] No '{req_model_type}_*' directories found in {output_dir}\n" |
| f" β Run inference first: python swap_analysis.py --model_type {req_model_type}" |
| ) |
| ok = False |
| continue |
| any_data = False |
| for d in matching: |
| json_dir = os.path.join(output_dir, d, 'json') |
| if os.path.isdir(json_dir) and any( |
| f.startswith('pred_stats_') for f in os.listdir(json_dir) |
| ): |
| scale = d[len(req_model_type) + 1:] |
| logger.info(f" [{req_model_type}/{scale}] found data in {d}/") |
| any_data = True |
| if not any_data: |
| logger.warning( |
| f"[{model_type}] No pred_stats JSON found in any '{req_model_type}_*' directory.\n" |
| f" β Inference may not have completed for '{req_model_type}'." |
| ) |
| ok = False |
| return ok |
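
# Expected on-disk layout for a healthy merge source (names illustrative):
#
#   {output_dir}/
#     qwen_3b/json/pred_stats_3b.json
#     qwen_7b/json/pred_stats_7b.json
#
# i.e. one '{model_type}_{scale}/' directory per completed scale, each holding
# at least one pred_stats_*.json under its json/ subdirectory.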
|
|
|
|
| def _load_scale_data_multi(output_dir: str, model_type: str, scale: str, scale_sources: dict): |
| """Load per-scale data for one scale, looking in the correct source directory. |
| |
| With the new per-scale layout, data lives at {output_dir}/{src_model_type}_{scale}/. |
| Returns (sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc). |
| Any unavailable item is None / {}. |
| """ |
| src_model_type = scale_sources.get(scale, model_type) |
| src_dir = os.path.join(output_dir, get_model_key(src_model_type, scale)) |
|
|
| sc = load_scale_consistency(src_dir, scale, 'all_pairs') |
| sc_bc = load_scale_consistency(src_dir, scale, 'both_correct') |
| wc = load_within_cat_consistency(src_dir, scale, 'all_pairs') |
| wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct') |
| align = load_scale_alignment(src_dir, scale) |
|
|
| pred_stat = None |
| pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json') |
| if os.path.exists(pred_path): |
| with open(pred_path) as f: |
| pred_stat = json.load(f) |
|
|
| cat_validity = None |
| cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json') |
| if os.path.exists(cv_path): |
| with open(cv_path) as f: |
| cat_validity = json.load(f) |
|
|
| dh = load_delta_heatmaps(src_dir, scale, 'all_pairs') |
| dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct') |
|
|
| return sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc |
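# Usage sketch (hypothetical values): for a merge-only type whose '2m' scale is
# sourced from 'qwen',
#   (sc, sc_bc, wc, wc_bc, align,
#    pred_stat, cat_validity, dh, dh_bc) = _load_scale_data_multi(
#        output_dir, 'qwen_all', '2m', {'2m': 'qwen'})
# reads everything from {output_dir}/<get_model_key('qwen', '2m')>/.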
|
|
|
|
| |
| |
| |
|
|
| def _get_csv_layers(csv_dir: str, scale: str, tag: str) -> list: |
| """Return sorted list of layer indices that have a delta_similarity CSV.""" |
| import glob as _glob |
| pattern = os.path.join(csv_dir, f'delta_similarity_{scale}_L*_{tag}.csv') |
| layers = [] |
| for fpath in _glob.glob(pattern): |
| m = re.search( |
| rf'delta_similarity_{re.escape(scale)}_L(\d+)_{re.escape(tag)}\.csv$', |
| os.path.basename(fpath)) |
| if m: |
| layers.append(int(m.group(1))) |
| return sorted(layers) |
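# Example (hypothetical files): a csv_dir containing
#   delta_similarity_80k_L0_all_pairs.csv
#   delta_similarity_80k_L12_all_pairs.csv
# gives _get_csv_layers(csv_dir, '80k', 'all_pairs') == [0, 12].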
|
|
|
|
| def run_all_layer_heatmaps(model_dir: str, model_type: str, scales: list): |
| """Generate delta-similarity heatmaps for ALL layers from pre-computed CSVs. |
| |
| Reads {model_dir}/csv/delta_similarity_{scale}_L{n}_{tag}.csv |
| Writes {model_dir}/plots/all/heatmap/heatmap_{scale}_L{n}.png (all_pairs) |
| {model_dir}/plots/both_correct/heatmap/heatmap_{scale}_L{n}.png (both_correct) |
| |
| Skips a scale if the NPZ is missing or any all_pairs CSV is absent |
| (indicates inference was not fully completed for that scale). |
| """ |
| TAG_TO_DIR = { |
| 'all_pairs': os.path.join(model_dir, 'plots', 'all', 'heatmap'), |
| 'both_correct': os.path.join(model_dir, 'plots', 'both_correct', 'heatmap'), |
| } |
|
|
| for scale in scales: |
| npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz') |
| csv_dir = os.path.join(model_dir, 'csv') |
|
|
| if not os.path.exists(npz_path): |
| logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping heatmaps.') |
| continue |
|
|
| data = np.load(npz_path, allow_pickle=True) |
| npz_layers = sorted( |
| int(k.replace('orig_L', '')) |
| for k in data.files if k.startswith('orig_L') |
| ) |
| data.close() |
|
|
| if not npz_layers: |
| logger.warning(f' [{model_type}/{scale}] No orig_L* keys in NPZ, skipping heatmaps.') |
| continue |
|
|
| csv_layers = _get_csv_layers(csv_dir, scale, 'all_pairs') |
| missing = set(npz_layers) - set(csv_layers) |
| if missing: |
| logger.warning( |
| f' [{model_type}/{scale}] {len(missing)} NPZ layers lack CSVs ' |
                f'(e.g. layers {sorted(missing)[:5]}). Skipping all-layer heatmaps.')
| continue |
|
|
| for out_dir in TAG_TO_DIR.values(): |
| os.makedirs(out_dir, exist_ok=True) |
|
|
| logger.info(f' [{model_type}/{scale}] Generating heatmaps for {len(npz_layers)} layers...') |
| saved = 0 |
| for layer in npz_layers: |
| for tag, out_dir in TAG_TO_DIR.items(): |
| csv_path = os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{tag}.csv') |
| if not os.path.exists(csv_path): |
| continue |
| df = pd.read_csv(csv_path, index_col=0) |
| available = [c for c in CATEGORY_ORDER if c in df.index] |
| if not available: |
| continue |
| df = df.loc[available, available] |
| title = ( |
| f'{model_type.upper()} ({scale}) \u2014 Delta Heatmap L{layer} ' |
| f'({"both-correct" if tag == "both_correct" else "all pairs"})' |
| ) |
| out_path = os.path.join(out_dir, f'heatmap_{scale}_L{layer}.png') |
| plot_delta_heatmap(df, title, out_path) |
| saved += 1 |
| logger.info(f' [{model_type}/{scale}] Saved {saved} heatmaps') |
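# Typical call (sketch, assuming the per-scale directory layout):
#   run_all_layer_heatmaps(os.path.join(output_dir, 'qwen_80k'), 'qwen', ['80k'])
# A scale is skipped unless its NPZ and the full set of all_pairs CSVs exist.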
|
|
|
|
| def run_all_layer_pca(model_dir: str, model_type: str, scales: list): |
| """Generate 2D and 3D PCA plots for ALL layers from saved NPZ files. |
| |
| Writes {model_dir}/plots/all/pca/pca_{scale}_L{n}.png (all pairs) |
| {model_dir}/plots/all/pca_3d/pca_{scale}_L{n}.png |
| {model_dir}/plots/both_correct/pca/pca_{scale}_L{n}.png (both-correct only) |
| {model_dir}/plots/both_correct/pca_3d/pca_{scale}_L{n}.png |
| """ |
| for scale in scales: |
| npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz') |
| if not os.path.exists(npz_path): |
| logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping PCA.') |
| continue |
|
|
| |
| pca_2d_dir = os.path.join(model_dir, 'plots', 'all', 'pca') |
| pca_3d_dir = os.path.join(model_dir, 'plots', 'all', 'pca_3d') |
| os.makedirs(pca_2d_dir, exist_ok=True) |
| os.makedirs(pca_3d_dir, exist_ok=True) |
| logger.info(f' [{model_type}/{scale}] Generating all-layer 2D PCA...') |
| plot_pca_embeddings(npz_path, scale, model_type, pca_2d_dir) |
| logger.info(f' [{model_type}/{scale}] Generating all-layer 3D PCA...') |
| plot_pca_3d(npz_path, scale, model_type, pca_3d_dir) |
|
|
| |
| bc_pca_2d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca') |
| bc_pca_3d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca_3d') |
| os.makedirs(bc_pca_2d_dir, exist_ok=True) |
| os.makedirs(bc_pca_3d_dir, exist_ok=True) |
| logger.info(f' [{model_type}/{scale}] Generating both-correct 2D PCA...') |
| plot_pca_embeddings(npz_path, scale, model_type, bc_pca_2d_dir, bc_only=True) |
| logger.info(f' [{model_type}/{scale}] Generating both-correct 3D PCA...') |
| plot_pca_3d(npz_path, scale, model_type, bc_pca_3d_dir, bc_only=True) |
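# Same calling convention as run_all_layer_heatmaps (sketch):
#   run_all_layer_pca(os.path.join(output_dir, 'qwen_80k'), 'qwen', ['80k'])
# Each scale yields four plot sets: 2D and 3D PCA, for all pairs and both-correct.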
|
|
|
|
def run_merge(args):
    """Merge mode for legacy model_types: build cross-scale plots from saved per-scale data."""

| def _scale_dir(scale): |
| return os.path.join(args.output_dir, get_model_key(args.model_type, scale)) |
|
|
| |
| group_name = args.group_name or args.model_type |
| if args.merge_output_dir: |
| merge_out = args.merge_output_dir |
| else: |
| qt_root = os.path.dirname(args.output_dir.rstrip('/')) |
| merge_out = os.path.join(qt_root, 'compare', group_name) |
| plots_dir = os.path.join(merge_out, 'plots') |
| os.makedirs(plots_dir, exist_ok=True) |
|
|
| scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer', |
| 'roborefer_depth', '10pct', '20pct', '30pct'] |
| available_scales = [s for s in scale_order if s in args.scales] |
|
|
| |
| all_sign_corrected = {} |
| all_sign_corrected_bc = {} |
| all_within_cat = {} |
| all_within_cat_bc = {} |
| all_alignment = {} |
| all_pred_stats = [] |
| all_cat_validity = {} |
| all_delta_heatmaps = {} |
| all_delta_heatmaps_bc = {} |
|
|
| for scale in available_scales: |
| sd = _scale_dir(scale) |
| sc = load_scale_consistency(sd, scale, 'all_pairs') |
| if sc: |
| all_sign_corrected[scale] = sc |
| sc_bc = load_scale_consistency(sd, scale, 'both_correct') |
| if sc_bc: |
| all_sign_corrected_bc[scale] = sc_bc |
| wc = load_within_cat_consistency(sd, scale, 'all_pairs') |
| if wc: |
| all_within_cat[scale] = wc |
| wc_bc = load_within_cat_consistency(sd, scale, 'both_correct') |
| if wc_bc: |
| all_within_cat_bc[scale] = wc_bc |
| align = load_scale_alignment(sd, scale) |
| if align: |
| all_alignment[scale] = align |
| pred_path = os.path.join(sd, 'json', f'pred_stats_{scale}.json') |
| if os.path.exists(pred_path): |
| with open(pred_path) as f: |
| all_pred_stats.append(json.load(f)) |
| cv_path = os.path.join(sd, 'json', f'category_validity_{scale}.json') |
| if os.path.exists(cv_path): |
| with open(cv_path) as f: |
| all_cat_validity[scale] = json.load(f) |
| dh = load_delta_heatmaps(sd, scale, 'all_pairs') |
| if dh: |
| all_delta_heatmaps[scale] = dh |
| dh_bc = load_delta_heatmaps(sd, scale, 'both_correct') |
| if dh_bc: |
| all_delta_heatmaps_bc[scale] = dh_bc |
|
|
| logger.info(f" Loaded data for {scale}") |
|
|
| |
| for condition, sc_data, wc_data, dh_data, tag_label in [ |
| ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'), |
| ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'), |
| ]: |
| cond_dir = os.path.join(plots_dir, condition) |
| sc_dir = os.path.join(cond_dir, 'sign_corrected') |
| wc_dir = os.path.join(cond_dir, 'within_cat_consistency') |
| dt_dir = os.path.join(cond_dir, 'delta_trajectory') |
| os.makedirs(sc_dir, exist_ok=True) |
| os.makedirs(wc_dir, exist_ok=True) |
| os.makedirs(dt_dir, exist_ok=True) |
|
|
| if len(sc_data) > 1: |
| plot_cross_scale_consistency( |
| sc_data, args.model_type, |
| os.path.join(sc_dir, 'cross_scale_sign_corrected.png'), |
| title_prefix=f'Sign-Corrected ({tag_label})') |
|
|
| if len(wc_data) > 1: |
| plot_cross_scale_within_cat_consistency( |
| wc_data, args.model_type, |
| os.path.join(wc_dir, 'cross_scale_within_cat.png')) |
|
|
| if dh_data: |
| plot_delta_trajectory(dh_data, args.model_type, |
| os.path.join(dt_dir, 'delta_trajectory.png')) |
|
|
| |
| all_cond_dir = os.path.join(plots_dir, 'all') |
| ca_dir = os.path.join(all_cond_dir, 'cross_alignment') |
| pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats') |
| summary_dir = os.path.join(all_cond_dir, 'summary') |
| os.makedirs(ca_dir, exist_ok=True) |
| os.makedirs(pred_stats_dir, exist_ok=True) |
| os.makedirs(summary_dir, exist_ok=True) |
|
|
| if len(all_alignment) > 1: |
| plot_cross_scale_alignment( |
| all_alignment, args.model_type, |
| os.path.join(ca_dir, 'cross_scale_alignment.png')) |
|
|
| |
| if all_pred_stats: |
| plot_pred_stats_bars(all_pred_stats, args.model_type, |
| os.path.join(pred_stats_dir, 'pred_stats_bars.png')) |
| plot_pred_stats_trajectory(all_pred_stats, args.model_type, |
| os.path.join(pred_stats_dir, 'pred_stats_trajectory.png')) |
|
|
| |
| if all_sign_corrected: |
| plot_summary_barplot( |
| all_sign_corrected, all_alignment, args.model_type, |
| os.path.join(summary_dir, 'summary_barplot.png')) |
|
|
| |
| summary_rows = [] |
| for scale in available_scales: |
| ps = next((p for p in all_pred_stats if p.get('scale') == scale), None) |
| if ps is None: |
| continue |
| row = dict(ps) |
| if scale in all_alignment: |
| max_layer = max(all_alignment[scale].keys()) |
| row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean'] |
| row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean'] |
| summary_rows.append(row) |
|
|
| if summary_rows: |
| csv_dir = os.path.join(merge_out, 'csv') |
| os.makedirs(csv_dir, exist_ok=True) |
| pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False) |
|
|
| |
| if all_pred_stats: |
| acc_dir = os.path.join(plots_dir, 'accuracy') |
| logger.info("\n--- Accuracy Charts ---") |
| run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir) |
|
|
| |
| logger.info("\n--- Unifying Y-axis ---") |
| scale_dir_map = {s: _scale_dir(s) for s in available_scales} |
| run_unify_ylim(scale_dir_map, plots_dir, args.model_type) |
|
|
| |
|
|
| logger.info(f"\n=== Merge Complete ===\nResults in: {merge_out}") |
|
|
|
|
def run_merge_extended(args):
    """Generate cross-scale plots for new / merge-only model_types.

    - Runnable types (molmo_big, qwen_big, qwen_super, big_trio):
        loads per-scale data from {output_dir}/{model_type}_{scale}/.
    - Merge-only types (molmo_all, qwen_all):
        loads per-scale data from each scale's source directory,
        {output_dir}/{src_model_type}_{scale}/.
    In both cases the merged cross-scale plots are written under
    {question_type}/compare/{group_name}/ (or --merge-output-dir).
    """
| is_merge_only = args.model_type in MERGE_ONLY_CONFIGS |
|
|
| |
| if is_merge_only: |
| mc = MERGE_ONLY_CONFIGS[args.model_type] |
| scale_order = mc['scale_order'] |
| scale_sources = mc['scale_sources'] |
|
|
| logger.info(f"\n=== MERGE-ONLY mode: {args.model_type} ===") |
| logger.info("Checking required source directories...") |
| sources_ok = _check_merge_only_sources(args.output_dir, args.model_type) |
| if not sources_ok: |
| logger.warning( |
| f"\n[WARNING] One or more source directories are missing or incomplete.\n" |
| f" Cross-scale plots for '{args.model_type}' may be partial.\n" |
| f" Run the missing model types first (see warnings above), then retry merge." |
| ) |
| else: |
| scale_order = SCALE_ORDERS_NEW.get( |
| args.model_type, list(MODEL_CONFIGS_NEW[args.model_type])) |
| scale_sources = None |
|
|
| available_scales = [s for s in scale_order if s in args.scales] |
| logger.info(f"Merging scales (in order): {available_scales}") |
|
|
| |
| group_name = args.group_name or args.model_type |
| if args.merge_output_dir: |
| merge_out = args.merge_output_dir |
| else: |
| qt_root = os.path.dirname(args.output_dir.rstrip('/')) |
| merge_out = os.path.join(qt_root, 'compare', group_name) |
| plots_dir = os.path.join(merge_out, 'plots') |
| os.makedirs(plots_dir, exist_ok=True) |
|
|
| |
| def _scale_dir(scale): |
| if is_merge_only: |
| src_model_type = scale_sources[scale] |
| else: |
| src_model_type = args.model_type |
| return os.path.join(args.output_dir, get_model_key(src_model_type, scale)) |
|
|
| |
| all_sign_corrected = {} |
| all_sign_corrected_bc = {} |
| all_within_cat = {} |
| all_within_cat_bc = {} |
| all_alignment = {} |
| all_pred_stats = [] |
| all_cat_validity = {} |
| all_delta_heatmaps = {} |
| all_delta_heatmaps_bc = {} |
|
|
| for scale in available_scales: |
| sd = _scale_dir(scale) |
| sc = load_scale_consistency(sd, scale, 'all_pairs') |
| sc_bc = load_scale_consistency(sd, scale, 'both_correct') |
| wc = load_within_cat_consistency(sd, scale, 'all_pairs') |
| wc_bc = load_within_cat_consistency(sd, scale, 'both_correct') |
| align = load_scale_alignment(sd, scale) |
|
|
| pred_stat = None |
| pred_path = os.path.join(sd, 'json', f'pred_stats_{scale}.json') |
| if os.path.exists(pred_path): |
| with open(pred_path) as f: |
| pred_stat = json.load(f) |
|
|
| cat_validity = None |
| cv_path = os.path.join(sd, 'json', f'category_validity_{scale}.json') |
| if os.path.exists(cv_path): |
| with open(cv_path) as f: |
| cat_validity = json.load(f) |
|
|
| dh = load_delta_heatmaps(sd, scale, 'all_pairs') |
| dh_bc = load_delta_heatmaps(sd, scale, 'both_correct') |
|
|
| if sc: |
| all_sign_corrected[scale] = sc |
| if sc_bc: |
| all_sign_corrected_bc[scale] = sc_bc |
| if wc: |
| all_within_cat[scale] = wc |
| if wc_bc: |
| all_within_cat_bc[scale] = wc_bc |
| if align: |
| all_alignment[scale] = align |
| if pred_stat is not None: |
| all_pred_stats.append(pred_stat) |
| if cat_validity is not None: |
| all_cat_validity[scale] = cat_validity |
| if dh: |
| all_delta_heatmaps[scale] = dh |
| if dh_bc: |
| all_delta_heatmaps_bc[scale] = dh_bc |
|
|
| logger.info(f" Loaded data for '{scale}'" |
| + (f" (from '{scale_sources[scale]}')" if is_merge_only else "")) |
|
|
| |
| has_phase_b = all(_has_phase_b_data(_scale_dir(s), s) for s in available_scales) |
| if has_phase_b: |
| logger.info(" [Phase B] Cross-alignment data found for all scales β will include cross-alignment plots") |
| else: |
| missing_b = [s for s in available_scales if not _has_phase_b_data(_scale_dir(s), s)] |
| logger.info(f" [Phase B] Cross-alignment data missing for: {missing_b} β skipping cross-alignment plots") |
|
|
| |
| summary_rows = [] |
| for scale in available_scales: |
| ps = next((p for p in all_pred_stats if p.get('scale') == scale), None) |
| if ps is None: |
| continue |
| row = dict(ps) |
| if scale in all_alignment: |
| max_layer = max(all_alignment[scale].keys()) |
| row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean'] |
| row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean'] |
| summary_rows.append(row) |
| if summary_rows: |
| csv_dir = os.path.join(merge_out, 'csv') |
| os.makedirs(csv_dir, exist_ok=True) |
| pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False) |
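        # summary.csv sketch: one row per scale carrying the raw pred_stats
        # fields, plus 'alignment_deepest' / 'alignment_perm' from the deepest
        # layer when Phase B alignment data is available.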
|
|
| |
| if args.phase1_only: |
| logger.info("\n--- Cross-scale plots [SKIPPED: --phase1-only] ---") |
| else: |
| for condition, sc_data, wc_data, dh_data, tag_label in [ |
| ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'), |
| ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'), |
| ]: |
| cond_dir = os.path.join(plots_dir, condition) |
| sc_dir = os.path.join(cond_dir, 'sign_corrected') |
| wc_dir = os.path.join(cond_dir, 'within_cat_consistency') |
| dt_dir = os.path.join(cond_dir, 'delta_trajectory') |
| os.makedirs(sc_dir, exist_ok=True) |
| os.makedirs(wc_dir, exist_ok=True) |
| os.makedirs(dt_dir, exist_ok=True) |
|
|
| if len(sc_data) > 1: |
| plot_cross_scale_consistency( |
| sc_data, args.model_type, |
| os.path.join(sc_dir, 'cross_scale_sign_corrected.png'), |
| title_prefix=f'Sign-Corrected ({tag_label})') |
|
|
| if len(wc_data) > 1: |
| plot_cross_scale_within_cat_consistency( |
| wc_data, args.model_type, |
| os.path.join(wc_dir, 'cross_scale_within_cat.png')) |
|
|
| if dh_data: |
| plot_delta_trajectory( |
| dh_data, args.model_type, |
| os.path.join(dt_dir, 'delta_trajectory.png')) |
|
|
| |
| all_cond_dir = os.path.join(plots_dir, 'all') |
| pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats') |
| summary_dir = os.path.join(all_cond_dir, 'summary') |
| os.makedirs(pred_stats_dir, exist_ok=True) |
| os.makedirs(summary_dir, exist_ok=True) |
|
|
| if has_phase_b and len(all_alignment) > 1: |
| ca_dir = os.path.join(all_cond_dir, 'cross_alignment') |
| os.makedirs(ca_dir, exist_ok=True) |
| plot_cross_scale_alignment( |
| all_alignment, args.model_type, |
| os.path.join(ca_dir, 'cross_scale_alignment.png')) |
|
|
| if all_pred_stats: |
| plot_pred_stats_bars( |
| all_pred_stats, args.model_type, |
| os.path.join(pred_stats_dir, 'pred_stats_bars.png')) |
| plot_pred_stats_trajectory( |
| all_pred_stats, args.model_type, |
| os.path.join(pred_stats_dir, 'pred_stats_trajectory.png')) |
|
|
| if all_sign_corrected: |
| plot_summary_barplot( |
| all_sign_corrected, all_alignment, args.model_type, |
| os.path.join(summary_dir, 'summary_barplot.png')) |
|
|
| |
| if all_pred_stats: |
| acc_dir = os.path.join(plots_dir, 'accuracy') |
| logger.info("\n--- Accuracy Charts ---") |
| run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir) |
|
|
| |
| logger.info("\n--- Unifying Y-axis ---") |
| scale_dir_map = {s: _scale_dir(s) for s in available_scales} |
| run_unify_ylim(scale_dir_map, plots_dir, args.model_type) |
|
|
| |
|
|
| logger.info(f"\n=== Merge Complete ===\nResults saved to: {merge_out}") |
|
|
|
|
| def main(): |
| |
| _LEGACY_DEFAULT_SCALES = { |
| 'molmo': ['vanilla', '80k', '400k', '800k', '2m'], |
| 'nvila': ['vanilla', '80k', '400k', '800k', '2m'], |
| 'qwen': ['vanilla', '80k', '400k', '800k', '2m'], |
| 'nvila_synthetic': ['80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '400k-5pct'], |
| 'nvila_st': ['80k-st', '400k-st', '800k-st'], |
| } |
|
|
| parser = argparse.ArgumentParser( |
        description='Swap Analysis: Spatial Representation Probing',
| formatter_class=argparse.RawDescriptionHelpFormatter, |
| ) |
| parser.add_argument('--data_path', type=str, |
| default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv') |
| parser.add_argument('--model_type', type=str, required=True, |
| choices=ALL_MODEL_TYPES, |
| help=( |
| 'Legacy: molmo | nvila | qwen\n' |
| 'Synthetic: nvila_synthetic\n' |
| 'New large: molmo_big | qwen_big | qwen_super | big_trio\n' |
| 'Merge-only (--merge required): molmo_all | qwen_all' |
| )) |
| parser.add_argument('--scales', type=str, nargs='+', default=None, |
| help='Scales to process (default: all for the given model_type).') |
| parser.add_argument('--output_dir', type=str, default=None, |
| help='Root for saved_data/. Defaults to ' |
| '{script_dir}/{question_type}/saved_data.') |
| parser.add_argument('--device', type=str, default='cuda') |
| parser.add_argument('--seed', type=int, default=42) |
| parser.add_argument('--merge', action='store_true', |
| help='Merge mode: generate cross-scale plots from saved per-scale data.') |
| parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir', |
| help='(Deprecated) Override output dir for cross-scale plots. ' |
| 'Use --group-name instead.') |
| parser.add_argument('--group-name', type=str, default=None, dest='group_name', |
| help='Folder name under compare/ for merged output. ' |
| 'Defaults to model_type.') |
| parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer', |
| help='Disable automatic inclusion of roborefer scale for nvila.') |
| parser.add_argument('--skip-cross-group', action='store_true') |
| parser.add_argument('--max-samples-per-category', type=int, default=200, |
| dest='max_samples_per_category') |
| parser.add_argument('--no-filtering', action='store_true', dest='no_filtering', |
| help='Disable Unknown/empty filtering for far/close reference objects.' |
| ' By default, Unknown candidates are removed before sampling.') |
| parser.add_argument('--question-type', type=str, default='short_answer', |
| choices=['short_answer', 'mcq'], dest='question_type', |
| help='short_answer (default): "Answer with only one word." format; ' |
| 'mcq: MCQ A/B format with letter answers.') |
| parser.add_argument('--phase1-only', action='store_true', dest='phase1_only', |
| help='Skip all plot generation (per-scale and cross-scale). ' |
| 'Data (npz/csv/json) is still saved.') |
| parser.add_argument('--skip-phase-b', action='store_true', dest='skip_phase_b', |
| help='Skip Phase B (cross-group feature extraction). ' |
| 'Phase A inference + analysis + plots still run normally. ' |
| 'Merge auto-detects whether Phase B data is available.') |
|
|
| args = parser.parse_args() |
|
|
| |
    # Default output/log locations live under {script_dir}/{question_type}/;
    # reuse the module-level _HERE rather than recomputing the script directory.
    if args.output_dir is None:
        args.output_dir = os.path.join(_HERE, args.question_type, 'saved_data')
    log_dir = os.path.join(_HERE, args.question_type, 'logs')
|
|
| |
| if args.model_type in MERGE_ONLY_CONFIGS and not args.merge: |
| parser.error( |
| f"'{args.model_type}' is a merge-only type. Add --merge to run it.\n" |
| f" Example: python swap_analysis.py --model_type {args.model_type} --merge" |
| ) |
|
|
| |
| if args.scales is None: |
| if args.model_type in MERGE_ONLY_CONFIGS: |
| args.scales = MERGE_ONLY_CONFIGS[args.model_type]['scale_order'] |
| elif args.model_type in MODEL_CONFIGS_NEW: |
| args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys()) |
| else: |
| args.scales = _LEGACY_DEFAULT_SCALES.get( |
| args.model_type, ['vanilla', '80k', '400k', '800k', '2m']) |
|
|
| |
| if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer: |
        args.scales = args.scales + ['roborefer']  # copy: don't mutate shared default lists
|
|
| np.random.seed(args.seed) |
| torch.manual_seed(args.seed) |
| random.seed(args.seed) |
|
|
| |
| if args.merge: |
| group_name = args.group_name or args.model_type |
| log_path = _setup_file_logging(group_name, log_dir) |
| logger.info(f"Logging to: {log_path}") |
| logger.info("\n=== MERGE MODE ===") |
| if args.model_type in MODEL_CONFIGS_NEW or args.model_type in MERGE_ONLY_CONFIGS: |
| run_merge_extended(args) |
| else: |
| run_merge(args) |
| return |
|
|
| |
| logger.info("\n=== Loading & Creating Swap Pairs ===") |
| swap_pairs = load_swap_pairs(args.data_path, args.seed, |
| filter_unknown=not args.no_filtering, |
| question_type=args.question_type) |
|
|
| quads = [] |
| if not args.skip_cross_group and not getattr(args, 'skip_phase_b', False): |
| try: |
| hf_cache = build_hf_bbox_cache() |
| quads = create_cross_group_quads(swap_pairs, hf_cache, |
| question_type=args.question_type) |
| except Exception as e: |
| logger.warning(f"Cross-group setup failed: {e}. Skipping.") |
| quads = [] |
|
|
| |
| if args.model_type in MODEL_CONFIGS_NEW: |
| model_configs = MODEL_CONFIGS_NEW[args.model_type] |
| else: |
| model_configs = MODEL_CONFIGS[args.model_type] |
|
|
| for scale in args.scales: |
| if scale not in model_configs: |
| logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.") |
| continue |
|
|
| |
| vlm_key = get_model_key(args.model_type, scale) |
| log_path = _setup_file_logging(vlm_key, log_dir) |
| logger.info(f"Logging to: {log_path}") |
|
|
| |
| if args.model_type in MODEL_CONFIGS_NEW: |
| _, raw_path = model_configs[scale] |
| else: |
| raw_path = model_configs[scale] |
        # Sanity-check only relative local checkpoint paths here; HF IDs
        # ('Qwen/', 'allenai/') and absolute paths are handled downstream.
        if not os.path.isabs(raw_path) and not raw_path.startswith(('Qwen/', 'allenai/')):
| if not os.path.exists(raw_path): |
| logger.warning(f"Model path not found: {raw_path} (scale='{scale}'), skipping.") |
| continue |
|
|
| try: |
| process_scale(args, scale, swap_pairs, quads) |
| except Exception as e: |
| logger.error(f"Failed {args.model_type} - {scale}: {e}") |
| import traceback |
| traceback.print_exc() |
| continue |
|
|
| logger.info(f"\n{'='*60}") |
| logger.info("=== All scales complete ===") |
| logger.info(f"Results: {args.output_dir}") |
| logger.info(f"{'='*60}") |
|
|
|
|
| if __name__ == '__main__': |
| main() |