experiments / spatial-representation-probe /swap_analysis_0226.py
ch-min's picture
Add files using upload-large-folder tool
19898f1 verified
raw
history blame
156 kB
#!/usr/bin/env python3
"""
Swap Analysis: Minimal Pair Probing for Spatial Representations
Creates minimal pairs by swapping obj1<->obj2 in spatial questions:
Original: "Is A to the left or right of B?" -> left
Swapped: "Is B to the left or right of A?" -> right
Supported model types
---------------------
Legacy (Qwen2.5-VL-3B scale experiments):
molmo | nvila | qwen
New large models:
molmo_big : Molmo2-8B
qwen_big : Qwen3-VL-32B-Instruct
qwen_super : Qwen3-VL-235B-A22B-Instruct
big_trio : Molmo2-8B + RoboRefer + Qwen3-VL-32B
Merge-only (--merge required):
molmo_all : molmo (vanilla→2m) + molmo_big (molmo2)
qwen_all : qwen (vanilla→2m) + qwen_big (qwen3_32b)
Usage examples
--------------
# Legacy model (Qwen2.5-VL-3B scale)
python swap_analysis.py --model_type qwen
# New large model (Qwen3-VL-32B)
conda run -n qwen3 python swap_analysis.py --model_type qwen_big
# Cross-family merge (combine qwen + qwen_big results)
conda run -n qwen3 python swap_analysis.py --model_type qwen_all --merge
Analyses:
1. Difference vectors: delta = feature(swapped) - feature(original)
2. Within-category delta consistency (do all left->right swaps point same direction?)
3. Sign-corrected group consistency (align opposite categories by flipping)
4. Cross-group delta alignment (delta_vertical vs delta_distance) for perspective bias
5. Delta-based 6x6 similarity heatmap (mean delta per category as representation)
6. Prediction stats visualization (bar chart + cross-scale trajectory)
7. Both-correct filtering for delta analysis
8. PCA visualization of per-sample embeddings
9. Scaling effects on all of the above
Fixes applied:
Fix 1: "Answer with only one word." appended to all prompts
Fix 2: Synonym handling (below/beneath->under, near/nearby->close, distant->far)
Fix 4: Cross-group quads index matching via string normalization
Fix 5: Within-category + sign-corrected delta consistency (replaces wrong group-level)
Fix 6: Prediction stats bar chart + cross-scale line plot
Fix 7: Delta-based 6x6 heatmap and trajectory
Fix 8: Category validity check + both-correct delta filtering
"""
import os
import sys
import json
import argparse
import base64
import logging
import random
import re
from io import BytesIO
from collections import defaultdict
from typing import Dict, List, Tuple, Optional, Any
from abc import ABC, abstractmethod
import torch
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
import seaborn as sns
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import PCA
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
_HERE = os.path.dirname(os.path.abspath(__file__))
# ── Local HuggingFace cache helpers ──────────────────────────────────────────
HF_HUB_DIR = '/data/shared/Qwen/mydisk/huggingface/hub'
def resolve_local_path(model_path: str) -> str:
"""Return local snapshot path for a HF model ID if cached, else return the ID unchanged."""
if os.path.isabs(model_path):
return model_path
cache_name = 'models--' + model_path.replace('/', '--')
snapshots_dir = os.path.join(HF_HUB_DIR, cache_name, 'snapshots')
if os.path.isdir(snapshots_dir):
snapshots = sorted(os.listdir(snapshots_dir))
if snapshots:
local_path = os.path.join(snapshots_dir, snapshots[-1])
logger.info(f"Local cache found: {model_path} β†’ {local_path}")
return local_path
logger.warning(
f"Model not found in local cache: '{model_path}'\n"
f" Expected at: {snapshots_dir}\n"
f" Will fall back to online HuggingFace Hub download.\n"
f" To cache locally first: python -c \"from huggingface_hub import snapshot_download; "
f"snapshot_download('{model_path}', cache_dir='{HF_HUB_DIR}')\""
)
return model_path
def _setup_file_logging(model_type: str) -> str:
"""Attach a per-model-type FileHandler to the root logger.
Writes to <script_dir>/logs/{model_type}.log (append mode).
Returns the log file path.
"""
log_dir = os.path.join(_HERE, 'logs')
os.makedirs(log_dir, exist_ok=True)
log_path = os.path.join(log_dir, f'{model_type}.log')
fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
fh.setLevel(logging.INFO)
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logging.getLogger().addHandler(fh)
return log_path
# ============================================================================
# Constants
# ============================================================================
CATEGORY_ORDER = ['left', 'right', 'above', 'below', 'far', 'close']
OPPOSITE_MAP = {
'left': 'right', 'right': 'left',
'above': 'below', 'below': 'above',
'under': 'above', # short-mode vertical answer
'far': 'close', 'close': 'far',
}
# Opposite map for short-answer mode (vertical uses 'above'/'under', not 'above'/'below')
SHORT_OPPOSITE_MAP = {
'left': 'right', 'right': 'left',
'above': 'below', 'below': 'above',
'far': 'close', 'close': 'far',
}
GROUP_MAP = {
'left': 'horizontal', 'right': 'horizontal',
'above': 'vertical', 'below': 'vertical',
'far': 'distance', 'close': 'distance',
}
GROUP_ORDER = ['horizontal', 'vertical', 'distance']
# Fix 5: Canonical categories for sign-corrected consistency
CANONICAL_CATEGORIES = {
'horizontal': 'left',
'vertical': 'above',
'distance': 'far',
}
# Fix 2: Synonyms for answer matching
# 'below' is now primary; 'under'/'beneath' recognized as synonyms
SYNONYMS = {
'below': ['under', 'beneath'],
'close': ['near', 'nearby'],
'far': ['distant'],
}
# ── MCQ question templates (option order alternated per pair for A/B bias control) ──
_Q_TAIL_MCQ = "Answer with a single letter A or B."
MCQ_TEMPLATES = {
'horizontal': {
'left_first': "Is the {obj1} to the left or right of the {obj2}? (A) left (B) right " + _Q_TAIL_MCQ,
'right_first': "Is the {obj1} to the left or right of the {obj2}? (A) right (B) left " + _Q_TAIL_MCQ,
},
'vertical': {
'above_first': "Is the {obj1} above or below the {obj2}? (A) above (B) below " + _Q_TAIL_MCQ,
'below_first': "Is the {obj1} above or below the {obj2}? (A) below (B) above " + _Q_TAIL_MCQ,
},
'distance': {
'far_first': "Compared to {ref}, is {subj} far or close from you? (A) far (B) close " + _Q_TAIL_MCQ,
'close_first': "Compared to {ref}, is {subj} far or close from you? (A) close (B) far " + _Q_TAIL_MCQ,
},
}
MCQ_LETTER = {
'horizontal': {
'left_first': {'left': 'a', 'right': 'b'},
'right_first': {'left': 'b', 'right': 'a'},
},
'vertical': {
'above_first': {'above': 'a', 'below': 'b'},
'below_first': {'above': 'b', 'below': 'a'},
},
'distance': {
'far_first': {'far': 'a', 'close': 'b'},
'close_first': {'far': 'b', 'close': 'a'},
},
}
SCALE_COLORS = {
'vanilla': '#1f77b4', '80k': '#ff7f0e', '400k': '#2ca02c',
'800k': '#d62728', '2m': '#9467bd', 'roborefer':'#8c564b',
# New large models
'molmo2': '#17becf', # cyan
'qwen3_32b': '#bcbd22', # yellow-green
'qwen3_235b': '#e377c2', # pink
# Synthetic-mix NVILA at 80k scale (shades of teal, light→dark by mix ratio)
'80k-5pct': '#b2dfdb', # very light teal
'80k-10pct': '#00b894', # teal
'80k-20pct': '#00897b', # darker teal
'80k-30pct': '#004d40', # deep teal
# Synthetic-mix NVILA at 400k scale
'400k-5pct': '#66bb6a', # light green (near 400k's #2ca02c)
}
# Canonical scale ordering used by accuracy/ylim plots (add new scales here to control x-axis)
SCALE_ORDER = [
'vanilla', '80k', '80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct',
'400k', '400k-5pct', '800k', '2m', 'roborefer',
'molmo2', 'qwen3_32b', 'qwen3_235b',
]
# Human-readable legend labels (only entries that differ from the key are needed)
SCALE_DISPLAY_NAMES = {
'80k-5pct': '80k 5%',
'80k-10pct': '80k 10%',
'80k-20pct': '80k 20%',
'80k-30pct': '80k 30%',
'400k-5pct': '400k 5%',
}
# Category colors aligned with group: horizontal=orange, vertical=green, distance=purple
CAT_COLORS = {
'left': '#ff7f0e', 'right': '#ffbb78', # horizontal β†’ orange
'above': '#2ca02c', 'below': '#98df8a', # vertical β†’ green
'far': '#9467bd', 'close': '#c5b0d5', # distance β†’ purple
}
GROUP_COLORS = {
'horizontal': '#ff7f0e',
'vertical': '#2ca02c',
'distance': '#9467bd',
}
# Short-answer (non-MCQ) question templates
SHORT_TEMPLATES = {
'horizontal': "Is the {obj1} to the left or right of the {obj2}? Answer with only one word.",
'vertical': "Is the {obj1} above or below the {obj2}? Answer with only one word.",
'distance': "Compared to {ref}, is {subj} far or close from you? Answer with only one word.",
}
MODEL_CONFIGS = {
'molmo': {
'vanilla': 'allenai/Molmo-7B-O-0924',
'80k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_80k/unshared',
'400k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_400k/unshared',
'800k': '/data/shared/Qwen/molmo/outputs/data_scale_exp_800k/unshared',
'2m': '/data/shared/Qwen/molmo/outputs/data_scale_exp_2m/unshared',
},
'nvila': {
'vanilla': '/data/shared/Qwen/mydisk/NVILA-Lite-2B',
'80k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_80K-20251108_180221',
'400k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_400K-20251108_180221',
'800k': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_800K-20251108_180221',
'2m': '/data/shared/Qwen/mydisk/output/DATA/NVILA-Lite-2B-DATA_SCALE_EXP_2M-20260205_003632',
# '80k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-1250',
# '400k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-6250',
# '800k': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-12500',
# '2m': '/data/shared/Qwen/mydisk/output/SINGLE/NVILA-Lite-2B-SINGLE_REFSPATIAL_16M-20260217_035008/checkpoint-31250',
'roborefer': '/data/shared/Qwen/mydisk/RoboRefer_model',
},
'qwen': {
'vanilla': 'Qwen/Qwen2.5-VL-3B-Instruct',
'80k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_80k-20251114_120221',
'400k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_400k-20251114_120221',
'800k': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_800k-20251114_120221',
'2m': '/data/shared/Qwen/mydisk/output/Qwen/Qwen2.5-VL-3B-Instruct-data_scale_exp_2m-20260109_120517',
},
# NVILA trained with synthetic data mixed in at different ratios
'nvila_synthetic': {
'80k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-1250',
'80k-10pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_10PCT_80K-20260224_234537',
'80k-20pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_20PCT_80K-20260224_232347',
'80k-30pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_30PCT_80K-20260224_232347',
'400k-5pct': '/data/shared/Qwen/mydisk/output/SYNTHETIC/NVILA-Lite-2B-SYNTHETIC_MIX_5PCT_2M-20260226_023301/checkpoint-6250',
},
}
# ── New large / cross-family models ──────────────────────────────────────────
# Each scale maps to (ExtractorClassName, HF-model-ID-or-absolute-path).
# resolve_local_path() converts HF IDs to local snapshot dirs when cached.
MODEL_CONFIGS_NEW = {
'molmo_big': {
'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'),
},
'qwen_big': {
'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'),
},
'qwen_super': {
'qwen3_235b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-235B-A22B-Instruct'),
},
'big_trio': {
'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'),
'roborefer': ('RoboReferExtractor', '/data/shared/Qwen/mydisk/RoboRefer_model'),
'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'),
},
}
# ── Merge-only: combine existing per-scale data from multiple source dirs ─────
MERGE_ONLY_CONFIGS = {
'molmo_all': {
'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'molmo2'],
'scale_sources': {
'vanilla': 'molmo', '80k': 'molmo', '400k': 'molmo',
'800k': 'molmo', '2m': 'molmo', 'molmo2': 'molmo_big',
},
'required_dirs': ['molmo', 'molmo_big'],
},
'qwen_all': {
'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'qwen3_32b'],
'scale_sources': {
'vanilla': 'qwen', '80k': 'qwen', '400k': 'qwen',
'800k': 'qwen', '2m': 'qwen', 'qwen3_32b': 'qwen_big',
},
'required_dirs': ['qwen', 'qwen_big'],
},
# Compare NVILA baselines against synthetic-mix checkpoints
'nvila_synth_compare': {
'scale_order': ['vanilla', '80k', '80k-5pct', '80k-10pct', '400k', '400k-5pct'],
'scale_sources': {
'vanilla': 'nvila', '80k': 'nvila',
'80k-5pct': 'nvila_synthetic', '80k-10pct': 'nvila_synthetic',
'400k': 'nvila',
'400k-5pct': 'nvila_synthetic',
},
'required_dirs': ['nvila', 'nvila_synthetic'],
},
}
# Default scale run order for new runnable types
SCALE_ORDERS_NEW = {
'molmo_big': ['molmo2'],
'qwen_big': ['qwen3_32b'],
'qwen_super': ['qwen3_235b'],
'big_trio': ['molmo2', 'roborefer', 'qwen3_32b'],
}
ALL_MODEL_TYPES = (
list(MODEL_CONFIGS.keys()) +
list(MODEL_CONFIGS_NEW.keys()) +
list(MERGE_ONLY_CONFIGS.keys())
)
# ============================================================================
# Data Loading & Swap Pair Creation
# ============================================================================
OBJECT_PATTERNS = [
re.compile(r'between\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
re.compile(r'of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
re.compile(r'positions\s+of\s+(.+?)\s+and\s+(.+?)\s+interact', re.IGNORECASE),
re.compile(r'How\s+are\s+(.+?)\s+and\s+(.+?)\s+positioned', re.IGNORECASE),
re.compile(r'arrangement\s+of\s+(.+?)\s+and\s+(.+?)\s+in', re.IGNORECASE),
]
def extract_objects(question: str) -> Tuple[str, str]:
for pattern in OBJECT_PATTERNS:
m = pattern.search(question)
if m:
return m.group(1).strip(), m.group(2).strip()
raise ValueError(f"Could not extract objects from: {question}")
def decode_base64_image(base64_str: str) -> Image.Image:
image_data = base64.b64decode(base64_str)
return Image.open(BytesIO(image_data)).convert('RGB')
# ============================================================================
# Answer Matching (Fix 2: synonym support)
# ============================================================================
def find_earliest_position(text: str, word: str) -> int:
"""Find earliest position of word or any of its synonyms in text."""
positions = []
pos = text.find(word)
if pos != -1:
positions.append(pos)
for syn in SYNONYMS.get(word, []):
pos = text.find(syn)
if pos != -1:
positions.append(pos)
return min(positions) if positions else -1
def check_answer(generated_text: str, expected_category: str, mcq_map: dict = None) -> bool:
if not generated_text or not generated_text.strip():
return False
text = generated_text.strip().lower()
expected = expected_category.lower()
opposite = OPPOSITE_MAP[expected]
if mcq_map:
exp_letter = mcq_map.get(expected)
opp_letter = mcq_map.get(opposite)
# Standalone letter response (e.g. "A", "A.", "A)", "B")
if exp_letter and text in (exp_letter, exp_letter+'.', exp_letter+')', exp_letter+','):
return True
if opp_letter and text in (opp_letter, opp_letter+'.', opp_letter+')', opp_letter+','):
return False
else:
exp_letter = opp_letter = None
# MCQ inline pattern "(a)"/"(b)" β€” variant-aware
mcq_exp = f'({exp_letter})' if exp_letter else None
mcq_opp = f'({opp_letter})' if opp_letter else None
def earliest_with_mcq(word, mcq_pat=None):
positions = []
pos = text.find(word)
if pos != -1:
positions.append(pos)
for syn in SYNONYMS.get(word, []):
pos = text.find(syn)
if pos != -1:
positions.append(pos)
if mcq_pat:
pos = text.find(mcq_pat)
if pos != -1:
positions.append(pos)
return min(positions) if positions else -1
pos_exp = earliest_with_mcq(expected, mcq_exp)
pos_opp = earliest_with_mcq(opposite, mcq_opp)
if pos_exp == -1:
return False
if pos_opp == -1:
return True
return pos_exp < pos_opp
# ============================================================================
# Swap Pair Loading (Fix 1: prompt suffix)
# ============================================================================
def load_swap_pairs(tsv_path: str, seed: int = 42, filter_unknown: bool = True,
question_type: str = 'mcq') -> List[dict]:
"""Load EmbSpatialBench TSV and create swap pairs for all samples.
Args:
filter_unknown: If True (default), skip far/close pairs where target_object
is Unknown/empty, and remove Unknown/empty values from reference_object
candidates before sampling. Pairs with no valid candidates are dropped.
Use --no-filtering to disable.
question_type: 'mcq' (default) uses MCQ A/B templates with letter answers;
'short' uses the original "Answer with only one word." format.
"""
rng = random.Random(seed)
df = pd.read_csv(tsv_path, sep='\t')
pairs = []
stats = defaultdict(lambda: {'total': 0, 'success': 0})
def _valid_obj(v):
return bool(v) and str(v).strip().lower() not in ('unknown', 'n/a', '')
for _, row in df.iterrows():
category = row['category']
stats[category]['total'] += 1
try:
if category in ['left', 'right', 'above', 'under', 'below']:
obj1, obj2 = extract_objects(row['question'])
if category in ['left', 'right']:
grp = 'horizontal'
else:
grp = 'vertical'
if question_type == 'short':
# Single-word format; normalize 'under' β†’ 'below'
if category == 'under':
category = 'below'
tmpl = SHORT_TEMPLATES[grp]
pair = {
'index': row['index'],
'question_id': str(row['question_id']),
'image_base64': row['image'],
'original_question': tmpl.format(obj1=obj1, obj2=obj2),
'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
'original_answer': category,
'swapped_answer': SHORT_OPPOSITE_MAP[category],
'group': grp,
'category': category,
'obj1': obj1, 'obj2': obj2,
'mcq_map': None,
}
else:
# MCQ format; normalize 'under' β†’ 'below'
if category == 'under':
category = 'below'
variant = ('left_first' if grp == 'horizontal' else 'above_first') \
if len(pairs) % 2 == 0 else \
('right_first' if grp == 'horizontal' else 'below_first')
tmpl = MCQ_TEMPLATES[grp][variant]
mcq_map = MCQ_LETTER[grp][variant]
pair = {
'index': row['index'],
'question_id': str(row['question_id']),
'image_base64': row['image'],
'original_question': tmpl.format(obj1=obj1, obj2=obj2),
'swapped_question': tmpl.format(obj1=obj2, obj2=obj1),
'original_answer': category,
'swapped_answer': OPPOSITE_MAP[category],
'group': GROUP_MAP[category],
'category': category,
'obj1': obj1, 'obj2': obj2,
'mcq_map': mcq_map,
}
elif category in ['far', 'close']:
answer_key = row['answer']
options = {k: row[k] for k in ['A', 'B', 'C', 'D']}
target_object = options[answer_key]
candidates = [v for k, v in options.items() if k != answer_key]
if filter_unknown:
if not _valid_obj(target_object):
continue
candidates = [v for v in candidates if _valid_obj(v)]
if not candidates:
continue
reference_object = rng.choice(candidates)
if question_type == 'short':
tmpl = SHORT_TEMPLATES['distance']
pair = {
'index': row['index'],
'question_id': str(row['question_id']),
'image_base64': row['image'],
'original_question': tmpl.format(ref=reference_object, subj=target_object),
'swapped_question': tmpl.format(ref=target_object, subj=reference_object),
'original_answer': category,
'swapped_answer': OPPOSITE_MAP[category],
'group': 'distance',
'category': category,
'target_object': target_object,
'reference_object': reference_object,
'mcq_map': None,
}
else:
variant = 'far_first' if len(pairs) % 2 == 0 else 'close_first'
tmpl = MCQ_TEMPLATES['distance'][variant]
mcq_map = MCQ_LETTER['distance'][variant]
pair = {
'index': row['index'],
'question_id': str(row['question_id']),
'image_base64': row['image'],
'original_question': tmpl.format(ref=reference_object, subj=target_object),
'swapped_question': tmpl.format(ref=target_object, subj=reference_object),
'original_answer': category,
'swapped_answer': OPPOSITE_MAP[category],
'group': 'distance',
'category': category,
'target_object': target_object,
'reference_object': reference_object,
'mcq_map': mcq_map,
}
else:
continue
pairs.append(pair)
stats[category]['success'] += 1
except Exception as e:
logger.warning(f"Failed to create swap pair for index {row['index']}: {e}")
continue
logger.info("Swap pair creation stats:")
for cat in CATEGORY_ORDER:
s = stats[cat]
logger.info(f" {cat}: {s['success']}/{s['total']}")
logger.info(f" Total pairs: {len(pairs)}")
return pairs
# ============================================================================
# HF Bbox Cache (Fix 4: string-normalized keys)
# ============================================================================
def build_hf_bbox_cache(hf_dataset_name: str = 'FlagEval/EmbSpatial-Bench') -> Dict[str, dict]:
"""Load HF dataset and build bbox lookup cache keyed by string-normalized question_id."""
from datasets import load_dataset
logger.info(f"Loading HF dataset: {hf_dataset_name}")
ds = load_dataset(hf_dataset_name, split='test')
cache = {}
for item in ds:
# Fix 4: Normalize key to string for consistent matching
qid = str(item['question_id'])
cache[qid] = {
'objects': item['objects'],
'relation': item['relation'],
'data_source': item['data_source'],
'answer': item['answer'],
'answer_options': item['answer_options'],
}
# Fix 4: Log sample keys for debugging
sample_keys = list(cache.keys())[:5]
logger.info(f"Built bbox cache: {len(cache)} entries (sample keys: {sample_keys})")
return cache
def get_bbox_center_y(bbox: list) -> float:
return bbox[1] + bbox[3] / 2
def create_cross_group_quads(
swap_pairs: List[dict],
hf_cache: Dict[str, dict],
threshold_ratio: float = 0.05,
question_type: str = 'mcq',
) -> List[dict]:
"""For far/close swap pairs, create additional vertical queries using bbox."""
IMAGE_HEIGHTS = {'ai2thor': 300, 'mp3d': 480, 'scannet': 968}
quads = []
stats = {'total': 0, 'matched': 0, 'ambiguous': 0, 'no_bbox': 0}
distance_pairs = [p for p in swap_pairs if p['group'] == 'distance']
# Fix 4: Use question_id (e.g. 'mp3d_0') to match HF dataset, not integer index
n_matched_keys = sum(1 for p in distance_pairs if p['question_id'] in hf_cache)
logger.info(f"Matched {n_matched_keys}/{len(distance_pairs)} question_ids between TSV and HF dataset")
for pair in distance_pairs:
stats['total'] += 1
qid = pair['question_id']
if qid not in hf_cache:
stats['no_bbox'] += 1
continue
hf_item = hf_cache[qid]
names = hf_item['objects']['name']
bboxes = hf_item['objects']['bbox']
target = pair['target_object']
reference = pair['reference_object']
target_bbox_y, ref_bbox_y = None, None
for name, bbox in zip(names, bboxes):
if name == target:
target_bbox_y = get_bbox_center_y(bbox)
if name == reference:
ref_bbox_y = get_bbox_center_y(bbox)
if target_bbox_y is None or ref_bbox_y is None:
stats['no_bbox'] += 1
continue
image_height = IMAGE_HEIGHTS.get(hf_item['data_source'], 480)
threshold = image_height * threshold_ratio
y_diff = target_bbox_y - ref_bbox_y
if abs(y_diff) < threshold:
stats['ambiguous'] += 1
continue
if target_bbox_y < ref_bbox_y:
vert_original_answer = 'above'
else:
vert_original_answer = 'below'
if question_type == 'short':
vert_tmpl = SHORT_TEMPLATES['vertical']
vert_mcq_map = None
vert_original_q = vert_tmpl.format(obj1=target, obj2=reference)
vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target)
vert_swapped_answer = SHORT_OPPOSITE_MAP[vert_original_answer]
else:
vert_variant = 'above_first' if len(quads) % 2 == 0 else 'below_first'
vert_tmpl = MCQ_TEMPLATES['vertical'][vert_variant]
vert_mcq_map = MCQ_LETTER['vertical'][vert_variant]
vert_original_q = vert_tmpl.format(obj1=target, obj2=reference)
vert_swapped_q = vert_tmpl.format(obj1=reference, obj2=target)
vert_swapped_answer = OPPOSITE_MAP[vert_original_answer]
quad = {
'index': pair['index'],
'image_base64': pair['image_base64'],
'dist_original_q': pair['original_question'],
'dist_swapped_q': pair['swapped_question'],
'dist_original_answer': pair['original_answer'],
'dist_swapped_answer': pair['swapped_answer'],
'dist_mcq_map': pair['mcq_map'],
'vert_original_q': vert_original_q,
'vert_swapped_q': vert_swapped_q,
'vert_original_answer': vert_original_answer,
'vert_swapped_answer': vert_swapped_answer,
'vert_mcq_map': vert_mcq_map,
'target_object': target,
'reference_object': reference,
'target_bbox_y': target_bbox_y,
'ref_bbox_y': ref_bbox_y,
'y_diff': y_diff,
'data_source': hf_item['data_source'],
}
quads.append(quad)
stats['matched'] += 1
logger.info(f"Cross-group quads: {stats['matched']}/{stats['total']} "
f"(ambiguous={stats['ambiguous']}, no_bbox={stats['no_bbox']})")
return quads
# ============================================================================
# Base Extractor
# ============================================================================
class BaseHiddenStateExtractor(ABC):
def __init__(self, model_path: str, device: str = 'cuda', target_layers: List[int] = None):
self.model_path = model_path
self.device = device
self.hidden_states = {}
self.hooks = []
self._load_model()
num_layers = self._get_num_layers()
if target_layers is None:
self.target_layers = list(range(num_layers))
logger.info(f"Model has {num_layers} layers. Extracting ALL.")
else:
self.target_layers = target_layers
self._register_hooks()
def _register_hooks(self):
for layer_idx in self.target_layers:
module = self._get_layer_module(layer_idx)
if module is not None:
hook = module.register_forward_hook(self._make_hook(layer_idx))
self.hooks.append(hook)
def _make_hook(self, layer_idx: int):
def hook_fn(module, input, output):
if isinstance(output, tuple):
hidden = output[0]
else:
hidden = output
if hidden.shape[1] > 1: # prefill only
last_token = hidden[:, -1, :].detach().cpu().float()
self.hidden_states[layer_idx] = last_token.squeeze(0)
return hook_fn
@abstractmethod
def _load_model(self): pass
@abstractmethod
def _get_num_layers(self) -> int: pass
@abstractmethod
def _get_layer_module(self, layer_idx: int): pass
@abstractmethod
def extract_and_predict(self, image: Image.Image, question: str) -> Tuple[Dict[int, torch.Tensor], str]: pass
def cleanup(self):
for hook in self.hooks:
hook.remove()
self.hooks = []
if hasattr(self, 'model'):
del self.model
if hasattr(self, 'processor'):
del self.processor
torch.cuda.empty_cache()
# ============================================================================
# Molmo Extractor
# ============================================================================
class MolmoExtractor(BaseHiddenStateExtractor):
def _load_model(self):
config_path = os.path.join(self.model_path, "config.yaml")
checkpoint_path = os.path.join(self.model_path, "model.pt")
if os.path.exists(config_path) and os.path.exists(checkpoint_path):
self._load_native_model()
self.is_native = True
else:
self._load_hf_model()
self.is_native = False
def _load_native_model(self):
from olmo.config import ModelConfig
from olmo.model import Molmo as NativeMolmoModel
from olmo.data.model_preprocessor import MultiModalPreprocessor
from olmo.data.data_formatter import DataFormatter
_original_load = torch.load
def _unsafe_load_wrapper(*args, **kwargs):
if 'weights_only' not in kwargs:
kwargs['weights_only'] = False
return _original_load(*args, **kwargs)
torch.load = _unsafe_load_wrapper
cfg = ModelConfig.load(
os.path.join(self.model_path, "config.yaml"),
key="model", validate_paths=False
)
cfg.init_device = "cpu"
self.model = NativeMolmoModel(cfg)
state_dict = torch.load(os.path.join(self.model_path, "model.pt"), map_location="cpu")
self.model.load_state_dict(state_dict)
self.model = self.model.to(self.device, dtype=torch.bfloat16).eval()
self.tokenizer = cfg.get_tokenizer()
v_cfg = cfg.vision_backbone
h, w = cfg.llm_patches_per_crop()
image_padding_mask = 2 if cfg.fix_image_padding else (1 if cfg.image_padding_embed else None)
class SafeDataFormatter(DataFormatter):
def get_system_prompt(self, style, for_inference, messages, rng=None):
if style is None:
style = "User"
return super().get_system_prompt(style, for_inference, messages, rng)
self.formatter = SafeDataFormatter(
prompt_templates=cfg.prompt_type, message_format=cfg.message_formatting,
system_prompt=cfg.system_prompt_kind, always_start_with_space=cfg.always_start_with_space,
default_inference_len=cfg.default_inference_len
)
self.preprocessor = MultiModalPreprocessor(
tokenizer=self.tokenizer, normalize=str(v_cfg.image_model_type),
crop_mode=cfg.crop_mode, max_crops=cfg.max_crops,
overlap_margins=cfg.overlap_margins, resize=v_cfg.resize_mode,
use_col_tokens=cfg.use_col_tokens, base_image_input_size=v_cfg.image_default_input_size,
image_pooling_w=cfg.image_pooling_w, image_pooling_h=cfg.image_pooling_h,
image_token_length_w=w, image_token_length_h=h,
image_patch_size=v_cfg.image_patch_size, image_padding_mask=image_padding_mask,
pad_value=cfg.pad_value, loss_token_weighting=cfg.multi_annotation_weighting,
)
logger.info(f"Loaded native Molmo from {self.model_path}")
def _load_hf_model(self):
from transformers import AutoModelForCausalLM, AutoProcessor
self.model = AutoModelForCausalLM.from_pretrained(
self.model_path, torch_dtype=torch.bfloat16,
trust_remote_code=True, device_map=self.device
).eval()
self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
logger.info(f"Loaded HF Molmo from {self.model_path}")
def _get_num_layers(self) -> int:
if self.is_native:
return len(self.model.transformer.blocks)
if hasattr(self.model, 'model') and hasattr(self.model.model, 'transformer'):
return len(self.model.model.transformer.blocks)
return 32
def _get_layer_module(self, layer_idx: int):
if self.is_native:
return self.model.transformer.blocks[layer_idx]
return self.model.model.transformer.blocks[layer_idx]
def extract_and_predict(self, image, question):
self.hidden_states = {}
if self.is_native:
example = {"messages": [question], "image": image}
messages, _ = self.formatter(example, is_training=False, for_inference=True, rng=np.random)
batch = self.preprocessor(np.array(image), messages, is_training=False, require_image_features=True)
if 'input_ids' not in batch and 'input_tokens' in batch:
batch['input_ids'] = batch['input_tokens']
def to_t(x):
return torch.from_numpy(x) if isinstance(x, np.ndarray) else x
input_ids = to_t(batch['input_ids']).unsqueeze(0).to(self.device).long()
images_t = to_t(batch['images']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
image_masks = to_t(batch['image_masks']).unsqueeze(0).to(self.device, dtype=torch.bfloat16)
image_input_idx = to_t(batch['image_input_idx']).unsqueeze(0).to(self.device)
with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
gen = self.model.generate(
input_ids=input_ids, images=images_t,
image_masks=image_masks, image_input_idx=image_input_idx,
max_steps=20, beam_size=1,
)
generated_ids = gen.token_ids[0, 0]
answer = self.tokenizer.decode(generated_ids.tolist()).strip()
for eos in ['<|endoftext|>', '</s>', '<|end|>']:
answer = answer.replace(eos, '').strip()
else:
from transformers import GenerationConfig
inputs = self.processor.process(images=[image], text=question)
processed = {}
for k, v in inputs.items():
v = v.to(self.device).unsqueeze(0)
if v.dtype == torch.float32:
v = v.to(dtype=torch.bfloat16)
processed[k] = v
with torch.no_grad(), torch.autocast("cuda", dtype=torch.bfloat16):
output = self.model.generate_from_batch(
processed,
GenerationConfig(max_new_tokens=20, stop_strings="<|endoftext|>"),
tokenizer=self.processor.tokenizer,
)
input_len = processed['input_ids'].shape[1]
answer = self.processor.tokenizer.decode(output[0, input_len:], skip_special_tokens=True).strip()
return self.hidden_states.copy(), answer
# ============================================================================
# NVILA Extractor
# ============================================================================
class NVILAExtractor(BaseHiddenStateExtractor):
def _load_model(self):
original_sys_path = sys.path.copy()
sys.path = [p for p in sys.path if 'RoboRefer' not in p]
modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()]
removed = {m: sys.modules.pop(m) for m in modules_to_remove}
try:
import llava
from llava.media import Image as LLaVAImage
from llava import conversation as clib
except Exception as err:
sys.path = original_sys_path
for m, mod in removed.items():
sys.modules[m] = mod
raise RuntimeError(f"Failed to import llava: {err}")
sys.path = original_sys_path
self.LLaVAImage = LLaVAImage
self.clib = clib
self.model = llava.load(self.model_path, model_base=None)
self._find_llm_backbone()
logger.info(f"Loaded NVILA from {self.model_path}")
def _find_llm_backbone(self):
candidates = []
if hasattr(self.model, 'llm'):
if hasattr(self.model.llm, 'model') and hasattr(self.model.llm.model, 'layers'):
candidates.append(self.model.llm.model.layers)
if hasattr(self.model.llm, 'layers'):
candidates.append(self.model.llm.layers)
if hasattr(self.model, 'model'):
if hasattr(self.model.model, 'model') and hasattr(self.model.model.model, 'layers'):
candidates.append(self.model.model.model.layers)
if hasattr(self.model.model, 'layers'):
candidates.append(self.model.model.layers)
for name, module in self.model.named_modules():
if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > 0:
candidates.append(module)
if candidates:
self.llm_backbone = candidates[0]
else:
raise ValueError("Could not locate transformer layers in NVILA model")
def _get_num_layers(self) -> int:
return len(self.llm_backbone) if hasattr(self, 'llm_backbone') else 24
def _get_layer_module(self, layer_idx: int):
return self.llm_backbone[layer_idx]
def extract_and_predict(self, image, question):
self.hidden_states = {}
import tempfile
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
temp_path = f.name
image.save(temp_path)
try:
prompt = [self.LLaVAImage(temp_path), question]
from transformers import GenerationConfig
response = self.model.generate_content(
prompt, generation_config=GenerationConfig(max_new_tokens=20, do_sample=False)
)
finally:
os.unlink(temp_path)
answer = str(response[0] if isinstance(response, list) else response).strip()
return self.hidden_states.copy(), answer
class RoboReferExtractor(NVILAExtractor):
ROBOREFER_PATH = '/data/shared/Qwen/RoboRefer'
def _load_model(self):
original_sys_path = sys.path.copy()
if self.ROBOREFER_PATH not in sys.path:
sys.path.insert(0, self.ROBOREFER_PATH)
modules_to_remove = [k for k in list(sys.modules.keys()) if 'llava' in k.lower()]
removed = {m: sys.modules.pop(m) for m in modules_to_remove}
try:
import llava
from llava.media import Image as LLaVAImage
from llava import conversation as clib
except Exception as err:
sys.path = original_sys_path
for m, mod in removed.items():
sys.modules[m] = mod
raise RuntimeError(f"Failed to import RoboRefer llava: {err}")
sys.path = original_sys_path
self.LLaVAImage = LLaVAImage
self.clib = clib
self.model = llava.load(self.model_path, model_base=None)
self._find_llm_backbone()
logger.info(f"Loaded RoboRefer from {self.model_path}")
# ============================================================================
# Qwen2.5-VL Extractor
# ============================================================================
class Qwen25VLExtractor(BaseHiddenStateExtractor):
BASE_MODEL = "Qwen/Qwen2.5-VL-3B-Instruct"
def _load_model(self):
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
try:
self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
self.model_path, torch_dtype=torch.bfloat16, device_map=self.device
)
except ImportError:
self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
self.model_path, torch_dtype=torch.bfloat16
).to(self.device)
self.model.eval()
if self.model_path.startswith('/'):
self.processor = AutoProcessor.from_pretrained(self.BASE_MODEL)
else:
self.processor = AutoProcessor.from_pretrained(self.model_path)
logger.info(f"Loaded Qwen2.5-VL from {self.model_path}")
def _get_num_layers(self) -> int:
return len(self.model.model.layers)
def _get_layer_module(self, layer_idx: int):
return self.model.model.layers[layer_idx]
def extract_and_predict(self, image, question):
self.hidden_states = {}
messages = [{"role": "user", "content": [
{"type": "image", "image": image},
{"type": "text", "text": question}
]}]
text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
from qwen_vl_utils import process_vision_info
image_inputs, video_inputs = process_vision_info(messages)
inputs = self.processor(
text=[text], images=image_inputs, videos=video_inputs,
padding=True, return_tensors="pt"
).to(self.device)
with torch.no_grad():
output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
input_len = inputs['input_ids'].shape[1]
answer = self.processor.tokenizer.decode(output_ids[0, input_len:], skip_special_tokens=True).strip()
return self.hidden_states.copy(), answer
# ============================================================================
# New Extractors: Molmo2-8B and Qwen3-VL family
# ============================================================================
class Molmo2Extractor(BaseHiddenStateExtractor):
"""Extractor for allenai/Molmo2-8B (AutoModelForImageTextToText, messages-dict input)."""
def _load_model(self):
from transformers import AutoProcessor, AutoModelForImageTextToText
self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
self.model = AutoModelForImageTextToText.from_pretrained(
self.model_path, trust_remote_code=True, torch_dtype='auto', device_map='auto',
).eval()
self._find_llm_layers()
logger.info(f"Loaded Molmo2 from {self.model_path}")
def _find_llm_layers(self):
candidates = [
['model', 'layers'],
['language_model', 'model', 'layers'],
['model', 'model', 'layers'],
]
for path in candidates:
obj = self.model
for attr in path:
obj = getattr(obj, attr, None)
if obj is None:
break
if obj is not None and hasattr(obj, '__len__') and len(obj) > 0:
self.llm_layers = obj
logger.info(f"Molmo2: layers at '{'.'.join(path)}', count={len(obj)}")
return
best, best_len = None, 0
for name, module in self.model.named_modules():
if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len:
best, best_len = module, len(module)
logger.info(f"Molmo2: layers via scan at '{name}', count={best_len}")
if best is not None:
self.llm_layers = best
return
raise ValueError("Could not find transformer layers in Molmo2 model")
def _get_num_layers(self) -> int:
return len(self.llm_layers)
def _get_layer_module(self, layer_idx: int):
return self.llm_layers[layer_idx]
def extract_and_predict(self, image, question):
self.hidden_states = {}
messages = [{"role": "user", "content": [
{"type": "image", "image": image},
{"type": "text", "text": question},
]}]
inputs = self.processor.apply_chat_template(
messages, tokenize=True, add_generation_prompt=True,
return_tensors="pt", return_dict=True,
)
inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
with torch.inference_mode():
generated_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
input_len = inputs['input_ids'].shape[1]
answer = self.processor.tokenizer.decode(
generated_ids[0, input_len:], skip_special_tokens=True).strip()
return self.hidden_states.copy(), answer
class Qwen3VLExtractor(BaseHiddenStateExtractor):
"""Extractor for Qwen3-VL family (32B dense, 235B MoE).
Key differences from Qwen25VLExtractor:
- AutoModelForImageTextToText + trust_remote_code=True
- process_vision_info requires image_patch_size=16
- processor call requires do_resize=False
- 32Γ—32 px patches β†’ different min/max_pixels
"""
MIN_PIXELS = 256 * 32 * 32 # 262,144 (mp3d/scannet β†’ natural res; ai2thor β†’ ~256 tokens)
MAX_PIXELS = 16384 * 32 * 32 # 16,777,216
def _load_model(self):
from transformers import AutoProcessor, AutoModelForImageTextToText
self.processor = AutoProcessor.from_pretrained(self.model_path, trust_remote_code=True)
self.model = AutoModelForImageTextToText.from_pretrained(
self.model_path, trust_remote_code=True, torch_dtype='auto',
device_map='auto', attn_implementation='flash_attention_2',
).eval()
self._find_llm_layers()
logger.info(f"Loaded Qwen3-VL from {self.model_path}")
def _find_llm_layers(self):
candidates = [
['model', 'language_model', 'model', 'layers'], # Qwen3-VL expected
['language_model', 'model', 'layers'],
['model', 'model', 'layers'],
['model', 'layers'],
]
for path in candidates:
obj = self.model
for attr in path:
obj = getattr(obj, attr, None)
if obj is None:
break
if obj is not None and hasattr(obj, '__len__') and len(obj) > 0:
self.llm_layers = obj
logger.info(f"Qwen3-VL: layers at '{'.'.join(path)}', count={len(obj)}")
return
best, best_len = None, 0
for name, module in self.model.named_modules():
if name.endswith('.layers') and hasattr(module, '__len__') and len(module) > best_len:
best, best_len = module, len(module)
logger.info(f"Qwen3-VL: layers via scan at '{name}', count={best_len}")
if best is not None:
self.llm_layers = best
return
raise ValueError("Could not find transformer layers in Qwen3-VL model")
def _get_num_layers(self) -> int:
return len(self.llm_layers)
def _get_layer_module(self, layer_idx: int):
return self.llm_layers[layer_idx]
def extract_and_predict(self, image, question):
self.hidden_states = {}
messages = [{"role": "user", "content": [
{"type": "image", "image": image,
"min_pixels": self.MIN_PIXELS, "max_pixels": self.MAX_PIXELS},
{"type": "text", "text": question},
]}]
text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
from qwen_vl_utils import process_vision_info
images, videos, _ = process_vision_info(
messages, image_patch_size=16, return_video_kwargs=True, return_video_metadata=True,
)
inputs = self.processor(
text=text, images=images, videos=videos, do_resize=False, return_tensors="pt",
).to(self.model.device)
with torch.no_grad():
output_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False)
input_len = inputs['input_ids'].shape[1]
answer = self.processor.tokenizer.decode(
output_ids[0, input_len:], skip_special_tokens=True).strip()
return self.hidden_states.copy(), answer
EXTRACTOR_CLASSES = {
'MolmoExtractor': MolmoExtractor,
'NVILAExtractor': NVILAExtractor,
'RoboReferExtractor': RoboReferExtractor,
'Qwen25VLExtractor': Qwen25VLExtractor,
'Molmo2Extractor': Molmo2Extractor,
'Qwen3VLExtractor': Qwen3VLExtractor,
}
def get_extractor(model_type: str, model_path: str = None, scale: str = None, **kwargs):
"""Create an extractor for any model_type (legacy or new-large)."""
# New large models: (ExtractorClass, path) tuples in MODEL_CONFIGS_NEW
if model_type in MODEL_CONFIGS_NEW:
cls_name, raw_path = MODEL_CONFIGS_NEW[model_type][scale]
resolved = resolve_local_path(raw_path)
logger.info(f"Creating {cls_name} for scale='{scale}' from {resolved}")
return EXTRACTOR_CLASSES[cls_name](resolved, **kwargs)
# Legacy models
if model_type == 'nvila' and scale == 'roborefer':
return RoboReferExtractor(model_path, **kwargs)
legacy = {
'molmo': MolmoExtractor, 'nvila': NVILAExtractor, 'qwen': Qwen25VLExtractor,
'nvila_synthetic': NVILAExtractor,
}
return legacy[model_type](model_path, **kwargs)
# ============================================================================
# Feature Extraction Pipeline
# ============================================================================
def run_single_query(extractor, image, question):
hidden_states, predicted = extractor.extract_and_predict(image, question)
result = {}
for layer_idx in extractor.target_layers:
if layer_idx in hidden_states:
state = hidden_states[layer_idx].numpy().flatten()
if state.size > 0:
result[layer_idx] = state
return result, predicted
def extract_swap_features(
extractor: BaseHiddenStateExtractor,
swap_pairs: List[dict],
max_samples_per_category: int = 0,
) -> List[dict]:
"""Extract features for all swap pairs."""
rng = random.Random(42)
if max_samples_per_category > 0:
grouped = defaultdict(list)
for p in swap_pairs:
grouped[p['category']].append(p)
limited = []
for cat in CATEGORY_ORDER:
samples = grouped[cat]
if len(samples) > max_samples_per_category:
samples = rng.sample(samples, max_samples_per_category)
limited.extend(samples)
swap_pairs = limited
records = []
for pair in tqdm(swap_pairs, desc="Swap pairs"):
try:
image = decode_base64_image(pair['image_base64'])
hs_orig, pred_orig = run_single_query(extractor, image, pair['original_question'])
hs_swap, pred_swap = run_single_query(extractor, image, pair['swapped_question'])
is_correct_orig = check_answer(pred_orig, pair['original_answer'], pair['mcq_map'])
is_correct_swap = check_answer(pred_swap, pair['swapped_answer'], pair['mcq_map'])
delta = {}
for layer_idx in extractor.target_layers:
if layer_idx in hs_orig and layer_idx in hs_swap:
delta[layer_idx] = hs_swap[layer_idx] - hs_orig[layer_idx]
record = {
'index': pair['index'],
'group': pair['group'],
'category': pair['category'],
'original_answer': pair['original_answer'],
'swapped_answer': pair['swapped_answer'],
'pred_orig': pred_orig,
'pred_swap': pred_swap,
'is_correct_orig': is_correct_orig,
'is_correct_swap': is_correct_swap,
'hs_orig': hs_orig,
'hs_swap': hs_swap,
'delta': delta,
}
records.append(record)
mark_o = "O" if is_correct_orig else "X"
mark_s = "O" if is_correct_swap else "X"
logger.info(f" #{pair['index']:<6} {pair['category']:<6} "
f"orig[{mark_o}]=\"{pred_orig[:40]}\" swap[{mark_s}]=\"{pred_swap[:40]}\""
+ (f" [{len(records)}/{len(swap_pairs)}]" if len(records) % 50 == 0 else ""))
except Exception as e:
logger.warning(f"Error on index {pair['index']}: {e}")
continue
logger.info(f"Extracted {len(records)} swap pair records")
# Fix 8: Per-category accuracy logging
for cat in CATEGORY_ORDER:
cat_recs = [r for r in records if r['category'] == cat]
n = len(cat_recs)
if n == 0:
continue
c_orig = sum(1 for r in cat_recs if r['is_correct_orig'])
c_swap = sum(1 for r in cat_recs if r['is_correct_swap'])
c_both = sum(1 for r in cat_recs if r['is_correct_orig'] and r['is_correct_swap'])
logger.info(f" {cat:>6s} (n={n}): acc_orig={c_orig/n:.1%}, acc_swap={c_swap/n:.1%}, "
f"acc_both={c_both/n:.1%}")
return records
def extract_cross_group_features(
extractor: BaseHiddenStateExtractor,
quads: List[dict],
) -> List[dict]:
"""Extract features for cross-group quads (4 forward passes each)."""
records = []
for quad in tqdm(quads, desc="Cross-group quads"):
try:
image = decode_base64_image(quad['image_base64'])
hs_d_orig, pred_d_orig = run_single_query(extractor, image, quad['dist_original_q'])
hs_d_swap, pred_d_swap = run_single_query(extractor, image, quad['dist_swapped_q'])
hs_v_orig, pred_v_orig = run_single_query(extractor, image, quad['vert_original_q'])
hs_v_swap, pred_v_swap = run_single_query(extractor, image, quad['vert_swapped_q'])
delta_dist, delta_vert = {}, {}
for layer_idx in extractor.target_layers:
if layer_idx in hs_d_orig and layer_idx in hs_d_swap:
delta_dist[layer_idx] = hs_d_swap[layer_idx] - hs_d_orig[layer_idx]
if layer_idx in hs_v_orig and layer_idx in hs_v_swap:
delta_vert[layer_idx] = hs_v_swap[layer_idx] - hs_v_orig[layer_idx]
record = {
'index': quad['index'],
'delta_dist': delta_dist,
'delta_vert': delta_vert,
'pred_d_orig': pred_d_orig, 'pred_d_swap': pred_d_swap,
'pred_v_orig': pred_v_orig, 'pred_v_swap': pred_v_swap,
'is_correct_d_orig': check_answer(pred_d_orig, quad['dist_original_answer'], quad['dist_mcq_map']),
'is_correct_d_swap': check_answer(pred_d_swap, quad['dist_swapped_answer'], quad['dist_mcq_map']),
'is_correct_v_orig': check_answer(pred_v_orig, quad['vert_original_answer'], quad['vert_mcq_map']),
'is_correct_v_swap': check_answer(pred_v_swap, quad['vert_swapped_answer'], quad['vert_mcq_map']),
'data_source': quad['data_source'],
}
records.append(record)
tqdm.write(f" #{quad['index']:<6} dist=[{pred_d_orig[:20]}/{pred_d_swap[:20]}] "
f"vert=[{pred_v_orig[:20]}/{pred_v_swap[:20]}]")
except Exception as e:
logger.warning(f"Error on cross-group index {quad['index']}: {e}")
continue
logger.info(f"Extracted {len(records)} cross-group quad records")
return records
# ============================================================================
# Analysis Functions
# ============================================================================
# Fix 5: Within-category + sign-corrected delta consistency
def compute_delta_consistency(records: List[dict], target_layers: List[int]):
"""Compute TWO types of delta consistency.
Returns:
within_cat_results: {(category, layer) -> {mean, std, n}}
sign_corrected_results: {(group, layer) -> {mean, std, n}}
"""
within_cat_results = {}
sign_corrected_results = {}
for group in GROUP_ORDER:
canonical = CANONICAL_CATEGORIES[group]
opposite = OPPOSITE_MAP[canonical]
group_recs = [r for r in records if r['group'] == group]
for layer in target_layers:
# (a) Within-category consistency
for cat in [canonical, opposite]:
cat_deltas = [r['delta'][layer] for r in group_recs
if r['category'] == cat and layer in r['delta']]
if len(cat_deltas) >= 2:
arr = np.array(cat_deltas)
sim = cosine_similarity(arr)
upper = sim[np.triu_indices(len(cat_deltas), k=1)]
within_cat_results[(cat, layer)] = {
'mean': float(np.mean(upper)),
'std': float(np.std(upper)),
'n': len(cat_deltas),
}
# (b) Sign-corrected group consistency
all_deltas = []
for r in group_recs:
if layer not in r['delta']:
continue
d = r['delta'][layer]
if r['category'] == opposite:
d = -d # flip to align with canonical direction
all_deltas.append(d)
if len(all_deltas) >= 2:
arr = np.array(all_deltas)
sim = cosine_similarity(arr)
upper = sim[np.triu_indices(len(all_deltas), k=1)]
sign_corrected_results[(group, layer)] = {
'mean': float(np.mean(upper)),
'std': float(np.std(upper)),
'n': len(all_deltas),
}
return within_cat_results, sign_corrected_results
# Fix 7: Delta-based similarity matrix
def compute_delta_similarity_matrix(records: List[dict], layer: int) -> Optional[pd.DataFrame]:
"""Compute 6x6 cosine similarity using mean delta per category."""
cat_deltas = {}
for cat in CATEGORY_ORDER:
deltas = [r['delta'][layer] for r in records if r['category'] == cat and layer in r['delta']]
if deltas:
cat_deltas[cat] = np.mean(deltas, axis=0)
available = [c for c in CATEGORY_ORDER if c in cat_deltas]
if len(available) < 2:
return None
vectors = np.array([cat_deltas[c] for c in available])
sim = cosine_similarity(vectors)
return pd.DataFrame(sim, index=available, columns=available)
# Fix 8: Both-correct filtering
def filter_both_correct(records: List[dict]) -> List[dict]:
"""Filter to pairs where both orig and swap predictions are correct."""
return [r for r in records if r['is_correct_orig'] and r['is_correct_swap']]
# Fix 8: Category validity check
def check_category_validity(records: List[dict], scale: str) -> Dict[str, dict]:
"""Check per-category accuracy and flag unreliable categories."""
validity = {}
for cat in CATEGORY_ORDER:
cat_recs = [r for r in records if r['category'] == cat]
n = len(cat_recs)
if n == 0:
validity[cat] = {'n': 0, 'acc_orig': 0, 'acc_swap': 0, 'reliable': False}
continue
acc_orig = sum(1 for r in cat_recs if r['is_correct_orig']) / n
acc_swap = sum(1 for r in cat_recs if r['is_correct_swap']) / n
reliable = acc_orig >= 0.5 and acc_swap >= 0.5
validity[cat] = {
'n': n, 'acc_orig': acc_orig, 'acc_swap': acc_swap,
'reliable': reliable,
}
if not reliable:
logger.warning(f" [!] Category '{cat}' unreliable at scale={scale}: "
f"acc_orig={acc_orig:.1%}, acc_swap={acc_swap:.1%}")
return validity
def compute_cross_group_alignment(quad_records: List[dict], target_layers: List[int]) -> dict:
results = {}
for layer in target_layers:
per_sample = []
delta_verts, delta_dists = [], []
for rec in quad_records:
if layer in rec['delta_vert'] and layer in rec['delta_dist']:
dv = rec['delta_vert'][layer]
dd = rec['delta_dist'][layer]
norm_v, norm_d = np.linalg.norm(dv), np.linalg.norm(dd)
if norm_v > 1e-10 and norm_d > 1e-10:
per_sample.append(float(np.dot(dv, dd) / (norm_v * norm_d)))
delta_verts.append(dv)
delta_dists.append(dd)
if not per_sample:
continue
mean_dv = np.mean(delta_verts, axis=0)
mean_dd = np.mean(delta_dists, axis=0)
norm_mv, norm_md = np.linalg.norm(mean_dv), np.linalg.norm(mean_dd)
mean_alignment = float(np.dot(mean_dv, mean_dd) / (norm_mv * norm_md + 1e-10))
rng = np.random.RandomState(42)
perm_alignments = []
for _ in range(100):
shuffled_dd = [delta_dists[i] for i in rng.permutation(len(delta_dists))]
perm_cos = []
for dv, dd in zip(delta_verts, shuffled_dd):
nv, nd = np.linalg.norm(dv), np.linalg.norm(dd)
if nv > 1e-10 and nd > 1e-10:
perm_cos.append(np.dot(dv, dd) / (nv * nd))
perm_alignments.append(np.mean(perm_cos))
results[layer] = {
'per_sample_mean': float(np.mean(per_sample)),
'per_sample_std': float(np.std(per_sample)),
'mean_delta_alignment': mean_alignment,
'permutation_mean': float(np.mean(perm_alignments)),
'permutation_std': float(np.std(perm_alignments)),
'n_samples': len(per_sample),
}
return results
def compute_prediction_stats(records: List[dict], scale: str) -> dict:
stats = {'scale': scale}
total_correct_orig, total_correct_swap, total_both, total_n = 0, 0, 0, 0
for group in GROUP_ORDER:
group_recs = [r for r in records if r['group'] == group]
n = len(group_recs)
c_orig = sum(1 for r in group_recs if r['is_correct_orig'])
c_swap = sum(1 for r in group_recs if r['is_correct_swap'])
c_both = sum(1 for r in group_recs if r['is_correct_orig'] and r['is_correct_swap'])
stats[f'{group}_n'] = n
stats[f'{group}_acc_orig'] = c_orig / n if n > 0 else 0
stats[f'{group}_acc_swap'] = c_swap / n if n > 0 else 0
stats[f'{group}_acc_both'] = c_both / n if n > 0 else 0
total_correct_orig += c_orig
total_correct_swap += c_swap
total_both += c_both
total_n += n
stats['overall_acc_orig'] = total_correct_orig / total_n if total_n > 0 else 0
stats['overall_acc_swap'] = total_correct_swap / total_n if total_n > 0 else 0
stats['overall_acc_both'] = total_both / total_n if total_n > 0 else 0
stats['overall_n'] = total_n
return stats
# ============================================================================
# Saving & Loading
# ============================================================================
def get_representative_layers(all_layers, n=5):
if len(all_layers) <= n:
return list(all_layers)
indices = np.linspace(0, len(all_layers) - 1, n, dtype=int)
return [all_layers[i] for i in indices]
def save_scale_results(
scale, swap_records, quad_records,
within_cat_consistency, sign_corrected_consistency,
cross_alignment, pred_stats, target_layers,
category_validity, delta_heatmaps,
output_dir, both_correct_tag="all_pairs",
):
"""Save all per-scale results to disk."""
csv_dir = os.path.join(output_dir, 'csv')
json_dir = os.path.join(output_dir, 'json')
os.makedirs(csv_dir, exist_ok=True)
os.makedirs(json_dir, exist_ok=True)
# 1. Predictions CSV (tagged so all_pairs and both_correct don't overwrite each other)
pred_rows = []
for r in swap_records:
pred_rows.append({
'index': r['index'], 'group': r['group'], 'category': r['category'],
'pred_orig': r['pred_orig'], 'pred_swap': r['pred_swap'],
'is_correct_orig': r['is_correct_orig'], 'is_correct_swap': r['is_correct_swap'],
})
pd.DataFrame(pred_rows).to_csv(
os.path.join(csv_dir, f'predictions_{scale}_{both_correct_tag}.csv'), index=False)
# 2. Within-category consistency JSON
wc_data = {}
for (cat, layer), vals in within_cat_consistency.items():
wc_data[f'{cat}_L{layer}'] = vals
with open(os.path.join(json_dir, f'within_cat_consistency_{scale}_{both_correct_tag}.json'), 'w') as f:
json.dump(wc_data, f, indent=2)
# 3. Sign-corrected consistency JSON
sc_data = {}
for (group, layer), vals in sign_corrected_consistency.items():
sc_data[f'{group}_L{layer}'] = vals
with open(os.path.join(json_dir, f'sign_corrected_consistency_{scale}_{both_correct_tag}.json'), 'w') as f:
json.dump(sc_data, f, indent=2)
# 4. Cross-group alignment JSON
alignment_data = {}
for layer, vals in cross_alignment.items():
alignment_data[f'L{layer}'] = vals
with open(os.path.join(json_dir, f'cross_alignment_{scale}.json'), 'w') as f:
json.dump(alignment_data, f, indent=2)
# 5. Prediction stats JSON
with open(os.path.join(json_dir, f'pred_stats_{scale}.json'), 'w') as f:
json.dump(pred_stats, f, indent=2)
# 6. Category validity JSON (Fix 8)
with open(os.path.join(json_dir, f'category_validity_{scale}.json'), 'w') as f:
json.dump(category_validity, f, indent=2)
# 7. Delta heatmap CSVs (Fix 7)
for layer, df in delta_heatmaps.items():
if df is not None:
df.to_csv(os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{both_correct_tag}.csv'))
logger.info(f"Saved results for scale={scale} ({both_correct_tag}) to {output_dir}")
def save_vectors_npz(scale, swap_records, quad_records, target_layers, output_dir):
"""Save ALL vectors with correctness metadata to NPZ (once per scale).
This enables post-hoc filtering (both_correct, all_with_validity) from saved data.
"""
rep_layers = list(target_layers) # save ALL layers (not just 5 representative)
delta_data = {}
for layer in rep_layers:
groups_list, categories_list, vectors = [], [], []
orig_vecs, swap_vecs, labels = [], [], []
correct_orig_list, correct_swap_list, indices_list = [], [], []
for r in swap_records:
if layer in r['delta']:
groups_list.append(r['group'])
categories_list.append(r['category'])
vectors.append(r['delta'][layer])
correct_orig_list.append(r['is_correct_orig'])
correct_swap_list.append(r['is_correct_swap'])
indices_list.append(r['index'])
if layer in r['hs_orig'] and layer in r['hs_swap']:
orig_vecs.append(r['hs_orig'][layer])
swap_vecs.append(r['hs_swap'][layer])
labels.append(r['category'])
if vectors:
delta_data[f'delta_L{layer}'] = np.array(vectors)
delta_data[f'groups_L{layer}'] = np.array(groups_list)
delta_data[f'categories_L{layer}'] = np.array(categories_list)
delta_data[f'is_correct_orig_L{layer}'] = np.array(correct_orig_list)
delta_data[f'is_correct_swap_L{layer}'] = np.array(correct_swap_list)
delta_data[f'indices_L{layer}'] = np.array(indices_list)
if orig_vecs:
delta_data[f'orig_L{layer}'] = np.array(orig_vecs)
delta_data[f'swap_L{layer}'] = np.array(swap_vecs)
delta_data[f'labels_L{layer}'] = np.array(labels)
npz_dir = os.path.join(output_dir, 'npz')
os.makedirs(npz_dir, exist_ok=True)
np.savez_compressed(os.path.join(npz_dir, f'vectors_{scale}.npz'), **delta_data)
logger.info(f"Saved vectors NPZ with correctness metadata for scale={scale}")
# Cross-group delta vectors
if quad_records:
cg_data = {}
for layer in rep_layers:
dverts, ddists = [], []
for rec in quad_records:
if layer in rec['delta_vert'] and layer in rec['delta_dist']:
dverts.append(rec['delta_vert'][layer])
ddists.append(rec['delta_dist'][layer])
if dverts:
cg_data[f'delta_vert_L{layer}'] = np.array(dverts)
cg_data[f'delta_dist_L{layer}'] = np.array(ddists)
np.savez_compressed(os.path.join(npz_dir, f'cross_group_vectors_{scale}.npz'), **cg_data)
def load_scale_consistency(output_dir, scale, tag='all_pairs'):
"""Load sign-corrected consistency."""
path = os.path.join(output_dir, 'json', f'sign_corrected_consistency_{scale}_{tag}.json')
if not os.path.exists(path):
return {}
with open(path) as f:
raw = json.load(f)
result = {}
for key, vals in raw.items():
parts = key.rsplit('_L', 1)
if len(parts) == 2:
result[(parts[0], int(parts[1]))] = vals
return result
def load_within_cat_consistency(output_dir, scale, tag='all_pairs'):
path = os.path.join(output_dir, 'json', f'within_cat_consistency_{scale}_{tag}.json')
if not os.path.exists(path):
return {}
with open(path) as f:
raw = json.load(f)
result = {}
for key, vals in raw.items():
parts = key.rsplit('_L', 1)
if len(parts) == 2:
result[(parts[0], int(parts[1]))] = vals
return result
def load_scale_alignment(output_dir, scale):
path = os.path.join(output_dir, 'json', f'cross_alignment_{scale}.json')
if not os.path.exists(path):
return {}
with open(path) as f:
raw = json.load(f)
result = {}
for key, vals in raw.items():
result[int(key.replace('L', ''))] = vals
return result
def load_delta_heatmaps(output_dir, scale, tag='all_pairs'):
import glob as glob_mod
pattern = os.path.join(output_dir, 'csv', f'delta_similarity_{scale}_L*_{tag}.csv')
files = glob_mod.glob(pattern)
result = {}
for fpath in files:
basename = os.path.basename(fpath)
# delta_similarity_{scale}_L{layer}_{tag}.csv
part = basename.replace(f'delta_similarity_{scale}_L', '').replace(f'_{tag}.csv', '')
try:
layer = int(part)
except ValueError:
continue
result[layer] = pd.read_csv(fpath, index_col=0)
return result
# ============================================================================
# Visualization
# ============================================================================
def plot_within_cat_consistency_trajectory(within_cat, scale, model_type, save_path):
"""Plot within-category delta consistency across layers."""
fig, ax = plt.subplots(figsize=(12, 6))
cat_colors = CAT_COLORS
for cat in CATEGORY_ORDER:
layers, vals = [], []
for (c, l), v in sorted(within_cat.items(), key=lambda x: x[0][1]):
if c == cat:
layers.append(l)
vals.append(v['mean'])
if layers:
ax.plot(layers, vals, '-o', color=cat_colors[cat], label=cat, linewidth=2, markersize=3)
ax.set_xlabel('Layer Index')
ax.set_ylabel('Within-Category Consistency')
ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency', fontweight='bold')
ax.legend(fontsize=9)
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
def plot_sign_corrected_consistency_trajectory(sign_corrected, scale, model_type, save_path):
"""Plot sign-corrected group consistency across layers."""
fig, ax = plt.subplots(figsize=(12, 6))
colors = GROUP_COLORS
for group in GROUP_ORDER:
layers, vals = [], []
for (g, l), v in sorted(sign_corrected.items(), key=lambda x: x[0][1]):
if g == group:
layers.append(l)
vals.append(v['mean'])
if layers:
ax.plot(layers, vals, '-o', color=colors[group], label=group, linewidth=2, markersize=3)
ax.set_xlabel('Layer Index')
ax.set_ylabel('Sign-Corrected Consistency')
ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency', fontweight='bold')
ax.legend(fontsize=11)
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
def plot_cross_group_alignment_trajectory(cross_alignment, scale, model_type, save_path):
fig, ax = plt.subplots(figsize=(12, 6))
layers = sorted(cross_alignment.keys())
actual = [cross_alignment[l]['per_sample_mean'] for l in layers]
mean_delta = [cross_alignment[l]['mean_delta_alignment'] for l in layers]
perm_mean = [cross_alignment[l]['permutation_mean'] for l in layers]
perm_std = [cross_alignment[l]['permutation_std'] for l in layers]
ax.plot(layers, actual, '-o', color='#d62728', label='cos(d_vert, d_dist) per-sample mean',
linewidth=2.5, markersize=3)
ax.plot(layers, mean_delta, '--s', color='#e377c2', label='cos(mean_d_vert, mean_d_dist)',
linewidth=1.5, markersize=3)
ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5)
ax.fill_between(layers,
[m - 2*s for m, s in zip(perm_mean, perm_std)],
[m + 2*s for m, s in zip(perm_mean, perm_std)],
alpha=0.2, color='gray')
ax.set_xlabel('Layer Index')
ax.set_ylabel('Cosine Alignment')
ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)', fontweight='bold')
ax.legend(fontsize=9)
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
# Fix 7: Delta heatmap visualization
def plot_delta_heatmap(sim_df, title, save_path):
"""Plot delta-based similarity heatmap."""
plt.figure(figsize=(10, 8))
available_order = [c for c in CATEGORY_ORDER if c in sim_df.index]
sim_df_ordered = sim_df.loc[available_order, available_order]
annot = sim_df_ordered.round(4).astype(str)
sns.heatmap(sim_df_ordered, annot=annot, fmt='', cmap='RdBu_r',
center=0, vmin=-1, vmax=1, square=True, linewidths=0.5,
cbar_kws={'label': 'Cosine Similarity'})
plt.title(title, fontsize=14, fontweight='bold')
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved delta heatmap: {save_path}")
# Fix 6: Prediction stats visualization
def plot_pred_stats_bars(all_pred_stats, model_type, save_path):
"""Bar chart: per-group accuracy (orig/swap/both) across scales."""
fig, axes = plt.subplots(1, len(GROUP_ORDER), figsize=(7 * len(GROUP_ORDER), 6))
if len(GROUP_ORDER) == 1:
axes = [axes]
available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)]
if not available:
# Fallback: use whatever scales are present (preserves insertion order)
seen = []
for d in all_pred_stats:
if d['scale'] not in seen:
seen.append(d['scale'])
available = seen
for idx, group in enumerate(GROUP_ORDER):
ax = axes[idx]
x = np.arange(3) # orig, swap, both
width = 0.8 / len(available)
for i, scale in enumerate(available):
entry = next((d for d in all_pred_stats if d['scale'] == scale), None)
if entry is None:
continue
vals = [entry.get(f'{group}_acc_orig', 0),
entry.get(f'{group}_acc_swap', 0),
entry.get(f'{group}_acc_both', 0)]
offset = (i - len(available) / 2 + 0.5) * width
color = SCALE_COLORS.get(scale, 'gray')
ax.bar(x + offset, vals, width, label=scale, color=color)
ax.set_xticks(x)
ax.set_xticklabels(['orig', 'swap', 'both'])
ax.set_ylabel('Accuracy')
ax.set_title(group, fontweight='bold')
ax.legend(fontsize=7)
ax.set_ylim(0, 1.1)
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
ax.grid(True, alpha=0.3, axis='y')
fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group', fontsize=15, fontweight='bold', y=1.02)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
def plot_pred_stats_trajectory(all_pred_stats, model_type, save_path):
"""Line plot: acc_both trajectory across scales per group."""
fig, ax = plt.subplots(figsize=(10, 6))
available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in all_pred_stats)]
if not available:
seen = []
for d in all_pred_stats:
if d['scale'] not in seen:
seen.append(d['scale'])
available = seen
colors = GROUP_COLORS
for group in GROUP_ORDER:
x_vals, y_vals = [], []
for i, scale in enumerate(available):
entry = next((d for d in all_pred_stats if d['scale'] == scale), None)
if entry:
x_vals.append(i)
y_vals.append(entry.get(f'{group}_acc_both', 0))
if x_vals:
ax.plot(x_vals, y_vals, '-o', color=colors[group], label=group, linewidth=2.5, markersize=6)
ax.set_xticks(range(len(available)))
ax.set_xticklabels(available)
ax.set_xlabel('Scale')
ax.set_ylabel('Accuracy (both correct)')
ax.set_title(f'{model_type.upper()} - Both-Correct Accuracy Across Scales', fontweight='bold')
ax.legend(fontsize=10)
ax.set_ylim(0, 1.05)
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
def plot_pca_embeddings(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
data = np.load(vectors_npz_path, allow_pickle=True)
layer_keys = [k for k in data.files if k.startswith('orig_L')]
layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys])
cat_colors = CAT_COLORS
for layer in layers:
orig = data.get(f'orig_L{layer}')
swap = data.get(f'swap_L{layer}')
labels = data.get(f'labels_L{layer}')
deltas = data.get(f'delta_L{layer}')
cats = data.get(f'categories_L{layer}')
groups = data.get(f'groups_L{layer}')
if bc_only and deltas is not None:
co = data.get(f'is_correct_orig_L{layer}')
cs = data.get(f'is_correct_swap_L{layer}')
if co is not None and cs is not None:
bc_mask = co.astype(bool) & cs.astype(bool)
if orig is not None and len(orig) == len(bc_mask):
orig = orig[bc_mask]
swap = swap[bc_mask]
labels = labels[bc_mask] if labels is not None else None
if len(deltas) == len(bc_mask):
deltas = deltas[bc_mask]
cats = cats[bc_mask] if cats is not None else None
groups = groups[bc_mask] if groups is not None else None
if orig is None or swap is None or len(orig) == 0:
continue
fig, axes = plt.subplots(1, 3, figsize=(24, 7))
pca = PCA(n_components=2)
all_vecs = np.vstack([orig, swap])
all_pca = pca.fit_transform(all_vecs)
orig_pca = all_pca[:len(orig)]
swap_pca = all_pca[len(orig):]
ax = axes[0]
for cat in CATEGORY_ORDER:
mask = np.array([str(l) == cat for l in labels])
if mask.any():
ax.scatter(orig_pca[mask, 0], orig_pca[mask, 1],
c=cat_colors.get(cat, 'gray'), label=f'{cat} (orig)',
alpha=0.5, s=15, marker='o')
ax.scatter(swap_pca[mask, 0], swap_pca[mask, 1],
c=cat_colors.get(cat, 'gray'),
alpha=0.5, s=15, marker='x')
ax.set_title('Embeddings by Category\n(o=orig, x=swap)', fontsize=11)
ax.legend(fontsize=7, ncol=2)
ax.grid(True, alpha=0.2)
ax = axes[1]
if deltas is not None and cats is not None:
pca_d = PCA(n_components=2)
delta_pca = pca_d.fit_transform(deltas)
group_colors = GROUP_COLORS
if groups is not None:
for group in GROUP_ORDER:
mask = np.array([str(g) == group for g in groups])
if mask.any():
ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
c=group_colors.get(group, 'gray'), label=group, alpha=0.5, s=15)
ax.set_title('Delta Vectors by Group', fontsize=11)
ax.legend(fontsize=9)
ax.grid(True, alpha=0.2)
ax = axes[2]
if deltas is not None and cats is not None:
for cat in CATEGORY_ORDER:
mask = np.array([str(c) == cat for c in cats])
if mask.any():
ax.scatter(delta_pca[mask, 0], delta_pca[mask, 1],
c=cat_colors.get(cat, 'gray'), label=cat, alpha=0.5, s=15)
ax.set_title('Delta Vectors by Category', fontsize=11)
ax.legend(fontsize=8, ncol=2)
ax.grid(True, alpha=0.2)
fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - PCA', fontweight='bold')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200, bbox_inches='tight')
plt.close()
logger.info(f"Saved PCA plots to {save_dir}")
def plot_pca_3d(vectors_npz_path, scale, model_type, save_dir, bc_only=False):
"""Generate 3-panel 3D PCA figure per representative layer."""
data = np.load(vectors_npz_path, allow_pickle=True)
layer_keys = [k for k in data.files if k.startswith('orig_L')]
layers = sorted([int(k.replace('orig_L', '')) for k in layer_keys])
if not layers:
logger.info(f" [pca_3d] No orig_L* keys found in {vectors_npz_path}")
return
os.makedirs(save_dir, exist_ok=True)
def scatter3d(ax, xs, ys, zs, c, label, alpha=0.45, s=12, marker='o'):
ax.scatter(xs, ys, zs, c=c, label=label, alpha=alpha, s=s, marker=marker)
for layer in layers:
orig = data.get(f'orig_L{layer}')
swap = data.get(f'swap_L{layer}')
labels = data.get(f'labels_L{layer}')
deltas = data.get(f'delta_L{layer}')
cats = data.get(f'categories_L{layer}')
groups = data.get(f'groups_L{layer}')
if bc_only and deltas is not None:
co = data.get(f'is_correct_orig_L{layer}')
cs = data.get(f'is_correct_swap_L{layer}')
if co is not None and cs is not None:
bc_mask = co.astype(bool) & cs.astype(bool)
if orig is not None and len(orig) == len(bc_mask):
orig = orig[bc_mask]
swap = swap[bc_mask]
labels = labels[bc_mask] if labels is not None else None
if len(deltas) == len(bc_mask):
deltas = deltas[bc_mask]
cats = cats[bc_mask] if cats is not None else None
groups = groups[bc_mask] if groups is not None else None
if orig is None or swap is None or len(orig) == 0:
continue
# Panel 1: embeddings
pca_emb = PCA(n_components=3)
all_vecs = np.vstack([orig, swap])
all_proj = pca_emb.fit_transform(all_vecs)
orig_proj = all_proj[:len(orig)]
swap_proj = all_proj[len(orig):]
ev1 = pca_emb.explained_variance_ratio_
# Panels 2/3: delta vectors
has_delta = (deltas is not None and len(deltas) >= 3)
if has_delta:
pca_d = PCA(n_components=3)
delta_proj = pca_d.fit_transform(deltas)
ev2 = pca_d.explained_variance_ratio_
else:
delta_proj = None
ev2 = None
fig = plt.figure(figsize=(30, 8))
ax1 = fig.add_subplot(131, projection='3d')
for cat in CATEGORY_ORDER:
mask = np.array([str(l) == cat for l in labels])
if not mask.any():
continue
c = CAT_COLORS.get(cat, 'gray')
scatter3d(ax1, orig_proj[mask, 0], orig_proj[mask, 1], orig_proj[mask, 2],
c=c, label=f'{cat} (orig)', marker='o')
scatter3d(ax1, swap_proj[mask, 0], swap_proj[mask, 1], swap_proj[mask, 2],
c=c, label=f'{cat} (swap)', marker='^')
ax1.set_title('Embeddings by Category\n(o=orig, ^=swap)', fontsize=10)
ax1.set_xlabel(f'PC1 ({ev1[0]:.1%})', fontsize=8)
ax1.set_ylabel(f'PC2 ({ev1[1]:.1%})', fontsize=8)
ax1.set_zlabel(f'PC3 ({ev1[2]:.1%})', fontsize=8)
ax1.legend(fontsize=6, ncol=2, loc='upper left')
ax2 = fig.add_subplot(132, projection='3d')
if has_delta and groups is not None:
for group in GROUP_ORDER:
mask = np.array([str(g) == group for g in groups])
if not mask.any():
continue
scatter3d(ax2, delta_proj[mask, 0], delta_proj[mask, 1], delta_proj[mask, 2],
c=GROUP_COLORS.get(group, 'gray'), label=group)
ax2.set_title('Delta Vectors by Group', fontsize=10)
ax2.set_xlabel(f'PC1 ({ev2[0]:.1%})', fontsize=8)
ax2.set_ylabel(f'PC2 ({ev2[1]:.1%})', fontsize=8)
ax2.set_zlabel(f'PC3 ({ev2[2]:.1%})', fontsize=8)
ax2.legend(fontsize=8)
else:
ax2.set_title('Delta Vectors by Group\n(no data)', fontsize=10)
ax3 = fig.add_subplot(133, projection='3d')
if has_delta and cats is not None:
for cat in CATEGORY_ORDER:
mask = np.array([str(c) == cat for c in cats])
if not mask.any():
continue
scatter3d(ax3, delta_proj[mask, 0], delta_proj[mask, 1], delta_proj[mask, 2],
c=CAT_COLORS.get(cat, 'gray'), label=cat)
ax3.set_title('Delta Vectors by Category', fontsize=10)
ax3.set_xlabel(f'PC1 ({ev2[0]:.1%})', fontsize=8)
ax3.set_ylabel(f'PC2 ({ev2[1]:.1%})', fontsize=8)
ax3.set_zlabel(f'PC3 ({ev2[2]:.1%})', fontsize=8)
ax3.legend(fontsize=7, ncol=2)
else:
ax3.set_title('Delta Vectors by Category\n(no data)', fontsize=10)
fig.suptitle(f'{model_type.upper()} ({scale}) - Layer {layer} - 3D PCA', fontweight='bold')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, f'pca_{scale}_L{layer}.png'), dpi=200,
bbox_inches='tight', pad_inches=0.4)
plt.close()
logger.info(f"Saved 3D PCA plots to {save_dir}")
# Cross-scale plots
def plot_cross_scale_consistency(all_consistency, model_type, save_path, title_prefix='Sign-Corrected'):
fig, axes = plt.subplots(1, 3, figsize=(21, 6))
for idx, group in enumerate(GROUP_ORDER):
ax = axes[idx]
for scale in SCALE_ORDER:
if scale not in all_consistency:
continue
consistency = all_consistency[scale]
layers, vals = [], []
for (g, l), v in sorted(consistency.items(), key=lambda x: x[0][1]):
if g == group:
layers.append(l)
vals.append(v['mean'])
if layers:
ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
ax.set_xlabel('Layer Index')
ax.set_ylabel('Consistency')
ax.set_title(group, fontweight='bold')
ax.legend(fontsize=9)
ax.grid(True, alpha=0.3)
fig.suptitle(f'{model_type.upper()} - {title_prefix} Consistency Across Scales',
fontsize=15, fontweight='bold', y=1.02)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
def plot_cross_scale_within_cat_consistency(all_within_cat, model_type, save_path):
"""Cross-scale within-category consistency."""
fig, axes = plt.subplots(2, 3, figsize=(21, 12))
for idx, cat in enumerate(CATEGORY_ORDER):
ax = axes[idx // 3][idx % 3]
for scale in SCALE_ORDER:
if scale not in all_within_cat:
continue
wc = all_within_cat[scale]
layers, vals = [], []
for (c, l), v in sorted(wc.items(), key=lambda x: x[0][1]):
if c == cat:
layers.append(l)
vals.append(v['mean'])
if layers:
ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
ax.set_xlabel('Layer Index')
ax.set_ylabel('Consistency')
ax.set_title(cat, fontweight='bold')
ax.legend(fontsize=8)
ax.grid(True, alpha=0.3)
fig.suptitle(f'{model_type.upper()} - Within-Category Consistency Across Scales',
fontsize=15, fontweight='bold', y=1.02)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
def plot_cross_scale_alignment(all_alignment, model_type, save_path):
fig, ax = plt.subplots(figsize=(12, 6))
for scale in SCALE_ORDER:
if scale not in all_alignment:
continue
alignment = all_alignment[scale]
layers = sorted(alignment.keys())
vals = [alignment[l]['per_sample_mean'] for l in layers]
ax.plot(layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
ax.set_xlabel('Layer Index')
ax.set_ylabel('cos(d_vert, d_dist)')
ax.set_title(f'{model_type.upper()} - Cross-Group Alignment Across Scales\n'
f'(High=entangled, Low=disentangled)', fontweight='bold')
ax.legend(fontsize=10)
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
# Fix 7: Delta-based trajectory (cross-layer, per-scale)
def plot_delta_trajectory(all_delta_heatmaps, model_type, save_path):
"""Cross-layer trajectory of delta-based similarities for key pairs."""
pairs = [
('above', 'far', 'above-far'), ('below', 'close', 'below-close'),
('left', 'right', 'left-right'),
]
fig, axes = plt.subplots(1, len(pairs), figsize=(7 * len(pairs), 6))
if len(pairs) == 1:
axes = [axes]
for idx, (cat1, cat2, label) in enumerate(pairs):
ax = axes[idx]
for scale in SCALE_ORDER:
if scale not in all_delta_heatmaps:
continue
hm = all_delta_heatmaps[scale]
layers = sorted(hm.keys())
vals = []
valid_layers = []
for l in layers:
df = hm[l]
if df is not None and cat1 in df.index and cat2 in df.columns:
valid_layers.append(l)
vals.append(df.loc[cat1, cat2])
if valid_layers:
ax.plot(valid_layers, vals, '-', color=SCALE_COLORS.get(scale, 'gray'),
label=SCALE_DISPLAY_NAMES.get(scale, scale), linewidth=2)
ax.set_xlabel('Layer Index')
ax.set_ylabel('Delta Cosine Similarity')
ax.set_title(label, fontweight='bold')
ax.legend(fontsize=9)
ax.grid(True, alpha=0.3)
ax.axhline(y=0, color='gray', linestyle='--', alpha=0.5)
fig.suptitle(f'{model_type.upper()} - Delta-Based Similarity Trajectory',
fontsize=15, fontweight='bold', y=1.02)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
def plot_summary_barplot(all_consistency, all_alignment, model_type, save_path):
available_scales = [s for s in SCALE_ORDER if s in all_consistency]
if not available_scales:
return
sample_cons = all_consistency[available_scales[0]]
max_layer = max(l for (_, l) in sample_cons.keys())
fig, axes = plt.subplots(1, 2, figsize=(16, 6))
ax = axes[0]
x = np.arange(len(GROUP_ORDER))
width = 0.8 / len(available_scales)
for i, scale in enumerate(available_scales):
cons = all_consistency[scale]
vals = [cons.get((g, max_layer), {}).get('mean', 0) for g in GROUP_ORDER]
offset = (i - len(available_scales) / 2 + 0.5) * width
ax.bar(x + offset, vals, width,
label=SCALE_DISPLAY_NAMES.get(scale, scale),
color=SCALE_COLORS.get(scale, 'gray'))
ax.set_xticks(x)
ax.set_xticklabels(GROUP_ORDER)
ax.set_ylabel('Sign-Corrected Consistency')
ax.set_title(f'Consistency at Layer {max_layer}', fontweight='bold')
ax.legend(fontsize=8)
ax.grid(True, alpha=0.3, axis='y')
ax = axes[1]
available_align = [s for s in available_scales if s in all_alignment]
if available_align:
vals = [all_alignment[s].get(max_layer, {}).get('per_sample_mean', 0) for s in available_align]
colors = [SCALE_COLORS.get(s, 'gray') for s in available_align]
ax.bar(range(len(vals)), vals, color=colors)
ax.set_xticks(range(len(vals)))
ax.set_xticklabels([SCALE_DISPLAY_NAMES.get(s, s) for s in available_align])
ax.set_ylabel('cos(d_vert, d_dist)')
ax.set_title(f'Cross-Group Alignment at L{max_layer}\n(Lower=disentangled)', fontweight='bold')
ax.grid(True, alpha=0.3, axis='y')
fig.suptitle(f'{model_type.upper()} - Summary at Deepest Layer', fontsize=15, fontweight='bold', y=1.02)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {save_path}")
# ============================================================================
# Main Pipeline
# ============================================================================
def process_scale(args, scale, swap_pairs, quads):
# Resolve model path from the correct config dict
if args.model_type in MODEL_CONFIGS_NEW:
cls_name, model_path = MODEL_CONFIGS_NEW[args.model_type][scale]
else:
model_path = MODEL_CONFIGS[args.model_type][scale]
cls_name = None
logger.info(f"\n{'='*60}")
logger.info(f"Processing {args.model_type} - {scale}"
+ (f" [{cls_name}]" if cls_name else ""))
logger.info(f"Model path: {model_path}")
logger.info(f"{'='*60}")
extractor = get_extractor(args.model_type, model_path, scale=scale, device=args.device)
target_layers = extractor.target_layers
output_dir = os.path.join(args.output_dir, args.model_type)
plots_dir = os.path.join(output_dir, 'plots')
os.makedirs(plots_dir, exist_ok=True)
# Phase A: Extract swap pair features
logger.info("\n--- Phase A: Extracting swap pair features ---")
swap_records = extract_swap_features(extractor, swap_pairs,
max_samples_per_category=args.max_samples_per_category)
# Phase B: Extract cross-group features
logger.info("\n--- Phase B: Extracting cross-group features ---")
quad_records = extract_cross_group_features(extractor, quads) if quads else []
# Phase C: Analysis
# Fix 8: Category validity check
logger.info("\n--- Phase C: Analysis ---")
category_validity = check_category_validity(swap_records, scale)
unreliable_cats = [c for c, v in category_validity.items() if not v['reliable']]
if unreliable_cats:
logger.warning(f" Unreliable categories: {unreliable_cats}")
# Fix 5: Two types of consistency (all pairs)
within_cat_all, sign_corrected_all = compute_delta_consistency(swap_records, target_layers)
# Fix 8: Both-correct filtered consistency
both_correct_records = filter_both_correct(swap_records)
logger.info(f" Both-correct pairs: {len(both_correct_records)}/{len(swap_records)}")
within_cat_bc, sign_corrected_bc = compute_delta_consistency(both_correct_records, target_layers)
# Cross-group alignment
cross_alignment = compute_cross_group_alignment(quad_records, target_layers)
pred_stats = compute_prediction_stats(swap_records, scale)
# Fix 7: Delta-based heatmaps (for all layers)
delta_heatmaps_all = {}
delta_heatmaps_bc = {}
for layer in target_layers:
delta_heatmaps_all[layer] = compute_delta_similarity_matrix(swap_records, layer)
if both_correct_records:
delta_heatmaps_bc[layer] = compute_delta_similarity_matrix(both_correct_records, layer)
# Log key results
max_layer = max(target_layers)
for group in GROUP_ORDER:
key = (group, max_layer)
if key in sign_corrected_all:
logger.info(f" Sign-corrected [{group}, L{max_layer}]: "
f"{sign_corrected_all[key]['mean']:.4f} +/- {sign_corrected_all[key]['std']:.4f}")
if max_layer in cross_alignment:
ca = cross_alignment[max_layer]
logger.info(f" Cross-group alignment L{max_layer}: "
f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})")
logger.info(f" Accuracy orig={pred_stats['overall_acc_orig']:.1%}, "
f"swap={pred_stats['overall_acc_swap']:.1%}, "
f"both={pred_stats['overall_acc_both']:.1%}")
# Phase D: Save results (both all_pairs and both_correct)
logger.info("\n--- Phase D: Saving results ---")
# Save vectors NPZ ONCE with all records + correctness metadata
save_vectors_npz(scale, swap_records, quad_records, target_layers, output_dir)
save_scale_results(
scale, swap_records, quad_records,
within_cat_all, sign_corrected_all,
cross_alignment, pred_stats, target_layers,
category_validity, delta_heatmaps_all,
output_dir, both_correct_tag='all_pairs',
)
if both_correct_records:
save_scale_results(
scale, both_correct_records, quad_records,
within_cat_bc, sign_corrected_bc,
cross_alignment, pred_stats, target_layers,
category_validity, delta_heatmaps_bc,
output_dir, both_correct_tag='both_correct',
)
# Phase E: Per-scale plots (generate into separate subdirs)
logger.info("\n--- Phase E: Per-scale plots ---")
for condition, wc_data, sc_data in [
('all', within_cat_all, sign_corrected_all),
('both_correct', within_cat_bc, sign_corrected_bc),
]:
if condition == 'both_correct' and not both_correct_records:
continue
cond_dir = os.path.join(plots_dir, condition)
os.makedirs(cond_dir, exist_ok=True)
wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
sc_dir = os.path.join(cond_dir, 'sign_corrected')
ca_dir = os.path.join(cond_dir, 'cross_alignment')
os.makedirs(wc_dir, exist_ok=True)
os.makedirs(sc_dir, exist_ok=True)
os.makedirs(ca_dir, exist_ok=True)
# Within-category consistency
plot_within_cat_consistency_trajectory(
wc_data, scale, args.model_type,
os.path.join(wc_dir, f'within_cat_consistency_{scale}.png'))
# Sign-corrected consistency
plot_sign_corrected_consistency_trajectory(
sc_data, scale, args.model_type,
os.path.join(sc_dir, f'sign_corrected_consistency_{scale}.png'))
# Cross-group alignment
if cross_alignment:
plot_cross_group_alignment_trajectory(
cross_alignment, scale, args.model_type,
os.path.join(ca_dir, f'cross_alignment_{scale}.png'))
# PCA (from full NPZ) β€” 2D and 3D, all-pairs and both-correct
npz_path = os.path.join(output_dir, 'npz', f'vectors_{scale}.npz')
if os.path.exists(npz_path):
pca_dir = os.path.join(plots_dir, 'all', 'pca')
pca_3d_dir = os.path.join(plots_dir, 'all', 'pca_3d')
bc_pca_dir = os.path.join(plots_dir, 'both_correct', 'pca')
bc_pca_3d_dir = os.path.join(plots_dir, 'both_correct', 'pca_3d')
for d in (pca_dir, pca_3d_dir, bc_pca_dir, bc_pca_3d_dir):
os.makedirs(d, exist_ok=True)
plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir)
plot_pca_3d(npz_path, scale, args.model_type, pca_3d_dir)
plot_pca_embeddings(npz_path, scale, args.model_type, bc_pca_dir, bc_only=True)
plot_pca_3d(npz_path, scale, args.model_type, bc_pca_3d_dir, bc_only=True)
# Prediction stats bar (per-scale)
if pred_stats:
pred_plot_dir = os.path.join(plots_dir, 'all', 'pred_stats')
os.makedirs(pred_plot_dir, exist_ok=True)
plot_pred_stats_bars([pred_stats], args.model_type,
os.path.join(pred_plot_dir, f'pred_stats_{scale}.png'))
# Cleanup
del swap_records, quad_records, both_correct_records
extractor.cleanup()
logger.info(f"\n Scale {scale} complete.")
# ============================================================================
# Accuracy Chart (integrated from accuracy_chart.py)
# ============================================================================
def _acc_plot_group_bars(pred_stats, model_type, ax_list):
available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
x = np.arange(3)
width = 0.8 / max(len(available), 1)
for idx, group in enumerate(GROUP_ORDER):
ax = ax_list[idx]
for i, scale in enumerate(available):
entry = next((d for d in pred_stats if d['scale'] == scale), None)
if entry is None:
continue
vals = [entry.get(f'{group}_acc_orig', 0),
entry.get(f'{group}_acc_swap', 0),
entry.get(f'{group}_acc_both', 0)]
offset = (i - len(available) / 2 + 0.5) * width
ax.bar(x + offset, vals, width, label=scale,
color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85)
ax.set_xticks(x)
ax.set_xticklabels(['orig', 'swap', 'both'], fontsize=10)
ax.set_ylabel('Accuracy', fontsize=9)
ax.set_title(group.capitalize(), fontweight='bold', fontsize=11,
color=GROUP_COLORS.get(group, 'black'))
ax.legend(fontsize=7, ncol=2)
ax.set_ylim(0, 1.15)
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
ax.grid(True, alpha=0.3, axis='y')
def _acc_plot_both_trajectory(pred_stats, model_type, ax):
available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
x_ticks = range(len(available))
for group in GROUP_ORDER:
y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(f'{group}_acc_both', 0)
for s in available]
ax.plot(x_ticks, y_vals, '-o', color=GROUP_COLORS.get(group, 'gray'),
label=group, linewidth=2.5, markersize=7)
y_overall = [next((d for d in pred_stats if d['scale'] == s), {}).get('overall_acc_both', 0)
for s in available]
ax.plot(x_ticks, y_overall, '--s', color='black', label='overall',
linewidth=2, markersize=6, alpha=0.7)
ax.set_xticks(list(x_ticks))
ax.set_xticklabels(available, fontsize=9)
ax.set_xlabel('Scale', fontsize=9)
ax.set_ylabel('Accuracy (both correct)', fontsize=9)
ax.set_title('Both-Correct Accuracy Trajectory', fontweight='bold', fontsize=11)
ax.legend(fontsize=9)
ax.set_ylim(0, 1.05)
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
ax.grid(True, alpha=0.3)
def _acc_plot_overall_trajectory(pred_stats, model_type, ax):
available = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
x_ticks = range(len(available))
for metric, label, ls in [
('overall_acc_orig', 'orig', '-o'),
('overall_acc_swap', 'swap', '-s'),
('overall_acc_both', 'both', '-^'),
]:
y_vals = [next((d for d in pred_stats if d['scale'] == s), {}).get(metric, 0)
for s in available]
ax.plot(x_ticks, y_vals, ls, label=label, linewidth=2.2, markersize=6)
ax.set_xticks(list(x_ticks))
ax.set_xticklabels(available, fontsize=9)
ax.set_xlabel('Scale', fontsize=9)
ax.set_ylabel('Overall Accuracy', fontsize=9)
ax.set_title('Overall Accuracy Trajectory', fontweight='bold', fontsize=11)
ax.legend(fontsize=9)
ax.set_ylim(0, 1.05)
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
ax.grid(True, alpha=0.3)
def _acc_plot_category_accuracy(cat_validity, model_type, ax_orig, ax_swap, pred_stats=None):
available = [s for s in SCALE_ORDER if s in cat_validity]
cats_with_overall = CATEGORY_ORDER + ['overall']
x = np.arange(len(cats_with_overall))
width = 0.8 / max(len(available), 1)
overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'}
for ax, metric, title in [
(ax_orig, 'acc_orig', 'Per-Category Accuracy (orig)'),
(ax_swap, 'acc_swap', 'Per-Category Accuracy (swap)'),
]:
for i, scale in enumerate(available):
cv = cat_validity[scale]
vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER]
if pred_stats is not None:
entry = next((d for d in pred_stats if d['scale'] == scale), None)
vals.append(entry.get(overall_key[metric], 0) if entry else 0)
else:
vals.append(0)
offset = (i - len(available) / 2 + 0.5) * width
ax.bar(x + offset, vals, width, label=scale,
color=SCALE_COLORS.get(scale, 'gray'), alpha=0.85)
for j, cat in enumerate(CATEGORY_ORDER):
ax.axvspan(j - 0.45, j + 0.45, color=CAT_COLORS.get(cat, 'gray'), alpha=0.06, linewidth=0)
ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black', linewidth=1.2, linestyle=':', alpha=0.6)
ax.set_xticks(x)
ax.set_xticklabels(cats_with_overall, fontsize=9, rotation=15)
ax.set_ylabel('Accuracy', fontsize=9)
ax.set_title(title, fontweight='bold', fontsize=11)
ax.legend(fontsize=7, ncol=2)
ax.set_ylim(0, 1.15)
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5, linewidth=1)
ax.grid(True, alpha=0.3, axis='y')
if available:
last_cv = cat_validity[available[-1]]
for j, cat in enumerate(CATEGORY_ORDER):
if not last_cv.get(cat, {}).get('reliable', True):
ax.text(j, 1.08, 'βœ—', ha='center', va='center',
fontsize=9, color='red', fontweight='bold')
def _acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=None):
cats_with_overall = CATEGORY_ORDER + ['overall']
overall_key = {'acc_orig': 'overall_acc_orig', 'acc_swap': 'overall_acc_swap'}
for scale in sorted(cat_validity.keys(),
key=lambda s: SCALE_ORDER.index(s) if s in SCALE_ORDER else 99):
cv = cat_validity[scale]
ps_entry = next((d for d in pred_stats if d['scale'] == scale), None) if pred_stats else None
fig, axes = plt.subplots(1, 2, figsize=(16, 5))
x = np.arange(len(cats_with_overall))
width = 0.55
for ax, metric, title in [
(axes[0], 'acc_orig', f'acc_orig ({scale})'),
(axes[1], 'acc_swap', f'acc_swap ({scale})'),
]:
vals = [cv.get(cat, {}).get(metric, 0) for cat in CATEGORY_ORDER]
overall_val = ps_entry.get(overall_key[metric], 0) if ps_entry else 0
vals.append(overall_val)
colors = [CAT_COLORS.get(cat, 'gray') for cat in CATEGORY_ORDER] + ['#333333']
bars = ax.bar(x, vals, width, color=colors, alpha=0.85, edgecolor='white')
ax.axvline(x=len(CATEGORY_ORDER) - 0.5, color='black',
linewidth=1.2, linestyle=':', alpha=0.6)
ax.set_xticks(x)
ax.set_xticklabels(cats_with_overall, fontsize=10)
ax.set_ylabel('Accuracy', fontsize=10)
ax.set_title(title, fontweight='bold', fontsize=12)
ax.set_ylim(0, 1.15)
ax.axhline(y=0.5, color='gray', linestyle='--', alpha=0.5)
ax.grid(True, alpha=0.3, axis='y')
for bar, cat in zip(bars, cats_with_overall):
reliable = cv.get(cat, {}).get('reliable', True) if cat != 'overall' else True
h = bar.get_height()
ax.text(bar.get_x() + bar.get_width() / 2, h + 0.02,
f'{h:.2f}' + ('' if reliable else ' βœ—'),
ha='center', va='bottom', fontsize=8,
color='red' if not reliable else 'black')
fig.suptitle(f'{model_type.upper()} - Category Accuracy ({scale})',
fontsize=13, fontweight='bold')
plt.tight_layout()
out = os.path.join(save_dir, f'category_accuracy_{scale}.png')
plt.savefig(out, dpi=200, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {out}")
def run_accuracy_charts(pred_stats, cat_validity, model_type, save_dir):
"""Generate all accuracy chart plots into save_dir."""
os.makedirs(save_dir, exist_ok=True)
# Group bars
fig, axes = plt.subplots(1, 3, figsize=(21, 6))
_acc_plot_group_bars(pred_stats, model_type, axes)
fig.suptitle(f'{model_type.upper()} - Prediction Accuracy by Group',
fontsize=15, fontweight='bold')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, 'accuracy_group_bars.png'), dpi=200, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_group_bars.png')}")
# Trajectory
fig, axes = plt.subplots(1, 2, figsize=(16, 6))
_acc_plot_both_trajectory(pred_stats, model_type, axes[0])
_acc_plot_overall_trajectory(pred_stats, model_type, axes[1])
fig.suptitle(f'{model_type.upper()} - Accuracy Trajectory Across Scales',
fontsize=14, fontweight='bold')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, 'accuracy_trajectory.png'), dpi=200, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_trajectory.png')}")
if cat_validity:
# Category bars (all scales overlay)
fig, axes = plt.subplots(1, 2, figsize=(20, 6))
_acc_plot_category_accuracy(cat_validity, model_type, axes[0], axes[1],
pred_stats=pred_stats)
fig.suptitle(f'{model_type.upper()} - Per-Category Accuracy Across Scales',
fontsize=14, fontweight='bold')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, 'accuracy_category.png'), dpi=200, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_category.png')}")
# Per-scale category bars
_acc_plot_category_per_scale(cat_validity, model_type, save_dir, pred_stats=pred_stats)
# Combined accuracy_chart.png
fig = plt.figure(figsize=(24, 14))
ax_h = fig.add_subplot(3, 3, 1)
ax_v = fig.add_subplot(3, 3, 2)
ax_d = fig.add_subplot(3, 3, 3)
_acc_plot_group_bars(pred_stats, model_type, [ax_h, ax_v, ax_d])
ax_tb = fig.add_subplot(3, 3, 4)
ax_to = fig.add_subplot(3, 3, 5)
_acc_plot_both_trajectory(pred_stats, model_type, ax_tb)
_acc_plot_overall_trajectory(pred_stats, model_type, ax_to)
ax_note = fig.add_subplot(3, 3, 6)
ax_note.axis('off')
available_scales = [s for s in SCALE_ORDER if any(d['scale'] == s for d in pred_stats)]
ax_note.text(0.1, 0.6,
f'Scales: {", ".join(available_scales)}\n\nβœ— = unreliable category\n-- = 0.5 chance level',
transform=ax_note.transAxes, fontsize=11, va='top', family='monospace')
if cat_validity:
ax_co = fig.add_subplot(3, 2, 5)
ax_cs = fig.add_subplot(3, 2, 6)
_acc_plot_category_accuracy(cat_validity, model_type, ax_co, ax_cs, pred_stats=pred_stats)
fig.suptitle(f'{model_type.upper()} β€” Accuracy Summary',
fontsize=17, fontweight='bold', y=1.01)
plt.tight_layout()
plt.savefig(os.path.join(save_dir, 'accuracy_chart.png'), dpi=200, bbox_inches='tight')
plt.close()
logger.info(f"Saved: {os.path.join(save_dir, 'accuracy_chart.png')}")
# ============================================================================
# Unify Consistency Y-axis (integrated from unify_consistency_ylim.py)
# ============================================================================
def _ylim_compute(all_vals, margin_ratio=0.08):
if not all_vals:
return -1, 1
ymin, ymax = min(all_vals), max(all_vals)
margin = (ymax - ymin) * margin_ratio
return ymin - margin, ymax + margin
def _ylim_load_keyed_json(path):
if not os.path.exists(path):
return None
with open(path) as f:
raw = json.load(f)
if not raw:
return None
result = {}
for key, vals in raw.items():
parts = key.rsplit('_L', 1)
if len(parts) == 2:
result[(parts[0], int(parts[1]))] = vals
return result if result else None
def _ylim_load_alignment_json(path):
if not os.path.exists(path):
return None
with open(path) as f:
raw = json.load(f)
if not raw:
return None
result = {int(k[1:]): v for k, v in raw.items() if k.startswith('L')}
return result if result else None
def _ylim_plot_sign_corrected(data, scale, model_type, save_path, ylim):
fig, ax = plt.subplots(figsize=(12, 6))
for group in GROUP_ORDER:
layers, vals = [], []
for (g, l), v in sorted(data.items(), key=lambda x: x[0][1]):
if g == group:
layers.append(l)
vals.append(v['mean'])
if layers:
ax.plot(layers, vals, '-o', color=GROUP_COLORS[group],
label=group, linewidth=2, markersize=3)
ax.set_ylim(ylim)
ax.set_xlabel('Layer Index')
ax.set_ylabel('Sign-Corrected Consistency')
ax.set_title(f'{model_type.upper()} ({scale}) - Sign-Corrected Group Consistency',
fontweight='bold')
ax.legend(fontsize=11)
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
def _ylim_plot_within_cat(data, scale, model_type, save_path, ylim):
fig, ax = plt.subplots(figsize=(12, 6))
for cat in CATEGORY_ORDER:
layers, vals = [], []
for (c, l), v in sorted(data.items(), key=lambda x: x[0][1]):
if c == cat:
layers.append(l)
vals.append(v['mean'])
if layers:
ax.plot(layers, vals, '-o', color=CAT_COLORS[cat],
label=cat, linewidth=2, markersize=3)
ax.set_ylim(ylim)
ax.set_xlabel('Layer Index')
ax.set_ylabel('Within-Category Consistency')
ax.set_title(f'{model_type.upper()} ({scale}) - Within-Category Delta Consistency',
fontweight='bold')
ax.legend(fontsize=9)
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
def _ylim_plot_cross_alignment(data, scale, model_type, save_path, ylim):
fig, ax = plt.subplots(figsize=(12, 6))
layers = sorted(data.keys())
ax.plot(layers, [data[l]['per_sample_mean'] for l in layers], '-o', color='#d62728',
label='cos(d_vert, d_dist) per-sample mean', linewidth=2.5, markersize=3)
ax.plot(layers, [data[l]['mean_delta_alignment'] for l in layers], '--s', color='#e377c2',
label='cos(mean_d_vert, mean_d_dist)', linewidth=1.5, markersize=3)
perm_mean = [data[l]['permutation_mean'] for l in layers]
perm_std = [data[l]['permutation_std'] for l in layers]
ax.plot(layers, perm_mean, ':', color='gray', label='permutation control', linewidth=1.5)
ax.fill_between(layers,
[m - 2*s for m, s in zip(perm_mean, perm_std)],
[m + 2*s for m, s in zip(perm_mean, perm_std)],
alpha=0.2, color='gray')
ax.set_ylim(ylim)
ax.set_xlabel('Layer Index')
ax.set_ylabel('Cosine Alignment')
ax.set_title(f'{model_type.upper()} ({scale}) - Cross-Group Alignment (Perspective Bias)',
fontweight='bold')
ax.legend(fontsize=9)
ax.grid(True, alpha=0.3)
plt.tight_layout()
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close()
def _ylim_process_plot_type(data_dir, plots_dir, conditions, model_type,
plot_name, json_pattern, loader, val_gatherer, plotter,
subfolder=None):
"""Re-plot one plot type across all conditions with a unified y-axis."""
logger.info(f" [unify ylim] {plot_name}")
for condition, condition_tag in conditions:
cond_plot_dir = os.path.join(plots_dir, condition)
if not os.path.isdir(cond_plot_dir):
continue
save_dir = os.path.join(cond_plot_dir, subfolder) if subfolder else cond_plot_dir
os.makedirs(save_dir, exist_ok=True)
all_data = {}
for scale in SCALE_ORDER:
path = os.path.join(data_dir, 'json',
json_pattern.format(scale=scale, tag=condition_tag))
loaded = loader(path)
if loaded:
all_data[scale] = loaded
if not all_data:
continue
all_vals = val_gatherer(all_data)
ylim = _ylim_compute(all_vals)
for scale, data in all_data.items():
save_path = os.path.join(save_dir, f'{plot_name}_{scale}.png')
plotter(data, scale, model_type, save_path, ylim)
logger.info(f" {condition}: y=[{ylim[0]:.4f}, {ylim[1]:.4f}], {len(all_data)} scales")
def run_unify_ylim(data_dir, plots_dir, model_type):
"""Unify y-axis for sign_corrected, within_cat, and cross_alignment plots."""
conditions = [
('all', 'all_pairs'),
('both_correct', 'both_correct'),
]
def gather_keyed(all_data):
return [v['mean'] for data in all_data.values() for v in data.values()]
def gather_alignment(all_data):
vals = []
for data in all_data.values():
for v in data.values():
vals += [v['per_sample_mean'], v['mean_delta_alignment'],
v['permutation_mean'] + 2 * v['permutation_std'],
v['permutation_mean'] - 2 * v['permutation_std']]
return vals
_ylim_process_plot_type(
data_dir, plots_dir, conditions, model_type,
plot_name='sign_corrected_consistency',
json_pattern='sign_corrected_consistency_{scale}_{tag}.json',
loader=_ylim_load_keyed_json,
val_gatherer=gather_keyed,
plotter=_ylim_plot_sign_corrected,
subfolder='sign_corrected',
)
_ylim_process_plot_type(
data_dir, plots_dir, conditions, model_type,
plot_name='within_cat_consistency',
json_pattern='within_cat_consistency_{scale}_{tag}.json',
loader=_ylim_load_keyed_json,
val_gatherer=gather_keyed,
plotter=_ylim_plot_within_cat,
subfolder='within_cat_consistency',
)
_ylim_process_plot_type(
data_dir, plots_dir, conditions, model_type,
plot_name='cross_alignment',
json_pattern='cross_alignment_{scale}.json',
loader=_ylim_load_alignment_json,
val_gatherer=gather_alignment,
plotter=_ylim_plot_cross_alignment,
subfolder='cross_alignment',
)
def _check_merge_only_sources(output_dir: str, model_type: str) -> bool:
"""Verify required source directories have data for a merge-only model_type.
Returns True if all sources look healthy, False (with warnings) if not.
"""
mc = MERGE_ONLY_CONFIGS[model_type]
ok = True
for req_dir in mc['required_dirs']:
src_path = os.path.join(output_dir, req_dir)
json_dir = os.path.join(src_path, 'json')
if not os.path.isdir(src_path):
if req_dir in MODEL_CONFIGS_NEW:
hint = f"python swap_analysis.py --model_type {req_dir}"
else:
hint = f"python swap_analysis.py --model_type {req_dir}"
logger.warning(
f"[{model_type}] Required source directory not found: {src_path}\n"
f" β†’ Run inference first: {hint}"
)
ok = False
elif not os.path.isdir(json_dir) or not any(
f.startswith('pred_stats_') for f in os.listdir(json_dir)
):
logger.warning(
f"[{model_type}] Source directory exists but has no pred_stats JSON: {json_dir}\n"
f" β†’ Inference may not have completed for '{req_dir}'."
)
ok = False
else:
scales_found = [
f.replace('pred_stats_', '').replace('.json', '')
for f in os.listdir(json_dir)
if f.startswith('pred_stats_')
]
logger.info(f" [{req_dir}] found scales: {scales_found}")
return ok
def _load_scale_data_multi(output_dir: str, model_type: str, scale: str, scale_sources: dict):
"""Load per-scale data for one scale, looking in the correct source directory.
Returns (sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc).
Any unavailable item is None / {}.
"""
src_dir = os.path.join(output_dir, scale_sources.get(scale, model_type))
sc = load_scale_consistency(src_dir, scale, 'all_pairs')
sc_bc = load_scale_consistency(src_dir, scale, 'both_correct')
wc = load_within_cat_consistency(src_dir, scale, 'all_pairs')
wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct')
align = load_scale_alignment(src_dir, scale)
pred_stat = None
pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json')
if os.path.exists(pred_path):
with open(pred_path) as f:
pred_stat = json.load(f)
cat_validity = None
cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json')
if os.path.exists(cv_path):
with open(cv_path) as f:
cat_validity = json.load(f)
dh = load_delta_heatmaps(src_dir, scale, 'all_pairs')
dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct')
return sc, sc_bc, wc, wc_bc, align, pred_stat, cat_validity, dh, dh_bc
# ---------------------------------------------------------------------------
# All-layer heatmap + PCA helpers (called from run_merge / run_merge_extended)
# ---------------------------------------------------------------------------
def _get_csv_layers(csv_dir: str, scale: str, tag: str) -> list:
"""Return sorted list of layer indices that have a delta_similarity CSV."""
import glob as _glob
pattern = os.path.join(csv_dir, f'delta_similarity_{scale}_L*_{tag}.csv')
layers = []
for fpath in _glob.glob(pattern):
m = re.search(
rf'delta_similarity_{re.escape(scale)}_L(\d+)_{re.escape(tag)}\.csv$',
os.path.basename(fpath))
if m:
layers.append(int(m.group(1)))
return sorted(layers)
def run_all_layer_heatmaps(model_dir: str, model_type: str, scales: list):
"""Generate delta-similarity heatmaps for ALL layers from pre-computed CSVs.
Reads {model_dir}/csv/delta_similarity_{scale}_L{n}_{tag}.csv
Writes {model_dir}/plots/all/heatmap/heatmap_{scale}_L{n}.png (all_pairs)
{model_dir}/plots/both_correct/heatmap/heatmap_{scale}_L{n}.png (both_correct)
Skips a scale if the NPZ is missing or any all_pairs CSV is absent
(indicates inference was not fully completed for that scale).
"""
TAG_TO_DIR = {
'all_pairs': os.path.join(model_dir, 'plots', 'all', 'heatmap'),
'both_correct': os.path.join(model_dir, 'plots', 'both_correct', 'heatmap'),
}
for scale in scales:
npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
csv_dir = os.path.join(model_dir, 'csv')
if not os.path.exists(npz_path):
logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping heatmaps.')
continue
data = np.load(npz_path, allow_pickle=True)
npz_layers = sorted(
int(k.replace('orig_L', ''))
for k in data.files if k.startswith('orig_L')
)
data.close()
if not npz_layers:
logger.warning(f' [{model_type}/{scale}] No orig_L* keys in NPZ, skipping heatmaps.')
continue
csv_layers = _get_csv_layers(csv_dir, scale, 'all_pairs')
missing = set(npz_layers) - set(csv_layers)
if missing:
logger.warning(
f' [{model_type}/{scale}] {len(missing)} NPZ layers lack CSVs '
f'(e.g. L{sorted(missing)[:5]}). Skipping all-layer heatmaps.')
continue
for out_dir in TAG_TO_DIR.values():
os.makedirs(out_dir, exist_ok=True)
logger.info(f' [{model_type}/{scale}] Generating heatmaps for {len(npz_layers)} layers...')
saved = 0
for layer in npz_layers:
for tag, out_dir in TAG_TO_DIR.items():
csv_path = os.path.join(csv_dir, f'delta_similarity_{scale}_L{layer}_{tag}.csv')
if not os.path.exists(csv_path):
continue # both_correct CSV may be absent for some layers
df = pd.read_csv(csv_path, index_col=0)
available = [c for c in CATEGORY_ORDER if c in df.index]
if not available:
continue
df = df.loc[available, available]
title = (
f'{model_type.upper()} ({scale}) \u2014 Delta Heatmap L{layer} '
f'({"both-correct" if tag == "both_correct" else "all pairs"})'
)
out_path = os.path.join(out_dir, f'heatmap_{scale}_L{layer}.png')
plot_delta_heatmap(df, title, out_path)
saved += 1
logger.info(f' [{model_type}/{scale}] Saved {saved} heatmaps')
def run_all_layer_pca(model_dir: str, model_type: str, scales: list):
"""Generate 2D and 3D PCA plots for ALL layers from saved NPZ files.
Writes {model_dir}/plots/all/pca/pca_{scale}_L{n}.png (all pairs)
{model_dir}/plots/all/pca_3d/pca_{scale}_L{n}.png
{model_dir}/plots/both_correct/pca/pca_{scale}_L{n}.png (both-correct only)
{model_dir}/plots/both_correct/pca_3d/pca_{scale}_L{n}.png
"""
for scale in scales:
npz_path = os.path.join(model_dir, 'npz', f'vectors_{scale}.npz')
if not os.path.exists(npz_path):
logger.warning(f' [{model_type}/{scale}] NPZ not found, skipping PCA.')
continue
# All-pairs PCA
pca_2d_dir = os.path.join(model_dir, 'plots', 'all', 'pca')
pca_3d_dir = os.path.join(model_dir, 'plots', 'all', 'pca_3d')
os.makedirs(pca_2d_dir, exist_ok=True)
os.makedirs(pca_3d_dir, exist_ok=True)
logger.info(f' [{model_type}/{scale}] Generating all-layer 2D PCA...')
plot_pca_embeddings(npz_path, scale, model_type, pca_2d_dir)
logger.info(f' [{model_type}/{scale}] Generating all-layer 3D PCA...')
plot_pca_3d(npz_path, scale, model_type, pca_3d_dir)
# Both-correct PCA
bc_pca_2d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca')
bc_pca_3d_dir = os.path.join(model_dir, 'plots', 'both_correct', 'pca_3d')
os.makedirs(bc_pca_2d_dir, exist_ok=True)
os.makedirs(bc_pca_3d_dir, exist_ok=True)
logger.info(f' [{model_type}/{scale}] Generating both-correct 2D PCA...')
plot_pca_embeddings(npz_path, scale, model_type, bc_pca_2d_dir, bc_only=True)
logger.info(f' [{model_type}/{scale}] Generating both-correct 3D PCA...')
plot_pca_3d(npz_path, scale, model_type, bc_pca_3d_dir, bc_only=True)
def run_merge(args):
# Per-scale data is always read from the standard results dir
data_dir = os.path.join(args.output_dir, args.model_type)
# Cross-scale plots go to merge_output_dir if specified, else same as data_dir
output_dir = args.merge_output_dir if args.merge_output_dir else data_dir
plots_dir = os.path.join(output_dir, 'plots')
os.makedirs(plots_dir, exist_ok=True)
scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer', '10pct', '20pct', '30pct']
available_scales = [s for s in scale_order if s in args.scales]
# Load per-scale results
all_sign_corrected = {}
all_sign_corrected_bc = {}
all_within_cat = {}
all_within_cat_bc = {}
all_alignment = {}
all_pred_stats = []
all_cat_validity = {}
all_delta_heatmaps = {}
all_delta_heatmaps_bc = {}
for scale in available_scales:
sc = load_scale_consistency(data_dir, scale, 'all_pairs')
if sc:
all_sign_corrected[scale] = sc
sc_bc = load_scale_consistency(data_dir, scale, 'both_correct')
if sc_bc:
all_sign_corrected_bc[scale] = sc_bc
wc = load_within_cat_consistency(data_dir, scale, 'all_pairs')
if wc:
all_within_cat[scale] = wc
wc_bc = load_within_cat_consistency(data_dir, scale, 'both_correct')
if wc_bc:
all_within_cat_bc[scale] = wc_bc
align = load_scale_alignment(data_dir, scale)
if align:
all_alignment[scale] = align
pred_path = os.path.join(data_dir, 'json', f'pred_stats_{scale}.json')
if os.path.exists(pred_path):
with open(pred_path) as f:
all_pred_stats.append(json.load(f))
cv_path = os.path.join(data_dir, 'json', f'category_validity_{scale}.json')
if os.path.exists(cv_path):
with open(cv_path) as f:
all_cat_validity[scale] = json.load(f)
dh = load_delta_heatmaps(data_dir, scale, 'all_pairs')
if dh:
all_delta_heatmaps[scale] = dh
dh_bc = load_delta_heatmaps(data_dir, scale, 'both_correct')
if dh_bc:
all_delta_heatmaps_bc[scale] = dh_bc
logger.info(f" Loaded data for {scale}")
# Generate cross-scale plots into condition subdirs
for condition, sc_data, wc_data, dh_data, tag_label in [
('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'),
]:
cond_dir = os.path.join(plots_dir, condition)
sc_dir = os.path.join(cond_dir, 'sign_corrected')
wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
dt_dir = os.path.join(cond_dir, 'delta_trajectory')
os.makedirs(sc_dir, exist_ok=True)
os.makedirs(wc_dir, exist_ok=True)
os.makedirs(dt_dir, exist_ok=True)
if len(sc_data) > 1:
plot_cross_scale_consistency(
sc_data, args.model_type,
os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
title_prefix=f'Sign-Corrected ({tag_label})')
if len(wc_data) > 1:
plot_cross_scale_within_cat_consistency(
wc_data, args.model_type,
os.path.join(wc_dir, 'cross_scale_within_cat.png'))
if dh_data:
plot_delta_trajectory(dh_data, args.model_type,
os.path.join(dt_dir, 'delta_trajectory.png'))
# Cross-scale alignment + pred stats + summary (shared across conditions)
all_cond_dir = os.path.join(plots_dir, 'all')
ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats')
summary_dir = os.path.join(all_cond_dir, 'summary')
os.makedirs(ca_dir, exist_ok=True)
os.makedirs(pred_stats_dir, exist_ok=True)
os.makedirs(summary_dir, exist_ok=True)
if len(all_alignment) > 1:
plot_cross_scale_alignment(
all_alignment, args.model_type,
os.path.join(ca_dir, 'cross_scale_alignment.png'))
# Prediction stats plots
if all_pred_stats:
plot_pred_stats_bars(all_pred_stats, args.model_type,
os.path.join(pred_stats_dir, 'pred_stats_bars.png'))
plot_pred_stats_trajectory(all_pred_stats, args.model_type,
os.path.join(pred_stats_dir, 'pred_stats_trajectory.png'))
# Summary barplot
if all_sign_corrected:
plot_summary_barplot(
all_sign_corrected, all_alignment, args.model_type,
os.path.join(summary_dir, 'summary_barplot.png'))
# Summary CSV
summary_rows = []
for scale in available_scales:
pred_path = os.path.join(data_dir, 'json', f'pred_stats_{scale}.json')
if os.path.exists(pred_path):
with open(pred_path) as f:
row = json.load(f)
if scale in all_alignment:
max_layer = max(all_alignment[scale].keys())
row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
summary_rows.append(row)
if summary_rows:
csv_dir = os.path.join(output_dir, 'csv')
os.makedirs(csv_dir, exist_ok=True)
pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False)
# Accuracy charts
if all_pred_stats:
acc_dir = os.path.join(plots_dir, 'accuracy')
logger.info("\n--- Accuracy Charts ---")
run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir)
# Unify y-axis across scales for per-scale trajectory plots
logger.info("\n--- Unifying Y-axis ---")
run_unify_ylim(data_dir, plots_dir, args.model_type)
# All-layer heatmaps + PCA (from saved CSVs / NPZ)
logger.info("\n--- All-Layer Heatmaps ---")
run_all_layer_heatmaps(data_dir, args.model_type, available_scales)
logger.info("\n--- All-Layer PCA ---")
run_all_layer_pca(data_dir, args.model_type, available_scales)
logger.info(f"\n=== Merge Complete ===\nResults in: {output_dir}")
def run_merge_extended(args):
"""Generate cross-scale plots for new / merge-only model_types.
- Runnable types (molmo_big, qwen_big, qwen_super, big_trio):
loads all data from results/{model_type}/ and saves plots there.
- Merge-only types (molmo_all, qwen_all):
loads per-scale data from the respective source directories,
saves all cross-scale plots to results/{model_type}/.
"""
is_merge_only = args.model_type in MERGE_ONLY_CONFIGS
# ── Determine scale order and data source strategy ────────────────────────
if is_merge_only:
mc = MERGE_ONLY_CONFIGS[args.model_type]
scale_order = mc['scale_order']
scale_sources = mc['scale_sources']
logger.info(f"\n=== MERGE-ONLY mode: {args.model_type} ===")
logger.info("Checking required source directories...")
sources_ok = _check_merge_only_sources(args.output_dir, args.model_type)
if not sources_ok:
logger.warning(
f"\n[WARNING] One or more source directories are missing or incomplete.\n"
f" Cross-scale plots for '{args.model_type}' may be partial.\n"
f" Run the missing model types first (see warnings above), then retry merge."
)
else:
scale_order = SCALE_ORDERS_NEW.get(
args.model_type, list(MODEL_CONFIGS_NEW[args.model_type]))
scale_sources = None # all data lives in results/{model_type}/
available_scales = [s for s in scale_order if s in args.scales]
logger.info(f"Merging scales (in order): {available_scales}")
out_dir = os.path.join(args.output_dir, args.model_type)
plots_dir = os.path.join(out_dir, 'plots')
os.makedirs(plots_dir, exist_ok=True)
# ── Load per-scale data ───────────────────────────────────────────────────
all_sign_corrected = {}
all_sign_corrected_bc = {}
all_within_cat = {}
all_within_cat_bc = {}
all_alignment = {}
all_pred_stats = []
all_cat_validity = {}
all_delta_heatmaps = {}
all_delta_heatmaps_bc = {}
for scale in available_scales:
if is_merge_only:
(sc, sc_bc, wc, wc_bc, align,
pred_stat, cat_validity, dh, dh_bc) = _load_scale_data_multi(
args.output_dir, args.model_type, scale, scale_sources)
else:
src_dir = out_dir
sc = load_scale_consistency(src_dir, scale, 'all_pairs')
sc_bc = load_scale_consistency(src_dir, scale, 'both_correct')
wc = load_within_cat_consistency(src_dir, scale, 'all_pairs')
wc_bc = load_within_cat_consistency(src_dir, scale, 'both_correct')
align = load_scale_alignment(src_dir, scale)
pred_stat = None
pred_path = os.path.join(src_dir, 'json', f'pred_stats_{scale}.json')
if os.path.exists(pred_path):
with open(pred_path) as f:
pred_stat = json.load(f)
cat_validity = None
cv_path = os.path.join(src_dir, 'json', f'category_validity_{scale}.json')
if os.path.exists(cv_path):
with open(cv_path) as f:
cat_validity = json.load(f)
dh = load_delta_heatmaps(src_dir, scale, 'all_pairs')
dh_bc = load_delta_heatmaps(src_dir, scale, 'both_correct')
if sc:
all_sign_corrected[scale] = sc
if sc_bc:
all_sign_corrected_bc[scale] = sc_bc
if wc:
all_within_cat[scale] = wc
if wc_bc:
all_within_cat_bc[scale] = wc_bc
if align:
all_alignment[scale] = align
if pred_stat is not None:
all_pred_stats.append(pred_stat)
if cat_validity is not None:
all_cat_validity[scale] = cat_validity
if dh:
all_delta_heatmaps[scale] = dh
if dh_bc:
all_delta_heatmaps_bc[scale] = dh_bc
logger.info(f" Loaded data for '{scale}'"
+ (f" (from '{scale_sources[scale]}')" if is_merge_only else ""))
# ── Cross-scale plots ─────────────────────────────────────────────────────
for condition, sc_data, wc_data, dh_data, tag_label in [
('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'),
]:
cond_dir = os.path.join(plots_dir, condition)
sc_dir = os.path.join(cond_dir, 'sign_corrected')
wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
dt_dir = os.path.join(cond_dir, 'delta_trajectory')
os.makedirs(sc_dir, exist_ok=True)
os.makedirs(wc_dir, exist_ok=True)
os.makedirs(dt_dir, exist_ok=True)
if len(sc_data) > 1:
plot_cross_scale_consistency(
sc_data, args.model_type,
os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
title_prefix=f'Sign-Corrected ({tag_label})')
if len(wc_data) > 1:
plot_cross_scale_within_cat_consistency(
wc_data, args.model_type,
os.path.join(wc_dir, 'cross_scale_within_cat.png'))
if dh_data:
plot_delta_trajectory(
dh_data, args.model_type,
os.path.join(dt_dir, 'delta_trajectory.png'))
# ── Alignment and prediction stats ────────────────────────────────────────
all_cond_dir = os.path.join(plots_dir, 'all')
ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
pred_stats_dir = os.path.join(all_cond_dir, 'pred_stats')
summary_dir = os.path.join(all_cond_dir, 'summary')
os.makedirs(ca_dir, exist_ok=True)
os.makedirs(pred_stats_dir, exist_ok=True)
os.makedirs(summary_dir, exist_ok=True)
if len(all_alignment) > 1:
plot_cross_scale_alignment(
all_alignment, args.model_type,
os.path.join(ca_dir, 'cross_scale_alignment.png'))
if all_pred_stats:
plot_pred_stats_bars(
all_pred_stats, args.model_type,
os.path.join(pred_stats_dir, 'pred_stats_bars.png'))
plot_pred_stats_trajectory(
all_pred_stats, args.model_type,
os.path.join(pred_stats_dir, 'pred_stats_trajectory.png'))
if all_sign_corrected:
plot_summary_barplot(
all_sign_corrected, all_alignment, args.model_type,
os.path.join(summary_dir, 'summary_barplot.png'))
# ── Summary CSV ───────────────────────────────────────────────────────────
summary_rows = []
for scale in available_scales:
ps = next((p for p in all_pred_stats if p.get('scale') == scale), None)
if ps is None:
continue
row = dict(ps)
if scale in all_alignment:
max_layer = max(all_alignment[scale].keys())
row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
summary_rows.append(row)
if summary_rows:
csv_dir = os.path.join(out_dir, 'csv')
os.makedirs(csv_dir, exist_ok=True)
pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False)
# ── Accuracy charts ───────────────────────────────────────────────────────
if all_pred_stats:
acc_dir = os.path.join(plots_dir, 'accuracy')
logger.info("\n--- Accuracy Charts ---")
run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir)
# ── Unify y-axis ──────────────────────────────────────────────────────────
# For merge-only types, per-scale JSON files span multiple source dirs,
# so run_unify_ylim (which expects all JSON in one dir) is skipped.
if not is_merge_only:
logger.info("\n--- Unifying Y-axis ---")
run_unify_ylim(out_dir, plots_dir, args.model_type)
else:
logger.info("\n--- Skipping y-axis unification (per-scale data spans multiple source dirs) ---")
# ── All-layer heatmaps + PCA ──────────────────────────────────────────────
if not is_merge_only:
logger.info("\n--- All-Layer Heatmaps ---")
run_all_layer_heatmaps(out_dir, args.model_type, available_scales)
logger.info("\n--- All-Layer PCA ---")
run_all_layer_pca(out_dir, args.model_type, available_scales)
else:
# Merge-only types: NPZ/CSV files live in separate source directories
from collections import defaultdict as _defaultdict
mc_cfg = MERGE_ONLY_CONFIGS[args.model_type]
src_to_scales = _defaultdict(list)
for scale in available_scales:
src_to_scales[mc_cfg['scale_sources'][scale]].append(scale)
logger.info("\n--- All-Layer Heatmaps (per source) ---")
for src_key, src_scales in src_to_scales.items():
run_all_layer_heatmaps(
os.path.join(args.output_dir, src_key), src_key, src_scales)
logger.info("\n--- All-Layer PCA (per source) ---")
for src_key, src_scales in src_to_scales.items():
run_all_layer_pca(
os.path.join(args.output_dir, src_key), src_key, src_scales)
logger.info(f"\n=== Merge Complete ===\nResults saved to: {out_dir}")
def main():
# Default scales per legacy model_type (new types use their own defaults)
_LEGACY_DEFAULT_SCALES = {
'molmo': ['vanilla', '80k', '400k', '800k', '2m'],
'nvila': ['vanilla', '80k', '400k', '800k', '2m'],
'qwen': ['vanilla', '80k', '400k', '800k', '2m'],
'nvila_synthetic': ['80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '400k-5pct'],
}
parser = argparse.ArgumentParser(
description='Swap Analysis β€” Spatial Representation Probing',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--data_path', type=str,
default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv')
parser.add_argument('--model_type', type=str, required=True,
choices=ALL_MODEL_TYPES,
help=(
'Legacy: molmo | nvila | qwen\n'
'Synthetic: nvila_synthetic\n'
'New large: molmo_big | qwen_big | qwen_super | big_trio\n'
'Merge-only (--merge required): molmo_all | qwen_all'
))
parser.add_argument('--scales', type=str, nargs='+', default=None,
help='Scales to process (default: all for the given model_type).')
parser.add_argument('--output_dir', type=str,
default='/data/shared/Qwen/experiments/swap_analysis/results')
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--merge', action='store_true',
help='Merge mode: generate cross-scale plots from saved per-scale data.')
parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir',
help='Override output dir for cross-scale plots (NVILA dual-merge).')
parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer',
help='Disable automatic inclusion of roborefer scale for nvila.')
parser.add_argument('--skip-cross-group', action='store_true')
parser.add_argument('--max-samples-per-category', type=int, default=200,
dest='max_samples_per_category')
parser.add_argument('--no-filtering', action='store_true', dest='no_filtering',
help='Disable Unknown/empty filtering for far/close reference objects.'
' By default, Unknown candidates are removed before sampling.')
parser.add_argument('--question-type', type=str, default='mcq',
choices=['mcq', 'short'], dest='question_type',
help='mcq (default): MCQ A/B format with letter answers; '
'short: original "Answer with only one word." format.')
args = parser.parse_args()
# ── Per-model-type log file ───────────────────────────────────────────────
log_path = _setup_file_logging(args.model_type)
logger.info(f"Logging to: {log_path}")
# ── Validate: merge-only types require --merge ────────────────────────────
if args.model_type in MERGE_ONLY_CONFIGS and not args.merge:
parser.error(
f"'{args.model_type}' is a merge-only type. Add --merge to run it.\n"
f" Example: python swap_analysis.py --model_type {args.model_type} --merge"
)
# ── Default scales ────────────────────────────────────────────────────────
if args.scales is None:
if args.model_type in MERGE_ONLY_CONFIGS:
args.scales = MERGE_ONLY_CONFIGS[args.model_type]['scale_order']
elif args.model_type in MODEL_CONFIGS_NEW:
args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys())
else:
args.scales = _LEGACY_DEFAULT_SCALES.get(
args.model_type, ['vanilla', '80k', '400k', '800k', '2m'])
# Legacy nvila: auto-include roborefer
if args.model_type == 'nvila' and 'roborefer' not in args.scales and not args.no_auto_roborefer:
args.scales.append('roborefer')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
# ── Merge mode ───────────────────────────────────────────────────────────
if args.merge:
logger.info("\n=== MERGE MODE ===")
if args.model_type in MODEL_CONFIGS_NEW or args.model_type in MERGE_ONLY_CONFIGS:
run_merge_extended(args)
else:
run_merge(args)
return
# ── Inference mode ────────────────────────────────────────────────────────
logger.info("\n=== Loading & Creating Swap Pairs ===")
swap_pairs = load_swap_pairs(args.data_path, args.seed,
filter_unknown=not args.no_filtering,
question_type=args.question_type)
quads = []
if not args.skip_cross_group:
try:
hf_cache = build_hf_bbox_cache()
quads = create_cross_group_quads(swap_pairs, hf_cache,
question_type=args.question_type)
except Exception as e:
logger.warning(f"Cross-group setup failed: {e}. Skipping.")
quads = []
# ── Resolve config for the chosen model_type ─────────────────────────────
if args.model_type in MODEL_CONFIGS_NEW:
model_configs = MODEL_CONFIGS_NEW[args.model_type]
else:
model_configs = MODEL_CONFIGS[args.model_type]
for scale in args.scales:
if scale not in model_configs:
logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.")
continue
# Validate model path exists (skip HF IDs that start with org/ prefix)
if args.model_type in MODEL_CONFIGS_NEW:
_, raw_path = model_configs[scale]
else:
raw_path = model_configs[scale]
if not os.path.isabs(raw_path) and not raw_path.startswith(('Qwen/', 'allenai/')):
if not os.path.exists(raw_path):
logger.warning(f"Model path not found: {raw_path} (scale='{scale}'), skipping.")
continue
try:
process_scale(args, scale, swap_pairs, quads)
except Exception as e:
logger.error(f"Failed {args.model_type} - {scale}: {e}")
import traceback
traceback.print_exc()
continue
logger.info(f"\n{'='*60}")
logger.info("=== All scales complete ===")
logger.info(f"Results: {os.path.join(args.output_dir, args.model_type)}")
logger.info(f"{'='*60}")
if __name__ == '__main__':
main()