| |
| """ |
DEPRECATED — This file has been merged into swap_analysis.py.

All functionality (Molmo2Extractor, Qwen3VLExtractor, merge-only types,
per-model-type logging, etc.) is now available directly via:

    python swap_analysis.py --model_type <type>

See swap_analysis.py --help for all supported model types.
This file is kept for reference only and will be removed in a future cleanup.

Original docstring preserved below:
------------------------------------
Swap Analysis — New Models Extension

Adds Molmo2-8B, Qwen3-VL-32B-Instruct, and Qwen3-VL-235B-A22B-Instruct
to the swap analysis pipeline.
Results are saved under new model_type directories, never overwriting existing results.

Runnable model types (actually run inference + save per-scale data)
---------------------------------------------------------------------
molmo_big  : Molmo2-8B only          → saves to results/molmo_big/
qwen_big   : Qwen3-VL-32B only       → saves to results/qwen_big/
qwen_super : Qwen3-VL-235B-A22B only → saves to results/qwen_super/
big_trio   : Molmo2-8B + RoboRefer + Qwen3-VL-32B → saves to results/big_trio/

Merge-only model types (load existing data, output cross-scale plots)
-----------------------------------------------------------------------
molmo_all : results/molmo/ (vanilla–2m) + results/molmo_big/ (molmo2)
            → plots saved to results/molmo_all/
qwen_all  : results/qwen/ (vanilla–2m) + results/qwen_big/ (qwen3_32b)
            → plots saved to results/qwen_all/
(big_trio also uses --merge like other types)

Logging
-------
Each run writes its own log to: logs/{model_type}.log (appended)
alongside the usual stderr output.

Environment notes
-----------------
molmo_big, qwen_big, qwen_super, big_trio (molmo2+qwen3_32b) → qwen3 conda env
big_trio roborefer scale only                                → vila conda env

Usage examples
--------------
# Step 1 — run new models (qwen3 env)
conda run -n qwen3 python swap_analysis_new_models.py --model_type molmo_big
conda run -n qwen3 python swap_analysis_new_models.py --model_type qwen_big
conda run -n qwen3 python swap_analysis_new_models.py --model_type qwen_super

# Step 2 — merge new results with existing molmo/qwen results (qwen3 env)
conda run -n qwen3 python swap_analysis_new_models.py --model_type molmo_all --merge
conda run -n qwen3 python swap_analysis_new_models.py --model_type qwen_all --merge

# big_trio (multi-env): run per-env, then merge
conda run -n qwen3 python swap_analysis_new_models.py --model_type big_trio --scales molmo2 qwen3_32b
conda run -n vila python swap_analysis_new_models.py --model_type big_trio --scales roborefer
conda run -n qwen3 python swap_analysis_new_models.py --model_type big_trio --merge

# Re-run a single scale
conda run -n qwen3 python swap_analysis_new_models.py --model_type molmo_big --scales molmo2
| """ |
|
|
| import os |
| import sys |
| import json |
| import argparse |
| import logging |
| import random |
|
|
| import torch |
| import numpy as np |
| import pandas as pd |
|
|
| |
|
|
# Make sibling modules (notably swap_analysis.py) importable even when this
# script is launched from a different working directory.
_HERE = os.path.dirname(os.path.abspath(__file__))
if _HERE not in sys.path:
    sys.path.insert(0, _HERE)
|
|
| import swap_analysis as _sa |
| from swap_analysis import ( |
| |
| BaseHiddenStateExtractor, |
| MolmoExtractor, |
| NVILAExtractor, |
| RoboReferExtractor, |
| Qwen25VLExtractor, |
| |
| load_swap_pairs, |
| build_hf_bbox_cache, |
| create_cross_group_quads, |
| |
| extract_swap_features, |
| extract_cross_group_features, |
| |
| compute_delta_consistency, |
| compute_delta_similarity_matrix, |
| compute_cross_group_alignment, |
| compute_prediction_stats, |
| check_category_validity, |
| filter_both_correct, |
| get_representative_layers, |
| |
| save_scale_results, |
| save_vectors_npz, |
| load_scale_consistency, |
| load_within_cat_consistency, |
| load_scale_alignment, |
| load_delta_heatmaps, |
| |
| plot_within_cat_consistency_trajectory, |
| plot_sign_corrected_consistency_trajectory, |
| plot_cross_group_alignment_trajectory, |
| plot_delta_heatmap, |
| plot_pca_embeddings, |
| plot_pca_3d, |
| plot_pred_stats_bars, |
| plot_pred_stats_trajectory, |
| |
| plot_cross_scale_consistency, |
| plot_cross_scale_within_cat_consistency, |
| plot_cross_scale_alignment, |
| plot_delta_trajectory, |
| plot_summary_barplot, |
| |
| run_accuracy_charts, |
| run_unify_ylim, |
| |
| GROUP_ORDER, |
| ) |
|
|
# Console (stderr) logging; a per-model-type file handler is added later by
# _setup_file_logging() from main().
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
|
|
|
|
def _setup_file_logging(model_type: str) -> str:
    """Add a per-model-type FileHandler so each run gets its own log file.

    Log is written to <script_dir>/logs/{model_type}.log (append mode).
    Idempotent: if a FileHandler for the same log file is already attached to
    the root logger (e.g. this function is called twice in one process), no
    duplicate handler is added, so log lines are not written twice.

    Args:
        model_type: name used for the log file ({model_type}.log).

    Returns:
        The resolved log file path.
    """
    log_dir = os.path.join(_HERE, 'logs')
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, f'{model_type}.log')

    root_logger = logging.getLogger()

    # Guard against duplicate handlers on repeated calls: FileHandler stores
    # its target as an absolute path in .baseFilename.
    abs_log_path = os.path.abspath(log_path)
    for handler in root_logger.handlers:
        if isinstance(handler, logging.FileHandler) and \
                getattr(handler, 'baseFilename', None) == abs_log_path:
            return log_path

    fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
    fh.setLevel(logging.INFO)
    fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    root_logger.addHandler(fh)

    return log_path
|
|
| |
| |
| |
|
|
| |
# Root of the shared local HuggingFace hub cache scanned by resolve_local_path().
HF_HUB_DIR = '/data/shared/Qwen/mydisk/huggingface/hub'
|
|
|
|
def resolve_local_path(model_path: str) -> str:
    """
    Resolve a HuggingFace model ID (e.g. 'Qwen/Qwen3-VL-32B-Instruct') to its
    local snapshot path under HF_HUB_DIR, if the model has been cached there.

    - If model_path is already an absolute path, return it unchanged.
    - If the model is found in the local cache, return the most recently
      modified snapshot directory. (Snapshot directory names are commit
      hashes, so the previous lexicographic sort picked an arbitrary
      revision; mtime is the best local proxy for "latest".)
    - If not found locally, log a warning and return the original HF ID
      (transformers will then attempt to download from the Hub).
    """
    if os.path.isabs(model_path):
        return model_path

    # Hub cache layout: models--{org}--{name}/snapshots/{commit_hash}/
    cache_name = 'models--' + model_path.replace('/', '--')
    snapshots_dir = os.path.join(HF_HUB_DIR, cache_name, 'snapshots')

    if os.path.isdir(snapshots_dir):
        # Only directories are valid snapshots; ignore stray files.
        snapshot_dirs = [e.path for e in os.scandir(snapshots_dir) if e.is_dir()]
        if snapshot_dirs:
            local_path = max(snapshot_dirs, key=os.path.getmtime)
            logger.info(f"Local cache found: {model_path} -> {local_path}")
            return local_path

    logger.warning(
        f"Model not found in local cache: '{model_path}'\n"
        f"  Expected at: {snapshots_dir}\n"
        f"  Will fall back to online HuggingFace Hub download.\n"
        f"  To cache locally first, run:\n"
        f"    python -c \"from huggingface_hub import snapshot_download; "
        f"snapshot_download('{model_path}', cache_dir='{HF_HUB_DIR}')\""
    )
    return model_path
|
|
|
|
| |
| |
| |
|
|
| |
# Register plot colors for the new scales in swap_analysis's shared palette so
# merged cross-scale plots color them consistently with the existing scales;
# keep a local alias to the (now extended) shared dict.
_sa.SCALE_COLORS.update({
    'molmo2': '#17becf',
    'qwen3_32b': '#bcbd22',
    'qwen3_235b': '#d62728',
})
SCALE_COLORS = _sa.SCALE_COLORS

# Runnable model types.
# Maps model_type -> {scale: (extractor class name, model path or HF hub ID)}.
# HF IDs are resolved to local snapshot paths by resolve_local_path() inside
# get_extractor_new(); absolute paths are used as-is.
MODEL_CONFIGS_NEW = {
    'molmo_big': {
        'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'),
    },
    'qwen_big': {
        'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'),
    },
    'qwen_super': {
        # Largest variant; loaded with device_map='auto' (see Qwen3VLExtractor).
        'qwen3_235b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-235B-A22B-Instruct'),
    },
    'big_trio': {
        'molmo2': ('Molmo2Extractor', 'allenai/Molmo2-8B'),
        'roborefer': ('RoboReferExtractor', '/data/shared/Qwen/mydisk/RoboRefer_model'),
        'qwen3_32b': ('Qwen3VLExtractor', 'Qwen/Qwen3-VL-32B-Instruct'),
    },
}

# Merge-only model types: run no inference; combine previously saved per-scale
# results from several source result directories into one cross-scale plot set.
#   scale_order   : order in which scales appear in merged plots
#   scale_sources : scale -> results/ subdirectory holding that scale's data
#   required_dirs : source subdirectories that must exist before merging
MERGE_ONLY_CONFIGS = {
    'molmo_all': {
        'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'molmo2'],
        'scale_sources': {
            'vanilla': 'molmo',
            '80k': 'molmo',
            '400k': 'molmo',
            '800k': 'molmo',
            '2m': 'molmo',
            'molmo2': 'molmo_big',
        },
        'required_dirs': ['molmo', 'molmo_big'],
    },
    'qwen_all': {
        'scale_order': ['vanilla', '80k', '400k', '800k', '2m', 'qwen3_32b'],
        'scale_sources': {
            'vanilla': 'qwen',
            '80k': 'qwen',
            '400k': 'qwen',
            '800k': 'qwen',
            '2m': 'qwen',
            'qwen3_32b': 'qwen_big',
        },
        'required_dirs': ['qwen', 'qwen_big'],
    },
}

# Canonical scale ordering for the runnable model types (used by --merge).
SCALE_ORDERS = {
    'molmo_big': ['molmo2'],
    'qwen_big': ['qwen3_32b'],
    'qwen_super': ['qwen3_235b'],
    'big_trio': ['molmo2', 'roborefer', 'qwen3_32b'],
}

# Every accepted --model_type value (runnable + merge-only).
ALL_MODEL_TYPES = list(MODEL_CONFIGS_NEW.keys()) + list(MERGE_ONLY_CONFIGS.keys())
|
|
|
|
| |
| |
| |
|
|
class Molmo2Extractor(BaseHiddenStateExtractor):
    """Hidden-state extractor for allenai/Molmo2-8B.

    Loads the model via AutoModelForImageTextToText and feeds it the
    messages-dict chat format. The LLM backbone is Qwen3; because the exact
    attribute path to the transformer stack depends on the custom model
    architecture, the layer list is discovered dynamically at load time.

    Compared to MolmoExtractor:
      - processor.apply_chat_template instead of processor.process
      - model.generate(**inputs) instead of generate_from_batch
      - device_map='auto' so the model can span multiple GPUs
    """

    def _load_model(self):
        """Load processor + model, then locate the transformer layer stack."""
        from transformers import AutoProcessor, AutoModelForImageTextToText
        self.processor = AutoProcessor.from_pretrained(
            self.model_path, trust_remote_code=True
        )
        self.model = AutoModelForImageTextToText.from_pretrained(
            self.model_path,
            trust_remote_code=True,
            torch_dtype='auto',
            device_map='auto',
        ).eval()
        self._find_llm_layers()
        logger.info(f"Loaded Molmo2 from {self.model_path}")

    def _find_llm_layers(self):
        """Dynamically locate the LLM transformer layer list."""
        # First try the attribute paths seen in known checkpoints.
        for attrs in (
            ('model', 'layers'),
            ('language_model', 'model', 'layers'),
            ('model', 'model', 'layers'),
        ):
            node = self.model
            for attr in attrs:
                node = getattr(node, attr, None)
                if node is None:
                    break
            if node is not None and hasattr(node, '__len__') and len(node) > 0:
                self.llm_layers = node
                logger.info(f"Molmo2: layers at '{'.'.join(attrs)}', count={len(node)}")
                return

        # Fallback: scan every submodule and keep the longest '*.layers' list.
        found, found_name, found_len = None, '', 0
        for mod_name, mod in self.model.named_modules():
            if mod_name.endswith('.layers') and hasattr(mod, '__len__') and len(mod) > found_len:
                found, found_name, found_len = mod, mod_name, len(mod)
        if found is not None:
            self.llm_layers = found
            logger.info(f"Molmo2: layers via scan at '{found_name}', count={found_len}")
            return

        raise ValueError("Could not find transformer layers in Molmo2 model")

    def _get_num_layers(self) -> int:
        """Number of transformer layers in the discovered stack."""
        return len(self.llm_layers)

    def _get_layer_module(self, layer_idx: int):
        """Return the transformer block at layer_idx."""
        return self.llm_layers[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one image+question pair through the model.

        Returns (copy of self.hidden_states, decoded answer string);
        self.hidden_states is reset here and filled during generation by
        machinery in the base class (not visible in this file).
        """
        self.hidden_states = {}
        chat = [{
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": question},
            ],
        }]
        batch = self.processor.apply_chat_template(
            chat,
            tokenize=True,
            add_generation_prompt=True,
            return_tensors="pt",
            return_dict=True,
        )
        batch = {key: val.to(self.model.device) for key, val in batch.items()}
        with torch.inference_mode():
            out_ids = self.model.generate(**batch, max_new_tokens=20, do_sample=False)
        prompt_len = batch['input_ids'].shape[1]
        answer = self.processor.tokenizer.decode(
            out_ids[0, prompt_len:], skip_special_tokens=True
        ).strip()
        return self.hidden_states.copy(), answer
|
|
|
|
| |
| |
| |
|
|
class Qwen3VLExtractor(BaseHiddenStateExtractor):
    """Hidden-state extractor for Qwen/Qwen3-VL-32B-Instruct (and other Qwen3-VL variants).

    Key differences from Qwen25VLExtractor:
      - AutoModelForImageTextToText + trust_remote_code=True
      - process_vision_info requires image_patch_size=16
      - processor call requires do_resize=False
      - 32x32 px patches, hence different min/max_pixels budgets

    Qwen3-VL wraps the LLM under model.model.language_model (not
    model.model.layers directly like Qwen2.5-VL), so the layer path is
    discovered dynamically; the layer count (64 for the 32B / Qwen3-32B
    backbone) is auto-detected.
    """

    # Vision-tower pixel budget, expressed in 32x32 px patch units.
    MIN_PIXELS = 1280 * 32 * 32
    MAX_PIXELS = 16384 * 32 * 32

    def _load_model(self):
        """Load processor + model, then locate the transformer layer stack."""
        from transformers import AutoProcessor, AutoModelForImageTextToText
        self.processor = AutoProcessor.from_pretrained(
            self.model_path, trust_remote_code=True
        )
        self.model = AutoModelForImageTextToText.from_pretrained(
            self.model_path,
            trust_remote_code=True,
            torch_dtype='auto',
            device_map='auto',
            attn_implementation='flash_attention_2',
        ).eval()
        self._find_llm_layers()
        logger.info(f"Loaded Qwen3-VL from {self.model_path}")

    def _find_llm_layers(self):
        """Dynamically locate the LLM transformer layer list."""
        # First try the attribute paths seen in known checkpoints.
        for attrs in (
            ('model', 'language_model', 'model', 'layers'),
            ('language_model', 'model', 'layers'),
            ('model', 'model', 'layers'),
            ('model', 'layers'),
        ):
            node = self.model
            for attr in attrs:
                node = getattr(node, attr, None)
                if node is None:
                    break
            if node is not None and hasattr(node, '__len__') and len(node) > 0:
                self.llm_layers = node
                logger.info(f"Qwen3-VL: layers at '{'.'.join(attrs)}', count={len(node)}")
                return

        # Fallback: scan every submodule and keep the longest '*.layers' list.
        found, found_name, found_len = None, '', 0
        for mod_name, mod in self.model.named_modules():
            if mod_name.endswith('.layers') and hasattr(mod, '__len__') and len(mod) > found_len:
                found, found_name, found_len = mod, mod_name, len(mod)
        if found is not None:
            self.llm_layers = found
            logger.info(f"Qwen3-VL: layers via scan at '{found_name}', count={found_len}")
            return

        raise ValueError("Could not find transformer layers in Qwen3-VL model")

    def _get_num_layers(self) -> int:
        """Number of transformer layers in the discovered stack."""
        return len(self.llm_layers)

    def _get_layer_module(self, layer_idx: int):
        """Return the transformer block at layer_idx."""
        return self.llm_layers[layer_idx]

    def extract_and_predict(self, image, question):
        """Run one image+question pair through the model.

        Returns (copy of self.hidden_states, decoded answer string);
        self.hidden_states is reset here and filled during generation by
        machinery in the base class (not visible in this file).
        """
        self.hidden_states = {}
        messages = [{"role": "user", "content": [
            {
                "type": "image", "image": image,
                "min_pixels": self.MIN_PIXELS, "max_pixels": self.MAX_PIXELS,
            },
            {"type": "text", "text": question},
        ]}]
        prompt = self.processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        from qwen_vl_utils import process_vision_info
        # Qwen3-VL needs image_patch_size=16 here (Qwen2.5-VL did not).
        image_inputs, video_inputs, _ = process_vision_info(
            messages,
            image_patch_size=16,
            return_video_kwargs=True,
            return_video_metadata=True,
        )
        model_inputs = self.processor(
            text=prompt,
            images=image_inputs,
            videos=video_inputs,
            do_resize=False,
            return_tensors="pt",
        ).to(self.model.device)
        with torch.no_grad():
            out_ids = self.model.generate(**model_inputs, max_new_tokens=20, do_sample=False)
        prompt_len = model_inputs['input_ids'].shape[1]
        answer = self.processor.tokenizer.decode(
            out_ids[0, prompt_len:], skip_special_tokens=True
        ).strip()
        return self.hidden_states.copy(), answer
|
|
|
|
| |
| |
| |
|
|
# Extractor registry: maps the class-name strings used in the model configs
# (both swap_analysis's and MODEL_CONFIGS_NEW's) to the actual classes.
EXTRACTOR_CLASSES = {
    'MolmoExtractor': MolmoExtractor,
    'NVILAExtractor': NVILAExtractor,
    'RoboReferExtractor': RoboReferExtractor,
    'Qwen25VLExtractor': Qwen25VLExtractor,
    'Molmo2Extractor': Molmo2Extractor,
    'Qwen3VLExtractor': Qwen3VLExtractor,
}
|
|
|
|
def get_extractor_new(model_type: str, scale: str, device: str = 'cuda'):
    """Instantiate the extractor configured for (model_type, scale).

    HF model IDs are automatically resolved to local snapshot paths when cached.
    """
    cls_name, configured_path = MODEL_CONFIGS_NEW[model_type][scale]
    resolved_path = resolve_local_path(configured_path)
    extractor_cls = EXTRACTOR_CLASSES[cls_name]
    logger.info(f"Creating {cls_name} for scale='{scale}' from {resolved_path}")
    return extractor_cls(resolved_path, device=device)
|
|
|
|
| |
| |
| |
|
|
def process_scale_new(args, scale, swap_pairs, quads):
    """Run the full swap-analysis pipeline for one scale of a runnable model_type.

    Loads the extractor, extracts features (Phases A/B), computes
    consistency / alignment / prediction statistics (Phase C), saves JSON +
    NPZ artifacts under {args.output_dir}/{args.model_type}/ (Phase D) and
    renders per-scale plots (Phase E). The model is released via
    extractor.cleanup() at the end.

    Args:
        args: parsed CLI namespace (model_type, output_dir, device,
            max_samples_per_category, ...).
        scale: scale key within MODEL_CONFIGS_NEW[args.model_type].
        swap_pairs: swap pairs from load_swap_pairs().
        quads: cross-group quads; Phase B is skipped when empty.
    """
    cls_name, model_path = MODEL_CONFIGS_NEW[args.model_type][scale]

    logger.info(f"\n{'='*60}")
    logger.info(f"Processing {args.model_type} / {scale} [{cls_name}]")
    logger.info(f"Model path: {model_path}")
    logger.info(f"{'='*60}")

    extractor = get_extractor_new(args.model_type, scale, device=args.device)
    target_layers = extractor.target_layers

    output_dir = os.path.join(args.output_dir, args.model_type)
    plots_dir = os.path.join(output_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    logger.info("\n--- Phase A: Extracting swap pair features ---")
    swap_records = extract_swap_features(
        extractor, swap_pairs,
        max_samples_per_category=args.max_samples_per_category,
    )

    logger.info("\n--- Phase B: Extracting cross-group features ---")
    quad_records = extract_cross_group_features(extractor, quads) if quads else []

    logger.info("\n--- Phase C: Analysis ---")
    category_validity = check_category_validity(swap_records, scale)
    unreliable_cats = [c for c, v in category_validity.items() if not v['reliable']]
    if unreliable_cats:
        logger.warning(f"  Unreliable categories: {unreliable_cats}")

    # Consistency over all pairs vs. only the pairs answered correctly both ways.
    within_cat_all, sign_corrected_all = compute_delta_consistency(swap_records, target_layers)

    both_correct_records = filter_both_correct(swap_records)
    logger.info(f"  Both-correct pairs: {len(both_correct_records)}/{len(swap_records)}")
    within_cat_bc, sign_corrected_bc = compute_delta_consistency(both_correct_records, target_layers)

    cross_alignment = compute_cross_group_alignment(quad_records, target_layers)
    pred_stats = compute_prediction_stats(swap_records, scale)

    # Per-layer delta similarity matrices for both conditions.
    delta_heatmaps_all, delta_heatmaps_bc = {}, {}
    for layer in target_layers:
        delta_heatmaps_all[layer] = compute_delta_similarity_matrix(swap_records, layer)
        if both_correct_records:
            delta_heatmaps_bc[layer] = compute_delta_similarity_matrix(both_correct_records, layer)

    # Log headline numbers at the deepest target layer.
    max_layer = max(target_layers)
    for group in GROUP_ORDER:
        key = (group, max_layer)
        if key in sign_corrected_all:
            logger.info(f"  Sign-corrected [{group}, L{max_layer}]: "
                        f"{sign_corrected_all[key]['mean']:.4f} Β± "
                        f"{sign_corrected_all[key]['std']:.4f}")
    if max_layer in cross_alignment:
        ca = cross_alignment[max_layer]
        logger.info(f"  Cross-group alignment L{max_layer}: "
                    f"{ca['per_sample_mean']:.4f} (perm={ca['permutation_mean']:.4f})")
    logger.info(f"  Accuracy orig={pred_stats['overall_acc_orig']:.1%}, "
                f"swap={pred_stats['overall_acc_swap']:.1%}, "
                f"both={pred_stats['overall_acc_both']:.1%}")

    logger.info("\n--- Phase D: Saving results ---")
    save_vectors_npz(scale, swap_records, quad_records, target_layers, output_dir)
    save_scale_results(
        scale, swap_records, quad_records,
        within_cat_all, sign_corrected_all,
        cross_alignment, pred_stats, target_layers,
        category_validity, delta_heatmaps_all,
        output_dir, both_correct_tag='all_pairs',
    )
    if both_correct_records:
        save_scale_results(
            scale, both_correct_records, quad_records,
            within_cat_bc, sign_corrected_bc,
            cross_alignment, pred_stats, target_layers,
            category_validity, delta_heatmaps_bc,
            output_dir, both_correct_tag='both_correct',
        )

    logger.info("\n--- Phase E: Per-scale plots ---")
    for condition, wc_data, sc_data, dh_data in [
        ('all', within_cat_all, sign_corrected_all, delta_heatmaps_all),
        ('both_correct', within_cat_bc, sign_corrected_bc, delta_heatmaps_bc),
        ('all_with_validity', within_cat_all, sign_corrected_all, delta_heatmaps_all),
    ]:
        if condition == 'both_correct' and not both_correct_records:
            continue

        cond_dir = os.path.join(plots_dir, condition)
        os.makedirs(cond_dir, exist_ok=True)
        # Only the 'all_with_validity' variant marks unreliable categories.
        cond_unreliable = unreliable_cats if condition == 'all_with_validity' else []

        plot_within_cat_consistency_trajectory(
            wc_data, scale, args.model_type,
            os.path.join(cond_dir, f'within_cat_consistency_{scale}.png'))

        plot_sign_corrected_consistency_trajectory(
            sc_data, scale, args.model_type,
            os.path.join(cond_dir, f'sign_corrected_consistency_{scale}.png'))

        if cross_alignment:
            plot_cross_group_alignment_trajectory(
                cross_alignment, scale, args.model_type,
                os.path.join(cond_dir, f'cross_alignment_{scale}.png'))

        rep_layers = get_representative_layers(target_layers)
        for layer in rep_layers:
            df = dh_data.get(layer)
            if df is not None:
                plot_delta_heatmap(
                    df,
                    f'{args.model_type.upper()} ({scale}) - Delta Heatmap L{layer} ({condition})',
                    os.path.join(cond_dir, f'delta_heatmap_{scale}_L{layer}.png'),
                    unreliable_cats=cond_unreliable,
                )

    # PCA plots are generated from the NPZ saved in Phase D.
    npz_path = os.path.join(output_dir, 'npz', f'vectors_{scale}.npz')
    if os.path.exists(npz_path):
        pca_dir = os.path.join(plots_dir, 'all', 'pca')
        os.makedirs(pca_dir, exist_ok=True)
        plot_pca_embeddings(npz_path, scale, args.model_type, pca_dir)

        pca_3d_dir = os.path.join(plots_dir, 'all', 'pca_3d')
        os.makedirs(pca_3d_dir, exist_ok=True)
        plot_pca_3d(npz_path, scale, args.model_type, pca_3d_dir)

    if pred_stats:
        pred_dir = os.path.join(plots_dir, 'all')
        os.makedirs(pred_dir, exist_ok=True)
        plot_pred_stats_bars(
            [pred_stats], args.model_type,
            os.path.join(pred_dir, f'pred_stats_bars_{scale}.png'))

    # Release the model before the next scale loads its own.
    extractor.cleanup()
    logger.info(f"\n=== Scale '{scale}' complete ===")
|
|
|
|
| |
| |
| |
|
|
def _check_merge_only_sources(output_dir: str, model_type: str) -> bool:
    """
    Verify that every source directory required by a merge-only model_type
    exists and contains saved per-scale data.

    Args:
        output_dir: root results directory.
        model_type: key into MERGE_ONLY_CONFIGS.

    Returns:
        True if all sources look healthy, False (with warnings logged) if not.
    """
    mc = MERGE_ONLY_CONFIGS[model_type]
    ok = True
    for req_dir in mc['required_dirs']:
        src_path = os.path.join(output_dir, req_dir)
        json_dir = os.path.join(src_path, 'json')
        if not os.path.isdir(src_path):
            # Point the user at the script that produces this source dir.
            # NOTE: the original code put this conditional *inside* the
            # logger.warning() call; implicit f-string concatenation bound the
            # "directory not found" line to the if-branch only, so that line
            # was silently dropped whenever the source came from
            # swap_analysis.py. Build the message explicitly instead.
            script = ('swap_analysis_new_models.py'
                      if req_dir in MODEL_CONFIGS_NEW else 'swap_analysis.py')
            logger.warning(
                f"[{model_type}] Required source directory not found: {src_path}\n"
                f"  → Run inference first: python {script} --model_type {req_dir}"
            )
            ok = False
        elif not os.path.isdir(json_dir) or not any(
            f.startswith('pred_stats_') for f in os.listdir(json_dir)
        ):
            logger.warning(
                f"[{model_type}] Source directory exists but has no pred_stats JSON: {json_dir}\n"
                f"  → Inference may not have completed for '{req_dir}'."
            )
            ok = False
        else:
            # Report which scales are already saved in this source directory.
            scales_found = [
                f.replace('pred_stats_', '').replace('.json', '')
                for f in os.listdir(json_dir)
                if f.startswith('pred_stats_')
            ]
            logger.info(f"  [{req_dir}] found scales: {scales_found}")
    return ok
|
|
|
|
def _load_scale_data_multi(output_dir: str, model_type: str, scale: str, scale_sources: dict):
    """
    Load per-scale data for a single scale, looking in the correct source directory.

    Returns (sc, sc_bc, wc, wc_bc, align, pred_stats_dict, cat_validity_dict, dh, dh_bc)
    where any unavailable item is None / {}.
    """
    src_dir = os.path.join(output_dir, scale_sources.get(scale, model_type))

    def _optional_json(stem):
        # Load an optional JSON artifact for this scale, or None when absent.
        path = os.path.join(src_dir, 'json', f'{stem}_{scale}.json')
        if not os.path.exists(path):
            return None
        with open(path) as fp:
            return json.load(fp)

    return (
        load_scale_consistency(src_dir, scale, 'all_pairs'),
        load_scale_consistency(src_dir, scale, 'both_correct'),
        load_within_cat_consistency(src_dir, scale, 'all_pairs'),
        load_within_cat_consistency(src_dir, scale, 'both_correct'),
        load_scale_alignment(src_dir, scale),
        _optional_json('pred_stats'),
        _optional_json('category_validity'),
        load_delta_heatmaps(src_dir, scale, 'all_pairs'),
        load_delta_heatmaps(src_dir, scale, 'both_correct'),
    )
|
|
|
|
| |
| |
| |
|
|
def run_merge_new(args):
    """
    Generate cross-scale plots for a model_type.

    - For runnable types (molmo_big, qwen_big, big_trio):
      loads all data from results/{model_type}/ and saves plots there.
    - For merge-only types (molmo_all, qwen_all):
      loads per-scale data from the respective source directories
      (e.g. results/molmo/ and results/molmo_big/),
      saves all cross-scale plots to results/{model_type}/.

    Both modes share _load_scale_data_multi(): for runnable types an empty
    scale_sources mapping makes it fall back to results/{model_type}/ for
    every scale (previously this branch duplicated the loading code inline).
    """
    is_merge_only = args.model_type in MERGE_ONLY_CONFIGS

    if is_merge_only:
        mc = MERGE_ONLY_CONFIGS[args.model_type]
        scale_order = mc['scale_order']
        scale_sources = mc['scale_sources']

        logger.info(f"\n=== MERGE-ONLY mode: {args.model_type} ===")
        logger.info("Checking required source directories...")
        sources_ok = _check_merge_only_sources(args.output_dir, args.model_type)
        if not sources_ok:
            logger.warning(
                f"\n[WARNING] One or more source directories are missing or incomplete.\n"
                f"  Cross-scale plots for '{args.model_type}' may be partial.\n"
                f"  Run the missing model types first (see warnings above), then retry merge."
            )
    else:
        scale_order = SCALE_ORDERS.get(args.model_type, list(MODEL_CONFIGS_NEW[args.model_type]))
        # Empty mapping → _load_scale_data_multi resolves every scale to
        # results/{model_type}/ via its .get(scale, model_type) fallback.
        scale_sources = {}

    # Keep only requested scales, preserving the canonical order.
    available_scales = [s for s in scale_order if s in args.scales]
    logger.info(f"Merging scales (in order): {available_scales}")

    out_dir = os.path.join(args.output_dir, args.model_type)
    plots_dir = os.path.join(out_dir, 'plots')
    os.makedirs(plots_dir, exist_ok=True)

    # Accumulators keyed by scale (pred stats kept as an ordered list).
    all_sign_corrected = {}
    all_sign_corrected_bc = {}
    all_within_cat = {}
    all_within_cat_bc = {}
    all_alignment = {}
    all_pred_stats = []
    all_cat_validity = {}
    all_delta_heatmaps = {}
    all_delta_heatmaps_bc = {}

    for scale in available_scales:
        (sc, sc_bc, wc, wc_bc, align,
         pred_stat, cat_validity, dh, dh_bc) = _load_scale_data_multi(
            args.output_dir, args.model_type, scale, scale_sources)

        if sc:
            all_sign_corrected[scale] = sc
        if sc_bc:
            all_sign_corrected_bc[scale] = sc_bc
        if wc:
            all_within_cat[scale] = wc
        if wc_bc:
            all_within_cat_bc[scale] = wc_bc
        if align:
            all_alignment[scale] = align
        if pred_stat is not None:
            all_pred_stats.append(pred_stat)
        if cat_validity is not None:
            all_cat_validity[scale] = cat_validity
        if dh:
            all_delta_heatmaps[scale] = dh
        if dh_bc:
            all_delta_heatmaps_bc[scale] = dh_bc

        logger.info(f"  Loaded data for '{scale}'"
                    + (f" (from '{scale_sources[scale]}')" if is_merge_only else ""))

    # Cross-scale plots for the 'all pairs' and 'both correct' conditions.
    for condition, sc_data, wc_data, dh_data, tag_label in [
        ('all', all_sign_corrected, all_within_cat, all_delta_heatmaps, 'all pairs'),
        ('both_correct', all_sign_corrected_bc, all_within_cat_bc, all_delta_heatmaps_bc, 'both-correct'),
    ]:
        cond_dir = os.path.join(plots_dir, condition)
        os.makedirs(cond_dir, exist_ok=True)

        if len(sc_data) > 1:
            plot_cross_scale_consistency(
                sc_data, args.model_type,
                os.path.join(cond_dir, 'cross_scale_sign_corrected.png'),
                title_prefix=f'Sign-Corrected ({tag_label})')

        if len(wc_data) > 1:
            plot_cross_scale_within_cat_consistency(
                wc_data, args.model_type,
                os.path.join(cond_dir, 'cross_scale_within_cat.png'))

        if dh_data:
            plot_delta_trajectory(
                dh_data, args.model_type,
                os.path.join(cond_dir, 'delta_trajectory.png'))

    # Condition-independent plots go under plots/all/.
    all_cond_dir = os.path.join(plots_dir, 'all')
    os.makedirs(all_cond_dir, exist_ok=True)

    if len(all_alignment) > 1:
        plot_cross_scale_alignment(
            all_alignment, args.model_type,
            os.path.join(all_cond_dir, 'cross_scale_alignment.png'))

    if all_pred_stats:
        plot_pred_stats_bars(
            all_pred_stats, args.model_type,
            os.path.join(all_cond_dir, 'pred_stats_bars.png'))
        plot_pred_stats_trajectory(
            all_pred_stats, args.model_type,
            os.path.join(all_cond_dir, 'pred_stats_trajectory.png'))

    if all_sign_corrected:
        plot_summary_barplot(
            all_sign_corrected, all_alignment, args.model_type,
            os.path.join(all_cond_dir, 'summary_barplot.png'))

    # Per-scale summary CSV: prediction stats + deepest-layer alignment.
    summary_rows = []
    for scale in available_scales:
        ps = next((p for p in all_pred_stats if p.get('scale') == scale), None)
        if ps is None:
            continue
        row = dict(ps)
        if scale in all_alignment:
            max_layer = max(all_alignment[scale].keys())
            row['alignment_deepest'] = all_alignment[scale][max_layer]['per_sample_mean']
            row['alignment_perm'] = all_alignment[scale][max_layer]['permutation_mean']
        summary_rows.append(row)
    if summary_rows:
        csv_dir = os.path.join(out_dir, 'csv')
        os.makedirs(csv_dir, exist_ok=True)
        pd.DataFrame(summary_rows).to_csv(os.path.join(csv_dir, 'summary.csv'), index=False)

    if all_pred_stats:
        acc_dir = os.path.join(plots_dir, 'accuracy')
        logger.info("\n--- Accuracy Charts ---")
        run_accuracy_charts(all_pred_stats, all_cat_validity, args.model_type, acc_dir)

    # Y-limit unification needs the per-scale plots to live under out_dir,
    # which only holds for runnable types.
    if not is_merge_only:
        logger.info("\n--- Unifying Y-axis ---")
        run_unify_ylim(out_dir, plots_dir, args.model_type)
    else:
        logger.info("\n--- Skipping y-axis unification (per-scale data spans multiple source dirs) ---")

    logger.info(f"\n=== Merge Complete ===\nResults saved to: {out_dir}")
|
|
|
|
| |
| |
| |
|
|
def main():
    """CLI entry point.

    Parses arguments, attaches the per-model-type log file, seeds all RNGs,
    then either merges saved per-scale results (--merge) or runs inference
    for each requested scale of a runnable model_type.
    """
    parser = argparse.ArgumentParser(
        description='Swap Analysis β New Models (Molmo2 + Qwen3-VL)',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument('--data_path', type=str,
                        default='/data/shared/Qwen/EmbSpatial-Bench/EmbSpatial-Bench.tsv')
    parser.add_argument('--model_type', type=str, required=True,
                        choices=ALL_MODEL_TYPES,
                        help=(
                            'Runnable: molmo_big | qwen_big | qwen_super | big_trio\n'
                            'Merge-only (--merge required): molmo_all | qwen_all'
                        ))
    parser.add_argument('--scales', type=str, nargs='+', default=None,
                        help='Scales to process (default: all for the given model_type). '
                             'For merge-only types, controls which scales are included in the merge.')
    parser.add_argument('--output_dir', type=str,
                        default='/data/shared/Qwen/experiments/swap_analysis/results',
                        help='Root results directory.')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--merge', action='store_true',
                        help='Merge mode: generate cross-scale plots from saved per-scale data.')
    parser.add_argument('--skip-cross-group', action='store_true',
                        help='Skip cross-group quad extraction.')
    parser.add_argument('--max-samples-per-category', type=int, default=200,
                        dest='max_samples_per_category')

    args = parser.parse_args()

    # Per-model-type log file in addition to stderr.
    log_path = _setup_file_logging(args.model_type)
    logger.info(f"Logging to: {log_path}")

    # Merge-only types have no runnable config; refuse to run inference on them.
    if args.model_type in MERGE_ONLY_CONFIGS and not args.merge:
        parser.error(
            f"'{args.model_type}' is a merge-only type. Add --merge to run it.\n"
            f"  Example: python swap_analysis_new_models.py "
            f"--model_type {args.model_type} --merge"
        )

    # Default scales: everything defined for the model_type.
    if args.scales is None:
        if args.model_type in MERGE_ONLY_CONFIGS:
            args.scales = MERGE_ONLY_CONFIGS[args.model_type]['scale_order']
        else:
            args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys())

    # Seed all RNG sources for reproducible sampling.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    if args.merge:
        logger.info("\n=== MERGE MODE ===")
        run_merge_new(args)
        return

    logger.info("\n=== Loading & Creating Swap Pairs ===")
    swap_pairs = load_swap_pairs(args.data_path, args.seed)

    quads = []
    if not args.skip_cross_group:
        try:
            hf_cache = build_hf_bbox_cache()
            quads = create_cross_group_quads(swap_pairs, hf_cache)
        except Exception as e:
            # Cross-group analysis is optional; continue with swap pairs only.
            logger.warning(f"Cross-group setup failed: {e}. Skipping.")

    model_configs = MODEL_CONFIGS_NEW[args.model_type]
    for scale in args.scales:
        if scale not in model_configs:
            logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.")
            continue

        # Local paths must already exist; HF IDs ('Qwen/', 'allenai/') are
        # resolved (and possibly downloaded) later by resolve_local_path().
        _, model_path = model_configs[scale]
        if not model_path.startswith(('Qwen/', 'allenai/')) and not os.path.exists(model_path):
            logger.warning(f"Model path not found: {model_path} (scale='{scale}'), skipping.")
            continue

        try:
            process_scale_new(args, scale, swap_pairs, quads)
        except Exception as e:
            # One failed scale should not abort the remaining scales.
            logger.error(f"Failed {args.model_type} - {scale}: {e}")
            import traceback
            traceback.print_exc()
            continue

    logger.info(f"\n{'='*60}")
    logger.info("=== All scales complete ===")
    logger.info(f"Results: {os.path.join(args.output_dir, args.model_type)}")
    logger.info(f"{'='*60}")
|
|
|
|
# Script entry point.
if __name__ == '__main__':
    main()
|
|