"""
swap_analysis_synthetic.py – Swap analysis using synthetic 2-body rendered data.
Same experimental pipeline as swap_analysis.py, but uses locally-generated
synthetic images instead of EmbSpatial-Bench.
Data layout (per category folder):
/data/shared/Qwen/synthetic/2body/{above,below,left,right,close,far}/
    0000.png … 0199.png (512×512 rendered scenes)
vqa.json (200 entries; each has image path, answer, obj1, obj2)
vqa.json entry format:
{
"image": "output/2body/{category}/{XXXX}.png",
"question": "...",
"answer": "above" | "below" | "left" | "right" | "closer" | "farther",
"obj1": {"color": str, "shape": str, "bbox": [x_min, y_min, x_max, y_max]},
"obj2": {"color": str, "shape": str, "bbox": [x_min, y_min, x_max, y_max]}
}
Key differences from EmbSpatial-Bench:
- Images loaded from disk PNG files, not base64-in-TSV
- Object descriptions built from color+shape (e.g. "yellow cube")
- Cross-group quads computed directly from stored bounding boxes (no HF cache)
- MCQ questions with alternating A/B order (same as swap_analysis.py)
- "farther"/"closer" added as synonyms for "far"/"close" in answer matching
Usage:
# Single model, single scale
python swap_analysis_synthetic.py --model_type qwen --scales vanilla
python swap_analysis_synthetic.py --model_type qwen_super --scales qwen3_235b
# All scales for a model
python swap_analysis_synthetic.py --model_type qwen
# Merge mode (regenerate cross-scale plots from saved per-scale CSVs)
python swap_analysis_synthetic.py --model_type qwen --merge
# Skip cross-group quads
python swap_analysis_synthetic.py --model_type qwen --skip-cross-group
"""
import argparse
import base64
import json
import logging
import os
import random
import sys
from collections import defaultdict
from typing import List, Optional
import numpy as np
import torch
# ── Import pipeline from swap_analysis (sibling directory) ──────────────────
_HERE = os.path.dirname(os.path.abspath(__file__))
_SA_DIR = os.path.realpath(os.path.join(_HERE, '..', 'swap_analysis'))
if _SA_DIR not in sys.path:
sys.path.insert(0, _SA_DIR)
import swap_analysis as sa
from swap_analysis import (
CATEGORY_ORDER, OPPOSITE_MAP, GROUP_MAP,
MCQ_TEMPLATES, MCQ_LETTER,
SHORT_TEMPLATES, SHORT_OPPOSITE_MAP,
MODEL_CONFIGS, MODEL_CONFIGS_NEW, MERGE_ONLY_CONFIGS, ALL_MODEL_TYPES,
process_scale, run_merge, run_merge_extended,
logger,
)
# ── Patch SYNONYMS so "farther"/"closer" are accepted in answer matching ─────
# Synthetic vqa.json labels are "farther"/"closer" but our generated questions
# ask "far or close", so the model may answer with either form.
if 'farther' not in sa.SYNONYMS.get('far', []):
sa.SYNONYMS.setdefault('far', []).append('farther')
if 'closer' not in sa.SYNONYMS.get('close', []):
sa.SYNONYMS.setdefault('close', []).append('closer')
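# e.g. a model reply containing "farther" can now be matched back to the canonical 'far' label.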
# ── Constants ────────────────────────────────────────────────────────────────
SYNTHETIC_DATA_DIR = '/data/shared/Qwen/synthetic/2body'
COUNTER_DATA_DIR = '/data/shared/Qwen/synthetic/2body_bias'
DEFAULT_OUTPUT_DIR = os.path.join(_HERE, 'results')
DEFAULT_OUTPUT_DIR_COUNTER = os.path.join(_HERE, 'results_counter_only')
# Maps folder name → canonical category (keeps CATEGORY_ORDER / OPPOSITE_MAP intact)
FOLDER_TO_CATEGORY = {
'above': 'above',
'below': 'below',
'left': 'left',
'right': 'right',
'close': 'close',
'far': 'far',
}
# Counter-only: folders are prefixed with "counter_" inside 2body_bias/
COUNTER_FOLDER_TO_CATEGORY = {
'counter_above': 'above',
'counter_below': 'below',
'counter_left': 'left',
'counter_right': 'right',
'counter_close': 'close',
'counter_far': 'far',
}
# Consistent-only: folders are prefixed with "consistent_" inside 2body_bias/
CONSISTENT_FOLDER_TO_CATEGORY = {
'consistent_above': 'above',
'consistent_below': 'below',
'consistent_left': 'left',
'consistent_right': 'right',
'consistent_close': 'close',
'consistent_far': 'far',
}
# VD-only: vertical (above/below) + distance (far/close) categories only
VD_CATEGORIES = {'above', 'below', 'far', 'close'}
# Image height for vertical-ambiguity threshold in cross-group quads
SYNTHETIC_IMAGE_HEIGHT = 512
# ── Logging setup ────────────────────────────────────────────────────────────
def _setup_file_logging_synthetic(
model_type: str,
counter_only: bool = False,
    consistent_ratio: Optional[float] = None,
question_type: str = 'short_answer',
) -> str:
"""Write to logs[_counter_only|_consistent_ratio_X][_short]/{model_type}_synthetic.log."""
if consistent_ratio is not None:
ratio_str = f'{consistent_ratio:g}'
log_subdir = f'logs_consistent_ratio_{ratio_str}'
elif counter_only:
log_subdir = 'logs_counter_only'
else:
log_subdir = 'logs'
if question_type == 'short_answer':
log_subdir += '_short'
log_dir = os.path.join(_HERE, log_subdir)
os.makedirs(log_dir, exist_ok=True)
log_path = os.path.join(log_dir, f'{model_type}_synthetic.log')
fh = logging.FileHandler(log_path, mode='a', encoding='utf-8')
fh.setLevel(logging.INFO)
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logging.getLogger().addHandler(fh)
return log_path
# ── Data loading ─────────────────────────────────────────────────────────────
def _build_question_pair(group: str, obj1_desc: str, obj2_desc: str,
pair_index: int, question_type: str) -> tuple:
"""Return (orig_q, swap_q, mcq_map, orig_answer_key, swap_answer_key) for a swap pair.
For short_answer, mcq_map is None and answer keys are word-form (e.g. 'left').
For mcq, mcq_map is a dict and answer keys are the same word-form category labels.
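    Illustrative sketch for group='horizontal' with an even pair_index (the
    question wording below is an assumption; the real text comes from
    MCQ_TEMPLATES and MCQ_LETTER in swap_analysis):
        orig_q  ~ "Is the yellow cube left or right of the red sphere? A. left B. right"
        mcq_map ~ {'A': 'left', 'B': 'right'}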
"""
if question_type == 'short_answer':
if group == 'horizontal':
tmpl = SHORT_TEMPLATES['horizontal']
orig_q = tmpl.format(obj1=obj1_desc, obj2=obj2_desc)
swap_q = tmpl.format(obj1=obj2_desc, obj2=obj1_desc)
elif group == 'vertical':
tmpl = SHORT_TEMPLATES['vertical']
orig_q = tmpl.format(obj1=obj1_desc, obj2=obj2_desc)
swap_q = tmpl.format(obj1=obj2_desc, obj2=obj1_desc)
else: # distance
tmpl = SHORT_TEMPLATES['distance']
orig_q = tmpl.format(subj=obj1_desc, ref=obj2_desc)
swap_q = tmpl.format(subj=obj2_desc, ref=obj1_desc)
return orig_q, swap_q, None
else: # mcq
if group == 'horizontal':
variant = 'left_first' if pair_index % 2 == 0 else 'right_first'
orig_q = MCQ_TEMPLATES['horizontal'][variant].format(obj1=obj1_desc, obj2=obj2_desc)
swap_q = MCQ_TEMPLATES['horizontal'][variant].format(obj1=obj2_desc, obj2=obj1_desc)
mcq_map = MCQ_LETTER['horizontal'][variant]
elif group == 'vertical':
variant = 'above_first' if pair_index % 2 == 0 else 'below_first'
orig_q = MCQ_TEMPLATES['vertical'][variant].format(obj1=obj1_desc, obj2=obj2_desc)
swap_q = MCQ_TEMPLATES['vertical'][variant].format(obj1=obj2_desc, obj2=obj1_desc)
mcq_map = MCQ_LETTER['vertical'][variant]
else: # distance
variant = 'far_first' if pair_index % 2 == 0 else 'close_first'
orig_q = MCQ_TEMPLATES['distance'][variant].format(subj=obj1_desc, ref=obj2_desc)
swap_q = MCQ_TEMPLATES['distance'][variant].format(subj=obj2_desc, ref=obj1_desc)
mcq_map = MCQ_LETTER['distance'][variant]
return orig_q, swap_q, mcq_map
def load_swap_pairs_synthetic(
data_dir: str,
seed: int = 42,
    folder_to_category: Optional[dict] = None,
question_type: str = 'short_answer',
) -> List[dict]:
"""Load synthetic 2-body data and build swap pairs.
Returns a list of dicts with the same schema as swap_analysis.load_swap_pairs(),
plus extra keys (obj1_bbox, obj2_bbox, obj1_desc, obj2_desc) consumed by
create_cross_group_quads_synthetic() and ignored by the rest of the pipeline.
Args:
data_dir: Root directory containing category sub-folders.
seed: Random seed (currently unused; reserved for future subsampling).
folder_to_category: Mapping of folder_name β†’ canonical category.
Defaults to FOLDER_TO_CATEGORY (standard 2body layout).
Pass COUNTER_FOLDER_TO_CATEGORY for counter-only (2body_bias) layout.
        question_type: 'short_answer' (default) or 'mcq'.
"""
if folder_to_category is None:
folder_to_category = FOLDER_TO_CATEGORY
pairs = []
stats = defaultdict(lambda: {'total': 0, 'success': 0})
for folder_name, category in folder_to_category.items():
folder_path = os.path.join(data_dir, folder_name)
json_path = os.path.join(folder_path, 'vqa.json')
if not os.path.exists(json_path):
logger.warning(f'[synthetic] Missing vqa.json: {json_path}')
continue
with open(json_path, encoding='utf-8') as f:
entries = json.load(f)
group = GROUP_MAP[category]
for i, entry in enumerate(entries):
stats[category]['total'] += 1
obj1 = entry['obj1']
obj2 = entry['obj2']
obj1_desc = f"{obj1['color']} {obj1['shape']}"
obj2_desc = f"{obj2['color']} {obj2['shape']}"
            # Load PNG from disk → base64 (same format as EmbSpatialBench TSV)
img_filename = os.path.basename(entry['image']) # e.g. "0000.png"
img_path = os.path.join(folder_path, img_filename)
if not os.path.exists(img_path):
logger.warning(f'[synthetic] Image not found: {img_path}')
continue
with open(img_path, 'rb') as fimg:
img_b64 = base64.b64encode(fimg.read()).decode('utf-8')
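            # len(pairs) serves as a running pair index, so MCQ option order
            # alternates globally across the whole run.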
orig_q, swap_q, mcq_map = _build_question_pair(
group, obj1_desc, obj2_desc, len(pairs), question_type)
orig_ans = category
swap_ans = SHORT_OPPOSITE_MAP[category] if question_type == 'short_answer' else OPPOSITE_MAP[category]
pair = {
# ── Standard swap pair fields (same schema as load_swap_pairs) ──
'index': f'{folder_name}_{i:04d}',
'question_id': f'{folder_name}_{i:04d}',
'image_base64': img_b64,
'original_question': orig_q,
'swapped_question': swap_q,
'original_answer': orig_ans,
'swapped_answer': swap_ans,
'group': group,
'category': category,
'mcq_map': mcq_map,
# ── Extra fields for cross-group quad creation ───────────────────
'obj1_bbox': obj1['bbox'], # [x_min, y_min, x_max, y_max]
'obj2_bbox': obj2['bbox'],
'obj1_desc': obj1_desc,
'obj2_desc': obj2_desc,
}
pairs.append(pair)
stats[category]['success'] += 1
logger.info('Synthetic swap pair creation stats:')
for cat in CATEGORY_ORDER:
s = stats[cat]
if s['total']:
logger.info(f' {cat}: {s["success"]}/{s["total"]}')
logger.info(f' Total pairs: {len(pairs)}')
return pairs
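# Usage sketch (paths are the module defaults; 'mcq' here is only an example):
#   pairs = load_swap_pairs_synthetic(SYNTHETIC_DATA_DIR, question_type='mcq')
#   counter_pairs = load_swap_pairs_synthetic(
#       COUNTER_DATA_DIR, folder_to_category=COUNTER_FOLDER_TO_CATEGORY)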
def load_swap_pairs_mixed(
data_dir: str,
consistent_ratio: float,
n_per_category: int = 200,
seed: int = 42,
question_type: str = 'short_answer',
) -> List[dict]:
"""Load a mix of consistent and counter samples from 2body_bias.
For each canonical category, samples
n_consistent = round(n_per_category * consistent_ratio) entries from consistent_{cat}/
n_counter = n_per_category - n_consistent entries from counter_{cat}/
The two sets are combined per category and returned as swap pairs using the
same schema as load_swap_pairs_synthetic().
Args:
data_dir: Root directory (typically COUNTER_DATA_DIR / 2body_bias).
consistent_ratio: Fraction of consistent samples (0.0 – 1.0).
n_per_category: Target total samples per category (default 200).
seed: Random seed for reproducible subsampling.
        question_type: 'short_answer' (default) or 'mcq'.
"""
rng = random.Random(seed)
n_consistent = round(n_per_category * consistent_ratio)
n_counter = n_per_category - n_consistent
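    # Note: Python 3 round() uses banker's rounding, so an exact .5 split
    # (e.g. ratio 0.1025 with n_per_category=200 → 20.5) rounds to the nearest
    # even count rather than always up.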
logger.info(
f'[mixed] consistent_ratio={consistent_ratio} '
        f'→ {n_consistent} consistent + {n_counter} counter per category '
f'(total {n_per_category})'
)
# Helper: load all entries from a single category folder and subsample
def _load_folder(folder_name: str, category: str, n: int) -> List[dict]:
if n <= 0:
return []
folder_path = os.path.join(data_dir, folder_name)
json_path = os.path.join(folder_path, 'vqa.json')
if not os.path.exists(json_path):
logger.warning(f'[mixed] Missing vqa.json: {json_path}')
return []
with open(json_path, encoding='utf-8') as f:
entries = json.load(f)
if len(entries) > n:
entries = rng.sample(entries, n)
return [(folder_path, folder_name, category, entry) for entry in entries]
CATEGORIES = list(FOLDER_TO_CATEGORY.values()) # canonical order
pairs = []
stats = defaultdict(lambda: {'total': 0, 'success': 0})
for category in CATEGORIES:
consistent_folder = f'consistent_{category}'
counter_folder = f'counter_{category}'
consistent_items = _load_folder(consistent_folder, category, n_consistent)
counter_items = _load_folder(counter_folder, category, n_counter)
all_items = consistent_items + counter_items
rng.shuffle(all_items) # mix the two sources
group = GROUP_MAP[category]
for idx, (folder_path, folder_name, cat, entry) in enumerate(all_items):
stats[cat]['total'] += 1
obj1 = entry['obj1']
obj2 = entry['obj2']
obj1_desc = f"{obj1['color']} {obj1['shape']}"
obj2_desc = f"{obj2['color']} {obj2['shape']}"
img_filename = os.path.basename(entry['image'])
img_path = os.path.join(folder_path, img_filename)
if not os.path.exists(img_path):
logger.warning(f'[mixed] Image not found: {img_path}')
continue
with open(img_path, 'rb') as fimg:
img_b64 = base64.b64encode(fimg.read()).decode('utf-8')
orig_q, swap_q, mcq_map = _build_question_pair(
group, obj1_desc, obj2_desc, len(pairs), question_type)
orig_ans = cat
swap_ans = SHORT_OPPOSITE_MAP[cat] if question_type == 'short_answer' else OPPOSITE_MAP[cat]
pair = {
'index': f'{folder_name}_{idx:04d}',
'question_id': f'{folder_name}_{idx:04d}',
'image_base64': img_b64,
'original_question': orig_q,
'swapped_question': swap_q,
'original_answer': orig_ans,
'swapped_answer': swap_ans,
'group': group,
'category': cat,
'mcq_map': mcq_map,
'obj1_bbox': obj1['bbox'],
'obj2_bbox': obj2['bbox'],
'obj1_desc': obj1_desc,
'obj2_desc': obj2_desc,
}
pairs.append(pair)
stats[cat]['success'] += 1
logger.info('Mixed swap pair creation stats:')
for cat in CATEGORY_ORDER:
s = stats[cat]
if s['total']:
logger.info(f' {cat}: {s["success"]}/{s["total"]}')
logger.info(f' Total pairs: {len(pairs)}')
return pairs
def create_cross_group_quads_synthetic(
swap_pairs: List[dict],
threshold_ratio: float = 0.05,
question_type: str = 'short_answer',
) -> List[dict]:
"""Build cross-group quads directly from bounding boxes stored in swap_pairs.
    For each far/close swap pair, the vertical relationship (above/below) between
    the two objects is derived from bbox center-Y coordinates; no HF cache needed.
    Pairs whose vertical separation is below threshold_ratio × image_height pixels
are considered ambiguous and skipped.
"""
threshold = SYNTHETIC_IMAGE_HEIGHT * threshold_ratio
quads = []
stats = {'total': 0, 'matched': 0, 'ambiguous': 0}
distance_pairs = [p for p in swap_pairs if p['group'] == 'distance']
for pair in distance_pairs:
stats['total'] += 1
bbox1 = pair['obj1_bbox'] # [x_min, y_min, x_max, y_max]
bbox2 = pair['obj2_bbox']
cy1 = (bbox1[1] + bbox1[3]) / 2.0 # center Y of obj1
cy2 = (bbox2[1] + bbox2[3]) / 2.0 # center Y of obj2
y_diff = cy1 - cy2
if abs(y_diff) < threshold:
stats['ambiguous'] += 1
continue
# In image coordinates: smaller Y = higher in frame = "above"
vert_orig_answer = 'above' if cy1 < cy2 else 'below'
if question_type == 'short_answer':
vert_orig_q = SHORT_TEMPLATES['vertical'].format(
obj1=pair['obj1_desc'], obj2=pair['obj2_desc'])
vert_swap_q = SHORT_TEMPLATES['vertical'].format(
obj1=pair['obj2_desc'], obj2=pair['obj1_desc'])
vert_mcq_map = None
else:
            # MCQ vertical question: alternate option order to cancel A/B positional bias
vert_variant = 'above_first' if len(quads) % 2 == 0 else 'below_first'
vert_orig_q = MCQ_TEMPLATES['vertical'][vert_variant].format(
obj1=pair['obj1_desc'], obj2=pair['obj2_desc'])
vert_swap_q = MCQ_TEMPLATES['vertical'][vert_variant].format(
obj1=pair['obj2_desc'], obj2=pair['obj1_desc'])
vert_mcq_map = MCQ_LETTER['vertical'][vert_variant]
quad = {
'index': pair['index'],
'image_base64': pair['image_base64'],
'dist_original_q': pair['original_question'],
'dist_swapped_q': pair['swapped_question'],
'dist_original_answer': pair['original_answer'],
'dist_swapped_answer': pair['swapped_answer'],
'dist_mcq_map': pair['mcq_map'],
'vert_original_q': vert_orig_q,
'vert_swapped_q': vert_swap_q,
'vert_original_answer': vert_orig_answer,
'vert_swapped_answer': OPPOSITE_MAP[vert_orig_answer],
'vert_mcq_map': vert_mcq_map,
'target_object': pair['obj1_desc'],
'reference_object': pair['obj2_desc'],
'target_bbox_y': cy1,
'ref_bbox_y': cy2,
'y_diff': y_diff,
'data_source': 'synthetic',
}
quads.append(quad)
stats['matched'] += 1
logger.info(f"Synthetic cross-group quads: {stats['matched']}/{stats['total']} "
f"(ambiguous={stats['ambiguous']})")
return quads
# ── VD-only merge helpers ────────────────────────────────────────────────────
def _filter_scale_data_vd(data):
"""Filter {(category, layer): values} dict to VD categories only."""
return {k: v for k, v in data.items() if k[0] in VD_CATEGORIES}
def _filter_heatmap_vd(heatmap_dict):
"""Filter per-layer delta-heatmap DataFrames to VD categories only."""
vd_order = [c for c in sa.CATEGORY_ORDER if c in VD_CATEGORIES]
result = {}
for layer, df in heatmap_dict.items():
idx = [c for c in vd_order if c in df.index]
col = [c for c in vd_order if c in df.columns]
result[layer] = df.loc[idx, col]
return result
def run_merge_vd_only_synthetic(args):
"""Merge in VD-only mode.
Reads per-scale JSON/CSV data from the *full* results directory
(all 6 categories), filters to VD categories (above/below/far/close),
and writes cross-scale comparison plots to results/vd-only/{model_type}/.
args.output_dir : write destination (e.g. results/vd-only/ or
results_counter_only/vd-only/)
args.counter_only: selects read base (results vs results_counter_only)
"""
if getattr(args, 'consistent_ratio', None) is not None:
ratio_str = f'{args.consistent_ratio:g}'
base_full = os.path.join(_HERE, f'results_consistent_ratio_{ratio_str}')
elif args.counter_only:
base_full = DEFAULT_OUTPUT_DIR_COUNTER
else:
base_full = DEFAULT_OUTPUT_DIR
is_merge_only = args.model_type in sa.MERGE_ONLY_CONFIGS
if is_merge_only:
mc = sa.MERGE_ONLY_CONFIGS[args.model_type]
scale_order = mc['scale_order']
scale_sources = mc['scale_sources']
elif args.model_type in sa.MODEL_CONFIGS_NEW:
scale_order = list(sa.MODEL_CONFIGS_NEW[args.model_type].keys())
scale_sources = None
else:
scale_order = ['vanilla', '80k', '400k', '800k', '2m', 'roborefer',
'10pct', '20pct', '30pct']
scale_sources = None
available_scales = [s for s in scale_order if s in args.scales]
default_write = os.path.join(args.output_dir, args.model_type)
write_dir = getattr(args, 'merge_output_dir', None) or default_write
plots_dir = os.path.join(write_dir, 'plots')
os.makedirs(plots_dir, exist_ok=True)
logger.info(f'[VD-only merge] Read base : {base_full}')
logger.info(f'[VD-only merge] Write dir : {write_dir}')
logger.info(f'[VD-only merge] Scales : {available_scales}')
all_sc = {}
all_sc_bc = {}
all_wc = {}
all_wc_bc = {}
all_align = {}
all_dh = {}
all_dh_bc = {}
phase_b_dirs = {}
for scale in available_scales:
if is_merge_only:
src_model = scale_sources[scale]
read_dir = os.path.join(base_full, src_model)
else:
read_dir = os.path.join(base_full, args.model_type)
phase_b_dirs[scale] = read_dir
sc = sa.load_scale_consistency(read_dir, scale, 'all_pairs')
sc_bc = sa.load_scale_consistency(read_dir, scale, 'both_correct')
wc = sa.load_within_cat_consistency(read_dir, scale, 'all_pairs')
wc_bc = sa.load_within_cat_consistency(read_dir, scale, 'both_correct')
align = sa.load_scale_alignment(read_dir, scale)
dh = sa.load_delta_heatmaps(read_dir, scale, 'all_pairs')
dh_bc = sa.load_delta_heatmaps(read_dir, scale, 'both_correct')
if sc: all_sc[scale] = _filter_scale_data_vd(sc)
if sc_bc: all_sc_bc[scale] = _filter_scale_data_vd(sc_bc)
if wc: all_wc[scale] = _filter_scale_data_vd(wc)
if wc_bc: all_wc_bc[scale] = _filter_scale_data_vd(wc_bc)
if align: all_align[scale] = align # no category dim
if dh: all_dh[scale] = _filter_heatmap_vd(dh)
if dh_bc: all_dh_bc[scale] = _filter_heatmap_vd(dh_bc)
logger.info(f' Loaded + VD-filtered: {scale}')
# ── Cross-scale trajectory plots ──────────────────────────────────────────
for condition, sc_data, wc_data, dh_data, tag_label in [
('all', all_sc, all_wc, all_dh, 'all pairs'),
('both_correct', all_sc_bc, all_wc_bc, all_dh_bc, 'both-correct'),
]:
cond_dir = os.path.join(plots_dir, condition)
sc_dir = os.path.join(cond_dir, 'sign_corrected')
wc_dir = os.path.join(cond_dir, 'within_cat_consistency')
dt_dir = os.path.join(cond_dir, 'delta_trajectory')
os.makedirs(sc_dir, exist_ok=True)
os.makedirs(wc_dir, exist_ok=True)
os.makedirs(dt_dir, exist_ok=True)
if len(sc_data) > 1:
sa.plot_cross_scale_consistency(
sc_data, args.model_type,
os.path.join(sc_dir, 'cross_scale_sign_corrected.png'),
title_prefix=f'Sign-Corrected ({tag_label}) [VD-only]')
if len(wc_data) > 1:
sa.plot_cross_scale_within_cat_consistency(
wc_data, args.model_type,
os.path.join(wc_dir, 'cross_scale_within_cat.png'))
if dh_data:
sa.plot_delta_trajectory(
dh_data, args.model_type,
os.path.join(dt_dir, 'delta_trajectory.png'))
# ── Cross-scale alignment ─────────────────────────────────────────────────
all_cond_dir = os.path.join(plots_dir, 'all')
ca_dir = os.path.join(all_cond_dir, 'cross_alignment')
os.makedirs(ca_dir, exist_ok=True)
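    # The cross-scale alignment plot is only drawn when Phase B features were
    # saved for every requested scale.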
has_phase_b = all(
sa._has_phase_b_data(phase_b_dirs[s], s) for s in available_scales)
if has_phase_b and len(all_align) > 1:
sa.plot_cross_scale_alignment(
all_align, args.model_type,
os.path.join(ca_dir, 'cross_scale_alignment.png'))
logger.info(f'\n=== VD-only Merge Complete ===\nResults in: {write_dir}')
# ── Main ─────────────────────────────────────────────────────────────────────
def main():
_LEGACY_DEFAULT_SCALES = {
'molmo': ['vanilla', '80k', '400k', '800k', '2m'],
'nvila': ['vanilla', '80k', '400k', '800k', '2m'],
'qwen': ['vanilla', '80k', '400k', '800k', '2m'],
'nvila_synthetic': ['80k-5pct', '80k-10pct', '80k-20pct', '80k-30pct', '400k-5pct'],
'nvila_st': ['80k-st', '400k-st', '800k-st'],
}
parser = argparse.ArgumentParser(
        description='Swap Analysis – Synthetic 2-body data',
        formatter_class=argparse.RawTextHelpFormatter,  # preserve \n in help strings
)
parser.add_argument('--data_path', type=str, default=None,
help='Root directory containing category folders. '
'Defaults to SYNTHETIC_DATA_DIR (or COUNTER_DATA_DIR with --counter-only).')
parser.add_argument('--counter-only', action='store_true', dest='counter_only',
help='Use counter-only data from 2body_bias/ '
                        '(folders: counter_above, counter_below, …). '
'Overrides --data_path default to COUNTER_DATA_DIR.')
parser.add_argument('--consistent_ratio', type=float, default=None,
dest='consistent_ratio',
help='Mix consistent and counter samples from 2body_bias/. '
'Value in [0, 1]: fraction of consistent samples per category. '
                        'E.g. 0.8 → 160 consistent + 40 counter out of 200 total. '
'Results saved under results_consistent_ratio_{value}/ and '
'logs_consistent_ratio_{value}/.')
parser.add_argument('--model_type', type=str, required=True,
choices=ALL_MODEL_TYPES,
help=(
'Legacy: molmo | nvila | qwen\n'
'New large: molmo_big | qwen_big | qwen_super | big_trio\n'
'Merge-only (--merge required): molmo_all | qwen_all'
))
parser.add_argument('--scales', type=str, nargs='+', default=None,
help='Scales to process (default: all for the given model_type).')
parser.add_argument('--output_dir', type=str, default=None,
                        help='Root output directory. Defaults to '
                             'results[_counter_only|_consistent_ratio_X][_short]/, '
                             'with a vd-only/ subdirectory when --vd-only is set.')
parser.add_argument('--vd-only', action='store_true', dest='vd_only',
help='Run only vertical+distance categories (above/below/far/close). '
                        'Produces a 4×4 delta-similarity heatmap, 2D PCA only (no 3D). '
'Results saved under results/vd-only/ by default.')
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--merge', action='store_true',
help='Merge mode: regenerate cross-scale plots from saved per-scale data.')
parser.add_argument('--merge-output-dir', type=str, default=None, dest='merge_output_dir',
help='Override output dir for cross-scale plots.')
parser.add_argument('--no-auto-roborefer', action='store_true', dest='no_auto_roborefer',
help='Disable automatic inclusion of roborefer scale for nvila.')
parser.add_argument('--skip-cross-group', action='store_true',
help='Skip cross-group quad extraction.')
parser.add_argument('--skip-phase-b', action='store_true', dest='skip_phase_b',
help='Skip Phase B (cross-group feature extraction). '
'Phase A inference + analysis + plots still run normally.')
parser.add_argument('--max-samples-per-category', type=int, default=200,
dest='max_samples_per_category')
parser.add_argument('--question-type', type=str, default='short_answer',
choices=['mcq', 'short_answer'], dest='question_type',
help='Question format: short_answer (default) or mcq.')
args = parser.parse_args()
# ── Validate mutually exclusive modes ────────────────────────────────────
if args.consistent_ratio is not None and args.counter_only:
parser.error('--consistent_ratio and --counter-only are mutually exclusive.')
if args.consistent_ratio is not None and not (0.0 <= args.consistent_ratio <= 1.0):
parser.error('--consistent_ratio must be in [0.0, 1.0].')
# ── Resolve data path and folder mapping ──────────────────────────────────
if args.consistent_ratio is not None:
# Mixed mode: always read from COUNTER_DATA_DIR (contains both consistent_* and counter_*)
folder_map = None # handled by load_swap_pairs_mixed; not used for standard loading
if args.data_path is None:
args.data_path = COUNTER_DATA_DIR
elif args.counter_only:
folder_map = COUNTER_FOLDER_TO_CATEGORY
if args.data_path is None:
args.data_path = COUNTER_DATA_DIR
else:
folder_map = FOLDER_TO_CATEGORY
if args.data_path is None:
args.data_path = SYNTHETIC_DATA_DIR
# --vd-only: filter folder_map to vertical+distance categories only
if args.vd_only and folder_map is not None:
folder_map = {k: v for k, v in folder_map.items() if v in VD_CATEGORIES}
# ── Resolve output directory ───────────────────────────────────────────────
if args.output_dir is None:
if args.consistent_ratio is not None:
ratio_str = f'{args.consistent_ratio:g}'
base_dir = os.path.join(_HERE, f'results_consistent_ratio_{ratio_str}')
elif args.counter_only:
base_dir = DEFAULT_OUTPUT_DIR_COUNTER
else:
base_dir = DEFAULT_OUTPUT_DIR
if args.question_type == 'short_answer':
base_dir += '_short'
if args.vd_only:
args.output_dir = os.path.join(base_dir, 'vd-only')
else:
args.output_dir = base_dir
# ── Logging ──────────────────────────────────────────────────────────────
log_path = _setup_file_logging_synthetic(
args.model_type,
counter_only=args.counter_only,
consistent_ratio=args.consistent_ratio,
question_type=args.question_type,
)
logger.info(f'[synthetic] Logging to : {log_path}')
logger.info(f'[synthetic] Counter-only : {args.counter_only}')
logger.info(f'[synthetic] Consistent ratio : {args.consistent_ratio}')
logger.info(f'[synthetic] Question type : {args.question_type}')
logger.info(f'[synthetic] VD-only : {args.vd_only}')
logger.info(f'[synthetic] Data dir : {args.data_path}')
logger.info(f'[synthetic] Output dir : {args.output_dir}')
# ── VD-only: disable 3D PCA (monkey-patch sa module before process_scale) ─
if args.vd_only:
sa.plot_pca_3d = lambda *_a, **_kw: None
# ── Validate merge-only types ─────────────────────────────────────────────
if args.model_type in MERGE_ONLY_CONFIGS and not args.merge:
parser.error(
f"'{args.model_type}' is a merge-only type. Add --merge to run it.\n"
f" Example: python swap_analysis_synthetic.py "
f"--model_type {args.model_type} --merge"
)
# ── Default scales ────────────────────────────────────────────────────────
if args.scales is None:
if args.model_type in MERGE_ONLY_CONFIGS:
args.scales = MERGE_ONLY_CONFIGS[args.model_type]['scale_order']
elif args.model_type in MODEL_CONFIGS_NEW:
args.scales = list(MODEL_CONFIGS_NEW[args.model_type].keys())
else:
args.scales = _LEGACY_DEFAULT_SCALES.get(
args.model_type, ['vanilla', '80k', '400k', '800k', '2m'])
if (args.model_type == 'nvila'
and 'roborefer' not in args.scales
and not args.no_auto_roborefer):
args.scales.append('roborefer')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
# ── Merge mode ────────────────────────────────────────────────────────────
if args.merge:
logger.info('\n=== MERGE MODE (synthetic) ===')
if args.vd_only:
# Read from full results, filter to VD categories, write to vd-only/
run_merge_vd_only_synthetic(args)
elif args.model_type in MODEL_CONFIGS_NEW or args.model_type in MERGE_ONLY_CONFIGS:
run_merge_extended(args)
else:
run_merge(args)
return
# ── Inference mode ────────────────────────────────────────────────────────
logger.info('\n=== Loading Synthetic Swap Pairs ===')
if args.consistent_ratio is not None:
swap_pairs = load_swap_pairs_mixed(
args.data_path,
consistent_ratio=args.consistent_ratio,
n_per_category=args.max_samples_per_category,
seed=args.seed,
question_type=args.question_type,
)
else:
swap_pairs = load_swap_pairs_synthetic(
args.data_path, args.seed, folder_map,
question_type=args.question_type,
)
quads = []
if not args.skip_cross_group and not getattr(args, 'skip_phase_b', False):
quads = create_cross_group_quads_synthetic(swap_pairs, question_type=args.question_type)
if args.model_type in MODEL_CONFIGS_NEW:
model_configs = MODEL_CONFIGS_NEW[args.model_type]
else:
model_configs = MODEL_CONFIGS[args.model_type]
for scale in args.scales:
if scale not in model_configs:
logger.warning(f"Scale '{scale}' not in config for '{args.model_type}', skipping.")
continue
# Validate model path (HF IDs like "Qwen/…" or "allenai/…" are always remote)
if args.model_type in MODEL_CONFIGS_NEW:
_, raw_path = model_configs[scale]
else:
raw_path = model_configs[scale]
if not os.path.isabs(raw_path) and not raw_path.startswith(('Qwen/', 'allenai/')):
if not os.path.exists(raw_path):
logger.warning(f'Model path not found: {raw_path} (scale={scale!r}), skipping.')
continue
try:
process_scale(args, scale, swap_pairs, quads)
except Exception as e:
logger.error(f'Failed {args.model_type} - {scale}: {e}')
import traceback
traceback.print_exc()
continue
logger.info(f"\n{'='*60}")
logger.info('=== All scales complete (synthetic) ===')
logger.info(f"Results: {os.path.join(args.output_dir, args.model_type)}")
logger.info(f"{'='*60}")
if __name__ == '__main__':
main()