#!/usr/bin/env python3
"""Summarize metrics from swap_analysis_updated results.
Directory layout expected:
{root}/short_answer/saved_data/{model_folder}/json/sign_corrected_consistency_{scale}_all_pairs.json
{root}/short_answer/saved_data/{model_folder}/csv/delta_similarity_{scale}_L{layer}_all_pairs.csv
[Optional – requires separate extraction step]
{root}/short_answer/saved_data/{model_folder}/csv/delta_norm_{scale}_L{layer}_all_pairs.csv
Expected format: single-column CSV (index = relation label, column = mean norm)
,norm
left,12.34
right,11.89
above,9.45
below,9.12
far,7.23
close,7.58
NOTE: delta_similarity CSVs contain cosine similarities only (no magnitude info).
To populate Norm columns, save raw delta vector norms during swap_analysis.
Each model_folder encodes both the model family and data scale, e.g.:
molmo_vanilla, molmo_80k, nvila_800k, nvila_st_800k-st, nvila_synthetic_80k-5pct, qwen_2m
Usage:
# All models (default)
python summarize_metrics_updated.py
# Specific models
python summarize_metrics_updated.py molmo_2m nvila_800k nvila_st_800k-st
"""
import argparse
import json
import re
from pathlib import Path
import numpy as np
import pandas as pd
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
ROOT = Path("/data/shared/Qwen/experiments/swap_analysis_updated")
SAVED_DATA = ROOT / "short_answer" / "saved_data"
# SAVED_DATA = ROOT / "short_answer_wo_norm" / "saved_data_wo_norm"
EXP_DIR = Path("/data/shared/Qwen/experiments")
# Display names keyed by folder name
DISPLAY_NAMES = {
"molmo_vanilla": "Molmo vanilla",
"molmo_80k": "Molmo 80k",
"molmo_400k": "Molmo 400k",
"molmo_800k": "Molmo 800k",
"molmo_2m": "Molmo 2M",
"nvila_vanilla": "NVILA vanilla",
"nvila_80k": "NVILA 80k",
"nvila_400k": "NVILA 400k",
"nvila_800k": "NVILA 800k",
"nvila_2m": "NVILA 2M",
"nvila_roborefer": "RoboRefer",
"nvila_st_80k-st": "NVILA-ST 80k",
"nvila_st_400k-st": "NVILA-ST 400k",
"nvila_st_800k-st": "NVILA-ST 800k",
"nvila_synthetic_80k-5pct": "NVILA Syn 80k-5%",
"nvila_synthetic_80k-10pct": "NVILA Syn 80k-10%",
"nvila_synthetic_400k-5pct": "NVILA Syn 400k-5%",
"nvila_synthetic_800k-5pct": "NVILA Syn 800k-5%",
"qwen_vanilla": "Qwen vanilla",
"qwen_80k": "Qwen 80k",
"qwen_400k": "Qwen 400k",
"qwen_800k": "Qwen 800k",
"qwen_2m": "Qwen 2M",
}
# Accuracy text-file model names keyed by folder name
TEXT_FILE_MODEL_NAMES = {
"molmo_vanilla": "molmo-7B-O-0924",
"molmo_80k": "molmo-7B-O-0924-data_scale_exp_80k",
"molmo_400k": "molmo-7B-O-0924-data_scale_exp_400k",
"molmo_800k": "molmo-7B-O-0924-data_scale_exp_800k",
"molmo_2m": "molmo-7B-O-0924-data_scale_exp_2m",
"nvila_vanilla": "NVILA-Lite-2B",
"nvila_80k": "NVILA-Lite-2B-data-scale-exp-80k",
"nvila_400k": "NVILA-Lite-2B-data-scale-exp-400k",
"nvila_800k": "NVILA-Lite-2B-data-scale-exp-800k",
"nvila_2m": "NVILA-Lite-2B-data-scale-exp-2m",
"nvila_st_80k-st": "NVILA-Lite-2B-ST-80k-5pct",
"nvila_st_400k-st": "NVILA-Lite-2B-ST-400k-5pct",
"nvila_st_800k-st": "NVILA-Lite-2B-ST-800k-5pct",
"nvila_roborefer": "RoboRefer-2B-SFT",
"qwen_vanilla": "Qwen2.5-VL-3B-Instruct",
"qwen_80k": "Qwen2.5-VL-3B-Instruct-data_scale_exp_80k",
"qwen_400k": "Qwen2.5-VL-3B-Instruct-data_scale_exp_400k",
"qwen_800k": "Qwen2.5-VL-3B-Instruct-data_scale_exp_800k",
"qwen_2m": "Qwen2.5-VL-3B-Instruct-data_scale_exp_2m",
}
# Canonical sort order (unknown folders appended at the end)
FOLDER_ORDER = [
"molmo_vanilla", "molmo_80k", "molmo_400k", "molmo_800k", "molmo_2m",
"nvila_vanilla",
"nvila_80k", "nvila_400k", "nvila_800k", "nvila_2m",
"nvila_st_80k-st", "nvila_st_400k-st", "nvila_st_800k-st",
"nvila_roborefer",
"nvila_synthetic_80k-5pct", "nvila_synthetic_80k-10pct",
"nvila_synthetic_400k-5pct", "nvila_synthetic_800k-5pct",
"qwen_vanilla", "qwen_80k", "qwen_400k", "qwen_800k", "qwen_2m",
]
def get_target_layer(folder_name: str) -> int | None:
    """Return the fixed probe layer for entanglement, based on model family."""
if folder_name.startswith("molmo"):
return 23
if folder_name.startswith("nvila"):
return 20
if folder_name.startswith("qwen"):
return 27
return None
def make_display_name(folder_name: str) -> str:
    """Fallback display name for unrecognised folder names."""
    return DISPLAY_NAMES.get(folder_name, folder_name.replace("_", " "))
# ---------------------------------------------------------------------------
# Metric helpers
# ---------------------------------------------------------------------------
def get_peak_consistency(json_file: Path) -> dict:
"""Peak sign-corrected consistency across all layers per axis."""
with open(json_file) as f:
data = json.load(f)
result = {}
for dim in ("horizontal", "vertical", "distance"):
vals = [v["mean"] for k, v in data.items() if k.startswith(f"{dim}_L")]
result[dim] = max(vals) if vals else None
return result
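# Illustrative JSON shape assumed by both consistency helpers; keys follow
# "{dim}_L{layer}" and the numbers below are placeholders, not real results:
#   {"horizontal_L0": {"mean": 0.61, "std": 0.08, "n": 48},
#    "horizontal_L1": {"mean": 0.64, "std": 0.07, "n": 48}, ...}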
def get_layer_consistency(json_file: Path, layer: int) -> dict:
"""Sign-corrected consistency for horiz/vert/dist at a specific layer.
JSON keys follow the pattern: "{dim}_L{layer}" e.g. "distance_L20"
Each value is {"mean": float, "std": float, "n": int}.
"""
with open(json_file) as f:
data = json.load(f)
result = {}
for dim in ("horizontal", "vertical", "distance"):
key = f"{dim}_L{layer}"
result[dim] = data[key]["mean"] if key in data else None
return result
def _loc(df: pd.DataFrame, row: str, col: str) -> float:
"""Safe df.loc with below/under alias."""
aliases = {"below": "under", "under": "below"}
r = row if row in df.index else aliases.get(row, row)
c = col if col in df.columns else aliases.get(col, col)
if r not in df.index or c not in df.columns:
return float("nan")
return float(df.loc[r, c])
def get_vd_entanglement(csv_dir: Path, scale: str, layer: int) -> float | None:
"""VD-entanglement from cosine similarity matrix.
Formula: (cos(above,far) + cos(below,close) - cos(above,close) - cos(below,far)) / 4
File: delta_similarity_{scale}_L{layer}_all_pairs.csv
Format: 6×6 cosine similarity matrix, labels left/right/above/below/far/close.
"""
csv_file = csv_dir / f"delta_similarity_{scale}_L{layer}_all_pairs.csv"
if not csv_file.exists():
return None
df = pd.read_csv(csv_file, index_col=0)
vd = (
_loc(df, "above", "far") + _loc(df, "below", "close")
- _loc(df, "above", "close") - _loc(df, "below", "far")
) / 4
return float(vd) if np.isfinite(vd) else None
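# Worked example with illustrative numbers: if cos(above,far)=0.42,
# cos(below,close)=0.38, cos(above,close)=-0.06 and cos(below,far)=-0.10,
# then VD = (0.42 + 0.38 + 0.06 + 0.10) / 4 = 0.24, i.e. the vertical and
# distance delta directions are positively entangled at that layer.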
def get_delta_norms(csv_dir: Path, scale: str, layer: int) -> dict:
"""Mean delta vector norms for horiz/vert/dist at a specific layer.
Requires: delta_norm_{scale}_L{layer}_all_pairs.csv
This file is NOT produced by default — it must be generated by saving
np.linalg.norm(delta_vec) per relation during swap_analysis.
(delta_similarity CSVs contain only cosine similarities, no magnitude info.)
    Expected format:
        ,norm
        left,12.34
        right,11.89
        above,9.45
        below,9.12
        far,7.23
        close,7.58
    Returns an all-None dict when the file is absent (columns will show N/A).
"""
norm_file = csv_dir / f"delta_norm_{scale}_L{layer}_all_pairs.csv"
if not norm_file.exists():
return {"horizontal": None, "vertical": None, "distance": None}
df = pd.read_csv(norm_file, index_col=0)
    def _mean_norm(labels: list[str]) -> float | None:
        """Mean of the finite per-relation norms, with below/under alias."""
        aliases = {"below": "under", "under": "below"}
        vals = []
        for lbl in labels:
            row = lbl if lbl in df.index else aliases.get(lbl, lbl)
            if row not in df.index:
                continue
            row_vals = df.loc[row].values.astype(float)
            finite = row_vals[np.isfinite(row_vals)]
            if finite.size > 0:
                vals.append(float(np.mean(finite)))
        return float(np.mean(vals)) if vals else None
return {
"horizontal": _mean_norm(["left", "right"]),
"vertical": _mean_norm(["above", "below"]),
"distance": _mean_norm(["far", "close"]),
}
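# Sketch of the separate extraction step mentioned above (not run here): during
# swap_analysis, the per-relation delta vector norms could be written in the
# expected format. `delta_vecs` is a hypothetical {relation: np.ndarray} map of
# mean delta vectors at the target layer; `csv_dir`, `scale`, and `layer` are
# as used in this script.
#
#   norms = {rel: float(np.linalg.norm(vec)) for rel, vec in delta_vecs.items()}
#   pd.Series(norms, name="norm").to_csv(
#       csv_dir / f"delta_norm_{scale}_L{layer}_all_pairs.csv")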
# ---------------------------------------------------------------------------
# Text file parser
# ---------------------------------------------------------------------------
def parse_accuracy_text(text_file: Path) -> dict:
    """Parse per-model TOTAL consistent/counter percentages from a results text file."""
    if not text_file.exists():
        return {}
    content = text_file.read_text()
    sections = re.split(r"={10,}\s*\nModel:\s*", content)
    result = {}
    for section in sections[1:]:
        lines = section.splitlines()
        if not lines:
            continue
        model_name = lines[0].strip()
        consistent = counter = None
for line in lines:
m = re.match(r"\s*TOTAL\s+consistent\s+(\d+)\s+(\d+)\s+([\d.]+)%", line)
if m:
consistent = float(m.group(3))
m = re.match(r"\s*TOTAL\s+counter\s+(\d+)\s+(\d+)\s+([\d.]+)%", line)
if m:
counter = float(m.group(3))
if model_name:
result[model_name] = {"consistent": consistent, "counter": counter}
return result
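# The regexes above assume sections shaped like this illustrative snippet
# (model name and counts are placeholders):
#
#   ============================================================
#   Model: NVILA-Lite-2B-data-scale-exp-800k
#   ...
#   TOTAL consistent 812 1000 81.2%
#   TOTAL counter    431 1000 43.1%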
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def fmt(val, fmt_str=".4f", suffix=""):
    """Format a value with fmt_str, or return "N/A" when it is None."""
    return f"{val:{fmt_str}}{suffix}" if val is not None else "N/A"
def main():
parser = argparse.ArgumentParser(
description="Summarize metrics from swap_analysis_updated results.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=__doc__,
)
parser.add_argument(
"models",
nargs="*",
metavar="MODEL_FOLDER",
help="Model folder names under saved_data/ (e.g. molmo_2m nvila_800k). "
"Default: all folders found.",
)
args = parser.parse_args()
if args.models:
model_dirs = []
for name in args.models:
d = SAVED_DATA / name
if not d.is_dir():
print(f"[warn] Not found, skipping: {d}")
else:
model_dirs.append(d)
else:
model_dirs = sorted(d for d in SAVED_DATA.iterdir() if d.is_dir())
    # Per-model accuracy parsed from the shared results text files
    embspatial = parse_accuracy_text(EXP_DIR / "counter_consistent_results_embspatial_all.txt")
cvbench3d = parse_accuracy_text(EXP_DIR / "counter_consistent_results_cvbench3d_all.txt")
rows = []
for model_dir in model_dirs:
folder_name = model_dir.name
json_dir = model_dir / "json"
csv_dir = model_dir / "csv"
        # Locate the sign_corrected_consistency JSON to extract the data scale
        json_files = sorted(json_dir.glob("sign_corrected_consistency_*_all_pairs.json"))
if not json_files:
print(f"[warn] No consistency JSON in {json_dir}, skipping.")
continue
json_file = json_files[0]
m = re.match(r"sign_corrected_consistency_(.+)_all_pairs\.json", json_file.name)
if not m:
continue
scale = m.group(1)
display = make_display_name(folder_name)
target_layer = get_target_layer(folder_name)
# Peak consistency (across all layers)
peak = get_peak_consistency(json_file)
# Consistency at target layer
layer_sc = (
get_layer_consistency(json_file, target_layer)
if target_layer is not None
else {"horizontal": None, "vertical": None, "distance": None}
)
# VD-Entanglement at target layer
vd_entanglement = (
get_vd_entanglement(csv_dir, scale, target_layer)
if (target_layer is not None and csv_dir.is_dir())
else None
)
# Delta vector norms at target layer (N/A until delta_norm CSVs are generated)
norms = (
get_delta_norms(csv_dir, scale, target_layer)
if (target_layer is not None and csv_dir.is_dir())
else {"horizontal": None, "vertical": None, "distance": None}
)
# Task accuracy
text_model = TEXT_FILE_MODEL_NAMES.get(folder_name)
emb_con = emb_ctr = cvb_con = cvb_ctr = None
if text_model:
if text_model in embspatial:
emb_con = embspatial[text_model]["consistent"]
emb_ctr = embspatial[text_model]["counter"]
if text_model in cvbench3d:
cvb_con = cvbench3d[text_model]["consistent"]
cvb_ctr = cvbench3d[text_model]["counter"]
rows.append(dict(
folder_name=folder_name,
display=display,
peak_horiz=peak.get("horizontal"),
peak_vert=peak.get("vertical"),
peak_dist=peak.get("distance"),
target_layer=target_layer,
layer_horiz=layer_sc["horizontal"],
layer_vert=layer_sc["vertical"],
layer_dist=layer_sc["distance"],
vd_entanglement=vd_entanglement,
norm_horiz=norms["horizontal"],
norm_vert=norms["vertical"],
norm_dist=norms["distance"],
emb_con=emb_con, emb_ctr=emb_ctr,
cvb_con=cvb_con, cvb_ctr=cvb_ctr,
))
if not rows:
print("No data found.")
return
    # Canonical order first; unknown folders fall to the end.
    rows.sort(key=lambda r: FOLDER_ORDER.index(r["folder_name"])
              if r["folder_name"] in FOLDER_ORDER else len(FOLDER_ORDER))
records = []
for r in rows:
layer = r["target_layer"] if r["target_layer"] is not None else "?"
records.append({
"Model": r["display"],
"Peak Horiz": fmt(r["peak_horiz"]),
"Peak Vert": fmt(r["peak_vert"]),
"Peak Dist": fmt(r["peak_dist"]),
"Ent. Layer": str(layer),
"Layer Horiz SC": fmt(r["layer_horiz"]),
"Layer Vert SC": fmt(r["layer_vert"]),
"Layer Dist SC": fmt(r["layer_dist"]),
"VD-Entanglement": fmt(r["vd_entanglement"]),
"Norm Horiz": fmt(r["norm_horiz"]),
"Norm Vert": fmt(r["norm_vert"]),
"Norm Dist": fmt(r["norm_dist"]),
"EmbSpatial (con)": fmt(r["emb_con"], ".1f", "%"),
"EmbSpatial (ctr)": fmt(r["emb_ctr"], ".1f", "%"),
"CVBench3D (con)": fmt(r["cvb_con"], ".1f", "%"),
"CVBench3D (ctr)": fmt(r["cvb_ctr"], ".1f", "%"),
})
df = pd.DataFrame(records)
print(df.to_string(index=False))
csv_path = EXP_DIR / "summarize_metrics" / "swap_analysis_updated" / "short_answer_including_norm.csv"
csv_path.parent.mkdir(parents=True, exist_ok=True)
df.to_csv(csv_path, index=False)
print(f"\nSaved: {csv_path}")
if __name__ == "__main__":
main()