# experiments/summarize_metrics.py
# (Hugging Face upload-page artifacts converted to comments: uploaded with the
#  upload-large-folder tool, commit 3404d44.)
#!/usr/bin/env python3
"""Extract metrics from JSON files and display as a table.
Columns:
1. Peak consistency - horizontal
2. Peak consistency - vertical
3. Peak consistency - distance
4. Entanglement Layer (fixed layer per model family)
5. Layer Horiz SC (Sign-corrected consistency at fixed layer)
6. Layer Vert SC (Sign-corrected consistency at fixed layer)
7. Layer Dist SC (Sign-corrected consistency at fixed layer)
8. VD-Entanglement (at fixed layer per model family)
9. EmbSpatial Accuracy - consistent
10. EmbSpatial Accuracy - counter
11. CV-Bench-3D Accuracy - consistent
12. CV-Bench-3D Accuracy - counter
"""
import argparse
import json
import re
from pathlib import Path
import numpy as np
import pandas as pd
# ---------------------------------------------------------------------------
# Display name and text-file model name mappings
# ---------------------------------------------------------------------------
# Maps (folder_name, scale) -> human-readable model label shown in the table.
DISPLAY_NAMES = {
    ("molmo", "vanilla"): "Molmo vanilla",
    ("molmo", "80k"): "Molmo 80k",
    ("molmo", "400k"): "Molmo 400k",
    ("molmo", "800k"): "Molmo 800k",
    ("molmo", "2m"): "Molmo 2M",
    ("nvila", "vanilla"): "NVILA vanilla",
    ("nvila", "80k"): "NVILA 80k",
    ("nvila", "400k"): "NVILA 400k",
    ("nvila", "800k"): "NVILA 800k",
    ("nvila", "2m"): "NVILA 2M",
    ("nvila", "roborefer"): "RoboRefer",
    ("nvila_synthetic", "10pct"): "NVILA 10pct",
    ("nvila_synthetic", "20pct"): "NVILA 20pct",
    ("nvila_synthetic", "30pct"): "NVILA 30pct",
    ("qwen", "vanilla"): "Qwen vanilla",
    ("qwen", "80k"): "Qwen 80k",
    ("qwen", "400k"): "Qwen 400k",
    ("qwen", "800k"): "Qwen 800k",
    ("qwen", "2m"): "Qwen 2M",
    ("qwen_super", "qwen3_235b"): "Qwen3-235B",
}
# Maps (folder_name, scale) -> model identifier used inside the accuracy text
# files (counter_consistent_*.txt). Keys absent here (e.g. nvila_synthetic)
# simply get "N/A" accuracy columns.
TEXT_FILE_MODEL_NAMES = {
    ("molmo", "vanilla"): "molmo-7B-O-0924",
    ("molmo", "80k"): "molmo-7B-O-0924-data_scale_exp_80k",
    ("molmo", "400k"): "molmo-7B-O-0924-data_scale_exp_400k",
    ("molmo", "800k"): "molmo-7B-O-0924-data_scale_exp_800k",
    ("molmo", "2m"): "molmo-7B-O-0924-data_scale_exp_2m",
    ("nvila", "vanilla"): "NVILA-Lite-2B",
    ("nvila", "80k"): "NVILA-Lite-2B-data-scale-exp-80k",
    ("nvila", "400k"): "NVILA-Lite-2B-data-scale-exp-400k",
    ("nvila", "800k"): "NVILA-Lite-2B-data-scale-exp-800k",
    ("nvila", "2m"): "NVILA-Lite-2B-data-scale-exp-2m",
    ("nvila", "roborefer"): "RoboRefer-2B-SFT",
    ("qwen", "vanilla"): "Qwen2.5-VL-3B-Instruct",
    ("qwen", "80k"): "Qwen2.5-VL-3B-Instruct-data_scale_exp_80k",
    ("qwen", "400k"): "Qwen2.5-VL-3B-Instruct-data_scale_exp_400k",
    ("qwen", "800k"): "Qwen2.5-VL-3B-Instruct-data_scale_exp_800k",
    ("qwen", "2m"): "Qwen2.5-VL-3B-Instruct-data_scale_exp_2m",
    ("qwen_super", "qwen3_235b"): "Qwen3-VL-235B-A22B-Instruct",
}
# Row ordering for the output table: first by model family, then by scale.
# Unknown folders/scales sort last (see sort_key in main()).
FOLDER_ORDER = ["molmo", "nvila", "nvila_synthetic", "qwen", "qwen_super"]
SCALE_ORDER = ["vanilla", "roborefer", "10pct", "20pct", "30pct", "80k", "400k", "800k", "2m",
               "qwen3_235b"]
# Fixed layer index used for entanglement per model family
FOLDER_TARGET_LAYER = {
    "molmo": 23,
    "nvila": 20,
    "nvila_synthetic": 20,  # same architecture as nvila
    "qwen": 27,
    "qwen_super": 87,  # Qwen3-VL-235B-A22B-Instruct
}
# Default extra model-family directories included in every run
# (skipped automatically when the directory does not exist, or via --no-defaults).
_SWAP_ANALYSIS = Path("/data/shared/Qwen/experiments/swap_analysis")
DEFAULT_EXTRA_DIRS = [
    _SWAP_ANALYSIS / "results_0223" / "qwen_super",
]
# ---------------------------------------------------------------------------
# JSON helpers
# ---------------------------------------------------------------------------
def get_peak_consistency(json_file: Path) -> dict:
    """Return the maximum per-layer mean value for each spatial dimension.

    The JSON file maps keys of the form "<dim>_L<layer>" to objects that
    carry a "mean" field; for each of horizontal/vertical/distance the
    largest mean across layers is reported, or None when no layer entry
    exists for that dimension.
    """
    with open(json_file) as fh:
        metrics = json.load(fh)
    peaks: dict = {}
    for dim in ("horizontal", "vertical", "distance"):
        prefix = f"{dim}_L"
        layer_means = [entry["mean"] for name, entry in metrics.items() if name.startswith(prefix)]
        peaks[dim] = max(layer_means) if layer_means else None
    return peaks
def get_layer_consistency(json_file: Path, layer: int) -> dict:
    """Sign-corrected consistency for horiz/vert/dist at one specific layer.

    Looks up the "<dim>_L<layer>" entry for each dimension; a dimension
    with no entry at that layer maps to None.
    """
    with open(json_file) as fh:
        metrics = json.load(fh)
    dims = ("horizontal", "vertical", "distance")
    return {
        dim: metrics[f"{dim}_L{layer}"]["mean"] if f"{dim}_L{layer}" in metrics else None
        for dim in dims
    }
def _loc(df: pd.DataFrame, row: str, col: str) -> float:
"""Look up (row, col) with 'under' <-> 'below' aliasing."""
aliases = {"below": "under", "under": "below"}
r = row if row in df.index else aliases.get(row, row)
c = col if col in df.columns else aliases.get(col, col)
if r not in df.index or c not in df.columns:
return float("nan")
return float(df.loc[r, c])
def get_vd_entanglement(csv_dir: Path, scale: str, layer: int) -> float | None:
    """Return VD-entanglement from delta_similarity_{scale}_L{layer}_all_pairs.csv.

    As implemented below, the value is
        VD = (above.far + below.close - above.close - below.far) / 4
    i.e. the sum of the (above, far) and (below, close) cells minus the
    (above, close) and (below, far) cells, divided by four. Cell lookups go
    through _loc(), which tolerates the 'under'/'below' naming difference and
    returns NaN for missing cells; any NaN makes the whole result None.
    Returns None as well when the CSV file does not exist.
    """
    csv_file = csv_dir / f"delta_similarity_{scale}_L{layer}_all_pairs.csv"
    if not csv_file.exists():
        return None
    # First CSV column holds the row labels (above/below/...).
    df = pd.read_csv(csv_file, index_col=0)
    vd = (
        _loc(df, "above", "far") + _loc(df, "below", "close")
        - _loc(df, "above", "close") - _loc(df, "below", "far")
    ) / 4
    return float(vd) if np.isfinite(vd) else None
# ---------------------------------------------------------------------------
# Text file parser
# ---------------------------------------------------------------------------
def parse_accuracy_text(text_file: Path) -> dict:
    """Parse per-model TOTAL consistent/counter accuracies from a results text file.

    The file is a sequence of sections, each introduced by a run of ten or
    more '=' characters followed by a "Model: <name>" line, and containing
    lines like "TOTAL consistent <n> <n> <pct>%".

    Returns:
        dict mapping model_name -> {"consistent": float | None,
                                    "counter": float | None}
        (a value stays None when its TOTAL line is absent).

    Raises:
        FileNotFoundError: if text_file does not exist (read_text raises).
    """
    content = text_file.read_text()
    # Split on section headers like "==========\nModel: <name>"
    sections = re.split(r"={10,}\s*\nModel:\s*", content)
    # One pattern for both kinds of TOTAL line; group(1) picks the kind.
    total_re = re.compile(r"\s*TOTAL\s+(consistent|counter)\s+\d+\s+\d+\s+([\d.]+)%")
    result: dict = {}
    for section in sections[1:]:  # sections[0] is any preamble before the first header
        lines = section.splitlines()
        if not lines:  # guard: empty trailing section would crash on lines[0]
            continue
        model_name = lines[0].strip()
        if not model_name:
            continue
        scores = {"consistent": None, "counter": None}
        for line in lines[1:]:
            m = total_re.match(line)
            if m:
                # Later TOTAL lines of the same kind overwrite earlier ones.
                scores[m.group(1)] = float(m.group(2))
        result[model_name] = scores
    return result
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def fmt(val, fmt_str=".4f", suffix=""):
    """Format *val* with format spec *fmt_str* plus *suffix*; "N/A" for None."""
    if val is None:
        return "N/A"
    return f"{val:{fmt_str}}{suffix}"
def main():
    """Scan result folders, build the 12-column metrics table, print and save it.

    Steps:
      1. Collect model-family directories: subdirectories of the root folder,
         plus built-in defaults (unless --no-defaults), plus --extra-dirs.
      2. Parse EmbSpatial / CV-Bench-3D accuracy text files.
      3. For each sign_corrected_consistency_*_all_pairs.json, gather peak and
         fixed-layer consistencies, VD-entanglement, and benchmark accuracies.
      4. Sort rows by family/scale, print the table, and write a CSV under
         experiments/summarize_metrics/{parent_name}/{root_name}.csv.
    """
    parser = argparse.ArgumentParser(description="Summarize metrics from JSON result files.")
    parser.add_argument(
        "folder",
        nargs="?",
        default="/data/shared/Qwen/experiments/swap_analysis/results_short_answer",
        help="Root folder whose subdirectories are each treated as a model family",
    )
    parser.add_argument(
        "--extra-dirs", "-e",
        nargs="+",
        metavar="DIR",
        default=[],
        help="Additional individual model-family directories to include "
             "(each dir's basename is used as the folder name)",
    )
    parser.add_argument(
        "--no-defaults",
        action="store_true",
        help="Do not automatically include the built-in extra directories (e.g. qwen_super)",
    )
    args = parser.parse_args()
    root = Path(args.folder)
    exp_dir = Path("/data/shared/Qwen/experiments")
    # Collect model-family directories to scan:
    # 1. All subdirectories of the root folder
    # 2. Built-in defaults (e.g. qwen_super) unless --no-defaults
    # 3. Any explicitly provided --extra-dirs
    model_dirs: list[Path] = [d for d in sorted(root.iterdir()) if d.is_dir()]
    extra = [] if args.no_defaults else [d for d in DEFAULT_EXTRA_DIRS if d.is_dir()]
    extra += [Path(d) for d in args.extra_dirs]
    # Avoid duplicates (same resolved path)
    seen = {d.resolve() for d in model_dirs}
    for d in extra:
        if d.resolve() not in seen:
            model_dirs.append(d)
            seen.add(d.resolve())
    # Parse text-file accuracies.
    # NOTE(review): parse_accuracy_text calls read_text(), which raises
    # FileNotFoundError if either file is missing — confirm they always exist.
    embspatial = parse_accuracy_text(exp_dir / "counter_consistent_results_embspatial.txt")
    cvbench3d = parse_accuracy_text(exp_dir / "counter_consistent_result_cvbench3d_depth.txt")
    rows = []
    for folder_dir in model_dirs:
        if not folder_dir.is_dir():
            continue
        folder_name = folder_dir.name
        # Find all sign_corrected_consistency_*_all_pairs.json under this folder
        for json_file in sorted(folder_dir.rglob("sign_corrected_consistency_*_all_pairs.json")):
            # The captured group is the "scale" (e.g. vanilla/80k/2m).
            m = re.match(r"sign_corrected_consistency_(.+)_all_pairs\.json", json_file.name)
            if not m:
                continue
            scale = m.group(1)
            key = (folder_name, scale)
            display = DISPLAY_NAMES.get(key, f"{folder_name} {scale}")
            # Columns 1-3: Peak consistency
            consistency = get_peak_consistency(json_file)
            # Columns 4-8: Layer values and VD-Entanglement at the fixed layer
            # for this model family
            target_layer = FOLDER_TARGET_LAYER.get(folder_name)
            # Layout assumption: CSVs live in a sibling "csv" directory two
            # levels up from the JSON file — verify against the result tree.
            csv_dir = json_file.parent.parent / "csv"
            layer_sc = (
                get_layer_consistency(json_file, target_layer)
                if target_layer is not None
                else {"horizontal": None, "vertical": None, "distance": None}
            )
            vd_entanglement = (
                get_vd_entanglement(csv_dir, scale, target_layer)
                if (target_layer is not None and csv_dir.is_dir())
                else None
            )
            # Columns 9-12: Text-file accuracies
            text_model = TEXT_FILE_MODEL_NAMES.get(key)
            emb_con = emb_ctr = cvb_con = cvb_ctr = None
            if text_model:
                if text_model in embspatial:
                    emb_con = embspatial[text_model]["consistent"]
                    emb_ctr = embspatial[text_model]["counter"]
                if text_model in cvbench3d:
                    cvb_con = cvbench3d[text_model]["consistent"]
                    cvb_ctr = cvbench3d[text_model]["counter"]
            rows.append(dict(
                folder=folder_name, scale=scale, display=display,
                peak_horiz=consistency.get("horizontal"),
                peak_vert=consistency.get("vertical"),
                peak_dist=consistency.get("distance"),
                layer_horiz=layer_sc["horizontal"],
                layer_vert=layer_sc["vertical"],
                layer_dist=layer_sc["distance"],
                vd_entanglement=vd_entanglement,
                emb_con=emb_con, emb_ctr=emb_ctr,
                cvb_con=cvb_con, cvb_ctr=cvb_ctr,
            ))
    # Sort by model family then scale; unknown names sort last (index 99).
    def sort_key(r):
        fi = FOLDER_ORDER.index(r["folder"]) if r["folder"] in FOLDER_ORDER else 99
        si = SCALE_ORDER.index(r["scale"]) if r["scale"] in SCALE_ORDER else 99
        return (fi, si)
    rows.sort(key=sort_key)
    # Build table records (all values pre-formatted to strings via fmt()).
    records = []
    for r in rows:
        layer = FOLDER_TARGET_LAYER.get(r["folder"], "?")
        records.append({
            "Model": r["display"],
            "Peak Horiz": fmt(r["peak_horiz"]),
            "Peak Vert": fmt(r["peak_vert"]),
            "Peak Dist": fmt(r["peak_dist"]),
            "Entanglement Layer": str(layer),
            "Layer Horiz SC": fmt(r["layer_horiz"]),
            "Layer Vert SC": fmt(r["layer_vert"]),
            "Layer Dist SC": fmt(r["layer_dist"]),
            "Entanglement": fmt(r["vd_entanglement"]),
            "EmbSpatial (con)": fmt(r["emb_con"], ".1f", "%"),
            "EmbSpatial (ctr)": fmt(r["emb_ctr"], ".1f", "%"),
            "CVBench3D (con)": fmt(r["cvb_con"], ".1f", "%"),
            "CVBench3D (ctr)": fmt(r["cvb_ctr"], ".1f", "%"),
        })
    if not records:
        print("No data found.")
        return
    df = pd.DataFrame(records)
    print(df.to_string(index=False))
    # Save to CSV: experiments/summarize_metrics/{parent_name}/{folder_name}.csv
    csv_rel = Path(root.parent.name) / (root.name + ".csv")
    csv_path = exp_dir / "summarize_metrics" / csv_rel
    csv_path.parent.mkdir(parents=True, exist_ok=True)
    df.to_csv(csv_path, index=False)
    print(f"\nSaved: {csv_path}")
if __name__ == "__main__":
    main()
# ---------------------------------------------------------------------------
# Legacy version of this script (before the "Layer * SC" columns were added),
# kept commented out for reference below. Safe to delete once obsolete.
# ---------------------------------------------------------------------------
# #!/usr/bin/env python3
# """Extract metrics from JSON files and display as a table.
# Columns:
# 1. Peak consistency - horizontal
# 2. Peak consistency - vertical
# 3. Peak consistency - distance
# 4. VD-Entanglement (at fixed layer per model family)
# 5. EmbSpatial Accuracy - consistent
# 6. EmbSpatial Accuracy - counter
# 7. CV-Bench-3D Accuracy - consistent
# 8. CV-Bench-3D Accuracy - counter
# """
# import argparse
# import json
# import re
# from pathlib import Path
# import numpy as np
# import pandas as pd
# # ---------------------------------------------------------------------------
# # Display name and text-file model name mappings
# # ---------------------------------------------------------------------------
# DISPLAY_NAMES = {
# ("molmo", "vanilla"): "Molmo vanilla",
# ("molmo", "80k"): "Molmo 80k",
# ("molmo", "400k"): "Molmo 400k",
# ("molmo", "800k"): "Molmo 800k",
# ("molmo", "2m"): "Molmo 2M",
# ("nvila", "vanilla"): "NVILA vanilla",
# ("nvila", "80k"): "NVILA 80k",
# ("nvila", "400k"): "NVILA 400k",
# ("nvila", "800k"): "NVILA 800k",
# ("nvila", "2m"): "NVILA 2M",
# ("nvila", "roborefer"): "RoboRefer",
# ("nvila_synthetic", "10pct"): "NVILA 10pct",
# ("nvila_synthetic", "20pct"): "NVILA 20pct",
# ("nvila_synthetic", "30pct"): "NVILA 30pct",
# ("qwen", "vanilla"): "Qwen vanilla",
# ("qwen", "80k"): "Qwen 80k",
# ("qwen", "400k"): "Qwen 400k",
# ("qwen", "800k"): "Qwen 800k",
# ("qwen", "2m"): "Qwen 2M",
# ("qwen_super", "qwen3_235b"): "Qwen3-235B",
# }
# TEXT_FILE_MODEL_NAMES = {
# ("molmo", "vanilla"): "molmo-7B-O-0924",
# ("molmo", "80k"): "molmo-7B-O-0924-data_scale_exp_80k",
# ("molmo", "400k"): "molmo-7B-O-0924-data_scale_exp_400k",
# ("molmo", "800k"): "molmo-7B-O-0924-data_scale_exp_800k",
# ("molmo", "2m"): "molmo-7B-O-0924-data_scale_exp_2m",
# ("nvila", "vanilla"): "NVILA-Lite-2B",
# ("nvila", "80k"): "NVILA-Lite-2B-data-scale-exp-80k",
# ("nvila", "400k"): "NVILA-Lite-2B-data-scale-exp-400k",
# ("nvila", "800k"): "NVILA-Lite-2B-data-scale-exp-800k",
# ("nvila", "2m"): "NVILA-Lite-2B-data-scale-exp-2m",
# ("nvila", "roborefer"): "RoboRefer-2B-SFT",
# ("qwen", "vanilla"): "Qwen2.5-VL-3B-Instruct",
# ("qwen", "80k"): "Qwen2.5-VL-3B-Instruct-data_scale_exp_80k",
# ("qwen", "400k"): "Qwen2.5-VL-3B-Instruct-data_scale_exp_400k",
# ("qwen", "800k"): "Qwen2.5-VL-3B-Instruct-data_scale_exp_800k",
# ("qwen", "2m"): "Qwen2.5-VL-3B-Instruct-data_scale_exp_2m",
# ("qwen_super", "qwen3_235b"): "Qwen3-VL-235B-A22B-Instruct",
# }
# FOLDER_ORDER = ["molmo", "nvila", "nvila_synthetic", "qwen", "qwen_super"]
# SCALE_ORDER = ["vanilla", "roborefer", "10pct", "20pct", "30pct", "80k", "400k", "800k", "2m",
# "qwen3_235b"]
# # Fixed layer index used for entanglement per model family
# FOLDER_TARGET_LAYER = {
# "molmo": 23,
# "nvila": 20,
# "nvila_synthetic": 20, # same architecture as nvila
# "qwen": 27,
# "qwen_super": 87, # Qwen3-VL-235B-A22B-Instruct
# }
# # Default extra model-family directories included in every run
# _SWAP_ANALYSIS = Path("/data/shared/Qwen/experiments/swap_analysis")
# DEFAULT_EXTRA_DIRS = [
# _SWAP_ANALYSIS / "results_0223" / "qwen_super",
# ]
# # ---------------------------------------------------------------------------
# # JSON helpers
# # ---------------------------------------------------------------------------
# def get_peak_consistency(json_file: Path) -> dict:
# """Return peak mean value per dimension (horizontal/vertical/distance)."""
# with open(json_file) as f:
# data = json.load(f)
# result = {}
# for dim in ("horizontal", "vertical", "distance"):
# vals = [v["mean"] for k, v in data.items() if k.startswith(f"{dim}_L")]
# result[dim] = max(vals) if vals else None
# return result
# def _loc(df: pd.DataFrame, row: str, col: str) -> float:
# """Look up (row, col) with 'under' <-> 'below' aliasing."""
# aliases = {"below": "under", "under": "below"}
# r = row if row in df.index else aliases.get(row, row)
# c = col if col in df.columns else aliases.get(col, col)
# if r not in df.index or c not in df.columns:
# return float("nan")
# return float(df.loc[r, c])
# def get_vd_entanglement(csv_dir: Path, scale: str, layer: int) -> float | None:
# """Return VD-entanglement from delta_similarity_{scale}_L{layer}_all_pairs.csv.
# VD = (mean(above-far, below-close) - mean(above-close, below-far)) / 4
# """
# csv_file = csv_dir / f"delta_similarity_{scale}_L{layer}_all_pairs.csv"
# if not csv_file.exists():
# return None
# df = pd.read_csv(csv_file, index_col=0)
# vd = (
# _loc(df, "above", "far") + _loc(df, "below", "close")
# - _loc(df, "above", "close") - _loc(df, "below", "far")
# ) / 4
# return float(vd) if np.isfinite(vd) else None
# # ---------------------------------------------------------------------------
# # Text file parser
# # ---------------------------------------------------------------------------
# def parse_accuracy_text(text_file: Path) -> dict:
# """Parse per-model TOTAL consistent/counter accuracies from a results text file.
# Returns:
# dict mapping model_name -> {"consistent": float, "counter": float}
# """
# content = text_file.read_text()
# # Split on section headers like "Model: <name>"
# sections = re.split(r"={10,}\s*\nModel:\s*", content)
# result = {}
# for section in sections[1:]:
# lines = section.splitlines()
# model_name = lines[0].strip()
# consistent = counter = None
# for line in lines:
# m = re.match(r"\s*TOTAL\s+consistent\s+(\d+)\s+(\d+)\s+([\d.]+)%", line)
# if m:
# consistent = float(m.group(3))
# m = re.match(r"\s*TOTAL\s+counter\s+(\d+)\s+(\d+)\s+([\d.]+)%", line)
# if m:
# counter = float(m.group(3))
# if model_name:
# result[model_name] = {"consistent": consistent, "counter": counter}
# return result
# # ---------------------------------------------------------------------------
# # Main
# # ---------------------------------------------------------------------------
# def fmt(val, fmt_str=".4f", suffix=""):
# return f"{val:{fmt_str}}{suffix}" if val is not None else "N/A"
# def main():
# parser = argparse.ArgumentParser(description="Summarize metrics from JSON result files.")
# parser.add_argument(
# "folder",
# nargs="?",
# default="/data/shared/Qwen/experiments/swap_analysis/results_short_answer",
# help="Root folder whose subdirectories are each treated as a model family",
# )
# parser.add_argument(
# "--extra-dirs", "-e",
# nargs="+",
# metavar="DIR",
# default=[],
# help="Additional individual model-family directories to include "
# "(each dir's basename is used as the folder name)",
# )
# parser.add_argument(
# "--no-defaults",
# action="store_true",
# help="Do not automatically include the built-in extra directories (e.g. qwen_super)",
# )
# args = parser.parse_args()
# root = Path(args.folder)
# exp_dir = Path("/data/shared/Qwen/experiments")
# # Collect model-family directories to scan:
# # 1. All subdirectories of the root folder
# # 2. Built-in defaults (e.g. qwen_super) unless --no-defaults
# # 3. Any explicitly provided --extra-dirs
# model_dirs: list[Path] = [d for d in sorted(root.iterdir()) if d.is_dir()]
# extra = [] if args.no_defaults else [d for d in DEFAULT_EXTRA_DIRS if d.is_dir()]
# extra += [Path(d) for d in args.extra_dirs]
# # Avoid duplicates (same resolved path)
# seen = {d.resolve() for d in model_dirs}
# for d in extra:
# if d.resolve() not in seen:
# model_dirs.append(d)
# seen.add(d.resolve())
# # Parse text-file accuracies
# embspatial = parse_accuracy_text(exp_dir / "counter_consistent_results_embspatial.txt")
# cvbench3d = parse_accuracy_text(exp_dir / "counter_consistent_result_cvbench3d_depth.txt")
# rows = []
# for folder_dir in model_dirs:
# if not folder_dir.is_dir():
# continue
# folder_name = folder_dir.name
# # Find all sign_corrected_consistency_*_all_pairs.json under this folder
# for json_file in sorted(folder_dir.rglob("sign_corrected_consistency_*_all_pairs.json")):
# m = re.match(r"sign_corrected_consistency_(.+)_all_pairs\.json", json_file.name)
# if not m:
# continue
# scale = m.group(1)
# key = (folder_name, scale)
# display = DISPLAY_NAMES.get(key, f"{folder_name} {scale}")
# # 1-3: Peak consistency
# consistency = get_peak_consistency(json_file)
# # 4: VD-Entanglement at the fixed layer for this model family
# target_layer = FOLDER_TARGET_LAYER.get(folder_name)
# csv_dir = json_file.parent.parent / "csv"
# vd_entanglement = (
# get_vd_entanglement(csv_dir, scale, target_layer)
# if (target_layer is not None and csv_dir.is_dir())
# else None
# )
# # 5-8: Text-file accuracies
# text_model = TEXT_FILE_MODEL_NAMES.get(key)
# emb_con = emb_ctr = cvb_con = cvb_ctr = None
# if text_model:
# if text_model in embspatial:
# emb_con = embspatial[text_model]["consistent"]
# emb_ctr = embspatial[text_model]["counter"]
# if text_model in cvbench3d:
# cvb_con = cvbench3d[text_model]["consistent"]
# cvb_ctr = cvbench3d[text_model]["counter"]
# rows.append(dict(
# folder=folder_name, scale=scale, display=display,
# peak_horiz=consistency.get("horizontal"),
# peak_vert=consistency.get("vertical"),
# peak_dist=consistency.get("distance"),
# vd_entanglement=vd_entanglement,
# emb_con=emb_con, emb_ctr=emb_ctr,
# cvb_con=cvb_con, cvb_ctr=cvb_ctr,
# ))
# # Sort by model family then scale
# def sort_key(r):
# fi = FOLDER_ORDER.index(r["folder"]) if r["folder"] in FOLDER_ORDER else 99
# si = SCALE_ORDER.index(r["scale"]) if r["scale"] in SCALE_ORDER else 99
# return (fi, si)
# rows.sort(key=sort_key)
# # Build table records
# records = []
# for r in rows:
# layer = FOLDER_TARGET_LAYER.get(r["folder"], "?")
# records.append({
# "Model": r["display"],
# "Peak Horiz": fmt(r["peak_horiz"]),
# "Peak Vert": fmt(r["peak_vert"]),
# "Peak Dist": fmt(r["peak_dist"]),
# "Entanglement Layer": str(layer),
# "Entanglement": fmt(r["vd_entanglement"]),
# "EmbSpatial (con)": fmt(r["emb_con"], ".1f", "%"),
# "EmbSpatial (ctr)": fmt(r["emb_ctr"], ".1f", "%"),
# "CVBench3D (con)": fmt(r["cvb_con"], ".1f", "%"),
# "CVBench3D (ctr)": fmt(r["cvb_ctr"], ".1f", "%"),
# })
# if not records:
# print("No data found.")
# return
# df = pd.DataFrame(records)
# print(df.to_string(index=False))
# # Save to CSV: experiments/summarize_metrics/{parent_name}/{folder_name}.csv
# csv_rel = Path(root.parent.name) / (root.name + ".csv")
# csv_path = exp_dir / "summarize_metrics" / csv_rel
# csv_path.parent.mkdir(parents=True, exist_ok=True)
# df.to_csv(csv_path, index=False)
# print(f"\nSaved: {csv_path}")
# if __name__ == "__main__":
# main()