# miscellaneous/aggregate_results.py
# Uploaded by Rakancorle11 — "Upload supp/ and root scripts" (commit 203a7fb, verified).
# NOTE: the lines above were web-page residue from the file listing; kept here as
# comments so the script remains valid Python.
#!/usr/bin/env python3
"""Aggregate mute/swap accuracy across all models in /home/ubuntu/eval_results.
For each (model, prompt_mode) we report:
mute : detection rate on muted inputs (positive = muted)
mute_org : detection rate on original audio (positive = synced)
swap : detection rate on swapped inputs (positive = mismatched)
swap_org : detection rate on original audio (positive = synced)
mute_all : combined accuracy = (correct_mute + correct_mute_org) / (n_mute + n_mute_org)
swap_all : combined accuracy = (correct_swap + correct_swap_org) / (n_swap + n_swap_org)
When sample sizes match, the combined number equals the simple mean of the two rates.
"""
from __future__ import annotations
import json
import re
from pathlib import Path
# Root directory containing all per-run evaluation result folders.
ROOT = Path("/home/ubuntu/eval_results")
# Each task -> (list of root subpaths to scan, dirname prefix to strip, accuracy keys)
# Accuracy keys are tried in order; the first key present in a metrics file wins.
TASKS = {
    "mute": (["mute", "gemini_mute_sync_swap/mute"], "mute_",
             ["mute_detection_rate"]),
    "mute_original": (["mute_original"], "mute_",
                      ["audio_present_detection_rate", "describes_audio_rate"]),
    "swap": (["swap", "gemini_mute_sync_swap/swap"], "swap_",
             ["mismatch_detection_rate"]),
    "swap_original": (["swap_original"], "swap_",
                      ["match_detection_rate", "correct_synced_rate"]),
}
# Suffixes attached to model names to mark which input variant was used.
# parse_dir strips at most one of these from the end of the model name.
INPUT_SUFFIXES = ("_audioMuxed", "_originalsControl", "_visualOnly")
# Matches "<model>_prompt<Mode>" after the task prefix has been removed.
# The greedy <model> group means the LAST "_prompt" token splits model from mode.
PROMPT_RE = re.compile(r"^(?P<model>.+)_prompt(?P<mode>[A-Za-z0-9]+)$")
def parse_dir(name: str, prefix: str) -> tuple[str, str] | None:
    """Split a result-directory name into ``(model, prompt_mode)``.

    Returns None when *name* lacks the expected *prefix* or the
    ``..._prompt<Mode>`` tail.  A known input-variant suffix (at most
    one) and a leading ``gemini_`` marker are stripped from the model.
    """
    if not name.startswith(prefix):
        return None
    match = PROMPT_RE.match(name[len(prefix):])
    if match is None:
        return None
    model = match.group("model").rstrip("_")
    # Drop the input-variant suffix, if any (e.g. "_visualOnly").
    variant = next((s for s in INPUT_SUFFIXES if model.endswith(s)), None)
    if variant is not None:
        model = model[: -len(variant)]
    # Normalise away the vendor prefix so gemini runs group with the rest.
    if model.startswith("gemini_"):
        model = model[len("gemini_"):]
    return model, match.group("mode")
def load_metric(sub: Path, keys: list[str]) -> tuple[float, int] | None:
    """Return ``(rate, total_samples)`` for the first matching key in *sub*.

    Prefers metrics.rejudged.json (re-judged, more accurate); falls back
    to metrics.json.  Returns None when neither file yields a usable
    metric.

    Fixes over the previous version:
    - a metrics file containing an accuracy key but no "total_samples"
      previously raised KeyError; such a malformed file is now skipped.
    - any OSError (e.g. IsADirectoryError, PermissionError) now falls
      through to the next candidate file instead of crashing; before,
      only FileNotFoundError was handled.
    """
    for fname in ("metrics.rejudged.json", "metrics.json"):
        path = sub / fname
        try:
            data = json.loads(path.read_text())
        except (OSError, json.JSONDecodeError):
            # Missing, unreadable, or corrupt file: try the next candidate.
            continue
        for k in keys:
            # Require the sample count too; a rate without it is malformed.
            if k in data and "total_samples" in data:
                return float(data[k]), int(data["total_samples"])
    return None
def collect() -> dict[tuple[str, str], dict[str, tuple[float, int]]]:
    """Walk every task directory under ROOT and gather metrics.

    Returns a mapping ``(model, prompt_mode) -> {task: (rate, n)}``.
    Directories whose names do not parse, or that lack a readable
    metrics file, are silently skipped.
    """
    rows: dict[tuple[str, str], dict[str, tuple[float, int]]] = {}
    for task, (subpaths, prefix, keys) in TASKS.items():
        for task_dir in (ROOT / sp for sp in subpaths):
            if not task_dir.is_dir():
                continue
            for entry in sorted(task_dir.iterdir()):
                if not entry.is_dir():
                    continue
                parsed = parse_dir(entry.name, prefix)
                if parsed is None:
                    continue
                metric = load_metric(entry, keys)
                if metric is not None:
                    rows.setdefault(parsed, {})[task] = metric
    return rows
def fmt(rate_n: tuple[float, int] | None) -> str:
    """Render ``(rate, n)`` as e.g. `` 42.0% (12)``; an em-dash when absent."""
    if rate_n is None:
        return " — "
    pct, count = rate_n
    return f"{pct * 100:5.1f}% ({count})"
def combined(a: tuple[float, int] | None,
             b: tuple[float, int] | None) -> tuple[float, int] | None:
    """Pool two ``(rate, n)`` pairs into a sample-weighted combined rate.

    Returns None when either side is missing, or when the pooled sample
    count is zero (previously this case raised ZeroDivisionError).
    """
    if a is None or b is None:
        return None
    ra, na = a
    rb, nb = b
    total = na + nb
    if total == 0:
        # Both runs empty: there is no meaningful combined rate.
        return None
    return (ra * na + rb * nb) / total, total
def main() -> None:
    """Print one aligned row per (model, prompt_mode) with all six metrics."""
    rows = collect()
    column_names = ("mute", "mute_org", "mute_all", "swap", "swap_org", "swap_all")
    header = f"{'Model (prompt)':<70} " + " ".join(f"{c:>13}" for c in column_names)
    print(header)
    print("-" * len(header))
    for (model, mode), entries in sorted(rows.items()):
        mute = entries.get("mute")
        mute_org = entries.get("mute_original")
        swap = entries.get("swap")
        swap_org = entries.get("swap_original")
        cells = (
            fmt(mute),
            fmt(mute_org),
            fmt(combined(mute, mute_org)),
            fmt(swap),
            fmt(swap_org),
            fmt(combined(swap, swap_org)),
        )
        label = f"{model} [{mode}]"
        print(label.ljust(70) + " " + " ".join(f"{c:>13}" for c in cells))
if __name__ == "__main__":
    main()