Upload supp/ and root scripts
- aggregate_results.py +134 -0
- analyze_my_model.py +385 -0
- build_candidate_pool.py +128 -0
- case_study_gemini_mute_neutral.py +126 -0
- case_study_gemini_sync.py +165 -0
- case_study_gpt55.py +582 -0
- compute_nemotron_or_metrics.py +251 -0
- eval_nemotron_openrouter.py +780 -0
- eval_nemotron_openrouter_originals_control.py +304 -0
- eval_originals_control.py +523 -0
- finalize_case_study.py +153 -0
- find_case_study.py +140 -0
- probe_gpt55_audio.py +70 -0
- probe_gpt55_one_sample.py +30 -0
- prune_broken_rows.py +65 -0
- rejudge_mute_neutral.py +209 -0
- rejudge_swap_neutral.py +191 -0
- supp/eval_daily_omni.py +586 -0
- supp/eval_dpo_sync.py +826 -0
- supp/eval_lvbench.py +632 -0
- supp/eval_vggsoundsync.py +777 -0
- supp/eval_videomme.py +564 -0
- supp/eval_worldsense.py +591 -0
- sync_split_acc.py +176 -0
aggregate_results.py
ADDED
@@ -0,0 +1,134 @@
#!/usr/bin/env python3
"""Aggregate mute/swap accuracy across all models in /home/ubuntu/eval_results.

For each (model, prompt_mode) we report:
  mute     : detection rate on muted inputs (positive = muted)
  mute_org : detection rate on original audio (positive = synced)
  swap     : detection rate on swapped inputs (positive = mismatched)
  swap_org : detection rate on original audio (positive = synced)
  mute_all : combined accuracy = (correct_mute + correct_mute_org) / (n_mute + n_mute_org)
  swap_all : combined accuracy = (correct_swap + correct_swap_org) / (n_swap + n_swap_org)

When sample sizes match, the combined number equals the simple mean of the two rates.
"""

from __future__ import annotations

import json
import re
from pathlib import Path

ROOT = Path("/home/ubuntu/eval_results")

# Each task -> (list of root subpaths to scan, dirname prefix to strip, accuracy keys)
TASKS = {
    "mute": (["mute", "gemini_mute_sync_swap/mute"], "mute_",
             ["mute_detection_rate"]),
    "mute_original": (["mute_original"], "mute_",
                      ["audio_present_detection_rate", "describes_audio_rate"]),
    "swap": (["swap", "gemini_mute_sync_swap/swap"], "swap_",
             ["mismatch_detection_rate"]),
    "swap_original": (["swap_original"], "swap_",
                      ["match_detection_rate", "correct_synced_rate"]),
}

# Suffixes attached to model names to mark which input variant was used
INPUT_SUFFIXES = ("_audioMuxed", "_originalsControl", "_visualOnly")

PROMPT_RE = re.compile(r"^(?P<model>.+)_prompt(?P<mode>[A-Za-z0-9]+)$")


def parse_dir(name: str, prefix: str) -> tuple[str, str] | None:
    """Return (model, prompt_mode) parsed from a result directory name."""
    if not name.startswith(prefix):
        return None
    rest = name[len(prefix):]
    m = PROMPT_RE.match(rest)
    if not m:
        return None
    model = m.group("model").rstrip("_")
    for suf in INPUT_SUFFIXES:
        if model.endswith(suf):
            model = model[: -len(suf)]
            break
    if model.startswith("gemini_"):
        model = model[len("gemini_"):]
    return model, m.group("mode")


def load_metric(sub: Path, keys: list[str]) -> tuple[float, int] | None:
    """Prefer metrics.rejudged.json (re-judged, more accurate); fall back to metrics.json."""
    for fname in ("metrics.rejudged.json", "metrics.json"):
        path = sub / fname
        try:
            data = json.loads(path.read_text())
        except (FileNotFoundError, json.JSONDecodeError):
            continue
        for k in keys:
            if k in data:
                return float(data[k]), int(data["total_samples"])
    return None


def collect() -> dict[tuple[str, str], dict[str, tuple[float, int]]]:
    rows: dict[tuple[str, str], dict[str, tuple[float, int]]] = {}
    for task, (subpaths, prefix, keys) in TASKS.items():
        for subpath in subpaths:
            task_dir = ROOT / subpath
            if not task_dir.is_dir():
                continue
            for sub in sorted(task_dir.iterdir()):
                if not sub.is_dir():
                    continue
                parsed = parse_dir(sub.name, prefix)
                if parsed is None:
                    continue
                metric = load_metric(sub, keys)
                if metric is None:
                    continue
                rows.setdefault(parsed, {})[task] = metric
    return rows


def fmt(rate_n: tuple[float, int] | None) -> str:
    if rate_n is None:
        return " — "
    rate, n = rate_n
    return f"{rate * 100:5.1f}% ({n})"


def combined(a: tuple[float, int] | None,
             b: tuple[float, int] | None) -> tuple[float, int] | None:
    if a is None or b is None:
        return None
    ra, na = a
    rb, nb = b
    return (ra * na + rb * nb) / (na + nb), na + nb


def main() -> None:
    rows = collect()

    header = (
        f"{'Model (prompt)':<70} "
        f"{'mute':>13} {'mute_org':>13} {'mute_all':>13} "
        f"{'swap':>13} {'swap_org':>13} {'swap_all':>13}"
    )
    print(header)
    print("-" * len(header))

    for (model, mode), entries in sorted(rows.items()):
        mute = entries.get("mute")
        mute_o = entries.get("mute_original")
        swap = entries.get("swap")
        swap_o = entries.get("swap_original")
        label = f"{model} [{mode}]"
        print(
            f"{label:<70} "
            f"{fmt(mute):>13} {fmt(mute_o):>13} {fmt(combined(mute, mute_o)):>13} "
            f"{fmt(swap):>13} {fmt(swap_o):>13} {fmt(combined(swap, swap_o)):>13}"
        )


if __name__ == "__main__":
    main()
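A quick numeric check of the pooling rule in combined() and the docstring's claim about equal sample sizes; the rates and counts below are illustrative only, not real eval numbers:

# Pooled accuracy is a sample-count-weighted mean of the two rates.
mute, mute_org = (0.80, 100), (0.60, 50)
pooled = (0.80 * 100 + 0.60 * 50) / (100 + 50)   # = 0.733...
# With equal counts, e.g. (0.80, 100) and (0.60, 100), the pooled value is
# (80 + 60) / 200 = 0.70, i.e. exactly the simple mean of the two rates.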
analyze_my_model.py
ADDED
@@ -0,0 +1,385 @@
"""Generate paper-quality figures comparing the user's fine-tuned model against
two baselines (Qwen3-Omni vanilla, MiniCPM-o-4.5) on the Shift (sync) and
VGGSoundSync benchmarks. All figures use DejaVu Serif, no titles, 300 dpi
PDF + PNG, paper-friendly fonttype=42.
"""

import json
from pathlib import Path
from typing import Dict, List

import matplotlib.pyplot as plt
import numpy as np

# ---------------------------------------------------------------------------
# Style
# ---------------------------------------------------------------------------
plt.rcParams.update({
    "font.family": "DejaVu Serif",
    "font.serif": ["DejaVu Serif"],
    "mathtext.fontset": "dejavuserif",
    "font.size": 11,
    "axes.titlesize": 12,
    "axes.titleweight": "bold",
    "axes.labelsize": 11,
    "xtick.labelsize": 10.5,
    "ytick.labelsize": 10.5,
    "legend.fontsize": 10,
    "pdf.fonttype": 42,
    "ps.fonttype": 42,
    "savefig.dpi": 300,
    "savefig.bbox": "tight",
    "axes.spines.top": False,
    "axes.spines.right": False,
})

# ---------------------------------------------------------------------------
# Data sources
# ---------------------------------------------------------------------------
ROOT = Path("/home/ubuntu/eval_results/paper_shift_backup")
OUT = Path("/home/ubuntu/figs/my_model_vs_baselines")
OUT.mkdir(parents=True, exist_ok=True)

MODELS = [
    # (display, sync_dir, vggsync_dir, color)
    ("Qwen3-Omni", ROOT/"sync/sync_qwen3omni_vanilla",
     ROOT/"vggsoundsync/vggsync_freetext_vanilla_qwen3omni_3k",
     "#5b8def"),
    ("MiniCPM-o", ROOT/"minicpmo_sync",
     ROOT/"minicpmo_vggsync",
     "#f4a04a"),
    ("Ours", ROOT/"sync/sync_sft_dpo_mdpo_fin_avmcqa_longform",
     ROOT/"vggsoundsync/vggsync_freetext_sft_dpo_mdpo_fin_avmcqa_longform_3k",
     "#2fa363"),
]

def load_metrics(p: Path) -> dict:
    return json.loads((p / "metrics.json").read_text())

def load_jsonl(p: Path):
    f = p / "eval_results.jsonl"
    if not f.exists():
        return []
    return [json.loads(l) for l in f.open() if l.strip()]


# ===========================================================================
# Figure 1 — Headline three-metric bar chart (2 panels: Shift + VGGSoundSync)
# ===========================================================================
def fig_headline():
    metric_keys = [
        ("three_class_accuracy", "3-class Acc."),
        ("sync_desync_accuracy", "Sync/Desync Acc."),
        ("direction_accuracy_on_desync", "Direction Acc."),
    ]
    panels = [
        ("Shift", [load_metrics(m[1]) for m in MODELS]),
        ("VGGSoundSync", [load_metrics(m[2]) for m in MODELS]),
    ]

    fig, axes = plt.subplots(1, 2, figsize=(11.0, 4.2),
                             gridspec_kw={"wspace": 0.22})
    n_models = len(MODELS)
    n_metric = len(metric_keys)
    bar_w = 0.24

    for ax, (title, mets) in zip(axes, panels):
        x = np.arange(n_metric)
        for i, (m, met) in enumerate(zip(MODELS, mets)):
            offsets = (i - (n_models - 1) / 2) * bar_w
            heights = [met.get(k, 0.0) for k, _ in metric_keys]
            bars = ax.bar(x + offsets, heights, bar_w,
                          color=m[3], edgecolor="white", linewidth=0.6,
                          label=m[0], zorder=3)
            for b, v in zip(bars, heights):
                ax.text(b.get_x() + b.get_width() / 2, v + 0.012,
                        f"{v*100:.1f}", ha="center", va="bottom",
                        fontsize=9, color="#1a1a1a")
        ax.set_xticks(x)
        ax.set_xticklabels([lbl for _, lbl in metric_keys])
        ax.set_ylim(0, 1.05)
        ax.set_yticks(np.arange(0, 1.01, 0.2))
        ax.set_yticklabels([f"{int(v*100)}" for v in np.arange(0, 1.01, 0.2)])
        ax.set_ylabel("Accuracy (%)" if ax is axes[0] else "")
        ax.set_title(title, pad=6)
        ax.grid(axis="y", color="#e0e0e0", lw=0.6, zorder=0)
        ax.set_axisbelow(True)
    handles, labels = axes[0].get_legend_handles_labels()
    fig.legend(handles, labels, loc="upper center",
               bbox_to_anchor=(0.5, 1.04), ncol=n_models,
               frameon=False, fontsize=11)
    fig.tight_layout(rect=(0, 0, 1, 0.95))
    fig.savefig(OUT/"fig1_headline.pdf"); fig.savefig(OUT/"fig1_headline.png")
    plt.close(fig)


# ===========================================================================
# Figure 2 — Per-direction accuracy on Shift (synced / delay / early)
# ===========================================================================
def fig_per_direction():
    cats = [("synced_accuracy", "Synced (Orig.)"),
            ("delay_accuracy", "Delay"),
            ("early_accuracy", "Early")]
    fig, ax = plt.subplots(figsize=(7.4, 4.0))
    n_models = len(MODELS)
    bar_w = 0.25
    x = np.arange(len(cats))
    for i, (name, sync_dir, _, color) in enumerate(MODELS):
        m = load_metrics(sync_dir)
        pc = m["per_category"]
        heights = [pc[k] for k, _ in cats]
        offsets = (i - (n_models - 1) / 2) * bar_w
        bars = ax.bar(x + offsets, heights, bar_w, color=color,
                      edgecolor="white", linewidth=0.6, label=name, zorder=3)
        for b, v in zip(bars, heights):
            ax.text(b.get_x() + b.get_width()/2, v + 0.012,
                    f"{v*100:.1f}", ha="center", va="bottom",
                    fontsize=9, color="#1a1a1a")
    ax.set_xticks(x); ax.set_xticklabels([lbl for _, lbl in cats])
    ax.set_ylim(0, 1.10)
    ax.set_yticks(np.arange(0, 1.01, 0.2))
    ax.set_yticklabels([f"{int(v*100)}" for v in np.arange(0, 1.01, 0.2)])
    ax.set_ylabel("Accuracy (%)")
    ax.grid(axis="y", color="#e0e0e0", lw=0.6, zorder=0); ax.set_axisbelow(True)
    ax.legend(loc="upper right", frameon=False)
    fig.tight_layout()
    fig.savefig(OUT/"fig2_per_direction.pdf"); fig.savefig(OUT/"fig2_per_direction.png")
    plt.close(fig)


# ===========================================================================
# Figure 3 — Per-difficulty line chart on VGGSoundSync
# ===========================================================================
def fig_per_difficulty():
    diff_order = ["synced", "very_easy", "easy", "medium", "hard"]
    diff_labels = ["Orig.\n(synced)", "Very Easy\n(2.0–2.5s)",
                   "Easy\n(1.5–2.0s)", "Medium\n(1.0–1.5s)",
                   "Hard\n(0.5–1.0s)"]
    fig, ax = plt.subplots(figsize=(8.2, 4.4))
    x = np.arange(len(diff_order))
    for name, _, vggsync_dir, color in MODELS:
        m = load_metrics(vggsync_dir)
        pd_ = m["per_difficulty"]
        ys = [pd_[d]["accuracy"] for d in diff_order]
        ax.plot(x, ys, marker="o", linewidth=2.2, markersize=8.5,
                color=color, label=name, markeredgecolor="white",
                markeredgewidth=1.2, zorder=3)
        for xi, v in zip(x, ys):
            ax.text(xi, v + 0.025, f"{v*100:.0f}", ha="center", va="bottom",
                    fontsize=9, color=color)
    ax.set_xticks(x); ax.set_xticklabels(diff_labels)
    ax.set_ylim(0, 1.0)
    ax.set_yticks(np.arange(0, 1.01, 0.2))
    ax.set_yticklabels([f"{int(v*100)}" for v in np.arange(0, 1.01, 0.2)])
    ax.set_ylabel("Accuracy (%)")
    ax.set_xlabel("VGGSoundSync difficulty bucket (offset magnitude shown)")
    ax.grid(axis="y", color="#e0e0e0", lw=0.6, zorder=0); ax.set_axisbelow(True)
    ax.legend(loc="upper right", frameon=False)
    fig.tight_layout()
    fig.savefig(OUT/"fig3_per_difficulty.pdf"); fig.savefig(OUT/"fig3_per_difficulty.png")
    plt.close(fig)


# ===========================================================================
# Figure 4 — Offset CDF on Shift and VGGSoundSync (two panels)
# ===========================================================================
def _abs_offset_errors(rows):
    """Extract |pred_offset - gt_offset| only on samples where the model
    actually claimed a desync (i.e. produced a non-zero offset)."""
    out = []
    for r in rows:
        gt_off = r.get("gt_offset_sec")
        if gt_off is None:
            continue
        # gt_synced=True samples have no offset to evaluate
        if r.get("gt_synced") is True:
            continue
        pred_off = r.get("pred_offset_sec")
        if pred_off is None:
            continue
        # only include evaluations where the model also said it was offset
        if r.get("pred_synced") is True:
            continue
        out.append(abs(float(pred_off) - float(gt_off)))
    return np.array(out, dtype=float)


def fig_offset_cdf():
    fig, axes = plt.subplots(1, 2, figsize=(11.0, 4.2),
                             gridspec_kw={"wspace": 0.22})
    panels = [
        ("Shift", [m[1] for m in MODELS]),
        ("VGGSoundSync", [m[2] for m in MODELS]),
    ]
    thresholds = np.linspace(0, 3.0, 121)
    for ax, (title, dirs) in zip(axes, panels):
        for (name, _, _, color), d in zip(MODELS, dirs):
            rows = load_jsonl(d)
            errs = _abs_offset_errors(rows)
            if len(errs) == 0:
                continue
            # CDF over ALL desync samples (denominator = total desync gt count)
            n_desync = sum(1 for r in rows
                           if r.get("gt_synced") is False
                           or r.get("gt_direction") in ("delay", "early"))
            ys = [(errs <= t).sum() / max(n_desync, 1) for t in thresholds]
            ax.plot(thresholds, ys, color=color, linewidth=2.4,
                    label=name, zorder=3)
            # mark the value at 0.5s (a key threshold) with a dot
            t_mark = 0.5
            y_mark = (errs <= t_mark).sum() / max(n_desync, 1)
            ax.scatter([t_mark], [y_mark], s=70, color=color,
                       edgecolor="white", linewidth=1.2, zorder=4)
            ax.text(t_mark + 0.05, y_mark + 0.018,
                    f"{y_mark*100:.0f}%", color=color, fontsize=9,
                    weight="bold")
        ax.set_xlim(0, 3.0)
        ax.set_ylim(0, 1.0)
        ax.set_yticks(np.arange(0, 1.01, 0.2))
        ax.set_yticklabels([f"{int(v*100)}" for v in np.arange(0, 1.01, 0.2)])
        ax.set_xlabel("Offset error tolerance |Δ| (sec)")
        ax.set_ylabel("Fraction of desync samples within tolerance (%)"
                      if ax is axes[0] else "")
        ax.set_title(title, pad=6)
        ax.axvline(0.5, color="#bbb", linestyle=":", linewidth=0.8, zorder=1)
        ax.grid(axis="both", color="#e8e8e8", lw=0.5, zorder=0)
        ax.set_axisbelow(True)
    handles, labels = axes[0].get_legend_handles_labels()
    fig.legend(handles, labels, loc="upper center",
               bbox_to_anchor=(0.5, 1.04), ncol=len(MODELS),
               frameon=False, fontsize=11)
    fig.tight_layout(rect=(0, 0, 1, 0.95))
    fig.savefig(OUT/"fig4_offset_cdf.pdf"); fig.savefig(OUT/"fig4_offset_cdf.png")
    plt.close(fig)


# ===========================================================================
# Figure 5 — Per-class scatter on VGGSoundSync (vanilla vs ours)
# ===========================================================================
def fig_per_class_scatter():
    vanilla = load_metrics(MODELS[0][2])["per_class"]
    ours = load_metrics(MODELS[2][2])["per_class"]
    minicpm = load_metrics(MODELS[1][2])["per_class"]
    classes = sorted(set(vanilla) & set(ours) & set(minicpm))

    fig, axes = plt.subplots(1, 2, figsize=(11.0, 5.2),
                             gridspec_kw={"wspace": 0.25})

    panels = [
        (axes[0], "vs Qwen3-Omni", vanilla, MODELS[0][3]),
        (axes[1], "vs MiniCPM-o", minicpm, MODELS[1][3]),
    ]
    for ax, sub_title, baseline_d, base_color in panels:
        bx = np.array([baseline_d[c]["accuracy"] for c in classes])
        oy = np.array([ours[c]["accuracy"] for c in classes])
        sizes = np.array([vanilla[c]["count"] for c in classes])
        # Use lighter "base_color" mix for the dots; gradient by sample count
        ax.scatter(bx, oy, s=10 + sizes * 1.5,
                   color="#2fa363", alpha=0.65,
                   edgecolor="white", linewidth=0.5, zorder=3)
        ax.plot([0, 1], [0, 1], ls="--", color="#888", lw=1.0, zorder=2)
        # Highlight a few extreme points with class names
        deltas = oy - bx
        top_imp = np.argsort(-deltas)[:5]
        bot_imp = np.argsort(deltas)[:3]
        for idx in list(top_imp) + list(bot_imp):
            ax.annotate(classes[idx], (bx[idx], oy[idx]),
                        xytext=(4, 4), textcoords="offset points",
                        fontsize=7.5, color="#444", style="italic")
        # Stats annotation
        win = (deltas > 0).sum()
        ax.text(0.02, 0.97,
                f"Ours wins on {win}/{len(classes)} classes\n"
                f"Mean Δ = +{deltas.mean()*100:.1f} pts",
                transform=ax.transAxes, va="top", ha="left",
                fontsize=10, color="#1a1a1a",
                bbox=dict(facecolor="#fefef0", edgecolor="#888",
                          boxstyle="round,pad=0.35"))
        ax.set_xlim(0, 1.0); ax.set_ylim(0, 1.0)
        ax.set_xticks(np.arange(0, 1.01, 0.2))
        ax.set_yticks(np.arange(0, 1.01, 0.2))
        ax.set_xticklabels([f"{int(v*100)}" for v in np.arange(0, 1.01, 0.2)])
        ax.set_yticklabels([f"{int(v*100)}" for v in np.arange(0, 1.01, 0.2)])
        ax.set_xlabel(f"Baseline accuracy {sub_title.split()[1]} (%)")
        ax.set_ylabel("Ours accuracy (%)" if ax is axes[0] else "")
        ax.set_title(sub_title, pad=6)
        ax.grid(color="#eee", lw=0.5, zorder=0)
        ax.set_axisbelow(True)
    fig.tight_layout()
    fig.savefig(OUT/"fig5_per_class_scatter.pdf")
    fig.savefig(OUT/"fig5_per_class_scatter.png")
    plt.close(fig)


# ===========================================================================
# Figure 6 — Within-tolerance bar grid (offset attribution success)
# ===========================================================================
def fig_within_thresholds():
    """Show the % of desync samples for which the predicted offset is within a
    fixed tolerance, computed against the FULL desync sample count (not just
    samples where the model attempted an offset)."""
    fig, axes = plt.subplots(1, 2, figsize=(11.0, 4.2),
                             gridspec_kw={"wspace": 0.22})
    sync_thresh = ["offset_within_0.5s", "offset_within_1.0s"]
    sync_labels = ["≤ 0.5 s", "≤ 1.0 s"]
    vggsync_thresh = ["offset_within_0.2s", "offset_within_0.5s"]
    vggsync_labels = ["≤ 0.2 s", "≤ 0.5 s"]
    panels = [
        ("Shift", sync_thresh, sync_labels, 1),
        ("VGGSoundSync", vggsync_thresh, vggsync_labels, 2),
    ]
    bar_w = 0.25

    for ax, (title, ks, labs, dir_idx) in zip(axes, panels):
        x = np.arange(len(ks))
        # denominator: total desync samples (= total - synced_count)
        for i, (name, sd, vd, color) in enumerate(MODELS):
            m = load_metrics(sd if dir_idx == 1 else vd)
            if dir_idx == 1:
                n_synced = m["per_category"]["synced_count"]
            else:
                n_synced = m["per_difficulty"]["synced"]["count"]
            n_desync = m["total_samples"] - n_synced
            heights = [m.get(k, 0) / max(n_desync, 1) for k in ks]
            offsets = (i - (len(MODELS) - 1) / 2) * bar_w
            bars = ax.bar(x + offsets, heights, bar_w, color=color,
                          edgecolor="white", linewidth=0.6,
                          label=name, zorder=3)
            for b, v in zip(bars, heights):
                ax.text(b.get_x() + b.get_width()/2, v + 0.012,
                        f"{v*100:.1f}", ha="center", va="bottom",
                        fontsize=9, color="#1a1a1a")
        ax.set_xticks(x); ax.set_xticklabels(labs)
        ax.set_xlabel("Offset tolerance")
        ax.set_ylim(0, max(0.4, ax.get_ylim()[1]) + 0.05)
        ax.set_ylabel("Desync samples within tolerance (%)"
                      if ax is axes[0] else "")
        ax.set_title(title, pad=6)
        ax.grid(axis="y", color="#e0e0e0", lw=0.6, zorder=0)
        ax.set_axisbelow(True)
        # convert ticks to %
        yticks = ax.get_yticks()
        ax.set_yticklabels([f"{int(v*100)}" for v in yticks])
    handles, labels = axes[0].get_legend_handles_labels()
    fig.legend(handles, labels, loc="upper center",
               bbox_to_anchor=(0.5, 1.04), ncol=len(MODELS),
               frameon=False, fontsize=11)
    fig.tight_layout(rect=(0, 0, 1, 0.95))
    fig.savefig(OUT/"fig6_within_thresholds.pdf")
    fig.savefig(OUT/"fig6_within_thresholds.png")
    plt.close(fig)


# ===========================================================================
# Run all
# ===========================================================================
if __name__ == "__main__":
    fig_headline()
    fig_per_direction()
    fig_per_difficulty()
    fig_offset_cdf()
    fig_per_class_scatter()
    fig_within_thresholds()
    print("All figures saved to:", OUT)
    for p in sorted(OUT.glob("*.png")):
        print(" ", p.name)
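_abs_offset_errors() above only consumes four per-row fields of eval_results.jsonl; a minimal sketch of rows it would accept (field names taken from the function, values made up for illustration):

rows = [
    # counted: desync ground truth, and the model also claimed an offset -> |0.6 - 1.0| = 0.4
    {"gt_synced": False, "gt_offset_sec": 1.0, "pred_synced": False, "pred_offset_sec": 0.6},
    # skipped: the model called the clip synced, so its offset is not scored
    {"gt_synced": False, "gt_offset_sec": 1.0, "pred_synced": True, "pred_offset_sec": 0.0},
    # skipped: synced ground truth has no offset to evaluate
    {"gt_synced": True, "gt_offset_sec": 0.0, "pred_synced": True, "pred_offset_sec": 0.0},
]
# _abs_offset_errors(rows) -> array([0.4])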
build_candidate_pool.py
ADDED
@@ -0,0 +1,128 @@
"""Build the candidate pool for case study (BEFORE re-running Gemini).

Selection rule (only the trustworthy signals):
  - Qwen failed mute (raw_output == 'yes', i.e. real hallucination)
  - Qwen failed swap (raw_output == 'yes', real false-match)
  - Qwen failed BOTH delay AND early sync
  - Gemini failed swap (raw_output == 'yes', real false-match)

Skipped on purpose:
  - Gemini mute direct: all wrong rows are empty raw_output -> not real signal
  - Gemini sync: we will re-run this on the candidate pool

Writes pool to /home/ubuntu/case_study_pool.jsonl
"""

import json
import re
from pathlib import Path

GEMINI_SWAP = "/home/ubuntu/eval_results/gemini_mute_sync_swap/swap/swap_gemini_gemini-3.1-pro-preview__promptDirect/eval_results.jsonl"
QWEN_MUTE = "/home/ubuntu/eval_results/mute/mute_qwen3omni_vanilla_promptDirect/eval_results.jsonl"
QWEN_SWAP = "/home/ubuntu/eval_results/swap/swap_Qwen_Qwen3-Omni-30B-A3B-Instruct_promptDirect/eval_results.jsonl"
QWEN_SYNC = "/home/ubuntu/eval_results/sync/sync_qwen3omni_vanilla/eval_results.jsonl"

OUT_PATH = Path("/home/ubuntu/case_study_pool.jsonl")

SYNC_SUFFIX_RE = re.compile(r"_(delay|early)_\d+(?:\.\d+)?s(?=\.mp4$)")
DIRECTION_RE = re.compile(r"_(delay|early)_(\d+(?:\.\d+)?)s\.mp4$")


def base_of(name): return SYNC_SUFFIX_RE.sub("", name)
def direction_of(name):
    m = DIRECTION_RE.search(name)
    return m.group(1) if m else None


def load_jsonl(path):
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)


def real_yes_wrong(jsonl_path):
    """Wrong rows whose raw_output is actually 'yes' (drops empty/ambiguous artifacts)."""
    out = set()
    for r in load_jsonl(jsonl_path):
        if r.get("correct", True):
            continue
        raw = (r.get("raw_output") or "").strip().lower()
        if raw.startswith("yes"):
            out.add(r["video"])
    return out


def sync_is_wrong(row):
    gt_synced = bool(row.get("gt_synced"))
    pred_synced = bool(row.get("pred_synced"))
    if gt_synced != pred_synced:
        return True
    if not gt_synced and row.get("pred_direction") != row.get("gt_direction"):
        return True
    return False


def collect_qwen_sync_intervention_failures(jsonl_path):
    by_base = {}
    for r in load_jsonl(jsonl_path):
        v = r["video"]
        d = direction_of(v)
        if d is None:
            continue  # skip the synced original
        if sync_is_wrong(r):
            by_base.setdefault(base_of(v), {})[d] = {
                "variant": v,
                "gt": (r.get("gt_direction"), r.get("gt_offset_sec")),
                "pred_synced": r.get("pred_synced"),
                "pred": (r.get("pred_direction"), r.get("pred_offset_sec")),
            }
    return by_base


def main():
    qwen_mute_yes = real_yes_wrong(QWEN_MUTE)
    qwen_swap_yes = real_yes_wrong(QWEN_SWAP)
    gem_swap_yes = real_yes_wrong(GEMINI_SWAP)
    qwen_sync_fails = collect_qwen_sync_intervention_failures(QWEN_SYNC)
    qwen_sync_both = {b for b, dirs in qwen_sync_fails.items()
                      if "delay" in dirs and "early" in dirs}

    print("Per-source REAL wrong sets (raw_output='yes' only):")
    print(f"  qwen mute yes-hallucinations : {len(qwen_mute_yes)}")
    print(f"  qwen swap yes-false-match    : {len(qwen_swap_yes)}")
    print(f"  qwen sync (delay AND early)  : {len(qwen_sync_both)}")
    print(f"  gemini swap yes-false-match  : {len(gem_swap_yes)}")

    pool = qwen_mute_yes & qwen_swap_yes & qwen_sync_both & gem_swap_yes
    print(f"\nPool (4-way real intersection): {len(pool)} videos")

    qwen_swap_rows = {r["video"]: r for r in load_jsonl(QWEN_SWAP)
                      if not r.get("correct", True)}
    gem_swap_rows = {r["video"]: r for r in load_jsonl(GEMINI_SWAP)
                     if not r.get("correct", True)}

    rows = []
    for v in sorted(pool):
        rec = {
            "video": v,
            "gemini_swap_audio_from": gem_swap_rows.get(v, {}).get("swapped_from"),
            "qwen_swap_audio_from": qwen_swap_rows.get(v, {}).get("swapped_from"),
            "qwen_sync_failures": [qwen_sync_fails[v]["delay"],
                                   qwen_sync_fails[v]["early"]],
        }
        rows.append(rec)

    with open(OUT_PATH, "w") as f:
        for rec in rows:
            f.write(json.dumps(rec, ensure_ascii=False) + "\n")
    print(f"[saved] {len(rows)} rows -> {OUT_PATH}")

    print("\nPool videos:")
    for r in rows:
        print(f"  {r['video']}")


if __name__ == "__main__":
    main()
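The two regexes above drive all of the base/variant bookkeeping; a small illustration with a hypothetical filename (the clip name itself is made up, the behavior follows directly from the patterns):

name = "example_clip_delay_1.5s.mp4"       # hypothetical shifted variant
base_of(name)                              # -> "example_clip.mp4" (shift suffix stripped, extension kept)
direction_of(name)                         # -> "delay"
direction_of("example_clip.mp4")           # -> None (original clip, no shift suffix)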
case_study_gemini_mute_neutral.py
ADDED
@@ -0,0 +1,126 @@
"""Re-run Gemini mute on the 6 case-study candidates with NEUTRAL prompt.

Direct prompt failures for Gemini all turned out to be empty responses (API
artifacts), not real audio hallucinations. Neutral prompt ("Describe the audio
you hear") forces an open-ended answer; OpenAI judge classifies whether the
response hallucinates audio content vs correctly reports silence.

Pipeline:
  1. Read /home/ubuntu/case_study_candidates.jsonl (6 base videos).
  2. Build a 6-row test jsonl (mute loader dedupes to base originals only).
  3. Run scripts/eval_gemini_mute_sync_swap.py --tasks mute
     --mute-prompt-mode neutral --openai-judge.
  4. Print per-video raw_output + judge classification.

Usage:
  GEMINI_API_KEY=... OPENAI_API_KEY=... python3 /home/ubuntu/case_study_gemini_mute_neutral.py
"""

import argparse
import json
import os
import subprocess
import sys
from pathlib import Path

REPO = Path("/home/ubuntu/CleverHans-Evaluation")
TEST_JSONL = REPO / "data" / "kto_training_data_v2_test.jsonl"
EVAL_SCRIPT = REPO / "scripts" / "eval_gemini_mute_sync_swap.py"
DATA_ROOT = Path("/opt/dlami/nvme/video_source")
CANDIDATES = Path("/home/ubuntu/case_study_pool.jsonl")  # 45-video pool

OUT_ROOT = Path("/home/ubuntu/eval_results/case_study_pool")
FILTERED_JSON = OUT_ROOT / "mute_test.jsonl"
LABEL = "case_study_pool_gemini_mute_neutral"
GEMINI_RESULT = OUT_ROOT / "mute" / LABEL / "eval_results.jsonl"


def load_jsonl(path):
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)


def step_filter():
    bases = {row["video"] for row in load_jsonl(CANDIDATES)}
    print(f"[filter] {len(bases)} base videos from {CANDIDATES}")

    OUT_ROOT.mkdir(parents=True, exist_ok=True)
    kept = []
    for row in load_jsonl(TEST_JSONL):
        v = row["video"]
        if "_delay_" in v or "_early_" in v:
            continue
        if v in bases:
            kept.append(row)

    with open(FILTERED_JSON, "w") as f:
        for row in kept:
            f.write(json.dumps(row, ensure_ascii=False) + "\n")
    print(f"[filter] kept {len(kept)} rows -> {FILTERED_JSON}")
    if len(kept) != len(bases):
        print(f"[warn] expected {len(bases)} rows, got {len(kept)}")


def step_run(gemini_key, openai_key, model, workers):
    env = os.environ.copy()
    env["GEMINI_API_KEY"] = gemini_key
    env["OPENAI_API_KEY"] = openai_key

    cmd = [
        sys.executable, str(EVAL_SCRIPT),
        "--tasks", "mute",
        "--model", model,
        "--data-root", str(DATA_ROOT),
        "--test-jsonl", str(FILTERED_JSON),
        "--output-dir", str(OUT_ROOT),
        "--label", LABEL,
        "--mute-prompt-mode", "neutral",
        "--openai-judge",
        "--workers", str(workers),
    ]
    print(f"[run] {' '.join(cmd)}")
    subprocess.run(cmd, env=env, check=True)


def step_report():
    if not GEMINI_RESULT.exists():
        sys.exit(f"[error] expected results at {GEMINI_RESULT}")
    rows = list(load_jsonl(GEMINI_RESULT))
    print(f"\n[report] {len(rows)} mute samples (neutral, OpenAI judged):")
    for r in rows:
        v = r["video"]
        pred = r.get("pred_label")
        correct = r.get("correct")
        raw = (r.get("raw_output") or "").strip().replace("\n", " ")[:240]
        je = (r.get("judge_explanation") or "").strip()[:120]
        print(f"\n  VIDEO: {v}")
        print(f"    pred={pred} correct={correct}")
        print(f"    raw  : {raw!r}")
        if je:
            print(f"    judge: {je}")


def main():
    p = argparse.ArgumentParser()
    p.add_argument("--gemini-key", default=os.environ.get("GEMINI_API_KEY"))
    p.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
    p.add_argument("--model", default="gemini-3.1-pro-preview")
    p.add_argument("--workers", type=int, default=4)
    p.add_argument("--skip-run", action="store_true")
    args = p.parse_args()

    step_filter()
    if not args.skip_run:
        if not args.gemini_key:
            sys.exit("[error] need --gemini-key or GEMINI_API_KEY env")
        if not args.openai_key:
            sys.exit("[error] need --openai-key or OPENAI_API_KEY env")
        step_run(args.gemini_key, args.openai_key, args.model, args.workers)
    step_report()


if __name__ == "__main__":
    main()
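For quick reference, the constants above compose to a single expected result file, assuming the eval script writes under --output-dir/<task>/<label>/ as GEMINI_RESULT implies:

# OUT_ROOT / "mute" / LABEL / "eval_results.jsonl" resolves to:
# /home/ubuntu/eval_results/case_study_pool/mute/case_study_pool_gemini_mute_neutral/eval_results.jsonl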
case_study_gemini_sync.py
ADDED
@@ -0,0 +1,165 @@
"""Re-test the 12 case-study candidates' sync variants on Gemini.

Pipeline:
  1. Read /home/ubuntu/case_study_candidates.jsonl (12 base videos).
  2. Filter the master test jsonl to the 36 rows that belong to those 12 bases
     (each base + its delay + early variant).
  3. Invoke scripts/eval_gemini_mute_sync_swap.py --task sync --openai-parse-sync.
  4. Read the Gemini sync results and report which of the 36 are wrong, using
     the same correctness rule as find_case_study.py:
       wrong = (gt_synced != pred_synced) OR
               (not gt_synced AND pred_direction != gt_direction)

Usage:
  GEMINI_API_KEY=... OPENAI_API_KEY=... python3 /home/ubuntu/case_study_gemini_sync.py
  or pass --gemini-key / --openai-key.
"""

import argparse
import json
import os
import re
import subprocess
import sys
from pathlib import Path

REPO = Path("/home/ubuntu/CleverHans-Evaluation")
TEST_JSONL = REPO / "data" / "kto_training_data_v2_test.jsonl"
EVAL_SCRIPT = REPO / "scripts" / "eval_gemini_mute_sync_swap.py"
DATA_ROOT = Path("/opt/dlami/nvme/video_source")
CANDIDATES = Path("/home/ubuntu/case_study_pool.jsonl")  # 45-video pool

OUT_ROOT = Path("/home/ubuntu/eval_results/case_study_pool")
FILTERED_JSON = OUT_ROOT / "sync_test.jsonl"
LABEL = "case_study_pool_gemini_sync"
GEMINI_RESULT = OUT_ROOT / "sync" / LABEL / "eval_results.jsonl"
WRONG_OUT = OUT_ROOT / "gemini_wrong.jsonl"

SYNC_SUFFIX_RE = re.compile(r"_(delay|early)_\d+(?:\.\d+)?s(?=\.mp4$)")


def base_of(name: str) -> str:
    return SYNC_SUFFIX_RE.sub("", name)


def load_jsonl(path):
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)


def step_filter():
    bases = {row["video"] for row in load_jsonl(CANDIDATES)}
    print(f"[filter] {len(bases)} base videos from {CANDIDATES}")

    OUT_ROOT.mkdir(parents=True, exist_ok=True)
    kept = []
    for row in load_jsonl(TEST_JSONL):
        if base_of(row["video"]) in bases:
            kept.append(row)

    with open(FILTERED_JSON, "w") as f:
        for row in kept:
            f.write(json.dumps(row, ensure_ascii=False) + "\n")
    print(f"[filter] kept {len(kept)} rows -> {FILTERED_JSON}")
    if len(kept) != 3 * len(bases):
        print(f"[warn] expected {3*len(bases)} rows (3 per base), got {len(kept)}")


def step_run_gemini(gemini_key: str, openai_key: str, model: str, workers: int):
    env = os.environ.copy()
    env["GEMINI_API_KEY"] = gemini_key
    env["OPENAI_API_KEY"] = openai_key

    cmd = [
        sys.executable, str(EVAL_SCRIPT),
        "--task", "sync",
        "--model", model,
        "--data-root", str(DATA_ROOT),
        "--test-jsonl", str(FILTERED_JSON),
        "--output-dir", str(OUT_ROOT),
        "--label", LABEL,
        "--openai-parse-sync",
        "--workers", str(workers),
    ]
    print(f"[run] {' '.join(cmd)}")
    subprocess.run(cmd, env=env, check=True)


def step_report():
    if not GEMINI_RESULT.exists():
        sys.exit(f"[error] expected results at {GEMINI_RESULT}; eval did not produce one")

    rows = list(load_jsonl(GEMINI_RESULT))
    wrong = []
    for r in rows:
        gt_synced = bool(r["gt_synced"])
        pred_synced = bool(r["pred_synced"])
        is_wrong = (gt_synced != pred_synced) or (
            (not gt_synced) and r.get("pred_direction") != r.get("gt_direction")
        )
        if is_wrong:
            wrong.append({
                "video": r["video"],
                "base": base_of(r["video"]),
                "gt": (r["gt_direction"], r["gt_offset_sec"]),
                "pred_synced": pred_synced,
                "pred": (r.get("pred_direction"), r.get("pred_offset_sec")),
                "raw": r.get("raw_output", "")[:200],
            })

    by_base = {}
    for w in wrong:
        by_base.setdefault(w["base"], []).append(w)

    print(f"\n[report] {len(rows)} sync samples evaluated, {len(wrong)} wrong")
    print("[report] wrong distribution by base video:")
    for base in sorted(by_base):
        hits = by_base[base]
        print(f"\n  {base} ({len(hits)} variant(s) wrong)")
        for h in hits:
            print(f"    - {h['video']}")
            print(f"      gt={h['gt']} pred_synced={h['pred_synced']} pred={h['pred']}")

    with open(WRONG_OUT, "w") as f:
        for w in wrong:
            f.write(json.dumps(w, ensure_ascii=False) + "\n")
    print(f"\n[saved] wrong rows -> {WRONG_OUT}")

    bases_total = {base_of(r["video"]) for r in rows}
    bases_any_wrong = set(by_base)
    bases_all_three_wrong = {b for b, hs in by_base.items()
                             if len(hs) == 3}
    print("\n[summary]")
    print(f"  candidates with >=1 wrong sync : {len(bases_any_wrong)} / {len(bases_total)}")
    print(f"  candidates with all 3 wrong    : {len(bases_all_three_wrong)} / {len(bases_total)}")
    if bases_all_three_wrong:
        print("  perfect-fail candidates:")
        for b in sorted(bases_all_three_wrong):
            print(f"    - {b}")


def main():
    p = argparse.ArgumentParser()
    p.add_argument("--gemini-key", default=os.environ.get("GEMINI_API_KEY"))
    p.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
    p.add_argument("--model", default="gemini-3.1-pro-preview")
    p.add_argument("--workers", type=int, default=4)
    p.add_argument("--skip-run", action="store_true",
                   help="Skip the Gemini call (use existing results to re-report).")
    args = p.parse_args()

    step_filter()
    if not args.skip_run:
        if not args.gemini_key:
            sys.exit("[error] need --gemini-key or GEMINI_API_KEY env")
        if not args.openai_key:
            sys.exit("[error] need --openai-key or OPENAI_API_KEY env")
        step_run_gemini(args.gemini_key, args.openai_key, args.model, args.workers)
    step_report()


if __name__ == "__main__":
    main()
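The correctness rule inlined in step_report() (and stated in the module docstring) can be read as a standalone predicate; this restatement is only illustrative and is not a helper that exists in the repo:

def is_sync_call_wrong(gt_synced, pred_synced, gt_direction, pred_direction):
    # Wrong if the synced/desynced verdict flips, or if a true desync is
    # attributed to the wrong direction (delay vs early).
    return (gt_synced != pred_synced) or (not gt_synced and pred_direction != gt_direction)

# e.g. a delayed clip predicted as "early": is_sync_call_wrong(False, False, "delay", "early") -> True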
case_study_gpt55.py
ADDED
@@ -0,0 +1,582 @@
"""Evaluate GPT-5.5 on the FULL test set: sync (426), mute neutral (142), swap direct (142).

GPT-5.5 chat.completions does NOT accept input_audio (audio is gated to
gpt-4o-audio-preview / realtime). We therefore send VIDEO FRAMES ONLY (8 frames
sampled evenly per clip) + the appropriate text prompt. This is a documented
handicap — the resulting numbers reflect what GPT-5.5 can do without hearing
the audio.

Audio is intentionally NOT extracted; for sync the model has no temporal audio
cue, for mute the audio is silent anyway, for swap the model can't hear the
donor track.

Outputs (label includes _visualOnly to flag the handicap):
  ~/eval_results/sync/sync_gpt-5.5_visualOnly/
  ~/eval_results/mute/mute_gpt-5.5_visualOnly_promptNeutral/
  ~/eval_results/swap/swap_gpt-5.5_visualOnly_promptDirect/

Swap donor pairing reuses the assignments from the Gemini swap eval
(same seed=42, so identical to Qwen swap pairing too).

Usage:
  OPENAI_API_KEY=sk-... python3 /home/ubuntu/case_study_gpt55.py
"""

import argparse
import base64
import json
import os
import re
import subprocess
import sys
import tempfile
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Any, Dict, List, Optional

# ---------------------------------------------------------------------------
# Paths
# ---------------------------------------------------------------------------
DATA_ROOT = Path("/opt/dlami/nvme/video_source")
ORIG_DIR = DATA_ROOT / "original" / "uag_oops"
DELAY_DIR = DATA_ROOT / "random_shift_video" / "delay"
EARLY_DIR = DATA_ROOT / "random_shift_video" / "early"

TEST_JSONL = Path("/home/ubuntu/CleverHans-Evaluation/data/kto_training_data_v2_test.jsonl")
GEMINI_SWAP = Path("/home/ubuntu/eval_results/gemini_mute_sync_swap/swap/"
                   "swap_gemini_gemini-3.1-pro-preview__promptDirect/eval_results.jsonl")

EVAL_ROOT = Path("/home/ubuntu/eval_results")
FFMPEG = "/home/ubuntu/anaconda3/envs/video/bin/ffmpeg"
FFPROBE = "/home/ubuntu/anaconda3/envs/video/bin/ffprobe"

DIR_OFFSET_RE = re.compile(r"_(delay|early)_(\d+(?:\.\d+)?)s\.mp4$")

# ---------------------------------------------------------------------------
# Prompts (mirror eval_gemini_mute_sync_swap.py)
# ---------------------------------------------------------------------------
SYNC_PROMPT = (
    "Watch this video and listen to its audio carefully. "
    "Determine whether the audio and video tracks are synchronized. "
    "If they are not synchronized, identify the direction of the offset "
    "(audio delayed or audio early relative to video) and estimate the offset in seconds. "
    "Explain your reasoning."
)
MUTE_PROMPT_NEUTRAL = "Describe the audio you hear in this video."
SWAP_PROMPT_DIRECT = (
    "Does the audio in this video match the visual content? "
    "Answer with only 'yes' or 'no'."
)

SYNC_PARSE_SYSTEM = (
    "You parse a free-form judgement about whether the audio in a short video is "
    "synchronized with the visuals. Return STRICT JSON only with keys: "
    "synced (bool), direction (one of 'delay'|'early'|'none'), "
    "offset_sec (number, 0 if synced), t_v (number|null, visual cue time), "
    "t_a (number|null, audio cue time), explanation (string). "
    "If the answer says they are aligned, set synced=true, direction='none', offset_sec=0."
)
MUTE_JUDGE_SYSTEM = (
    "You are evaluating a model's response about a video whose audio has been secretly "
    "replaced with silence. Classify into ONE of: "
    '{"prediction": "muted"|"synced"|"delay"|"early", "explanation": "<one sentence>"}. '
    "Rules: "
    "- 'muted' iff the model says no audio / silence / no audible sound. "
    "- 'synced' iff the model describes audio that matches the visuals (HALLUCINATION). "
    "- 'delay'/'early' iff the model claims a temporal offset. "
    "Return STRICT JSON only."
)


# ---------------------------------------------------------------------------
# Small utilities
# ---------------------------------------------------------------------------
def load_jsonl(path):
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)


def parse_gt_from_name(video_name: str) -> Dict[str, Any]:
    m = DIR_OFFSET_RE.search(video_name)
    if m:
        return {"synced": False, "direction": m.group(1), "offset_sec": float(m.group(2))}
    return {"synced": True, "direction": "none", "offset_sec": 0.0}


def video_path_for(video_name: str) -> Optional[Path]:
    """Resolve a sync test name to its on-disk mp4 (synced original / delay / early)."""
    m = DIR_OFFSET_RE.search(video_name)
    if m is None:
        p = ORIG_DIR / video_name
    elif m.group(1) == "delay":
        p = DELAY_DIR / video_name
    else:
        p = EARLY_DIR / video_name
    return p if p.exists() else None


def video_duration(path: Path) -> float:
    out = subprocess.run(
        [FFPROBE, "-v", "error", "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1", str(path)],
        capture_output=True, text=True, check=True,
    )
    try:
        return float(out.stdout.strip())
    except ValueError:
        return 5.0


def extract_frames_b64(video_path: Path, n_frames: int = 8) -> List[str]:
    dur = video_duration(video_path)
    if dur <= 0.05:
        dur = 0.5
    timestamps = [dur * (i + 0.5) / n_frames for i in range(n_frames)]
    out = []
    with tempfile.TemporaryDirectory() as td:
        for i, t in enumerate(timestamps):
            png = Path(td) / f"f_{i:02d}.png"
            subprocess.run(
                [FFMPEG, "-y", "-ss", f"{t:.3f}", "-i", str(video_path),
                 "-frames:v", "1", "-vf", "scale=512:-2",
                 "-loglevel", "error", str(png)],
                check=True,
            )
            out.append(base64.b64encode(png.read_bytes()).decode())
    return out


# ---------------------------------------------------------------------------
# OpenAI calls (frames-only multimodal: text + images)
# ---------------------------------------------------------------------------
def _client(api_key: str):
    from openai import OpenAI
    return OpenAI(api_key=api_key)


def call_gpt_frames(client, model, prompt, frames_b64,
                    max_tokens=4000, temperature=0.0,
                    reasoning_effort="minimal", _diag=True) -> str:
    """Send text + frames to a (possibly-reasoning) GPT model.

    GPT-5 family is a reasoning model: max_completion_tokens covers BOTH the
    hidden reasoning trace AND the visible content. If the token budget is too
    low, content can come back empty. We:
      - bump max_completion_tokens to 4000
      - pass reasoning_effort='minimal' to keep most of the budget for content
      - retry once on temperature-rejection
      - if content is still empty, fall through with a diagnostic-friendly log
    """
    content: List[Dict[str, Any]] = [{"type": "text", "text": prompt}]
    for fb in frames_b64:
        content.append({"type": "image_url",
| 176 |
+
"image_url": {"url": f"data:image/png;base64,{fb}"}})
|
| 177 |
+
|
| 178 |
+
base = dict(
|
| 179 |
+
model=model,
|
| 180 |
+
messages=[{"role": "user", "content": content}],
|
| 181 |
+
max_completion_tokens=max_tokens,
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
def _try(extra):
|
| 185 |
+
return client.chat.completions.create(**base, **extra)
|
| 186 |
+
|
| 187 |
+
resp = None
|
| 188 |
+
for kwargs in (
|
| 189 |
+
{"temperature": temperature, "reasoning_effort": reasoning_effort},
|
| 190 |
+
{"reasoning_effort": reasoning_effort}, # drop temp
|
| 191 |
+
{"temperature": temperature}, # drop reasoning_effort
|
| 192 |
+
{}, # both stripped
|
| 193 |
+
):
|
| 194 |
+
try:
|
| 195 |
+
resp = _try(kwargs)
|
| 196 |
+
break
|
| 197 |
+
except Exception as exc:
|
| 198 |
+
last_err = exc
|
| 199 |
+
continue
|
| 200 |
+
if resp is None:
|
| 201 |
+
raise last_err
|
| 202 |
+
|
| 203 |
+
msg = resp.choices[0].message
|
| 204 |
+
text = (msg.content or "").strip()
|
| 205 |
+
if not text:
|
| 206 |
+
# Diagnostic: surface finish_reason / refusal / usage so we can see why.
|
| 207 |
+
fin = resp.choices[0].finish_reason
|
| 208 |
+
refusal = getattr(msg, "refusal", None)
|
| 209 |
+
usage = getattr(resp, "usage", None)
|
| 210 |
+
if _diag:
|
| 211 |
+
print(f" [gpt empty] finish={fin} refusal={refusal} usage={usage}",
|
| 212 |
+
flush=True)
|
| 213 |
+
return text
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def call_judge(client, judge_model, system_prompt, user_text) -> Optional[Dict[str, Any]]:
|
| 217 |
+
try:
|
| 218 |
+
resp = client.chat.completions.create(
|
| 219 |
+
model=judge_model,
|
| 220 |
+
messages=[
|
| 221 |
+
{"role": "system", "content": system_prompt},
|
| 222 |
+
{"role": "user", "content": user_text},
|
| 223 |
+
],
|
| 224 |
+
response_format={"type": "json_object"},
|
| 225 |
+
max_completion_tokens=300,
|
| 226 |
+
)
|
| 227 |
+
return json.loads(resp.choices[0].message.content or "")
|
| 228 |
+
except Exception as exc:
|
| 229 |
+
print(f" [judge] error: {exc}", flush=True)
|
| 230 |
+
return None
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
# ---------------------------------------------------------------------------
|
| 234 |
+
# Per-task runners (frames-only)
|
| 235 |
+
# ---------------------------------------------------------------------------
|
| 236 |
+
def run_one_sync(client, model, judge_model, item, n_frames):
|
| 237 |
+
raw = call_gpt_frames(
|
| 238 |
+
client, model, SYNC_PROMPT,
|
| 239 |
+
extract_frames_b64(item["video_path"], n_frames),
|
| 240 |
+
)
|
| 241 |
+
parsed = call_judge(client, judge_model, SYNC_PARSE_SYSTEM, raw) or {}
|
| 242 |
+
direction = str(parsed.get("direction", "none")).lower()
|
| 243 |
+
if direction not in ("delay", "early", "none"):
|
| 244 |
+
direction = "none"
|
| 245 |
+
synced = parsed.get("synced", False)
|
| 246 |
+
if isinstance(synced, str):
|
| 247 |
+
synced = synced.lower() in ("true", "yes", "1")
|
| 248 |
+
return {
|
| 249 |
+
"video": item["video"],
|
| 250 |
+
"video_path": str(item["video_path"]),
|
| 251 |
+
"gt_synced": item["gt_synced"],
|
| 252 |
+
"gt_direction": item["gt_direction"],
|
| 253 |
+
"gt_offset_sec": item["gt_offset_sec"],
|
| 254 |
+
"pred_synced": bool(synced),
|
| 255 |
+
"pred_direction": direction,
|
| 256 |
+
"pred_offset_sec": float(parsed.get("offset_sec", 0.0) or 0.0),
|
| 257 |
+
"pred_explanation": str(parsed.get("explanation", "")),
|
| 258 |
+
"parse_method": "openai_parse" if parsed else "openai_parse_fail",
|
| 259 |
+
"raw_output": raw,
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def run_one_mute(client, model, judge_model, item, n_frames):
|
| 264 |
+
raw = call_gpt_frames(
|
| 265 |
+
client, model, MUTE_PROMPT_NEUTRAL,
|
| 266 |
+
extract_frames_b64(item["video_path"], n_frames),
|
| 267 |
+
)
|
| 268 |
+
parsed = call_judge(client, judge_model, MUTE_JUDGE_SYSTEM, raw) or {}
|
| 269 |
+
pred = str(parsed.get("prediction", "synced")).lower()
|
| 270 |
+
if pred not in ("muted", "synced", "delay", "early"):
|
| 271 |
+
pred = "synced"
|
| 272 |
+
return {
|
| 273 |
+
"video": item["video"],
|
| 274 |
+
"video_path": str(item["video_path"]),
|
| 275 |
+
"gt_label": "muted",
|
| 276 |
+
"pred_label": pred,
|
| 277 |
+
"correct": (pred == "muted"),
|
| 278 |
+
"judge_explanation": str(parsed.get("explanation", "")),
|
| 279 |
+
"parse_method": "openai_judge" if parsed else "judge_fail",
|
| 280 |
+
"prompt_mode": "neutral",
|
| 281 |
+
"raw_output": raw,
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def run_one_swap(client, model, judge_model, item, n_frames):
|
| 286 |
+
raw = call_gpt_frames(
|
| 287 |
+
client, model, SWAP_PROMPT_DIRECT,
|
| 288 |
+
extract_frames_b64(item["video_path"], n_frames),
|
| 289 |
+
# Reasoning model needs headroom for the hidden trace even on yes/no Qs.
|
| 290 |
+
)
|
| 291 |
+
t = raw.strip().lower()
|
| 292 |
+
if t.startswith("no") or "no" in t.split()[:3]:
|
| 293 |
+
pred = "mismatched"
|
| 294 |
+
elif t.startswith("yes") or "yes" in t.split()[:3]:
|
| 295 |
+
pred = "synced"
|
| 296 |
+
else:
|
| 297 |
+
pred = "synced"
|
| 298 |
+
return {
|
| 299 |
+
"video": item["video"],
|
| 300 |
+
"video_path": str(item["video_path"]),
|
| 301 |
+
"swapped_from": item["swapped_from"],
|
| 302 |
+
"gt_label": "mismatched",
|
| 303 |
+
"pred_label": pred,
|
| 304 |
+
"correct": (pred == "mismatched"),
|
| 305 |
+
"parse_method": "direct",
|
| 306 |
+
"prompt_mode": "direct",
|
| 307 |
+
"raw_output": raw,
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
# ---------------------------------------------------------------------------
|
| 312 |
+
# Build work items from the FULL test set
|
| 313 |
+
# ---------------------------------------------------------------------------
|
| 314 |
+
def build_sync_work() -> List[Dict[str, Any]]:
|
| 315 |
+
"""All 426 sync samples (142 base × 3 variants)."""
|
| 316 |
+
work = []
|
| 317 |
+
for r in load_jsonl(TEST_JSONL):
|
| 318 |
+
v = r["video"]
|
| 319 |
+
p = video_path_for(v)
|
| 320 |
+
if p is None:
|
| 321 |
+
print(f"[skip sync] missing file: {v}", flush=True)
|
| 322 |
+
continue
|
| 323 |
+
gt = parse_gt_from_name(v)
|
| 324 |
+
work.append({
|
| 325 |
+
"video": v,
|
| 326 |
+
"video_path": p,
|
| 327 |
+
"gt_synced": gt["synced"],
|
| 328 |
+
"gt_direction": gt["direction"],
|
| 329 |
+
"gt_offset_sec": gt["offset_sec"],
|
| 330 |
+
})
|
| 331 |
+
return work
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def build_mute_work() -> List[Dict[str, Any]]:
|
| 335 |
+
"""142 base videos (skip delay/early variants); audio is implicit silence."""
|
| 336 |
+
work = []
|
| 337 |
+
seen = set()
|
| 338 |
+
for r in load_jsonl(TEST_JSONL):
|
| 339 |
+
v = r["video"]
|
| 340 |
+
if "_delay_" in v or "_early_" in v or v in seen:
|
| 341 |
+
continue
|
| 342 |
+
seen.add(v)
|
| 343 |
+
p = ORIG_DIR / v
|
| 344 |
+
if not p.exists():
|
| 345 |
+
print(f"[skip mute] missing: {v}", flush=True)
|
| 346 |
+
continue
|
| 347 |
+
work.append({"video": v, "video_path": p})
|
| 348 |
+
return work
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def build_swap_work() -> List[Dict[str, Any]]:
|
| 352 |
+
"""142 base videos with the same donor pairing used in Gemini swap eval."""
|
| 353 |
+
if not GEMINI_SWAP.exists():
|
| 354 |
+
sys.exit(f"[error] need {GEMINI_SWAP} for swap donor pairings")
|
| 355 |
+
work = []
|
| 356 |
+
for r in load_jsonl(GEMINI_SWAP):
|
| 357 |
+
v = r["video"]
|
| 358 |
+
donor = r.get("swapped_from")
|
| 359 |
+
if not donor:
|
| 360 |
+
continue
|
| 361 |
+
p = ORIG_DIR / v
|
| 362 |
+
if not p.exists():
|
| 363 |
+
print(f"[skip swap] missing: {v}", flush=True)
|
| 364 |
+
continue
|
| 365 |
+
work.append({"video": v, "video_path": p, "swapped_from": donor})
|
| 366 |
+
return work
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
# ---------------------------------------------------------------------------
|
| 370 |
+
# Metrics (match existing eval_*.py schemas so results are directly comparable)
|
| 371 |
+
# ---------------------------------------------------------------------------
|
| 372 |
+
def _safe_div(a, b):
|
| 373 |
+
return round(a / b, 4) if b else 0.0
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def metrics_mute(rows, model, judge_model):
|
| 377 |
+
breakdown = {"muted": 0, "synced": 0, "delay": 0, "early": 0}
|
| 378 |
+
parse_stats: Dict[str, int] = {}
|
| 379 |
+
for r in rows:
|
| 380 |
+
breakdown[r["pred_label"]] = breakdown.get(r["pred_label"], 0) + 1
|
| 381 |
+
m = r.get("parse_method", "")
|
| 382 |
+
parse_stats[m] = parse_stats.get(m, 0) + 1
|
| 383 |
+
n = len(rows)
|
| 384 |
+
return {
|
| 385 |
+
"total_samples": n,
|
| 386 |
+
"mute_detection_rate": _safe_div(breakdown["muted"], n),
|
| 387 |
+
"hallucination_rate": _safe_div(n - breakdown["muted"], n),
|
| 388 |
+
"prediction_breakdown": breakdown,
|
| 389 |
+
"parse_stats": parse_stats,
|
| 390 |
+
"eval_config": {
|
| 391 |
+
"base_model": model,
|
| 392 |
+
"prompt_mode": "neutral",
|
| 393 |
+
"openai_judge": True,
|
| 394 |
+
"judge_model": judge_model,
|
| 395 |
+
"input_modality": "frames_only",
|
| 396 |
+
},
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def metrics_swap(rows, model, judge_model):
|
| 401 |
+
breakdown = {"mismatched": 0, "synced": 0, "delay": 0, "early": 0}
|
| 402 |
+
parse_stats: Dict[str, int] = {}
|
| 403 |
+
for r in rows:
|
| 404 |
+
breakdown[r["pred_label"]] = breakdown.get(r["pred_label"], 0) + 1
|
| 405 |
+
m = r.get("parse_method", "")
|
| 406 |
+
parse_stats[m] = parse_stats.get(m, 0) + 1
|
| 407 |
+
n = len(rows)
|
| 408 |
+
return {
|
| 409 |
+
"total_samples": n,
|
| 410 |
+
"mismatch_detection_rate": _safe_div(breakdown["mismatched"], n),
|
| 411 |
+
"hallucination_rate": _safe_div(n - breakdown["mismatched"], n),
|
| 412 |
+
"prediction_breakdown": breakdown,
|
| 413 |
+
"parse_stats": parse_stats,
|
| 414 |
+
"eval_config": {
|
| 415 |
+
"base_model": model,
|
| 416 |
+
"prompt_mode": "direct",
|
| 417 |
+
"judge_model": judge_model, # not used for direct, kept for parity
|
| 418 |
+
"input_modality": "frames_only",
|
| 419 |
+
},
|
| 420 |
+
}
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
def metrics_sync(rows, model, judge_model):
|
| 424 |
+
n = len(rows)
|
| 425 |
+
by_cat = {"synced": [], "delay": [], "early": []}
|
| 426 |
+
for r in rows:
|
| 427 |
+
if r["gt_synced"]:
|
| 428 |
+
by_cat["synced"].append(r)
|
| 429 |
+
else:
|
| 430 |
+
by_cat[r["gt_direction"]].append(r)
|
| 431 |
+
|
| 432 |
+
def _is_correct(r):
|
| 433 |
+
if r["gt_synced"]:
|
| 434 |
+
return r["pred_synced"]
|
| 435 |
+
return (not r["pred_synced"]) and r["pred_direction"] == r["gt_direction"]
|
| 436 |
+
|
| 437 |
+
sync_desync = sum(1 for r in rows if bool(r["pred_synced"]) == bool(r["gt_synced"]))
|
| 438 |
+
three_class = sum(1 for r in rows if _is_correct(r))
|
| 439 |
+
desync_rows = [r for r in rows if not r["gt_synced"]]
|
| 440 |
+
dir_correct = sum(1 for r in desync_rows
|
| 441 |
+
if (not r["pred_synced"]) and r["pred_direction"] == r["gt_direction"])
|
| 442 |
+
|
| 443 |
+
# Offset MAE on rows where both gt and pred have a numeric offset for desync
|
| 444 |
+
offsets = []
|
| 445 |
+
for r in desync_rows:
|
| 446 |
+
if r.get("pred_offset_sec") is not None and r.get("gt_offset_sec") is not None:
|
| 447 |
+
offsets.append(abs(float(r["pred_offset_sec"]) - float(r["gt_offset_sec"])))
|
| 448 |
+
offset_mae = round(sum(offsets) / len(offsets), 4) if offsets else None
|
| 449 |
+
|
| 450 |
+
parse_stats: Dict[str, int] = {}
|
| 451 |
+
for r in rows:
|
| 452 |
+
m = r.get("parse_method", "")
|
| 453 |
+
parse_stats[m] = parse_stats.get(m, 0) + 1
|
| 454 |
+
|
| 455 |
+
return {
|
| 456 |
+
"total_samples": n,
|
| 457 |
+
"sync_desync_accuracy": _safe_div(sync_desync, n),
|
| 458 |
+
"three_class_accuracy": _safe_div(three_class, n),
|
| 459 |
+
"direction_accuracy_on_desync": _safe_div(dir_correct, len(desync_rows)),
|
| 460 |
+
"per_category": {
|
| 461 |
+
"synced_accuracy": _safe_div(sum(1 for r in by_cat["synced"] if _is_correct(r)),
|
| 462 |
+
len(by_cat["synced"])),
|
| 463 |
+
"delay_accuracy": _safe_div(sum(1 for r in by_cat["delay"] if _is_correct(r)),
|
| 464 |
+
len(by_cat["delay"])),
|
| 465 |
+
"early_accuracy": _safe_div(sum(1 for r in by_cat["early"] if _is_correct(r)),
|
| 466 |
+
len(by_cat["early"])),
|
| 467 |
+
"synced_count": len(by_cat["synced"]),
|
| 468 |
+
"delay_count": len(by_cat["delay"]),
|
| 469 |
+
"early_count": len(by_cat["early"]),
|
| 470 |
+
},
|
| 471 |
+
"offset_mae_sec": offset_mae,
|
| 472 |
+
"offset_evaluated_count": len(offsets),
|
| 473 |
+
"parse_stats": parse_stats,
|
| 474 |
+
"eval_config": {
|
| 475 |
+
"base_model": model,
|
| 476 |
+
"openai_parse_sync": True,
|
| 477 |
+
"judge_model": judge_model,
|
| 478 |
+
"input_modality": "frames_only",
|
| 479 |
+
},
|
| 480 |
+
}
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
METRICS_FN = {
|
| 484 |
+
"sync": metrics_sync,
|
| 485 |
+
"mute": metrics_mute,
|
| 486 |
+
"swap": metrics_swap,
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
# ---------------------------------------------------------------------------
|
| 491 |
+
# Run loop with resume + parallel
|
| 492 |
+
# ---------------------------------------------------------------------------
|
| 493 |
+
def run_task(out_dir: Path, work, runner, client, model, judge_model, n_frames, workers,
|
| 494 |
+
task_kind: str):
|
| 495 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 496 |
+
results_path = out_dir / "eval_results.jsonl"
|
| 497 |
+
|
| 498 |
+
processed = set()
|
| 499 |
+
if results_path.exists():
|
| 500 |
+
with open(results_path) as f:
|
| 501 |
+
for line in f:
|
| 502 |
+
line = line.strip()
|
| 503 |
+
if line:
|
| 504 |
+
processed.add(json.loads(line)["video"])
|
| 505 |
+
print(f"[{out_dir.name}] resume: {len(processed)} already done")
|
| 506 |
+
|
| 507 |
+
todo = [w for w in work if w["video"] not in processed]
|
| 508 |
+
print(f"[{out_dir.name}] {len(todo)} new / {len(work)} total")
|
| 509 |
+
|
| 510 |
+
def _go(item):
|
| 511 |
+
try:
|
| 512 |
+
return runner(client, model, judge_model, item, n_frames)
|
| 513 |
+
except Exception as exc:
|
| 514 |
+
print(f"[{out_dir.name}] error on {item['video']}: {exc}", flush=True)
|
| 515 |
+
return None
|
| 516 |
+
|
| 517 |
+
n_done = 0
|
| 518 |
+
with ThreadPoolExecutor(max_workers=workers) as ex, open(results_path, "a") as out:
|
| 519 |
+
futures = {ex.submit(_go, item): item for item in todo}
|
| 520 |
+
for fut in as_completed(futures):
|
| 521 |
+
res = fut.result()
|
| 522 |
+
if res is None:
|
| 523 |
+
continue
|
| 524 |
+
out.write(json.dumps(res, ensure_ascii=False) + "\n")
|
| 525 |
+
out.flush()
|
| 526 |
+
n_done += 1
|
| 527 |
+
if n_done % 10 == 0 or n_done == len(todo):
|
| 528 |
+
print(f"[{out_dir.name}] {n_done}/{len(todo)} done", flush=True)
|
| 529 |
+
print(f"[{out_dir.name}] saved -> {results_path}")
|
| 530 |
+
|
| 531 |
+
# Compute and write metrics.json (matches existing eval_*.py schemas).
|
| 532 |
+
rows = [json.loads(l) for l in open(results_path) if l.strip()]
|
| 533 |
+
metrics = METRICS_FN[task_kind](rows, model, judge_model)
|
| 534 |
+
metrics_path = out_dir / "metrics.json"
|
| 535 |
+
with open(metrics_path, "w") as f:
|
| 536 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 537 |
+
print(f"[{out_dir.name}] metrics -> {metrics_path}")
|
| 538 |
+
return results_path
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
# ---------------------------------------------------------------------------
|
| 542 |
+
# Main
|
| 543 |
+
# ---------------------------------------------------------------------------
|
| 544 |
+
def main():
|
| 545 |
+
ap = argparse.ArgumentParser()
|
| 546 |
+
ap.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
|
| 547 |
+
ap.add_argument("--model", default="gpt-5.5",
|
| 548 |
+
help="Model under test (frames-only multimodal).")
|
| 549 |
+
ap.add_argument("--judge-model", default="gpt-5.4")
|
| 550 |
+
ap.add_argument("--tasks", default="sync,mute,swap")
|
| 551 |
+
ap.add_argument("--workers", type=int, default=6)
|
| 552 |
+
ap.add_argument("--n-frames", type=int, default=8)
|
| 553 |
+
ap.add_argument("--label-suffix", default="_visualOnly",
|
| 554 |
+
help="Tagged into output dir name to flag the no-audio handicap.")
|
| 555 |
+
args = ap.parse_args()
|
| 556 |
+
|
| 557 |
+
if not args.openai_key:
|
| 558 |
+
sys.exit("[error] need --openai-key or OPENAI_API_KEY env")
|
| 559 |
+
|
| 560 |
+
client = _client(args.openai_key)
|
| 561 |
+
model_tag = args.model.replace("/", "_") + args.label_suffix
|
| 562 |
+
|
| 563 |
+
tasks = [t.strip() for t in args.tasks.split(",") if t.strip()]
|
| 564 |
+
if "sync" in tasks:
|
| 565 |
+
run_task(EVAL_ROOT / "sync" / f"sync_{model_tag}",
|
| 566 |
+
build_sync_work(), run_one_sync,
|
| 567 |
+
client, args.model, args.judge_model, args.n_frames, args.workers,
|
| 568 |
+
task_kind="sync")
|
| 569 |
+
if "mute" in tasks:
|
| 570 |
+
run_task(EVAL_ROOT / "mute" / f"mute_{model_tag}_promptNeutral",
|
| 571 |
+
build_mute_work(), run_one_mute,
|
| 572 |
+
client, args.model, args.judge_model, args.n_frames, args.workers,
|
| 573 |
+
task_kind="mute")
|
| 574 |
+
if "swap" in tasks:
|
| 575 |
+
run_task(EVAL_ROOT / "swap" / f"swap_{model_tag}_promptDirect",
|
| 576 |
+
build_swap_work(), run_one_swap,
|
| 577 |
+
client, args.model, args.judge_model, args.n_frames, args.workers,
|
| 578 |
+
task_kind="swap")
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
if __name__ == "__main__":
|
| 582 |
+
main()
|
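Note: after a frames-only run of this script finishes, the per-task metrics.json files it writes can be read back for a quick side-by-side check. The sketch below is only an illustration; the tag value ("gpt-5.5_visualOnly") and the directory names are assumptions based on how main() composes them from --model and --label-suffix, so adjust them to match your actual run.

    #!/usr/bin/env python3
    """Minimal sketch: print headline numbers from a finished frames-only run.

    Assumes the directory naming used by case_study_gpt55.py main():
    sync_<tag>, mute_<tag>_promptNeutral, swap_<tag>_promptDirect.
    The TAG below is only an example value.
    """
    import json
    from pathlib import Path

    EVAL_ROOT = Path("/home/ubuntu/eval_results")
    TAG = "gpt-5.5_visualOnly"  # hypothetical: <model> + --label-suffix

    DIRS = {
        "sync": EVAL_ROOT / "sync" / f"sync_{TAG}",
        "mute": EVAL_ROOT / "mute" / f"mute_{TAG}_promptNeutral",
        "swap": EVAL_ROOT / "swap" / f"swap_{TAG}_promptDirect",
    }
    HEADLINE = {
        "sync": "sync_desync_accuracy",
        "mute": "mute_detection_rate",
        "swap": "mismatch_detection_rate",
    }

    for task, d in DIRS.items():
        path = d / "metrics.json"
        if not path.exists():
            print(f"{task:>4}: no metrics.json yet at {path}")
            continue
        m = json.loads(path.read_text())
        # Rates are stored as floats rounded to 4 decimals by _safe_div().
        print(f"{task:>4}: {m[HEADLINE[task]]:.1%} over {m['total_samples']} samples")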
compute_nemotron_or_metrics.py
ADDED
|
@@ -0,0 +1,251 @@
| 1 |
+
"""Compute metrics.json + summary.txt for the OpenRouter Nemotron eval runs.
|
| 2 |
+
|
| 3 |
+
Mirrors the metric definitions from the in-repo eval_*.py scripts so numbers
|
| 4 |
+
are head-to-head comparable with the Qwen / Gemini / GPT-5.5 runs.
|
| 5 |
+
|
| 6 |
+
Usage:
|
| 7 |
+
python3 /home/ubuntu/compute_nemotron_or_metrics.py
|
| 8 |
+
"""
|
| 9 |
+
import io
|
| 10 |
+
import json
|
| 11 |
+
from contextlib import redirect_stdout
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
from statistics import mean, median
|
| 14 |
+
from typing import Any, Dict, List
|
| 15 |
+
|
| 16 |
+
EVAL = Path("/home/ubuntu/eval_results")
|
| 17 |
+
|
| 18 |
+
import sys as _sys
|
| 19 |
+
TAG = _sys.argv[1] if len(_sys.argv) > 1 else "nemotron3_nano_omni_reasoning_free_audioMuxed"
|
| 20 |
+
print(f"[compute] TAG={TAG}")
|
| 21 |
+
|
| 22 |
+
DIRS = {
|
| 23 |
+
"sync": EVAL / "sync" / f"sync_{TAG}",
|
| 24 |
+
"swap_direct": EVAL / "swap" / f"swap_{TAG}_promptDirect",
|
| 25 |
+
"swap_neutral": EVAL / "swap" / f"swap_{TAG}_promptNeutral",
|
| 26 |
+
"mute_neutral": EVAL / "mute" / f"mute_{TAG}_promptNeutral",
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def load_rows(path: Path) -> List[Dict[str, Any]]:
|
| 31 |
+
if not path.exists():
|
| 32 |
+
return []
|
| 33 |
+
return [json.loads(l) for l in open(path) if l.strip()]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# --------------------------------------------------------------------------
|
| 37 |
+
# sync — same fields as scripts/eval_dpo_sync.py compute_metrics
|
| 38 |
+
# --------------------------------------------------------------------------
|
| 39 |
+
def metrics_sync(rows: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 40 |
+
n = len(rows)
|
| 41 |
+
if n == 0:
|
| 42 |
+
return {}
|
| 43 |
+
sync_correct = sum(1 for r in rows if r["pred_synced"] == r["gt_synced"])
|
| 44 |
+
sync_acc = sync_correct / n
|
| 45 |
+
|
| 46 |
+
desync = [r for r in rows if not r["gt_synced"]]
|
| 47 |
+
dir_acc = (sum(1 for r in desync if r["pred_direction"] == r["gt_direction"])
|
| 48 |
+
/ len(desync)) if desync else None
|
| 49 |
+
|
| 50 |
+
def label(r, prefix):
|
| 51 |
+
return "synced" if r[f"{prefix}synced"] else r[f"{prefix}direction"]
|
| 52 |
+
three_class = sum(1 for r in rows if label(r, "pred_") == label(r, "gt_")) / n
|
| 53 |
+
|
| 54 |
+
synced_samples = [r for r in rows if r["gt_synced"]]
|
| 55 |
+
delay_samples = [r for r in rows if r["gt_direction"] == "delay"]
|
| 56 |
+
early_samples = [r for r in rows if r["gt_direction"] == "early"]
|
| 57 |
+
synced_acc = (sum(1 for r in synced_samples if r["pred_synced"]) / len(synced_samples)) if synced_samples else None
|
| 58 |
+
delay_acc = (sum(1 for r in delay_samples
|
| 59 |
+
if not r["pred_synced"] and r["pred_direction"] == "delay") / len(delay_samples)) if delay_samples else None
|
| 60 |
+
early_acc = (sum(1 for r in early_samples
|
| 61 |
+
if not r["pred_synced"] and r["pred_direction"] == "early") / len(early_samples)) if early_samples else None
|
| 62 |
+
|
| 63 |
+
offset_errors = [abs(r["pred_offset_sec"] - r["gt_offset_sec"])
|
| 64 |
+
for r in desync
|
| 65 |
+
if (not r["pred_synced"] and r.get("pred_offset_sec", 0) > 0)]
|
| 66 |
+
offset_mae = mean(offset_errors) if offset_errors else None
|
| 67 |
+
offset_median = median(offset_errors) if offset_errors else None
|
| 68 |
+
within_05 = sum(1 for e in offset_errors if e <= 0.5)
|
| 69 |
+
within_10 = sum(1 for e in offset_errors if e <= 1.0)
|
| 70 |
+
|
| 71 |
+
parse_stats: Dict[str, int] = {}
|
| 72 |
+
for r in rows:
|
| 73 |
+
parse_stats[r.get("parse_method", "unknown")] = parse_stats.get(r.get("parse_method", "unknown"), 0) + 1
|
| 74 |
+
|
| 75 |
+
return {
|
| 76 |
+
"total_samples": n,
|
| 77 |
+
"sync_desync_accuracy": round(sync_acc, 4),
|
| 78 |
+
"three_class_accuracy": round(three_class, 4),
|
| 79 |
+
"direction_accuracy_on_desync": round(dir_acc, 4) if dir_acc is not None else None,
|
| 80 |
+
"per_category": {
|
| 81 |
+
"synced_accuracy": round(synced_acc, 4) if synced_acc is not None else None,
|
| 82 |
+
"delay_accuracy": round(delay_acc, 4) if delay_acc is not None else None,
|
| 83 |
+
"early_accuracy": round(early_acc, 4) if early_acc is not None else None,
|
| 84 |
+
"synced_count": len(synced_samples),
|
| 85 |
+
"delay_count": len(delay_samples),
|
| 86 |
+
"early_count": len(early_samples),
|
| 87 |
+
},
|
| 88 |
+
"offset_mae_sec": round(offset_mae, 4) if offset_mae is not None else None,
|
| 89 |
+
"offset_median_sec": round(offset_median, 4) if offset_median is not None else None,
|
| 90 |
+
"offset_within_0.5s": within_05,
|
| 91 |
+
"offset_within_1.0s": within_10,
|
| 92 |
+
"offset_evaluated_count": len(offset_errors),
|
| 93 |
+
"parse_stats": parse_stats,
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
# --------------------------------------------------------------------------
|
| 98 |
+
# swap — eval_swap.py shape
|
| 99 |
+
# --------------------------------------------------------------------------
|
| 100 |
+
def metrics_swap(rows: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 101 |
+
n = len(rows)
|
| 102 |
+
if n == 0:
|
| 103 |
+
return {}
|
| 104 |
+
breakdown = {k: 0 for k in ("mismatched", "synced", "delay", "early")}
|
| 105 |
+
for r in rows:
|
| 106 |
+
p = r.get("pred_label", "synced")
|
| 107 |
+
breakdown[p] = breakdown.get(p, 0) + 1
|
| 108 |
+
parse_stats: Dict[str, int] = {}
|
| 109 |
+
for r in rows:
|
| 110 |
+
parse_stats[r.get("parse_method", "unknown")] = parse_stats.get(r.get("parse_method", "unknown"), 0) + 1
|
| 111 |
+
return {
|
| 112 |
+
"total_samples": n,
|
| 113 |
+
"mismatch_detection_rate": round(breakdown["mismatched"] / n, 4),
|
| 114 |
+
"hallucination_rate": round(1 - breakdown["mismatched"] / n, 4),
|
| 115 |
+
"prediction_breakdown": breakdown,
|
| 116 |
+
"parse_stats": parse_stats,
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# --------------------------------------------------------------------------
|
| 121 |
+
# mute — eval_mute.py shape
|
| 122 |
+
# --------------------------------------------------------------------------
|
| 123 |
+
def metrics_mute(rows: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 124 |
+
n = len(rows)
|
| 125 |
+
if n == 0:
|
| 126 |
+
return {}
|
| 127 |
+
breakdown = {k: 0 for k in ("muted", "synced", "delay", "early")}
|
| 128 |
+
for r in rows:
|
| 129 |
+
p = r.get("pred_label", "synced")
|
| 130 |
+
breakdown[p] = breakdown.get(p, 0) + 1
|
| 131 |
+
parse_stats: Dict[str, int] = {}
|
| 132 |
+
for r in rows:
|
| 133 |
+
parse_stats[r.get("parse_method", "unknown")] = parse_stats.get(r.get("parse_method", "unknown"), 0) + 1
|
| 134 |
+
return {
|
| 135 |
+
"total_samples": n,
|
| 136 |
+
"mute_detection_rate": round(breakdown["muted"] / n, 4),
|
| 137 |
+
"hallucination_rate": round(1 - breakdown["muted"] / n, 4),
|
| 138 |
+
"prediction_breakdown": breakdown,
|
| 139 |
+
"parse_stats": parse_stats,
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
# --------------------------------------------------------------------------
|
| 144 |
+
# Pretty summaries (mirror print_summary in each eval_*.py)
|
| 145 |
+
# --------------------------------------------------------------------------
|
| 146 |
+
def print_sync(m: Dict[str, Any], label: str) -> None:
|
| 147 |
+
print(f"{'=' * 60}")
|
| 148 |
+
print(f" Sync Eval: {label}")
|
| 149 |
+
print(f"{'=' * 60}")
|
| 150 |
+
print(f" Total samples: {m['total_samples']}")
|
| 151 |
+
print(f" Sync/Desync Accuracy: {m['sync_desync_accuracy']:.1%}")
|
| 152 |
+
print(f" 3-Class Accuracy: {m['three_class_accuracy']:.1%}")
|
| 153 |
+
if m['direction_accuracy_on_desync'] is not None:
|
| 154 |
+
print(f" Direction Acc (desync): {m['direction_accuracy_on_desync']:.1%}")
|
| 155 |
+
print(f" --- Per Category ---")
|
| 156 |
+
pc = m['per_category']
|
| 157 |
+
for k in ("synced", "delay", "early"):
|
| 158 |
+
a = pc.get(f"{k}_accuracy")
|
| 159 |
+
c = pc.get(f"{k}_count")
|
| 160 |
+
if a is not None:
|
| 161 |
+
print(f" {k:>8} correct: {a:.1%} ({c} samples)")
|
| 162 |
+
print(f" --- Offset Estimation ---")
|
| 163 |
+
if m['offset_mae_sec'] is not None:
|
| 164 |
+
print(f" MAE: {m['offset_mae_sec']:.3f}s")
|
| 165 |
+
print(f" Median: {m['offset_median_sec']:.3f}s")
|
| 166 |
+
print(f" Within 0.5s: {m['offset_within_0.5s']} / {m['offset_evaluated_count']}")
|
| 167 |
+
print(f" Within 1.0s: {m['offset_within_1.0s']} / {m['offset_evaluated_count']}")
|
| 168 |
+
else:
|
| 169 |
+
print(f" (no valid offset predictions)")
|
| 170 |
+
print(f" --- Parse Stats ---")
|
| 171 |
+
for k, v in sorted(m['parse_stats'].items()):
|
| 172 |
+
print(f" {k}: {v}")
|
| 173 |
+
print(f"{'=' * 60}")
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def print_swap(m: Dict[str, Any], label: str) -> None:
|
| 177 |
+
print(f"{'=' * 60}")
|
| 178 |
+
print(f" Swap Eval: {label}")
|
| 179 |
+
print(f"{'=' * 60}")
|
| 180 |
+
print(f" Total samples: {m['total_samples']}")
|
| 181 |
+
print(f" Mismatch Detection Rate: {m['mismatch_detection_rate']:.1%}")
|
| 182 |
+
print(f" Hallucination Rate: {m['hallucination_rate']:.1%}")
|
| 183 |
+
print(f" --- Prediction Breakdown ---")
|
| 184 |
+
bd = m['prediction_breakdown']
|
| 185 |
+
print(f" mismatched (correct): {bd['mismatched']}")
|
| 186 |
+
print(f" synced (halluc.): {bd['synced']}")
|
| 187 |
+
print(f" delay (halluc.): {bd['delay']}")
|
| 188 |
+
print(f" early (halluc.): {bd['early']}")
|
| 189 |
+
print(f" --- Parse Stats ---")
|
| 190 |
+
for k, v in sorted(m['parse_stats'].items()):
|
| 191 |
+
print(f" {k}: {v}")
|
| 192 |
+
print(f"{'=' * 60}")
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def print_mute(m: Dict[str, Any], label: str) -> None:
|
| 196 |
+
print(f"{'=' * 60}")
|
| 197 |
+
print(f" Mute Eval: {label}")
|
| 198 |
+
print(f"{'=' * 60}")
|
| 199 |
+
print(f" Total samples: {m['total_samples']}")
|
| 200 |
+
print(f" Mute Detection Rate: {m['mute_detection_rate']:.1%}")
|
| 201 |
+
print(f" Hallucination Rate: {m['hallucination_rate']:.1%}")
|
| 202 |
+
print(f" --- Prediction Breakdown ---")
|
| 203 |
+
bd = m['prediction_breakdown']
|
| 204 |
+
print(f" muted (correct): {bd['muted']}")
|
| 205 |
+
print(f" synced (halluc.): {bd['synced']}")
|
| 206 |
+
print(f" delay (halluc.): {bd['delay']}")
|
| 207 |
+
print(f" early (halluc.): {bd['early']}")
|
| 208 |
+
print(f" --- Parse Stats ---")
|
| 209 |
+
for k, v in sorted(m['parse_stats'].items()):
|
| 210 |
+
print(f" {k}: {v}")
|
| 211 |
+
print(f"{'=' * 60}")
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
# --------------------------------------------------------------------------
|
| 215 |
+
# Main
|
| 216 |
+
# --------------------------------------------------------------------------
|
| 217 |
+
COMPUTE = {"sync": metrics_sync, "swap_direct": metrics_swap,
|
| 218 |
+
"swap_neutral": metrics_swap, "mute_neutral": metrics_mute}
|
| 219 |
+
PRINT = {"sync": print_sync, "swap_direct": print_swap,
|
| 220 |
+
"swap_neutral": print_swap, "mute_neutral": print_mute}
|
| 221 |
+
|
| 222 |
+
CONFIG = {
|
| 223 |
+
"model": "nvidia/nemotron-3-nano-omni-30b-a3b-reasoning:free",
|
| 224 |
+
"provider": "openrouter",
|
| 225 |
+
"judge": "gpt-5.4 (native OpenAI)",
|
| 226 |
+
"send_mode": "muxed (ffmpeg merges target audio into video, single video_url block)",
|
| 227 |
+
"n_frames": None,
|
| 228 |
+
"input": "native video (data:video/mp4;base64,...) + embedded audio",
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
for task, d in DIRS.items():
|
| 232 |
+
jsonl = d / "eval_results.jsonl"
|
| 233 |
+
rows = load_rows(jsonl)
|
| 234 |
+
if not rows:
|
| 235 |
+
print(f"[skip] {task}: no rows in {jsonl}")
|
| 236 |
+
continue
|
| 237 |
+
m = COMPUTE[task](rows)
|
| 238 |
+
m["eval_config"] = {**CONFIG, "task": task, "results_jsonl": str(jsonl)}
|
| 239 |
+
|
| 240 |
+
metrics_path = d / "metrics.json"
|
| 241 |
+
summary_path = d / "summary.txt"
|
| 242 |
+
metrics_path.write_text(json.dumps(m, indent=2, ensure_ascii=False))
|
| 243 |
+
|
| 244 |
+
buf = io.StringIO()
|
| 245 |
+
with redirect_stdout(buf):
|
| 246 |
+
PRINT[task](m, d.name)
|
| 247 |
+
summary_path.write_text(buf.getvalue())
|
| 248 |
+
|
| 249 |
+
PRINT[task](m, d.name)
|
| 250 |
+
print(f" -> {metrics_path}")
|
| 251 |
+
print(f" -> {summary_path}\n")
|
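Note: because the run tag is the script's only positional argument, comparing two OpenRouter runs just means pointing the same readers at two tag directories. The sketch below is one way to do that; TAG_B is a hypothetical second tag and the path layout simply mirrors the DIRS mapping above.

    """Minimal sketch: compare the headline metric of two finished runs.

    TAG_B is a placeholder; substitute the tags actually passed to
    compute_nemotron_or_metrics.py.
    """
    import json
    from pathlib import Path

    EVAL = Path("/home/ubuntu/eval_results")
    TAG_A = "nemotron3_nano_omni_reasoning_free_audioMuxed"
    TAG_B = "nemotron3_nano_omni_reasoning_free_separate"  # hypothetical second run

    # task -> (parent dir / dir prefix, prompt suffix, headline metric key)
    TASKS = {
        "sync": ("sync", "", "sync_desync_accuracy"),
        "swap_direct": ("swap", "_promptDirect", "mismatch_detection_rate"),
        "mute_neutral": ("mute", "_promptNeutral", "mute_detection_rate"),
    }

    def headline(tag, sub, suffix, key):
        # e.g. /home/ubuntu/eval_results/swap/swap_<tag>_promptDirect/metrics.json
        path = EVAL / sub / f"{sub}_{tag}{suffix}" / "metrics.json"
        return json.loads(path.read_text())[key] if path.exists() else None

    for task, (sub, suffix, key) in TASKS.items():
        a = headline(TAG_A, sub, suffix, key)
        b = headline(TAG_B, sub, suffix, key)
        if a is None or b is None:
            print(f"{task:>12}: metrics.json missing for one of the tags")
            continue
        print(f"{task:>12}: {a:.1%} vs {b:.1%} (delta {b - a:+.1%})")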
eval_nemotron_openrouter.py
ADDED
|
@@ -0,0 +1,780 @@
| 1 |
+
"""Evaluate nvidia/nemotron-3-nano-omni-30b-a3b-reasoning:free via OpenRouter
|
| 2 |
+
on the FULL CleverHans test set, sending NATIVE video + audio (no frame
|
| 3 |
+
extraction):
|
| 4 |
+
- sync (426) raw mp4 (audio is already embedded in delay/early/sync variant) + GPT judge
|
| 5 |
+
- swap-direct (142) donor audio MUXED INTO video via ffmpeg, yes/no parsed
|
| 6 |
+
- swap-neutral (142) same muxed donor video + GPT judge
|
| 7 |
+
- mute-neutral (142) silent wav MUXED INTO video + GPT judge
|
| 8 |
+
|
| 9 |
+
OpenRouter accepts multimodal content blocks in OpenAI-compatible chat:
|
| 10 |
+
{"type": "video_url", "video_url": {"url": "data:video/mp4;base64,..."}}
|
| 11 |
+
{"type": "input_audio", "input_audio": {"data": "<b64>", "format": "wav"}}
|
| 12 |
+
The Nemotron-3-Nano-Omni model lists input_modalities = text/audio/image/video.
|
| 13 |
+
Because we don't know whether OpenRouter routes a separately-supplied
|
| 14 |
+
input_audio block as an OVERRIDE of the audio embedded in video_url or as a
|
| 15 |
+
SECOND track, we mux the desired audio into a tmp mp4 (ffmpeg) and send only
|
| 16 |
+
video_url. That guarantees the model hears exactly the audio we intend.
|
| 17 |
+
|
| 18 |
+
Usage:
|
| 19 |
+
pip install openai tqdm
|
| 20 |
+
export OPENROUTER_API_KEY=sk-or-v1-...
|
| 21 |
+
# optional: separate key for the GPT judge (defaults to OPENROUTER_API_KEY,
|
| 22 |
+
# routing openai/gpt-5.4 through OpenRouter)
|
| 23 |
+
export OPENAI_API_KEY=sk-...
|
| 24 |
+
|
| 25 |
+
python3 /home/ubuntu/eval_nemotron_openrouter.py
|
| 26 |
+
python3 /home/ubuntu/eval_nemotron_openrouter.py --tasks sync --max-samples 10
|
| 27 |
+
# If OpenRouter ever rejects the muxed payload with a 4xx, retry with both
|
| 28 |
+
# blocks (video_url + input_audio) instead:
|
| 29 |
+
python3 /home/ubuntu/eval_nemotron_openrouter.py --send-mode separate
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
import argparse
|
| 33 |
+
import base64
|
| 34 |
+
import json
|
| 35 |
+
import os
|
| 36 |
+
import random
|
| 37 |
+
import re
|
| 38 |
+
import shutil
|
| 39 |
+
import subprocess
|
| 40 |
+
import sys
|
| 41 |
+
import tempfile
|
| 42 |
+
import time
|
| 43 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 44 |
+
from pathlib import Path
|
| 45 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 46 |
+
|
| 47 |
+
# ---------------------------------------------------------------------------
|
| 48 |
+
# Paths
|
| 49 |
+
# ---------------------------------------------------------------------------
|
| 50 |
+
DATA_ROOT = Path("/opt/dlami/nvme/video_source")
|
| 51 |
+
ORIG_DIR = DATA_ROOT / "original" / "uag_oops"
|
| 52 |
+
DELAY_DIR = DATA_ROOT / "random_shift_video" / "delay"
|
| 53 |
+
EARLY_DIR = DATA_ROOT / "random_shift_video" / "early"
|
| 54 |
+
AUDIO_ORIG = DATA_ROOT / "extracted_audio" / "original" / "uag_oops"
|
| 55 |
+
AUDIO_DELAY = DATA_ROOT / "extracted_audio" / "random_shift_video" / "delay"
|
| 56 |
+
AUDIO_EARLY = DATA_ROOT / "extracted_audio" / "random_shift_video" / "early"
|
| 57 |
+
MUTE_DIR = DATA_ROOT / "mute_audio"
|
| 58 |
+
MUX_CACHE = DATA_ROOT / "openrouter_muxed_cache" # cached muxed videos
|
| 59 |
+
|
| 60 |
+
TEST_JSONL = Path("/home/ubuntu/CleverHans-Evaluation/data/kto_training_data_v2_test.jsonl")
|
| 61 |
+
GEMINI_SWAP = Path("/home/ubuntu/eval_results/gemini_mute_sync_swap/swap/"
|
| 62 |
+
"swap_gemini_gemini-3.1-pro-preview__promptDirect/eval_results.jsonl")
|
| 63 |
+
|
| 64 |
+
EVAL_ROOT = Path("/home/ubuntu/eval_results")
|
| 65 |
+
FFMPEG = "/home/ubuntu/anaconda3/envs/video/bin/ffmpeg"
|
| 66 |
+
FFPROBE = "/home/ubuntu/anaconda3/envs/video/bin/ffprobe"
|
| 67 |
+
if not Path(FFMPEG).exists():
|
| 68 |
+
FFMPEG = shutil.which("ffmpeg") or "ffmpeg"
|
| 69 |
+
if not Path(FFPROBE).exists():
|
| 70 |
+
FFPROBE = shutil.which("ffprobe") or "ffprobe"
|
| 71 |
+
|
| 72 |
+
DIR_OFFSET_RE = re.compile(r"_(delay|early)_(\d+(?:\.\d+)?)s\.mp4$")
|
| 73 |
+
|
| 74 |
+
# ---------------------------------------------------------------------------
|
| 75 |
+
# Prompts (same wording as the rest of the CleverHans suite)
|
| 76 |
+
# ---------------------------------------------------------------------------
|
| 77 |
+
SYNC_PROMPT = (
|
| 78 |
+
"Watch this video and listen to its audio carefully. "
|
| 79 |
+
"Determine whether the audio and video tracks are synchronized. "
|
| 80 |
+
"If they are not synchronized, identify the direction of the offset "
|
| 81 |
+
"(audio delayed or audio early relative to video) and estimate the offset in seconds. "
|
| 82 |
+
"Explain your reasoning."
|
| 83 |
+
)
|
| 84 |
+
MUTE_PROMPT_NEUTRAL = "Describe the audio you hear in this video."
|
| 85 |
+
SWAP_PROMPT_NEUTRAL = "Describe what you see in the video and what you hear in the audio."
|
| 86 |
+
SWAP_PROMPT_DIRECT = (
|
| 87 |
+
"Does the audio in this video match the visual content? "
|
| 88 |
+
"Answer with only 'yes' or 'no'."
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
SYNC_PARSE_SYSTEM = (
|
| 92 |
+
"You parse a free-form judgement about whether the audio in a short video is "
|
| 93 |
+
"synchronized with the visuals. Return STRICT JSON only with keys: "
|
| 94 |
+
"synced (bool), direction (one of 'delay'|'early'|'none'), "
|
| 95 |
+
"offset_sec (number, 0 if synced), t_v (number|null, visual cue time), "
|
| 96 |
+
"t_a (number|null, audio cue time), explanation (string). "
|
| 97 |
+
"If the answer says they are aligned, set synced=true, direction='none', offset_sec=0."
|
| 98 |
+
)
|
| 99 |
+
MUTE_JUDGE_SYSTEM = (
|
| 100 |
+
"You are evaluating a model's response about a video whose audio has been secretly "
|
| 101 |
+
"replaced with silence. Classify into ONE of: "
|
| 102 |
+
'{"prediction": "muted"|"synced"|"delay"|"early", "explanation": "<one sentence>"}. '
|
| 103 |
+
"Rules: "
|
| 104 |
+
"- 'muted' iff the model says no audio / silence / no audible sound. "
|
| 105 |
+
"- 'synced' iff the model describes audio that matches the visuals (HALLUCINATION). "
|
| 106 |
+
"- 'delay'/'early' iff the model claims a temporal offset. "
|
| 107 |
+
"Return STRICT JSON only."
|
| 108 |
+
)
|
| 109 |
+
SWAP_JUDGE_SYSTEM = (
|
| 110 |
+
"You are evaluating a model's response about a video whose audio has been secretly "
|
| 111 |
+
"replaced with audio from a DIFFERENT unrelated video. Classify into ONE of: "
|
| 112 |
+
'{"prediction": "synced"|"delay"|"early"|"mismatched", "explanation": "<one sentence>"}. '
|
| 113 |
+
"Rules: "
|
| 114 |
+
"- 'mismatched' iff the model says the audio does NOT match the visual content, or "
|
| 115 |
+
"the audio and video are unrelated, or the sounds don't correspond to what's shown. "
|
| 116 |
+
"- 'synced' iff the model claims audio and video are well synchronized and matching. "
|
| 117 |
+
"- 'delay'/'early' iff the model claims a temporal offset (but still believes they match). "
|
| 118 |
+
"- Prefer 'mismatched' whenever the model expresses confusion or doubt. "
|
| 119 |
+
"Return STRICT JSON only."
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
# ---------------------------------------------------------------------------
|
| 124 |
+
# Utilities
|
| 125 |
+
# ---------------------------------------------------------------------------
|
| 126 |
+
def load_jsonl(path):
|
| 127 |
+
with open(path) as f:
|
| 128 |
+
for line in f:
|
| 129 |
+
line = line.strip()
|
| 130 |
+
if line:
|
| 131 |
+
yield json.loads(line)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def parse_gt_from_name(video_name: str) -> Dict[str, Any]:
|
| 135 |
+
m = DIR_OFFSET_RE.search(video_name)
|
| 136 |
+
if m:
|
| 137 |
+
return {"synced": False, "direction": m.group(1), "offset_sec": float(m.group(2))}
|
| 138 |
+
return {"synced": True, "direction": "none", "offset_sec": 0.0}
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def video_path_for(video_name: str) -> Optional[Path]:
|
| 142 |
+
m = DIR_OFFSET_RE.search(video_name)
|
| 143 |
+
if m is None:
|
| 144 |
+
p = ORIG_DIR / video_name
|
| 145 |
+
elif m.group(1) == "delay":
|
| 146 |
+
p = DELAY_DIR / video_name
|
| 147 |
+
else:
|
| 148 |
+
p = EARLY_DIR / video_name
|
| 149 |
+
return p if p.exists() else None
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def video_duration(path: Path) -> float:
|
| 153 |
+
out = subprocess.run(
|
| 154 |
+
[FFPROBE, "-v", "error", "-show_entries", "format=duration",
|
| 155 |
+
"-of", "default=noprint_wrappers=1:nokey=1", str(path)],
|
| 156 |
+
capture_output=True, text=True, check=True,
|
| 157 |
+
)
|
| 158 |
+
try:
|
| 159 |
+
return float(out.stdout.strip())
|
| 160 |
+
except ValueError:
|
| 161 |
+
return 5.0
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def silent_wav_for(video_path: Path) -> Path:
|
| 165 |
+
MUTE_DIR.mkdir(parents=True, exist_ok=True)
|
| 166 |
+
out = MUTE_DIR / f"{video_path.stem}_silent.wav"
|
| 167 |
+
if out.exists():
|
| 168 |
+
return out
|
| 169 |
+
import wave
|
| 170 |
+
sr = 16000
|
| 171 |
+
n = int(sr * max(0.5, video_duration(video_path)))
|
| 172 |
+
with wave.open(str(out), "wb") as w:
|
| 173 |
+
w.setnchannels(1)
|
| 174 |
+
w.setsampwidth(2)
|
| 175 |
+
w.setframerate(sr)
|
| 176 |
+
w.writeframes(b"\x00\x00" * n)
|
| 177 |
+
return out
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def mux_audio_into_video(video_path: Path, audio_path: Path,
|
| 181 |
+
tag: str) -> Path:
|
| 182 |
+
"""Replace video's audio track with `audio_path`. Cached under MUX_CACHE
|
| 183 |
+
by (video_stem + tag + audio_stem) so swap/mute pairings only mux once.
|
| 184 |
+
"""
|
| 185 |
+
MUX_CACHE.mkdir(parents=True, exist_ok=True)
|
| 186 |
+
out = MUX_CACHE / f"{video_path.stem}__{tag}__{audio_path.stem}.mp4"
|
| 187 |
+
if out.exists():
|
| 188 |
+
return out
|
| 189 |
+
cmd = [
|
| 190 |
+
FFMPEG, "-y", "-loglevel", "error",
|
| 191 |
+
"-i", str(video_path),
|
| 192 |
+
"-i", str(audio_path),
|
| 193 |
+
"-map", "0:v:0", "-map", "1:a:0",
|
| 194 |
+
"-c:v", "copy",
|
| 195 |
+
"-c:a", "aac", "-b:a", "128k",
|
| 196 |
+
"-shortest",
|
| 197 |
+
str(out),
|
| 198 |
+
]
|
| 199 |
+
subprocess.run(cmd, check=True)
|
| 200 |
+
return out
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def file_to_data_uri(path: Path, mime: str) -> str:
|
| 204 |
+
b = path.read_bytes()
|
| 205 |
+
return f"data:{mime};base64,{base64.b64encode(b).decode()}"
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def wav_b64(path: Path) -> str:
|
| 209 |
+
return base64.b64encode(path.read_bytes()).decode()
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# ---------------------------------------------------------------------------
|
| 213 |
+
# Clients
|
| 214 |
+
# ---------------------------------------------------------------------------
|
| 215 |
+
def _openrouter_client(api_key: str):
|
| 216 |
+
from openai import OpenAI
|
| 217 |
+
return OpenAI(
|
| 218 |
+
api_key=api_key,
|
| 219 |
+
base_url="https://openrouter.ai/api/v1",
|
| 220 |
+
default_headers={
|
| 221 |
+
"HTTP-Referer": os.environ.get("OPENROUTER_REFERER", "https://localhost"),
|
| 222 |
+
"X-Title": os.environ.get("OPENROUTER_TITLE", "CleverHans-Nemotron-Eval"),
|
| 223 |
+
},
|
| 224 |
+
timeout=300.0,
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def _judge_client(judge_key: Optional[str], openrouter_key: str):
|
| 229 |
+
"""Native OpenAI if a different key is supplied, else route through OpenRouter."""
|
| 230 |
+
from openai import OpenAI
|
| 231 |
+
if judge_key and judge_key != openrouter_key:
|
| 232 |
+
return OpenAI(api_key=judge_key, timeout=120.0), False
|
| 233 |
+
return OpenAI(api_key=openrouter_key,
|
| 234 |
+
base_url="https://openrouter.ai/api/v1",
|
| 235 |
+
timeout=120.0), True
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
# ---------------------------------------------------------------------------
|
| 239 |
+
# Build content blocks
|
| 240 |
+
# ---------------------------------------------------------------------------
|
| 241 |
+
def _content_video_only(prompt: str, video_path: Path) -> List[Dict[str, Any]]:
|
| 242 |
+
return [
|
| 243 |
+
{"type": "video_url",
|
| 244 |
+
"video_url": {"url": file_to_data_uri(video_path, "video/mp4")}},
|
| 245 |
+
{"type": "text", "text": prompt},
|
| 246 |
+
]
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def _content_video_plus_audio(prompt: str, video_path: Path,
|
| 250 |
+
audio_path: Path) -> List[Dict[str, Any]]:
|
| 251 |
+
fmt = audio_path.suffix.lstrip(".").lower() or "wav"
|
| 252 |
+
return [
|
| 253 |
+
{"type": "video_url",
|
| 254 |
+
"video_url": {"url": file_to_data_uri(video_path, "video/mp4")}},
|
| 255 |
+
{"type": "input_audio",
|
| 256 |
+
"input_audio": {"data": wav_b64(audio_path), "format": fmt}},
|
| 257 |
+
{"type": "text", "text": prompt},
|
| 258 |
+
]
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
# ---------------------------------------------------------------------------
|
| 262 |
+
# One chat call with retries
|
| 263 |
+
# ---------------------------------------------------------------------------
|
| 264 |
+
def _extract_text(resp) -> str:
|
| 265 |
+
"""Pull final answer text from a chat completion. Handles:
|
| 266 |
+
- resp / resp.choices being None (OR error responses)
|
| 267 |
+
- reasoning models that put the final answer in message.content but the
|
| 268 |
+
chain in message.reasoning / message.reasoning_content (we only return
|
| 269 |
+
content; reasoning is logged separately by callers if they want it)."""
|
| 270 |
+
if resp is None:
|
| 271 |
+
return ""
|
| 272 |
+
choices = getattr(resp, "choices", None) or []
|
| 273 |
+
if not choices:
|
| 274 |
+
return ""
|
| 275 |
+
msg = getattr(choices[0], "message", None)
|
| 276 |
+
if msg is None:
|
| 277 |
+
return ""
|
| 278 |
+
content = getattr(msg, "content", None) or ""
|
| 279 |
+
if isinstance(content, list):
|
| 280 |
+
# Some providers return content as a list of {type, text} parts.
|
| 281 |
+
parts = [p.get("text", "") for p in content if isinstance(p, dict) and p.get("type") == "text"]
|
| 282 |
+
content = "".join(parts)
|
| 283 |
+
return (content or "").strip()
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def call_model(client, model: str, content: List[Dict[str, Any]],
|
| 287 |
+
max_tokens: int = 2000, temperature: float = 0.0,
|
| 288 |
+
reasoning_budget: int = 1024,
|
| 289 |
+
max_retries: int = 4) -> str:
|
| 290 |
+
"""Send chat completion. Bounds reasoning budget so reasoning tokens don't
|
| 291 |
+
eat the whole max_tokens window — without this, Nemotron-Reasoning often
|
| 292 |
+
returns empty content because max_tokens is consumed by the chain of
|
| 293 |
+
thought before any user-facing answer is produced."""
|
| 294 |
+
last_exc = None
|
| 295 |
+
for attempt in range(max_retries):
|
| 296 |
+
try:
|
| 297 |
+
kwargs = dict(
|
| 298 |
+
model=model,
|
| 299 |
+
messages=[{"role": "user", "content": content}],
|
| 300 |
+
max_tokens=max_tokens,
|
| 301 |
+
temperature=temperature,
|
| 302 |
+
# OpenRouter "reasoning" param: bound chain-of-thought length.
|
| 303 |
+
# Keys: max_tokens (cap reasoning), exclude (drop from response).
|
| 304 |
+
extra_body={"reasoning": {"max_tokens": reasoning_budget}},
|
| 305 |
+
)
|
| 306 |
+
resp = client.chat.completions.create(**kwargs)
|
| 307 |
+
text = _extract_text(resp)
|
| 308 |
+
return text
|
| 309 |
+
except Exception as exc:
|
| 310 |
+
last_exc = exc
|
| 311 |
+
msg = str(exc).lower()
|
| 312 |
+
if any(s in msg for s in ("429", "rate", "timeout", "502", "503", "504")):
|
| 313 |
+
time.sleep(2 ** attempt + random.random())
|
| 314 |
+
continue
|
| 315 |
+
# If `extra_body` / `reasoning` is rejected, retry without it.
|
| 316 |
+
if "reasoning" in msg and "unsupported" in msg:
|
| 317 |
+
try:
|
| 318 |
+
resp = client.chat.completions.create(
|
| 319 |
+
model=model,
|
| 320 |
+
messages=[{"role": "user", "content": content}],
|
| 321 |
+
max_tokens=max_tokens,
|
| 322 |
+
temperature=temperature,
|
| 323 |
+
)
|
| 324 |
+
return _extract_text(resp)
|
| 325 |
+
except Exception as exc2:
|
| 326 |
+
last_exc = exc2
|
| 327 |
+
raise
|
| 328 |
+
raise RuntimeError(f"call_model failed after {max_retries}: {last_exc}")
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def call_judge(judge_client, judge_model: str, system_prompt: str,
|
| 332 |
+
user_text: str, max_retries: int = 3) -> Optional[Dict[str, Any]]:
|
| 333 |
+
"""OpenAI reasoning models (gpt-5.x, o-series) reject `max_tokens` and
|
| 334 |
+
require `max_completion_tokens`. Try the new param first; if a non-reasoning
|
| 335 |
+
model rejects it, fall back to `max_tokens`."""
|
| 336 |
+
if not user_text:
|
| 337 |
+
return None
|
| 338 |
+
last_exc = None
|
| 339 |
+
use_max_completion = True
|
| 340 |
+
for attempt in range(max_retries):
|
| 341 |
+
try:
|
| 342 |
+
kwargs = dict(
|
| 343 |
+
model=judge_model,
|
| 344 |
+
messages=[
|
| 345 |
+
{"role": "system", "content": system_prompt},
|
| 346 |
+
{"role": "user", "content": user_text},
|
| 347 |
+
],
|
| 348 |
+
response_format={"type": "json_object"},
|
| 349 |
+
)
|
| 350 |
+
# Reasoning models need a generous budget — they spend tokens on
|
| 351 |
+
# reasoning before emitting the JSON.
|
| 352 |
+
if use_max_completion:
|
| 353 |
+
kwargs["max_completion_tokens"] = 2000
|
| 354 |
+
else:
|
| 355 |
+
kwargs["max_tokens"] = 300
|
| 356 |
+
kwargs["temperature"] = 0.0
|
| 357 |
+
resp = judge_client.chat.completions.create(**kwargs)
|
| 358 |
+
text = _extract_text(resp)
|
| 359 |
+
if not text:
|
| 360 |
+
last_exc = RuntimeError("judge returned empty content")
|
| 361 |
+
time.sleep(1.0)
|
| 362 |
+
continue
|
| 363 |
+
for pat in (re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
|
| 364 |
+
re.compile(r"(\{.*\})", re.DOTALL)):
|
| 365 |
+
m = pat.search(text)
|
| 366 |
+
if m:
|
| 367 |
+
return json.loads(m.group(1))
|
| 368 |
+
return json.loads(text)
|
| 369 |
+
except Exception as exc:
|
| 370 |
+
last_exc = exc
|
| 371 |
+
msg = str(exc).lower()
|
| 372 |
+
# Hot-swap param if the API tells us the wrong one is set.
|
| 373 |
+
if "max_completion_tokens" in msg and "instead" in msg:
|
| 374 |
+
use_max_completion = True
|
| 375 |
+
continue
|
| 376 |
+
if "max_tokens" in msg and "max_completion_tokens" in msg:
|
| 377 |
+
use_max_completion = True
|
| 378 |
+
continue
|
| 379 |
+
if "unsupported parameter" in msg and "temperature" in msg:
|
| 380 |
+
# gpt-5.x rejects temperature; we already drop it on the new path.
|
| 381 |
+
use_max_completion = True
|
| 382 |
+
continue
|
| 383 |
+
time.sleep(1.5 ** attempt + random.random())
|
| 384 |
+
print(f" [judge] failed after {max_retries}: {last_exc}", flush=True)
|
| 385 |
+
return None
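# Illustrative sketch (not part of the original script): the judge may wrap its
# JSON in a ```json fence; the same two regexes used above recover the object
# either way. Defined only as a demonstration, never called.
def _example_judge_parse():
    fenced = '```json\n{"prediction": "muted", "explanation": "no audio described"}\n```'
    for pat in (re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
                re.compile(r"(\{.*\})", re.DOTALL)):
        m = pat.search(fenced)
        if m:
            return json.loads(m.group(1))   # -> {"prediction": "muted", ...}
    return json.loads(fenced)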
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
# ---------------------------------------------------------------------------
|
| 389 |
+
# Per-task runners
|
| 390 |
+
# ---------------------------------------------------------------------------
|
| 391 |
+
def _build_content(prompt, video_path, audio_path, send_mode):
|
| 392 |
+
"""send_mode:
|
| 393 |
+
'muxed' – audio already muxed into video, send video_url only
|
| 394 |
+
'separate' – send video_url + input_audio (model decides)
|
| 395 |
+
'video' – send video_url only (audio comes from video's own track)
|
| 396 |
+
"""
|
| 397 |
+
if send_mode == "video" or audio_path is None:
|
| 398 |
+
return _content_video_only(prompt, video_path)
|
| 399 |
+
if send_mode == "separate":
|
| 400 |
+
return _content_video_plus_audio(prompt, video_path, audio_path)
|
| 401 |
+
# muxed: caller has already muxed; audio_path is unused at request time
|
| 402 |
+
return _content_video_only(prompt, video_path)
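# Illustrative sketch (not part of the original script): what each --send-mode
# resolves to, assuming the two _content_* helpers defined earlier in this file.
#   'video'    -> video_url only (audio is whatever the mp4 already carries)
#   'separate' -> video_url block plus an input_audio block
#   'muxed'    -> caller muxes the target audio into the mp4 first, then video_url only
def _example_build_content():
    video, audio = "/tmp/example.mp4", "/tmp/example.wav"   # placeholders
    return [_build_content("Describe the audio.", video, audio, mode)
            for mode in ("video", "separate", "muxed")]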
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def run_one_sync(item, model_client, model, judge_client, judge_model, send_mode):
|
| 406 |
+
# Sync videos already have the right audio embedded in delay/early/sync mp4.
|
| 407 |
+
content = _build_content(SYNC_PROMPT, item["video_path"], None, "video")
|
| 408 |
+
raw = call_model(model_client, model, content,
|
| 409 |
+
max_tokens=2500, reasoning_budget=1024)
|
| 410 |
+
parsed = call_judge(judge_client, judge_model, SYNC_PARSE_SYSTEM, raw) or {}
|
| 411 |
+
direction = str(parsed.get("direction", "none")).lower()
|
| 412 |
+
if direction not in ("delay", "early", "none"):
|
| 413 |
+
direction = "none"
|
| 414 |
+
synced = parsed.get("synced", False)
|
| 415 |
+
if isinstance(synced, str):
|
| 416 |
+
synced = synced.lower() in ("true", "yes", "1")
|
| 417 |
+
return {
|
| 418 |
+
"video": item["video"],
|
| 419 |
+
"video_path": str(item["video_path"]),
|
| 420 |
+
"gt_synced": item["gt_synced"],
|
| 421 |
+
"gt_direction": item["gt_direction"],
|
| 422 |
+
"gt_offset_sec": item["gt_offset_sec"],
|
| 423 |
+
"pred_synced": bool(synced),
|
| 424 |
+
"pred_direction": direction,
|
| 425 |
+
"pred_offset_sec": float(parsed.get("offset_sec", 0.0) or 0.0),
|
| 426 |
+
"pred_explanation": str(parsed.get("explanation", "")),
|
| 427 |
+
"parse_method": "openrouter_judge" if parsed else "judge_fail",
|
| 428 |
+
"send_mode": send_mode if send_mode != "muxed" else "video",
|
| 429 |
+
"raw_output": raw,
|
| 430 |
+
}
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def run_one_mute(item, model_client, model, judge_client, judge_model, send_mode):
|
| 434 |
+
# Mux silent wav into video → send video_url with no audio (muted).
|
| 435 |
+
if send_mode == "muxed":
|
| 436 |
+
muxed = mux_audio_into_video(item["video_path"], item["audio_path"], "mute")
|
| 437 |
+
content = _content_video_only(MUTE_PROMPT_NEUTRAL, muxed)
|
| 438 |
+
else:
|
| 439 |
+
content = _build_content(MUTE_PROMPT_NEUTRAL, item["video_path"],
|
| 440 |
+
item["audio_path"], send_mode)
|
| 441 |
+
raw = call_model(model_client, model, content,
|
| 442 |
+
max_tokens=2000, reasoning_budget=768)
|
| 443 |
+
parsed = call_judge(judge_client, judge_model, MUTE_JUDGE_SYSTEM, raw) or {}
|
| 444 |
+
pred = str(parsed.get("prediction", "synced")).lower()
|
| 445 |
+
if pred not in ("muted", "synced", "delay", "early"):
|
| 446 |
+
pred = "synced"
|
| 447 |
+
return {
|
| 448 |
+
"video": item["video"],
|
| 449 |
+
"video_path": str(item["video_path"]),
|
| 450 |
+
"audio_path": str(item["audio_path"]),
|
| 451 |
+
"gt_label": "muted",
|
| 452 |
+
"pred_label": pred,
|
| 453 |
+
"correct": (pred == "muted"),
|
| 454 |
+
"judge_explanation": str(parsed.get("explanation", "")),
|
| 455 |
+
"parse_method": "openrouter_judge" if parsed else "judge_fail",
|
| 456 |
+
"prompt_mode": "neutral",
|
| 457 |
+
"send_mode": send_mode,
|
| 458 |
+
"raw_output": raw,
|
| 459 |
+
}
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
def run_one_swap_neutral(item, model_client, model, judge_client, judge_model, send_mode):
|
| 463 |
+
if send_mode == "muxed":
|
| 464 |
+
muxed = mux_audio_into_video(item["video_path"], item["audio_path"], "swap")
|
| 465 |
+
content = _content_video_only(SWAP_PROMPT_NEUTRAL, muxed)
|
| 466 |
+
else:
|
| 467 |
+
content = _build_content(SWAP_PROMPT_NEUTRAL, item["video_path"],
|
| 468 |
+
item["audio_path"], send_mode)
|
| 469 |
+
raw = call_model(model_client, model, content,
|
| 470 |
+
max_tokens=2000, reasoning_budget=768)
|
| 471 |
+
parsed = call_judge(judge_client, judge_model, SWAP_JUDGE_SYSTEM, raw) or {}
|
| 472 |
+
pred = str(parsed.get("prediction", "synced")).lower()
|
| 473 |
+
if pred not in ("synced", "delay", "early", "mismatched"):
|
| 474 |
+
pred = "synced"
|
| 475 |
+
return {
|
| 476 |
+
"video": item["video"],
|
| 477 |
+
"video_path": str(item["video_path"]),
|
| 478 |
+
"audio_path": str(item["audio_path"]),
|
| 479 |
+
"swapped_from": item["swapped_from"],
|
| 480 |
+
"gt_label": "mismatched",
|
| 481 |
+
"pred_label": pred,
|
| 482 |
+
"correct": (pred == "mismatched"),
|
| 483 |
+
"judge_explanation": str(parsed.get("explanation", "")),
|
| 484 |
+
"parse_method": "openrouter_judge" if parsed else "judge_fail",
|
| 485 |
+
"prompt_mode": "neutral",
|
| 486 |
+
"send_mode": send_mode,
|
| 487 |
+
"raw_output": raw,
|
| 488 |
+
}
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
def run_one_swap_direct(item, model_client, model, judge_client, judge_model, send_mode):
|
| 492 |
+
if send_mode == "muxed":
|
| 493 |
+
muxed = mux_audio_into_video(item["video_path"], item["audio_path"], "swap")
|
| 494 |
+
content = _content_video_only(SWAP_PROMPT_DIRECT, muxed)
|
| 495 |
+
else:
|
| 496 |
+
content = _build_content(SWAP_PROMPT_DIRECT, item["video_path"],
|
| 497 |
+
item["audio_path"], send_mode)
|
| 498 |
+
# Yes/no question: tiny answer budget, but reasoning model still needs
|
| 499 |
+
# headroom — give it enough that reasoning doesn't starve the final token.
|
| 500 |
+
raw = call_model(model_client, model, content,
|
| 501 |
+
max_tokens=1200, reasoning_budget=512)
|
| 502 |
+
t = raw.strip().lower()
|
| 503 |
+
head = t.split()[:3] if t else []
|
| 504 |
+
if t.startswith("no") or "no" in head:
|
| 505 |
+
pred = "mismatched"
|
| 506 |
+
elif t.startswith("yes") or "yes" in head:
|
| 507 |
+
pred = "synced"
|
| 508 |
+
else:
|
| 509 |
+
pred = "synced"
|
| 510 |
+
return {
|
| 511 |
+
"video": item["video"],
|
| 512 |
+
"video_path": str(item["video_path"]),
|
| 513 |
+
"audio_path": str(item["audio_path"]),
|
| 514 |
+
"swapped_from": item["swapped_from"],
|
| 515 |
+
"gt_label": "mismatched",
|
| 516 |
+
"pred_label": pred,
|
| 517 |
+
"correct": (pred == "mismatched"),
|
| 518 |
+
"parse_method": "direct",
|
| 519 |
+
"prompt_mode": "direct",
|
| 520 |
+
"send_mode": send_mode,
|
| 521 |
+
"raw_output": raw,
|
| 522 |
+
}
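# Illustrative sketch (not part of the original script): the direct yes/no
# parsing above keys off the first three whitespace-separated tokens, so short
# hedges before the answer are still caught; anything ambiguous defaults to
# "synced" (i.e. counted as a miss on swapped inputs).
def _example_parse_direct(raw: str) -> str:
    t = raw.strip().lower()
    head = t.split()[:3] if t else []
    if t.startswith("no") or "no" in head:
        return "mismatched"
    if t.startswith("yes") or "yes" in head:
        return "synced"
    return "synced"
# _example_parse_direct("No, the audio describes a kitchen.")  -> "mismatched"
# _example_parse_direct("Well, yes it matches.")               -> "synced"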
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
# ---------------------------------------------------------------------------
|
| 526 |
+
# Build work items
|
| 527 |
+
# ---------------------------------------------------------------------------
|
| 528 |
+
def build_sync_work() -> List[Dict[str, Any]]:
|
| 529 |
+
work = []
|
| 530 |
+
for r in load_jsonl(TEST_JSONL):
|
| 531 |
+
v = r["video"]
|
| 532 |
+
p = video_path_for(v)
|
| 533 |
+
if p is None:
|
| 534 |
+
print(f"[skip sync] missing video: {v}", flush=True)
|
| 535 |
+
continue
|
| 536 |
+
gt = parse_gt_from_name(v)
|
| 537 |
+
work.append({
|
| 538 |
+
"video": v,
|
| 539 |
+
"video_path": p,
|
| 540 |
+
"gt_synced": gt["synced"],
|
| 541 |
+
"gt_direction": gt["direction"],
|
| 542 |
+
"gt_offset_sec": gt["offset_sec"],
|
| 543 |
+
})
|
| 544 |
+
return work
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def build_mute_work() -> List[Dict[str, Any]]:
|
| 548 |
+
work = []
|
| 549 |
+
seen = set()
|
| 550 |
+
for r in load_jsonl(TEST_JSONL):
|
| 551 |
+
v = r["video"]
|
| 552 |
+
if "_delay_" in v or "_early_" in v or v in seen:
|
| 553 |
+
continue
|
| 554 |
+
seen.add(v)
|
| 555 |
+
p = ORIG_DIR / v
|
| 556 |
+
if not p.exists():
|
| 557 |
+
continue
|
| 558 |
+
work.append({"video": v, "video_path": p, "audio_path": silent_wav_for(p)})
|
| 559 |
+
return work
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
def build_swap_work_from_gemini() -> List[Dict[str, Any]]:
|
| 563 |
+
if not GEMINI_SWAP.exists():
|
| 564 |
+
return []
|
| 565 |
+
work = []
|
| 566 |
+
for r in load_jsonl(GEMINI_SWAP):
|
| 567 |
+
v = r["video"]
|
| 568 |
+
donor = r.get("swapped_from")
|
| 569 |
+
if not donor:
|
| 570 |
+
continue
|
| 571 |
+
p = ORIG_DIR / v
|
| 572 |
+
donor_audio = AUDIO_ORIG / f"{Path(donor).stem}.wav"
|
| 573 |
+
if not p.exists() or not donor_audio.exists():
|
| 574 |
+
continue
|
| 575 |
+
work.append({"video": v, "video_path": p,
|
| 576 |
+
"audio_path": donor_audio, "swapped_from": donor})
|
| 577 |
+
return work
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
def build_swap_work_seeded(seed: int = 42) -> List[Dict[str, Any]]:
|
| 581 |
+
base = []
|
| 582 |
+
seen = set()
|
| 583 |
+
for r in load_jsonl(TEST_JSONL):
|
| 584 |
+
v = r["video"]
|
| 585 |
+
if "_delay_" in v or "_early_" in v or v in seen:
|
| 586 |
+
continue
|
| 587 |
+
seen.add(v)
|
| 588 |
+
stem = Path(v).stem
|
| 589 |
+
vp = ORIG_DIR / v
|
| 590 |
+
ap = AUDIO_ORIG / f"{stem}.wav"
|
| 591 |
+
if vp.exists() and ap.exists():
|
| 592 |
+
base.append({"video": v, "stem": stem, "video_path": vp, "own_audio": ap})
|
| 593 |
+
rng = random.Random(seed)
|
| 594 |
+
shuffled = base.copy()
|
| 595 |
+
rng.shuffle(shuffled)
|
| 596 |
+
for i in range(len(shuffled)):
|
| 597 |
+
if shuffled[i]["stem"] == base[i]["stem"]:
|
| 598 |
+
j = (i + 1) % len(shuffled)
|
| 599 |
+
shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
|
| 600 |
+
work = []
|
| 601 |
+
for i, b in enumerate(base):
|
| 602 |
+
donor = shuffled[i]
|
| 603 |
+
if donor["stem"] == b["stem"]:
|
| 604 |
+
continue
|
| 605 |
+
work.append({
|
| 606 |
+
"video": b["video"],
|
| 607 |
+
"video_path": b["video_path"],
|
| 608 |
+
"audio_path": donor["own_audio"],
|
| 609 |
+
"swapped_from": donor["video"],
|
| 610 |
+
})
|
| 611 |
+
return work
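# Illustrative sketch (not part of the original script): the seeded shuffle
# above pairs each clip with another clip's audio; an adjacent swap fixes most
# self-pairings, and the final `continue` simply drops any that remain, so the
# pairing is deterministic for a given seed but not guaranteed to keep every clip.
def _example_seeded_pairs():
    pairs = build_swap_work_seeded(seed=42)
    return [(p["video"], p["swapped_from"]) for p in pairs[:3]]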
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
# ---------------------------------------------------------------------------
|
| 615 |
+
# Run loop with resume + parallelism
|
| 616 |
+
# ---------------------------------------------------------------------------
|
| 617 |
+
def run_task(out_dir: Path, work, runner, model_client, model,
|
| 618 |
+
judge_client, judge_model, send_mode, workers,
|
| 619 |
+
max_samples: int = -1):
|
| 620 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 621 |
+
results_path = out_dir / "eval_results.jsonl"
|
| 622 |
+
|
| 623 |
+
processed = set()
|
| 624 |
+
if results_path.exists():
|
| 625 |
+
with open(results_path) as f:
|
| 626 |
+
for line in f:
|
| 627 |
+
line = line.strip()
|
| 628 |
+
if line:
|
| 629 |
+
processed.add(json.loads(line)["video"])
|
| 630 |
+
print(f"[{out_dir.name}] resume: {len(processed)} already done")
|
| 631 |
+
|
| 632 |
+
todo = [w for w in work if w["video"] not in processed]
|
| 633 |
+
if max_samples > 0:
|
| 634 |
+
todo = todo[:max_samples]
|
| 635 |
+
print(f"[{out_dir.name}] {len(todo)} new / {len(work)} total")
|
| 636 |
+
|
| 637 |
+
def _go(item):
|
| 638 |
+
try:
|
| 639 |
+
return runner(item, model_client, model, judge_client, judge_model, send_mode)
|
| 640 |
+
except Exception as exc:
|
| 641 |
+
print(f"[{out_dir.name}] error on {item['video']}: {exc}", flush=True)
|
| 642 |
+
return None
|
| 643 |
+
|
| 644 |
+
n_done = 0
|
| 645 |
+
with ThreadPoolExecutor(max_workers=workers) as ex, open(results_path, "a") as out:
|
| 646 |
+
futures = {ex.submit(_go, item): item for item in todo}
|
| 647 |
+
for fut in as_completed(futures):
|
| 648 |
+
res = fut.result()
|
| 649 |
+
if res is None:
|
| 650 |
+
continue
|
| 651 |
+
out.write(json.dumps(res, ensure_ascii=False) + "\n")
|
| 652 |
+
out.flush()
|
| 653 |
+
n_done += 1
|
| 654 |
+
if n_done % 5 == 0 or n_done == len(todo):
|
| 655 |
+
print(f"[{out_dir.name}] {n_done}/{len(todo)} done", flush=True)
|
| 656 |
+
print(f"[{out_dir.name}] saved -> {results_path}")
|
| 657 |
+
return results_path
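# Illustrative sketch (not part of the original script): resuming works purely
# off the JSONL: rerunning with the same out_dir skips every video already present.
def _example_resume_set(results_path: Path) -> set:
    done = set()
    if results_path.exists():
        with open(results_path) as f:
            for line in f:
                if line.strip():
                    done.add(json.loads(line)["video"])
    return done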
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
# ---------------------------------------------------------------------------
|
| 661 |
+
# Light summary
|
| 662 |
+
# ---------------------------------------------------------------------------
|
| 663 |
+
def summarize(results_path: Path, kind: str) -> Dict[str, Any]:
|
| 664 |
+
if not results_path.exists():
|
| 665 |
+
return {}
|
| 666 |
+
rows = [json.loads(line) for line in open(results_path) if line.strip()]
|
| 667 |
+
n = len(rows)
|
| 668 |
+
if n == 0:
|
| 669 |
+
return {}
|
| 670 |
+
if kind == "sync":
|
| 671 |
+
sync_correct = sum(1 for r in rows if r["pred_synced"] == r["gt_synced"])
|
| 672 |
+
three_class_correct = sum(
|
| 673 |
+
1 for r in rows
|
| 674 |
+
if (("synced" if r["pred_synced"] else r["pred_direction"]) ==
|
| 675 |
+
("synced" if r["gt_synced"] else r["gt_direction"]))
|
| 676 |
+
)
|
| 677 |
+
return {"total": n,
|
| 678 |
+
"sync_desync_accuracy": round(sync_correct / n, 4),
|
| 679 |
+
"three_class_accuracy": round(three_class_correct / n, 4)}
|
| 680 |
+
if kind == "mute":
|
| 681 |
+
muted = sum(1 for r in rows if r["pred_label"] == "muted")
|
| 682 |
+
return {"total": n,
|
| 683 |
+
"mute_detection_rate": round(muted / n, 4),
|
| 684 |
+
"hallucination_rate": round(1 - muted / n, 4)}
|
| 685 |
+
if kind == "swap":
|
| 686 |
+
mis = sum(1 for r in rows if r["pred_label"] == "mismatched")
|
| 687 |
+
return {"total": n,
|
| 688 |
+
"mismatch_detection_rate": round(mis / n, 4),
|
| 689 |
+
"hallucination_rate": round(1 - mis / n, 4)}
|
| 690 |
+
return {"total": n}
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
# ---------------------------------------------------------------------------
|
| 694 |
+
# Main
|
| 695 |
+
# ---------------------------------------------------------------------------
|
| 696 |
+
def main():
|
| 697 |
+
ap = argparse.ArgumentParser()
|
| 698 |
+
ap.add_argument("--openrouter-key", default=os.environ.get("OPENROUTER_API_KEY"))
|
| 699 |
+
ap.add_argument("--judge-key",
|
| 700 |
+
default=os.environ.get("OPENAI_API_KEY") or os.environ.get("OPENROUTER_API_KEY"))
|
| 701 |
+
ap.add_argument("--model", default="nvidia/nemotron-3-nano-omni-30b-a3b-reasoning:free")
|
| 702 |
+
ap.add_argument("--judge-model", default="openai/gpt-5.4",
|
| 703 |
+
help="If you have a real OPENAI_API_KEY (≠ OR key), use 'gpt-5.4' (native).")
|
| 704 |
+
ap.add_argument("--tasks", default="sync,swap_direct,swap_neutral,mute_neutral")
|
| 705 |
+
ap.add_argument("--workers", type=int, default=4)
|
| 706 |
+
ap.add_argument("--max-samples", type=int, default=-1)
|
| 707 |
+
ap.add_argument("--send-mode", choices=["muxed", "separate", "video"], default="muxed",
|
| 708 |
+
help="muxed: ffmpeg merges target audio into video, send only video_url "
|
| 709 |
+
"(deterministic — model hears exactly what we intend). "
|
| 710 |
+
"separate: send video_url + input_audio blocks (model decides). "
|
| 711 |
+
"video: send video_url only (uses whatever audio is in the mp4).")
|
| 712 |
+
ap.add_argument("--label-suffix", default="")
|
| 713 |
+
args = ap.parse_args()
|
| 714 |
+
|
| 715 |
+
if not args.openrouter_key:
|
| 716 |
+
sys.exit("[error] need --openrouter-key or OPENROUTER_API_KEY env")
|
| 717 |
+
|
| 718 |
+
model_client = _openrouter_client(args.openrouter_key)
|
| 719 |
+
judge_client, judge_via_or = _judge_client(args.judge_key, args.openrouter_key)
|
| 720 |
+
if not judge_via_or and args.judge_model.startswith("openai/"):
|
| 721 |
+
args.judge_model = args.judge_model.split("/", 1)[1]
|
| 722 |
+
|
| 723 |
+
print(f"[config] model={args.model}")
|
| 724 |
+
print(f"[config] judge={args.judge_model} (via {'OpenRouter' if judge_via_or else 'OpenAI'})")
|
| 725 |
+
print(f"[config] send_mode={args.send_mode} workers={args.workers}")
|
| 726 |
+
|
| 727 |
+
# Derive a filesystem-safe tag from --model so the same script works for
|
| 728 |
+
# any OpenRouter omni model. `--label-suffix` is appended; `--label` (if
|
| 729 |
+
# added later) could fully override.
|
| 730 |
+
def _slug(m: str) -> str:
|
| 731 |
+
s = m.replace("/", "_").replace(":", "_").replace(" ", "_")
|
| 732 |
+
# known canonicalizations to preserve old dir names
|
| 733 |
+
if s == "nvidia_nemotron-3-nano-omni-30b-a3b-reasoning_free":
|
| 734 |
+
s = "nemotron3_nano_omni_reasoning_free"
|
| 735 |
+
return s
|
| 736 |
+
tag = _slug(args.model)
|
| 737 |
+
if args.label_suffix:
|
| 738 |
+
tag += args.label_suffix
|
| 739 |
+
elif args.send_mode == "muxed":
|
| 740 |
+
tag += "_audioMuxed"
|
| 741 |
+
elif args.send_mode == "separate":
|
| 742 |
+
tag += "_videoPlusAudio"
|
| 743 |
+
else:
|
| 744 |
+
tag += "_videoOnly"
|
| 745 |
+
|
| 746 |
+
tasks = [t.strip() for t in args.tasks.split(",") if t.strip()]
|
| 747 |
+
|
| 748 |
+
if "sync" in tasks:
|
| 749 |
+
out = run_task(EVAL_ROOT / "sync" / f"sync_{tag}",
|
| 750 |
+
build_sync_work(), run_one_sync,
|
| 751 |
+
model_client, args.model, judge_client, args.judge_model,
|
| 752 |
+
args.send_mode, args.workers, args.max_samples)
|
| 753 |
+
print(f"[sync] {summarize(out, 'sync')}")
|
| 754 |
+
|
| 755 |
+
if "swap_direct" in tasks:
|
| 756 |
+
sw = build_swap_work_from_gemini() or build_swap_work_seeded()
|
| 757 |
+
out = run_task(EVAL_ROOT / "swap" / f"swap_{tag}_promptDirect",
|
| 758 |
+
sw, run_one_swap_direct,
|
| 759 |
+
model_client, args.model, judge_client, args.judge_model,
|
| 760 |
+
args.send_mode, args.workers, args.max_samples)
|
| 761 |
+
print(f"[swap_direct] {summarize(out, 'swap')}")
|
| 762 |
+
|
| 763 |
+
if "swap_neutral" in tasks:
|
| 764 |
+
sw = build_swap_work_from_gemini() or build_swap_work_seeded()
|
| 765 |
+
out = run_task(EVAL_ROOT / "swap" / f"swap_{tag}_promptNeutral",
|
| 766 |
+
sw, run_one_swap_neutral,
|
| 767 |
+
model_client, args.model, judge_client, args.judge_model,
|
| 768 |
+
args.send_mode, args.workers, args.max_samples)
|
| 769 |
+
print(f"[swap_neutral] {summarize(out, 'swap')}")
|
| 770 |
+
|
| 771 |
+
if "mute_neutral" in tasks:
|
| 772 |
+
out = run_task(EVAL_ROOT / "mute" / f"mute_{tag}_promptNeutral",
|
| 773 |
+
build_mute_work(), run_one_mute,
|
| 774 |
+
model_client, args.model, judge_client, args.judge_model,
|
| 775 |
+
args.send_mode, args.workers, args.max_samples)
|
| 776 |
+
print(f"[mute_neutral] {summarize(out, 'mute')}")
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
if __name__ == "__main__":
|
| 780 |
+
main()
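# Illustrative sketch (not part of the original script): with the default model
# and --send-mode muxed, the run_task calls above write results under
# ~/eval_results, e.g.:
#   sync/sync_nemotron3_nano_omni_reasoning_free_audioMuxed/eval_results.jsonl
#   swap/swap_nemotron3_nano_omni_reasoning_free_audioMuxed_promptDirect/eval_results.jsonl
#   swap/swap_nemotron3_nano_omni_reasoning_free_audioMuxed_promptNeutral/eval_results.jsonl
#   mute/mute_nemotron3_nano_omni_reasoning_free_audioMuxed_promptNeutral/eval_results.jsonl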
|
eval_nemotron_openrouter_originals_control.py
ADDED
|
@@ -0,0 +1,304 @@
"""Control: ask Nemotron the SAME mute/swap prompts on UNMODIFIED original
videos via OpenRouter, to measure false-positive rates (does the model
incorrectly flag a clean clip as muted / mismatched?).

Setup:
  - Source: 142 base videos (the same ones used by the swap/mute eval).
  - For every clip we send the ORIGINAL .mp4 as a `video_url` block — its
    audio track is the real one and matches the visuals.
  - No muxing. No silence. No donor audio.

Tasks (same prompts as eval_nemotron_openrouter.py):
  - swap_direct   "Does the audio in this video match...? yes/no"  -> expect 'yes' / synced
  - swap_neutral  "Describe what you see and what you hear."       -> judge -> expect 'synced'
  - mute_neutral  "Describe the audio you hear in this video."     -> judge -> expect NOT 'muted'

Output:
  ~/eval_results/swap_original/swap_<tag>_promptDirect/eval_results.jsonl
  ~/eval_results/swap_original/swap_<tag>_promptNeutral/eval_results.jsonl
  ~/eval_results/mute_original/mute_<tag>_promptNeutral/eval_results.jsonl
  + metrics.json + summary.txt per dir.

Usage:
  export OPENROUTER_API_KEY=sk-or-v1-...
  export OPENAI_API_KEY=sk-...   # judge (gpt-5.4)
  python3 /home/ubuntu/eval_nemotron_openrouter_originals_control.py
  python3 /home/ubuntu/eval_nemotron_openrouter_originals_control.py --tasks swap_direct --max-samples 10
"""
|
| 28 |
+
|
| 29 |
+
import argparse
|
| 30 |
+
import io
|
| 31 |
+
import json
|
| 32 |
+
import os
|
| 33 |
+
import sys
|
| 34 |
+
from contextlib import redirect_stdout
|
| 35 |
+
from pathlib import Path
|
| 36 |
+
from typing import Any, Dict, List
|
| 37 |
+
|
| 38 |
+
# Reuse all the heavy lifting from the main OpenRouter eval script.
|
| 39 |
+
sys.path.insert(0, "/home/ubuntu")
|
| 40 |
+
from eval_nemotron_openrouter import ( # noqa: E402
|
| 41 |
+
ORIG_DIR, TEST_JSONL,
|
| 42 |
+
SWAP_PROMPT_DIRECT, SWAP_PROMPT_NEUTRAL, MUTE_PROMPT_NEUTRAL,
|
| 43 |
+
SWAP_JUDGE_SYSTEM, MUTE_JUDGE_SYSTEM,
|
| 44 |
+
_openrouter_client, _judge_client,
|
| 45 |
+
_content_video_only,
|
| 46 |
+
call_model, call_judge,
|
| 47 |
+
run_task, summarize, load_jsonl,
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
EVAL_ROOT = Path("/home/ubuntu/eval_results")
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# ---------------------------------------------------------------------------
|
| 54 |
+
# Build work — one entry per base (original) video. No donor / no silent wav.
|
| 55 |
+
# ---------------------------------------------------------------------------
|
| 56 |
+
def build_originals_work() -> List[Dict[str, Any]]:
|
| 57 |
+
work = []
|
| 58 |
+
seen = set()
|
| 59 |
+
for r in load_jsonl(TEST_JSONL):
|
| 60 |
+
v = r["video"]
|
| 61 |
+
if "_delay_" in v or "_early_" in v or v in seen:
|
| 62 |
+
continue
|
| 63 |
+
seen.add(v)
|
| 64 |
+
p = ORIG_DIR / v
|
| 65 |
+
if p.exists():
|
| 66 |
+
work.append({"video": v, "video_path": p})
|
| 67 |
+
return work
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
# ---------------------------------------------------------------------------
|
| 71 |
+
# Per-task runners — every clip's gt_label is "synced" (audio is real & matches)
|
| 72 |
+
# ---------------------------------------------------------------------------
|
| 73 |
+
def run_one_swap_direct(item, model_client, model, judge_client, judge_model, send_mode):
|
| 74 |
+
content = _content_video_only(SWAP_PROMPT_DIRECT, item["video_path"])
|
| 75 |
+
raw = call_model(model_client, model, content,
|
| 76 |
+
max_tokens=1200, reasoning_budget=512)
|
| 77 |
+
t = (raw or "").strip().lower()
|
| 78 |
+
head = t.split()[:3] if t else []
|
| 79 |
+
if t.startswith("no") or "no" in head:
|
| 80 |
+
pred = "mismatched"
|
| 81 |
+
elif t.startswith("yes") or "yes" in head:
|
| 82 |
+
pred = "synced"
|
| 83 |
+
else:
|
| 84 |
+
pred = "synced"
|
| 85 |
+
return {
|
| 86 |
+
"video": item["video"],
|
| 87 |
+
"video_path": str(item["video_path"]),
|
| 88 |
+
"gt_label": "synced",
|
| 89 |
+
"pred_label": pred,
|
| 90 |
+
"correct": (pred == "synced"),
|
| 91 |
+
"parse_method": "direct",
|
| 92 |
+
"prompt_mode": "direct",
|
| 93 |
+
"send_mode": "video",
|
| 94 |
+
"raw_output": raw,
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def run_one_swap_neutral(item, model_client, model, judge_client, judge_model, send_mode):
|
| 99 |
+
content = _content_video_only(SWAP_PROMPT_NEUTRAL, item["video_path"])
|
| 100 |
+
raw = call_model(model_client, model, content,
|
| 101 |
+
max_tokens=2000, reasoning_budget=768)
|
| 102 |
+
parsed = call_judge(judge_client, judge_model, SWAP_JUDGE_SYSTEM, raw) or {}
|
| 103 |
+
pred = str(parsed.get("prediction", "synced")).lower()
|
| 104 |
+
if pred not in ("synced", "delay", "early", "mismatched"):
|
| 105 |
+
pred = "synced"
|
| 106 |
+
return {
|
| 107 |
+
"video": item["video"],
|
| 108 |
+
"video_path": str(item["video_path"]),
|
| 109 |
+
"gt_label": "synced",
|
| 110 |
+
"pred_label": pred,
|
| 111 |
+
"correct": (pred == "synced"),
|
| 112 |
+
"judge_explanation": str(parsed.get("explanation", "")),
|
| 113 |
+
"parse_method": "openrouter_judge" if parsed else "judge_fail",
|
| 114 |
+
"prompt_mode": "neutral",
|
| 115 |
+
"send_mode": "video",
|
| 116 |
+
"raw_output": raw,
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def run_one_mute_neutral(item, model_client, model, judge_client, judge_model, send_mode):
|
| 121 |
+
content = _content_video_only(MUTE_PROMPT_NEUTRAL, item["video_path"])
|
| 122 |
+
raw = call_model(model_client, model, content,
|
| 123 |
+
max_tokens=2000, reasoning_budget=768)
|
| 124 |
+
parsed = call_judge(judge_client, judge_model, MUTE_JUDGE_SYSTEM, raw) or {}
|
| 125 |
+
pred = str(parsed.get("prediction", "synced")).lower()
|
| 126 |
+
if pred not in ("muted", "synced", "delay", "early"):
|
| 127 |
+
pred = "synced"
|
| 128 |
+
# On a clean original, the model *should* describe the real audio (pred != muted).
|
| 129 |
+
return {
|
| 130 |
+
"video": item["video"],
|
| 131 |
+
"video_path": str(item["video_path"]),
|
| 132 |
+
"gt_label": "synced",
|
| 133 |
+
"pred_label": pred,
|
| 134 |
+
"correct": (pred != "muted"),
|
| 135 |
+
"judge_explanation": str(parsed.get("explanation", "")),
|
| 136 |
+
"parse_method": "openrouter_judge" if parsed else "judge_fail",
|
| 137 |
+
"prompt_mode": "neutral",
|
| 138 |
+
"send_mode": "video",
|
| 139 |
+
"raw_output": raw,
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
# ---------------------------------------------------------------------------
|
| 144 |
+
# Metrics for control results (false-positive framing)
|
| 145 |
+
# ---------------------------------------------------------------------------
|
| 146 |
+
def metrics_swap_control(rows: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 147 |
+
n = len(rows)
|
| 148 |
+
if n == 0:
|
| 149 |
+
return {}
|
| 150 |
+
bd = {k: 0 for k in ("synced", "mismatched", "delay", "early")}
|
| 151 |
+
parse_stats: Dict[str, int] = {}
|
| 152 |
+
for r in rows:
|
| 153 |
+
bd[r.get("pred_label", "synced")] = bd.get(r.get("pred_label", "synced"), 0) + 1
|
| 154 |
+
pm = r.get("parse_method", "unknown")
|
| 155 |
+
parse_stats[pm] = parse_stats.get(pm, 0) + 1
|
| 156 |
+
return {
|
| 157 |
+
"total_samples": n,
|
| 158 |
+
"correct_synced_rate": round(bd["synced"] / n, 4), # the "right" answer
|
| 159 |
+
"false_mismatch_rate": round(bd["mismatched"] / n, 4), # over-sensitivity
|
| 160 |
+
"prediction_breakdown": bd,
|
| 161 |
+
"parse_stats": parse_stats,
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def metrics_mute_control(rows: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 166 |
+
n = len(rows)
|
| 167 |
+
if n == 0:
|
| 168 |
+
return {}
|
| 169 |
+
bd = {k: 0 for k in ("synced", "muted", "delay", "early")}
|
| 170 |
+
parse_stats: Dict[str, int] = {}
|
| 171 |
+
for r in rows:
|
| 172 |
+
bd[r.get("pred_label", "synced")] = bd.get(r.get("pred_label", "synced"), 0) + 1
|
| 173 |
+
pm = r.get("parse_method", "unknown")
|
| 174 |
+
parse_stats[pm] = parse_stats.get(pm, 0) + 1
|
| 175 |
+
return {
|
| 176 |
+
"total_samples": n,
|
| 177 |
+
"describes_audio_rate": round((n - bd["muted"]) / n, 4), # the "right" answer
|
| 178 |
+
"false_muted_rate": round(bd["muted"] / n, 4), # over-sensitivity
|
| 179 |
+
"prediction_breakdown": bd,
|
| 180 |
+
"parse_stats": parse_stats,
|
| 181 |
+
}
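# Illustrative sketch (not part of the original script): on clean originals the
# desired outcome is pred_label != "muted", so with one false "muted" out of
# four rows the control metrics come out as describes_audio_rate = 0.75 and
# false_muted_rate = 0.25.
def _example_mute_control_metrics():
    rows = [{"pred_label": "synced"}] * 3 + [{"pred_label": "muted"}]
    return metrics_mute_control(rows)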
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def print_swap_control(m, label):
|
| 185 |
+
print("=" * 60)
|
| 186 |
+
print(f" Swap CONTROL (originals): {label}")
|
| 187 |
+
print("=" * 60)
|
| 188 |
+
print(f" Total samples: {m['total_samples']}")
|
| 189 |
+
print(f" Correct 'synced' rate: {m['correct_synced_rate']:.1%} <- desired")
|
| 190 |
+
print(f" False mismatch rate: {m['false_mismatch_rate']:.1%} <- over-sensitivity")
|
| 191 |
+
bd = m["prediction_breakdown"]
|
| 192 |
+
print(" --- Prediction Breakdown ---")
|
| 193 |
+
for k in ("synced", "mismatched", "delay", "early"):
|
| 194 |
+
print(f" {k:>10}: {bd[k]}")
|
| 195 |
+
print(" --- Parse Stats ---")
|
| 196 |
+
for k, v in sorted(m["parse_stats"].items()):
|
| 197 |
+
print(f" {k}: {v}")
|
| 198 |
+
print("=" * 60)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def print_mute_control(m, label):
|
| 202 |
+
print("=" * 60)
|
| 203 |
+
print(f" Mute CONTROL (originals): {label}")
|
| 204 |
+
print("=" * 60)
|
| 205 |
+
print(f" Total samples: {m['total_samples']}")
|
| 206 |
+
print(f" Describes-audio rate: {m['describes_audio_rate']:.1%} <- desired")
|
| 207 |
+
print(f" False 'muted' rate: {m['false_muted_rate']:.1%} <- over-sensitivity")
|
| 208 |
+
bd = m["prediction_breakdown"]
|
| 209 |
+
print(" --- Prediction Breakdown ---")
|
| 210 |
+
for k in ("synced", "muted", "delay", "early"):
|
| 211 |
+
print(f" {k:>10}: {bd[k]}")
|
| 212 |
+
print(" --- Parse Stats ---")
|
| 213 |
+
for k, v in sorted(m["parse_stats"].items()):
|
| 214 |
+
print(f" {k}: {v}")
|
| 215 |
+
print("=" * 60)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
# ---------------------------------------------------------------------------
|
| 219 |
+
# Main
|
| 220 |
+
# ---------------------------------------------------------------------------
|
| 221 |
+
TASK_RUNNERS = {
|
| 222 |
+
"swap_direct": run_one_swap_direct,
|
| 223 |
+
"swap_neutral": run_one_swap_neutral,
|
| 224 |
+
"mute_neutral": run_one_mute_neutral,
|
| 225 |
+
}
|
| 226 |
+
TASK_DIR = {
|
| 227 |
+
"swap_direct": ("swap_original", "promptDirect"),
|
| 228 |
+
"swap_neutral": ("swap_original", "promptNeutral"),
|
| 229 |
+
"mute_neutral": ("mute_original", "promptNeutral"),
|
| 230 |
+
}
|
| 231 |
+
TASK_METRIC = {
|
| 232 |
+
"swap_direct": (metrics_swap_control, print_swap_control, "swap"),
|
| 233 |
+
"swap_neutral": (metrics_swap_control, print_swap_control, "swap"),
|
| 234 |
+
"mute_neutral": (metrics_mute_control, print_mute_control, "mute"),
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def main():
|
| 239 |
+
ap = argparse.ArgumentParser()
|
| 240 |
+
ap.add_argument("--openrouter-key", default=os.environ.get("OPENROUTER_API_KEY"))
|
| 241 |
+
ap.add_argument("--judge-key",
|
| 242 |
+
default=os.environ.get("OPENAI_API_KEY") or os.environ.get("OPENROUTER_API_KEY"))
|
| 243 |
+
ap.add_argument("--model", default="nvidia/nemotron-3-nano-omni-30b-a3b-reasoning:free")
|
| 244 |
+
ap.add_argument("--judge-model", default="openai/gpt-5.4")
|
| 245 |
+
ap.add_argument("--tasks", default="swap_direct,swap_neutral,mute_neutral")
|
| 246 |
+
ap.add_argument("--workers", type=int, default=4)
|
| 247 |
+
ap.add_argument("--max-samples", type=int, default=-1)
|
| 248 |
+
ap.add_argument("--label-suffix", default="")
|
| 249 |
+
args = ap.parse_args()
|
| 250 |
+
|
| 251 |
+
if not args.openrouter_key:
|
| 252 |
+
sys.exit("[error] need OPENROUTER_API_KEY")
|
| 253 |
+
|
| 254 |
+
model_client = _openrouter_client(args.openrouter_key)
|
| 255 |
+
judge_client, judge_via_or = _judge_client(args.judge_key, args.openrouter_key)
|
| 256 |
+
if not judge_via_or and args.judge_model.startswith("openai/"):
|
| 257 |
+
args.judge_model = args.judge_model.split("/", 1)[1]
|
| 258 |
+
print(f"[config] model={args.model}")
|
| 259 |
+
print(f"[config] judge={args.judge_model} (via {'OpenRouter' if judge_via_or else 'OpenAI'})")
|
| 260 |
+
|
| 261 |
+
def _slug(m: str) -> str:
|
| 262 |
+
s = m.replace("/", "_").replace(":", "_").replace(" ", "_")
|
| 263 |
+
if s == "nvidia_nemotron-3-nano-omni-30b-a3b-reasoning_free":
|
| 264 |
+
s = "nemotron3_nano_omni_reasoning_free"
|
| 265 |
+
return s
|
| 266 |
+
tag = _slug(args.model) + (args.label_suffix or "_originalsControl")
|
| 267 |
+
work = build_originals_work()
|
| 268 |
+
print(f"[data] {len(work)} original (untampered) videos")
|
| 269 |
+
|
| 270 |
+
tasks = [t.strip() for t in args.tasks.split(",") if t.strip()]
|
| 271 |
+
for task in tasks:
|
| 272 |
+
if task not in TASK_RUNNERS:
|
| 273 |
+
print(f"[warn] unknown task {task}")
|
| 274 |
+
continue
|
| 275 |
+
bucket, suffix = TASK_DIR[task]
|
| 276 |
+
out_dir = EVAL_ROOT / bucket / f"{bucket.split('_')[0]}_{tag}_{suffix}"
|
| 277 |
+
run_task(out_dir, work, TASK_RUNNERS[task],
|
| 278 |
+
model_client, args.model, judge_client, args.judge_model,
|
| 279 |
+
send_mode="video", workers=args.workers, max_samples=args.max_samples)
|
| 280 |
+
|
| 281 |
+
# metrics
|
| 282 |
+
rows = [json.loads(l) for l in open(out_dir / "eval_results.jsonl") if l.strip()]
|
| 283 |
+
compute, prn, _kind = TASK_METRIC[task]
|
| 284 |
+
m = compute(rows)
|
| 285 |
+
m["eval_config"] = {
|
| 286 |
+
"model": args.model,
|
| 287 |
+
"provider": "openrouter",
|
| 288 |
+
"judge": args.judge_model,
|
| 289 |
+
"task": task,
|
| 290 |
+
"control": "untampered original videos (gt_label='synced' for all)",
|
| 291 |
+
"send_mode": "video (original .mp4 with its real embedded audio)",
|
| 292 |
+
}
|
| 293 |
+
(out_dir / "metrics.json").write_text(json.dumps(m, indent=2, ensure_ascii=False))
|
| 294 |
+
buf = io.StringIO()
|
| 295 |
+
with redirect_stdout(buf):
|
| 296 |
+
prn(m, out_dir.name)
|
| 297 |
+
(out_dir / "summary.txt").write_text(buf.getvalue())
|
| 298 |
+
prn(m, out_dir.name)
|
| 299 |
+
print(f" -> {out_dir / 'metrics.json'}")
|
| 300 |
+
print(f" -> {out_dir / 'summary.txt'}")
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
if __name__ == "__main__":
|
| 304 |
+
main()
|
eval_originals_control.py
ADDED
|
@@ -0,0 +1,523 @@
"""Control eval: ask the SAME mute/swap yes/no prompts on UNMODIFIED original videos.

Tests whether models correctly say 'yes' on natural audio
(answer should be 'yes' for both prompts when audio is unmodified).

Models supported (one per invocation, each may need a different conda env):
  Qwen3-Omni vanilla : --model Qwen/Qwen3-Omni-30B-A3B-Instruct --label qwen3omni_vanilla
  Ming-flash-omni    : --model /opt/dlami/nvme/Ming-weights (use ming env)
  MiniCPM-o-4_5      : --model openbmb/MiniCPM-o-4_5
  Gemini             : --gemini --gemini-model gemini-3.1-pro-preview

Output (matches existing convention):
  ~/eval_results/mute_original/mute_<label>_promptDirect/{eval_results.jsonl,metrics.json}
  ~/eval_results/swap_original/swap_<label>_promptDirect/{eval_results.jsonl,metrics.json}

Per-row schema mirrors eval_mute.py / eval_swap.py:
  gt_label   = "synced" (audio is present and matches video — model should say 'yes')
  pred_label = "synced" (yes) | "muted" (no, mute task) | "mismatched" (no, swap task)
  correct    = (pred_label == "synced")

Usage:
  conda activate <env-for-model>
  OPENAI_API_KEY=sk-...    # not needed for direct yes/no, kept for parity
  GEMINI_API_KEY=...       # only for --gemini
  python3 /home/ubuntu/eval_originals_control.py --model Qwen/Qwen3-Omni-30B-A3B-Instruct \
      --label qwen3omni_vanilla --tasks mute,swap
"""
|
| 28 |
+
|
| 29 |
+
import argparse
|
| 30 |
+
import gc
|
| 31 |
+
import json
|
| 32 |
+
import os
|
| 33 |
+
import re
|
| 34 |
+
import sys
|
| 35 |
+
import time
|
| 36 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 37 |
+
from pathlib import Path
|
| 38 |
+
from typing import Any, Dict, List, Optional
|
| 39 |
+
|
| 40 |
+
# ---------------------------------------------------------------------------
|
| 41 |
+
# Paths
|
| 42 |
+
# ---------------------------------------------------------------------------
|
| 43 |
+
DATA_ROOT = Path("/opt/dlami/nvme/video_source")
|
| 44 |
+
ORIG_VIDEO_DIR = DATA_ROOT / "original" / "uag_oops"
|
| 45 |
+
ORIG_AUDIO_DIR = DATA_ROOT / "extracted_audio" / "original" / "uag_oops"
|
| 46 |
+
TEST_JSONL = Path("/home/ubuntu/CleverHans-Evaluation/data/kto_training_data_v2_test.jsonl")
|
| 47 |
+
EVAL_ROOT = Path("/home/ubuntu/eval_results")
|
| 48 |
+
|
| 49 |
+
MUTE_PROMPT_DIRECT = "Is there any audible sound in this video? Answer with only 'yes' or 'no'."
|
| 50 |
+
MUTE_PROMPT_NEUTRAL = "Describe the audio you hear in this video."
|
| 51 |
+
SWAP_PROMPT_DIRECT = "Does the audio in this video match the visual content? Answer with only 'yes' or 'no'."
|
| 52 |
+
SWAP_PROMPT_NEUTRAL = "Describe what you see in the video and what you hear in the audio."
|
| 53 |
+
|
| 54 |
+
MUTE_NEUTRAL_JUDGE_SYSTEM = (
|
| 55 |
+
"You classify a model's free-form description of a short video's audio. "
|
| 56 |
+
"Return STRICT JSON only with keys: prediction (one of "
|
| 57 |
+
"'synced'|'muted'|'delay'|'early') and explanation (one sentence). "
|
| 58 |
+
"Rules: "
|
| 59 |
+
"- 'synced' iff the model describes any audible content (speech, music, "
|
| 60 |
+
" laughter, impact sounds, ambient noise, etc.) without claiming a "
|
| 61 |
+
" temporal offset. "
|
| 62 |
+
"- 'muted' iff the model claims no audio / silence / nothing audible. "
|
| 63 |
+
"- 'delay' iff the model claims audio lags behind visuals. "
|
| 64 |
+
"- 'early' iff the model claims audio precedes visuals."
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
SWAP_NEUTRAL_JUDGE_SYSTEM = (
|
| 68 |
+
"You classify a model's free-form description of a video's visuals and "
|
| 69 |
+
"audio for whether it claims an audio/visual mismatch. Return STRICT JSON "
|
| 70 |
+
"only with keys: prediction (one of 'synced'|'mismatched') and explanation "
|
| 71 |
+
"(one sentence). Rules: "
|
| 72 |
+
"- 'mismatched' iff the model explicitly says the audio does not match the "
|
| 73 |
+
" visuals (e.g., dubbed/voiceover, audio describes a different scene, "
|
| 74 |
+
" unrelated sounds). "
|
| 75 |
+
"- 'synced' otherwise — including when the model describes audio that "
|
| 76 |
+
" appears to fit the visuals, or simply describes both without raising any "
|
| 77 |
+
" mismatch."
|
| 78 |
+
)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# ---------------------------------------------------------------------------
|
| 82 |
+
# Utilities
|
| 83 |
+
# ---------------------------------------------------------------------------
|
| 84 |
+
def slugify(name: str) -> str:
|
| 85 |
+
return re.sub(r"[/:]", "_", name)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def load_originals() -> List[Dict[str, Any]]:
|
| 89 |
+
"""142 unique base videos, each with its on-disk video + extracted-audio paths."""
|
| 90 |
+
out, seen = [], set()
|
| 91 |
+
with open(TEST_JSONL) as f:
|
| 92 |
+
for line in f:
|
| 93 |
+
line = line.strip()
|
| 94 |
+
if not line:
|
| 95 |
+
continue
|
| 96 |
+
obj = json.loads(line)
|
| 97 |
+
v = obj["video"]
|
| 98 |
+
if "_delay_" in v or "_early_" in v or v in seen:
|
| 99 |
+
continue
|
| 100 |
+
seen.add(v)
|
| 101 |
+
video_path = ORIG_VIDEO_DIR / v
|
| 102 |
+
audio_path = ORIG_AUDIO_DIR / (v[:-4] + ".wav")
|
| 103 |
+
if not video_path.exists():
|
| 104 |
+
print(f"[skip] missing video: {v}", flush=True); continue
|
| 105 |
+
if not audio_path.exists():
|
| 106 |
+
print(f"[skip] missing audio: {audio_path.name}", flush=True); continue
|
| 107 |
+
out.append({"video": v, "video_path": str(video_path), "audio_path": str(audio_path)})
|
| 108 |
+
return out
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _openai_classify(raw, system_prompt, allowed, default, *,
|
| 112 |
+
api_key, judge_model, fallback_keywords=None,
|
| 113 |
+
fallback_pred=None):
|
| 114 |
+
"""Generic OpenAI JSON classifier shared by mute and swap neutral judges."""
|
| 115 |
+
try:
|
| 116 |
+
from openai import OpenAI
|
| 117 |
+
except ImportError:
|
| 118 |
+
return {"prediction": default, "judge_explanation": "openai-not-installed",
|
| 119 |
+
"parse_method": "neutral_judge_unavailable"}
|
| 120 |
+
try:
|
| 121 |
+
client = OpenAI(api_key=api_key)
|
| 122 |
+
resp = client.chat.completions.create(
|
| 123 |
+
model=judge_model,
|
| 124 |
+
messages=[
|
| 125 |
+
{"role": "system", "content": system_prompt},
|
| 126 |
+
{"role": "user", "content": raw},
|
| 127 |
+
],
|
| 128 |
+
response_format={"type": "json_object"},
|
| 129 |
+
max_completion_tokens=300,
|
| 130 |
+
)
|
| 131 |
+
obj = json.loads(resp.choices[0].message.content or "")
|
| 132 |
+
pred = str(obj.get("prediction", default)).lower()
|
| 133 |
+
if pred not in allowed:
|
| 134 |
+
pred = default
|
| 135 |
+
return {
|
| 136 |
+
"prediction": pred,
|
| 137 |
+
"judge_explanation": str(obj.get("explanation", "")),
|
| 138 |
+
"parse_method": "openai_judge",
|
| 139 |
+
}
|
| 140 |
+
except Exception as exc:
|
| 141 |
+
t = (raw or "").lower()
|
| 142 |
+
if fallback_keywords and any(k in t for k in fallback_keywords):
|
| 143 |
+
return {"prediction": fallback_pred, "judge_explanation": f"fallback: {exc}",
|
| 144 |
+
"parse_method": "neutral_keyword_fallback"}
|
| 145 |
+
return {"prediction": default, "judge_explanation": f"fallback: {exc}",
|
| 146 |
+
"parse_method": "neutral_keyword_fallback"}
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def openai_judge_mute_neutral(raw: str, *, api_key: str, judge_model: str) -> Dict[str, Any]:
|
| 150 |
+
return _openai_classify(
|
| 151 |
+
raw, MUTE_NEUTRAL_JUDGE_SYSTEM,
|
| 152 |
+
allowed={"synced", "muted", "delay", "early"}, default="synced",
|
| 153 |
+
api_key=api_key, judge_model=judge_model,
|
| 154 |
+
fallback_keywords=("no audio", "silent", "no sound", "no audible",
|
| 155 |
+
"silence", "no discernible audio"),
|
| 156 |
+
fallback_pred="muted",
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def openai_judge_swap_neutral(raw: str, *, api_key: str, judge_model: str) -> Dict[str, Any]:
|
| 161 |
+
return _openai_classify(
|
| 162 |
+
raw, SWAP_NEUTRAL_JUDGE_SYSTEM,
|
| 163 |
+
allowed={"synced", "mismatched"}, default="synced",
|
| 164 |
+
api_key=api_key, judge_model=judge_model,
|
| 165 |
+
fallback_keywords=("does not match", "do not match", "doesn't match",
|
| 166 |
+
"mismatch", "unrelated", "dubbed", "voiceover"),
|
| 167 |
+
fallback_pred="mismatched",
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def parse_yesno(raw: str, *, no_label: str) -> Dict[str, Any]:
|
| 172 |
+
"""Same parsing convention as eval_mute.py / eval_swap.py direct mode.
|
| 173 |
+
|
| 174 |
+
Returns ``{"prediction": <pred>, "parse_method": "direct"}`` where pred is
|
| 175 |
+
"synced" on yes, ``no_label`` on no, and "synced" on ambiguous (matches
|
| 176 |
+
existing default; original videos with audio should yield 'yes' anyway).
|
| 177 |
+
"""
|
| 178 |
+
t = (raw or "").strip().lower()
|
| 179 |
+
if not t:
|
| 180 |
+
return {"prediction": "synced", "parse_method": "direct_empty"}
|
| 181 |
+
if t.startswith("no") or "no" in t.split()[:3]:
|
| 182 |
+
return {"prediction": no_label, "parse_method": "direct"}
|
| 183 |
+
if t.startswith("yes") or "yes" in t.split()[:3]:
|
| 184 |
+
return {"prediction": "synced", "parse_method": "direct"}
|
| 185 |
+
return {"prediction": "synced", "parse_method": "direct"}
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def write_metrics_mute(rows, model, judge_model, out_path: Path,
|
| 189 |
+
prompt_mode: str = "direct"):
|
| 190 |
+
breakdown = {"synced": 0, "muted": 0, "delay": 0, "early": 0}
|
| 191 |
+
parse_stats: Dict[str, int] = {}
|
| 192 |
+
for r in rows:
|
| 193 |
+
pl = r["pred_label"]
|
| 194 |
+
breakdown[pl] = breakdown.get(pl, 0) + 1
|
| 195 |
+
m = r.get("parse_method", "")
|
| 196 |
+
parse_stats[m] = parse_stats.get(m, 0) + 1
|
| 197 |
+
n = len(rows)
|
| 198 |
+
metrics = {
|
| 199 |
+
"total_samples": n,
|
| 200 |
+
# Correct iff model said 'synced' (for original/control, real audio is there)
|
| 201 |
+
"audio_present_detection_rate": round(breakdown["synced"] / n, 4) if n else 0.0,
|
| 202 |
+
"false_silent_rate": round(breakdown["muted"] / n, 4) if n else 0.0,
|
| 203 |
+
"prediction_breakdown": breakdown,
|
| 204 |
+
"parse_stats": parse_stats,
|
| 205 |
+
"eval_config": {
|
| 206 |
+
"base_model": model,
|
| 207 |
+
"prompt_mode": prompt_mode,
|
| 208 |
+
"judge_model": judge_model,
|
| 209 |
+
"input_kind": "original_video_with_original_audio",
|
| 210 |
+
"task_kind": "mute_control",
|
| 211 |
+
},
|
| 212 |
+
}
|
| 213 |
+
out_path.write_text(json.dumps(metrics, indent=2, ensure_ascii=False))
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def write_metrics_swap(rows, model, judge_model, out_path: Path,
|
| 217 |
+
prompt_mode: str = "direct"):
|
| 218 |
+
breakdown = {"synced": 0, "mismatched": 0}
|
| 219 |
+
parse_stats: Dict[str, int] = {}
|
| 220 |
+
for r in rows:
|
| 221 |
+
breakdown[r["pred_label"]] = breakdown.get(r["pred_label"], 0) + 1
|
| 222 |
+
m = r.get("parse_method", "")
|
| 223 |
+
parse_stats[m] = parse_stats.get(m, 0) + 1
|
| 224 |
+
n = len(rows)
|
| 225 |
+
metrics = {
|
| 226 |
+
"total_samples": n,
|
| 227 |
+
"match_detection_rate": round(breakdown["synced"] / n, 4) if n else 0.0,
|
| 228 |
+
"false_mismatch_rate": round(breakdown["mismatched"] / n, 4) if n else 0.0,
|
| 229 |
+
"prediction_breakdown": breakdown,
|
| 230 |
+
"parse_stats": parse_stats,
|
| 231 |
+
"eval_config": {
|
| 232 |
+
"base_model": model,
|
| 233 |
+
"prompt_mode": prompt_mode,
|
| 234 |
+
"judge_model": judge_model,
|
| 235 |
+
"input_kind": "original_video_with_original_audio",
|
| 236 |
+
"task_kind": "swap_control",
|
| 237 |
+
},
|
| 238 |
+
}
|
| 239 |
+
out_path.write_text(json.dumps(metrics, indent=2, ensure_ascii=False))
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# ---------------------------------------------------------------------------
|
| 243 |
+
# Omni-model path (Qwen / Ming / MiniCPM): reuse multi_omni_adapter
|
| 244 |
+
# ---------------------------------------------------------------------------
|
| 245 |
+
def _load_omni(base_model: str):
|
| 246 |
+
sys.path.insert(0, "/home/ubuntu/CleverHans-Evaluation/scripts")
|
| 247 |
+
from multi_omni_adapter import get_adapter
|
| 248 |
+
print(f"[load] {base_model}", flush=True)
|
| 249 |
+
omni = get_adapter(base_model)
|
| 250 |
+
omni.load()
|
| 251 |
+
return omni
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def run_omni_task(omni, items, prompt, parser, out_dir, max_new_tokens,
|
| 255 |
+
prompt_mode="direct", task_is_swap=False):
|
| 256 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 257 |
+
results_path = out_dir / "eval_results.jsonl"
|
| 258 |
+
|
| 259 |
+
processed = set()
|
| 260 |
+
if results_path.exists():
|
| 261 |
+
for line in open(results_path):
|
| 262 |
+
line = line.strip()
|
| 263 |
+
if line:
|
| 264 |
+
processed.add(json.loads(line)["video"])
|
| 265 |
+
print(f"[{out_dir.name}] resume: {len(processed)} done", flush=True)
|
| 266 |
+
|
| 267 |
+
todo = [it for it in items if it["video"] not in processed]
|
| 268 |
+
print(f"[{out_dir.name}] {len(todo)} new / {len(items)} total", flush=True)
|
| 269 |
+
|
| 270 |
+
n_done = 0
|
| 271 |
+
with open(results_path, "a") as out:
|
| 272 |
+
for it in todo:
|
| 273 |
+
try:
|
| 274 |
+
raw = omni.infer(it["video_path"], it["audio_path"], prompt,
|
| 275 |
+
max_new_tokens=max_new_tokens, temperature=0.0)
|
| 276 |
+
except Exception as exc:
|
| 277 |
+
print(f"[error] {it['video']}: {exc}", flush=True)
|
| 278 |
+
continue
|
| 279 |
+
if not raw:
|
| 280 |
+
print(f"[skip] empty raw for {it['video']}", flush=True)
|
| 281 |
+
continue
|
| 282 |
+
pj = parser(raw)
|
| 283 |
+
row = {
|
| 284 |
+
"video": it["video"],
|
| 285 |
+
"video_path": it["video_path"],
|
| 286 |
+
"gt_label": "synced",
|
| 287 |
+
"pred_label": pj["prediction"],
|
| 288 |
+
"correct": pj["prediction"] == "synced",
|
| 289 |
+
"parse_method": pj["parse_method"],
|
| 290 |
+
"prompt_mode": prompt_mode,
|
| 291 |
+
"raw_output": raw,
|
| 292 |
+
}
|
| 293 |
+
if "judge_explanation" in pj:
|
| 294 |
+
row["judge_explanation"] = pj["judge_explanation"]
|
| 295 |
+
if task_is_swap:
|
| 296 |
+
row["swapped_from"] = None # original audio
|
| 297 |
+
out.write(json.dumps(row, ensure_ascii=False) + "\n")
|
| 298 |
+
out.flush()
|
| 299 |
+
n_done += 1
|
| 300 |
+
if n_done % 10 == 0 or n_done == len(todo):
|
| 301 |
+
print(f"[{out_dir.name}] {n_done}/{len(todo)}", flush=True)
|
| 302 |
+
try:
|
| 303 |
+
import torch; torch.cuda.empty_cache()
|
| 304 |
+
except Exception:
|
| 305 |
+
pass
|
| 306 |
+
gc.collect()
|
| 307 |
+
|
| 308 |
+
return [json.loads(l) for l in open(results_path) if l.strip()]
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
# ---------------------------------------------------------------------------
|
| 312 |
+
# Gemini path
|
| 313 |
+
# ---------------------------------------------------------------------------
|
| 314 |
+
def _gemini_client(api_key: str):
|
| 315 |
+
from google import genai
|
| 316 |
+
return genai.Client(api_key=api_key)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def _wait_active(client, file_obj, timeout=600):
|
| 320 |
+
start = time.time()
|
| 321 |
+
while True:
|
| 322 |
+
f = client.files.get(name=file_obj.name)
|
| 323 |
+
state = getattr(f.state, "name", str(f.state))
|
| 324 |
+
if state == "ACTIVE":
|
| 325 |
+
return f
|
| 326 |
+
if state == "FAILED":
|
| 327 |
+
raise RuntimeError(f"Gemini file FAILED: {file_obj.name}")
|
| 328 |
+
if time.time() - start > timeout:
|
| 329 |
+
raise TimeoutError(f"Gemini upload state {state} after {timeout}s")
|
| 330 |
+
time.sleep(2)
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def gemini_infer(client, model: str, video_path: str, prompt: str,
|
| 334 |
+
max_tokens=4096, temperature=0.0, delete=True) -> str:
|
| 335 |
+
"""Upload + ask. Some Gemini 3.x previews REQUIRE thinking mode (reject
|
| 336 |
+
``thinking_budget=0`` with INVALID_ARGUMENT), so we don't try to disable
|
| 337 |
+
it. We just budget enough output tokens to cover thinking + visible
|
| 338 |
+
content (default 4096)."""
|
| 339 |
+
fobj = client.files.upload(file=video_path)
|
| 340 |
+
fobj = _wait_active(client, fobj)
|
| 341 |
+
try:
|
| 342 |
+
cfg: Dict[str, Any] = {
|
| 343 |
+
"max_output_tokens": max_tokens,
|
| 344 |
+
"temperature": temperature,
|
| 345 |
+
}
|
| 346 |
+
resp = client.models.generate_content(
|
| 347 |
+
model=model,
|
| 348 |
+
contents=[prompt, fobj],
|
| 349 |
+
config=cfg,
|
| 350 |
+
)
|
| 351 |
+
return (resp.text or "").strip()
|
| 352 |
+
finally:
|
| 353 |
+
if delete:
|
| 354 |
+
try: client.files.delete(name=fobj.name)
|
| 355 |
+
except Exception: pass
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def run_gemini_task(client, gemini_model, items, prompt, parser, out_dir, workers,
|
| 359 |
+
prompt_mode="direct", task_is_swap=False, gemini_max_tokens=2048):
|
| 360 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 361 |
+
results_path = out_dir / "eval_results.jsonl"
|
| 362 |
+
|
| 363 |
+
processed = set()
|
| 364 |
+
if results_path.exists():
|
| 365 |
+
for line in open(results_path):
|
| 366 |
+
line = line.strip()
|
| 367 |
+
if line:
|
| 368 |
+
processed.add(json.loads(line)["video"])
|
| 369 |
+
print(f"[{out_dir.name}] resume: {len(processed)} done", flush=True)
|
| 370 |
+
|
| 371 |
+
todo = [it for it in items if it["video"] not in processed]
|
| 372 |
+
print(f"[{out_dir.name}] {len(todo)} new / {len(items)} total", flush=True)
|
| 373 |
+
|
| 374 |
+
def _go(it):
|
| 375 |
+
try:
|
| 376 |
+
raw = gemini_infer(client, gemini_model, it["video_path"], prompt,
|
| 377 |
+
max_tokens=gemini_max_tokens)
|
| 378 |
+
except Exception as exc:
|
| 379 |
+
print(f"[error] {it['video']}: {exc}", flush=True)
|
| 380 |
+
return None
|
| 381 |
+
if not raw:
|
| 382 |
+
print(f"[empty] {it['video']}", flush=True)
|
| 383 |
+
return None
|
| 384 |
+
pj = parser(raw)
|
| 385 |
+
row = {
|
| 386 |
+
"video": it["video"],
|
| 387 |
+
"video_path": it["video_path"],
|
| 388 |
+
"gt_label": "synced",
|
| 389 |
+
"pred_label": pj["prediction"],
|
| 390 |
+
"correct": pj["prediction"] == "synced",
|
| 391 |
+
"parse_method": pj["parse_method"],
|
| 392 |
+
"prompt_mode": prompt_mode,
|
| 393 |
+
"raw_output": raw,
|
| 394 |
+
}
|
| 395 |
+
if "judge_explanation" in pj:
|
| 396 |
+
row["judge_explanation"] = pj["judge_explanation"]
|
| 397 |
+
if task_is_swap:
|
| 398 |
+
row["swapped_from"] = None
|
| 399 |
+
return row
|
| 400 |
+
|
| 401 |
+
n_done = 0
|
| 402 |
+
with ThreadPoolExecutor(max_workers=workers) as ex, open(results_path, "a") as out:
|
| 403 |
+
futures = {ex.submit(_go, it): it for it in todo}
|
| 404 |
+
for fut in as_completed(futures):
|
| 405 |
+
row = fut.result()
|
| 406 |
+
if row is None:
|
| 407 |
+
continue
|
| 408 |
+
out.write(json.dumps(row, ensure_ascii=False) + "\n")
|
| 409 |
+
out.flush()
|
| 410 |
+
n_done += 1
|
| 411 |
+
if n_done % 10 == 0 or n_done == len(todo):
|
| 412 |
+
print(f"[{out_dir.name}] {n_done}/{len(todo)}", flush=True)
|
| 413 |
+
|
| 414 |
+
return [json.loads(l) for l in open(results_path) if l.strip()]
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
# ---------------------------------------------------------------------------
|
| 418 |
+
# Main
|
| 419 |
+
# ---------------------------------------------------------------------------
|
| 420 |
+
def main():
|
| 421 |
+
ap = argparse.ArgumentParser()
|
| 422 |
+
ap.add_argument("--model", help="HF id or local path for omni model.")
|
| 423 |
+
ap.add_argument("--label", help="Output dir suffix (defaults to slug of --model).")
|
| 424 |
+
ap.add_argument("--tasks", default="mute,swap")
|
| 425 |
+
ap.add_argument("--max-new-tokens", type=int, default=0,
|
| 426 |
+
help="0 = pick per-task default (32 for direct, 256 for neutral).")
|
| 427 |
+
ap.add_argument("--mute-prompt-mode", choices=["direct", "neutral"], default="direct",
|
| 428 |
+
help="'neutral' uses 'Describe the audio you hear' + OpenAI judge.")
|
| 429 |
+
ap.add_argument("--swap-prompt-mode", choices=["direct", "neutral"], default="direct",
|
| 430 |
+
help="'neutral' uses 'Describe what you see and hear' + OpenAI judge.")
|
| 431 |
+
ap.add_argument("--all-modes", action="store_true",
|
| 432 |
+
help="Run mute×{direct,neutral} and swap×{direct,neutral} sequentially "
|
| 433 |
+
"(4 phases). Each phase has independent resume.")
|
| 434 |
+
ap.add_argument("--gemini", action="store_true",
|
| 435 |
+
help="Use Gemini API instead of an omni adapter.")
|
| 436 |
+
ap.add_argument("--gemini-model", default="gemini-3.1-pro-preview")
|
| 437 |
+
ap.add_argument("--gemini-api-key", default=os.environ.get("GEMINI_API_KEY"))
|
| 438 |
+
ap.add_argument("--openai-api-key", default=os.environ.get("OPENAI_API_KEY"),
|
| 439 |
+
help="Required when --mute-prompt-mode=neutral (for the judge).")
|
| 440 |
+
ap.add_argument("--judge-model", default="gpt-5.4")
|
| 441 |
+
ap.add_argument("--workers", type=int, default=4)
|
| 442 |
+
args = ap.parse_args()
|
| 443 |
+
|
| 444 |
+
items = load_originals()
|
| 445 |
+
print(f"[data] {len(items)} original videos", flush=True)
|
| 446 |
+
|
| 447 |
+
tasks = [t.strip() for t in args.tasks.split(",") if t.strip()]
|
| 448 |
+
if args.all_modes:
|
| 449 |
+
phases = [("mute", "direct"), ("mute", "neutral"),
|
| 450 |
+
("swap", "direct"), ("swap", "neutral")]
|
| 451 |
+
else:
|
| 452 |
+
phases = []
|
| 453 |
+
if "mute" in tasks:
|
| 454 |
+
phases.append(("mute", args.mute_prompt_mode))
|
| 455 |
+
if "swap" in tasks:
|
| 456 |
+
phases.append(("swap", args.swap_prompt_mode))
|
| 457 |
+
|
| 458 |
+
needs_openai = any(mode == "neutral" for _, mode in phases)
|
| 459 |
+
if needs_openai and not args.openai_api_key:
|
| 460 |
+
sys.exit("[error] neutral phases need --openai-api-key or OPENAI_API_KEY")
|
| 461 |
+
|
| 462 |
+
def _phase_config(task, mode):
|
| 463 |
+
"""Return (prompt, parser, max_new_tokens, dir_suffix, root_dir, write_metrics)."""
|
| 464 |
+
if task == "mute" and mode == "direct":
|
| 465 |
+
return (MUTE_PROMPT_DIRECT,
|
| 466 |
+
lambda raw: parse_yesno(raw, no_label="muted"),
|
| 467 |
+
args.max_new_tokens or 32, "_promptDirect",
|
| 468 |
+
EVAL_ROOT / "mute_original",
|
| 469 |
+
lambda rows, p: write_metrics_mute(rows, _model_name, args.judge_model, p, "direct"))
|
| 470 |
+
if task == "mute" and mode == "neutral":
|
| 471 |
+
return (MUTE_PROMPT_NEUTRAL,
|
| 472 |
+
lambda raw: openai_judge_mute_neutral(
|
| 473 |
+
raw, api_key=args.openai_api_key, judge_model=args.judge_model),
|
| 474 |
+
args.max_new_tokens or 256, "_promptNeutral",
|
| 475 |
+
EVAL_ROOT / "mute_original",
|
| 476 |
+
lambda rows, p: write_metrics_mute(rows, _model_name, args.judge_model, p, "neutral"))
|
| 477 |
+
if task == "swap" and mode == "direct":
|
| 478 |
+
return (SWAP_PROMPT_DIRECT,
|
| 479 |
+
lambda raw: parse_yesno(raw, no_label="mismatched"),
|
| 480 |
+
args.max_new_tokens or 32, "_promptDirect",
|
| 481 |
+
EVAL_ROOT / "swap_original",
|
| 482 |
+
lambda rows, p: write_metrics_swap(rows, _model_name, args.judge_model, p, "direct"))
|
| 483 |
+
if task == "swap" and mode == "neutral":
|
| 484 |
+
return (SWAP_PROMPT_NEUTRAL,
|
| 485 |
+
lambda raw: openai_judge_swap_neutral(
|
| 486 |
+
raw, api_key=args.openai_api_key, judge_model=args.judge_model),
|
| 487 |
+
args.max_new_tokens or 256, "_promptNeutral",
|
| 488 |
+
EVAL_ROOT / "swap_original",
|
| 489 |
+
lambda rows, p: write_metrics_swap(rows, _model_name, args.judge_model, p, "neutral"))
|
| 490 |
+
raise ValueError(f"bad task/mode: {task}/{mode}")
|
| 491 |
+
|
| 492 |
+
if args.gemini:
|
| 493 |
+
if not args.gemini_api_key:
|
| 494 |
+
sys.exit("[error] need --gemini-api-key or GEMINI_API_KEY")
|
| 495 |
+
_model_name = args.gemini_model
|
| 496 |
+
label = args.label or slugify(args.gemini_model)
|
| 497 |
+
client = _gemini_client(args.gemini_api_key)
|
| 498 |
+
for task, mode in phases:
|
| 499 |
+
prompt, parser, _, dir_suffix, root_dir, wm = _phase_config(task, mode)
|
| 500 |
+
d = root_dir / f"{task}_{label}{dir_suffix}"
|
| 501 |
+
rows = run_gemini_task(client, args.gemini_model, items, prompt, parser,
|
| 502 |
+
d, args.workers, prompt_mode=mode,
|
| 503 |
+
task_is_swap=(task == "swap"))
|
| 504 |
+
wm(rows, d / "metrics.json")
|
| 505 |
+
print(f"[{task}/{mode}] saved -> {d} ({len(rows)}/{len(items)}; "
|
| 506 |
+
f"{len(items) - len(rows)} missing — re-run to retry)")
|
| 507 |
+
else:
|
| 508 |
+
if not args.model:
|
| 509 |
+
sys.exit("[error] need --model (or pass --gemini)")
|
| 510 |
+
_model_name = args.model
|
| 511 |
+
label = args.label or slugify(args.model)
|
| 512 |
+
omni = _load_omni(args.model)
|
| 513 |
+
for task, mode in phases:
|
| 514 |
+
prompt, parser, max_new, dir_suffix, root_dir, wm = _phase_config(task, mode)
|
| 515 |
+
d = root_dir / f"{task}_{label}{dir_suffix}"
|
| 516 |
+
rows = run_omni_task(omni, items, prompt, parser, d, max_new,
|
| 517 |
+
prompt_mode=mode, task_is_swap=(task == "swap"))
|
| 518 |
+
wm(rows, d / "metrics.json")
|
| 519 |
+
print(f"[{task}/{mode}] saved -> {d}")
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
if __name__ == "__main__":
|
| 523 |
+
main()
|
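For reference, each phase above writes its rows to eval_results.jsonl and its summary to metrics.json inside a per-phase directory under EVAL_ROOT (e.g. mute_original/ or swap_original/). A minimal sketch of recomputing a phase's accuracy straight from those rows; the directory name below uses a hypothetical label, and `correct` is the boolean field the task runners above write per row:

    import json
    from pathlib import Path

    def phase_accuracy(phase_dir: Path) -> float:
        # Rows are JSONL; each carries a boolean `correct` set by the parser/judge.
        rows = [json.loads(l) for l in open(phase_dir / "eval_results.jsonl") if l.strip()]
        return sum(1 for r in rows if r.get("correct")) / len(rows) if rows else 0.0

    # e.g. phase_accuracy(EVAL_ROOT / "swap_original" / "swap_mylabel_promptDirect")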
finalize_case_study.py
ADDED
|
@@ -0,0 +1,153 @@
| 1 |
+
"""Final 6-way intersection for the case study, using all real signals.
|
| 2 |
+
|
| 3 |
+
Conditions (all must hold for a base video):
|
| 4 |
+
Qwen3-Omni:
|
| 5 |
+
- mute : raw_output starts with 'yes' (hallucinated audio in silence)
|
| 6 |
+
- swap : raw_output starts with 'yes' (claimed mismatched audio matches)
|
| 7 |
+
- sync : both delay AND early variants are wrong (sync_is_wrong())
|
| 8 |
+
Gemini-3.1-pro-preview:
|
| 9 |
+
- swap : raw_output starts with 'yes' (claimed mismatched audio matches)
|
| 10 |
+
- sync : both delay AND early variants are wrong (re-run on pool)
|
| 11 |
+
- mute : neutral prompt -> OpenAI judge marks correct=False (real hallucination)
|
| 12 |
+
|
| 13 |
+
Writes survivors to /home/ubuntu/case_study_candidates.jsonl with full failure detail.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import json
|
| 17 |
+
import re
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
|
| 20 |
+
# Qwen
|
| 21 |
+
QWEN_MUTE = "/home/ubuntu/eval_results/mute/mute_qwen3omni_vanilla_promptDirect/eval_results.jsonl"
|
| 22 |
+
QWEN_SWAP = "/home/ubuntu/eval_results/swap/swap_Qwen_Qwen3-Omni-30B-A3B-Instruct_promptDirect/eval_results.jsonl"
|
| 23 |
+
QWEN_SYNC = "/home/ubuntu/eval_results/sync/sync_qwen3omni_vanilla/eval_results.jsonl"
|
| 24 |
+
# Gemini direct swap (the original yes/no eval)
|
| 25 |
+
GEMINI_SWAP = "/home/ubuntu/eval_results/gemini_mute_sync_swap/swap/swap_gemini_gemini-3.1-pro-preview__promptDirect/eval_results.jsonl"
|
| 26 |
+
# Gemini results we re-ran on the 45-pool
|
| 27 |
+
GEMINI_SYNC = "/home/ubuntu/eval_results/case_study_pool/sync/case_study_pool_gemini_sync/eval_results.jsonl"
|
| 28 |
+
GEMINI_MUTE = "/home/ubuntu/eval_results/case_study_pool/mute/case_study_pool_gemini_mute_neutral/eval_results.jsonl"
|
| 29 |
+
|
| 30 |
+
OUT_PATH = Path("/home/ubuntu/case_study_candidates.jsonl")
|
| 31 |
+
|
| 32 |
+
SYNC_SUFFIX_RE = re.compile(r"_(delay|early)_\d+(?:\.\d+)?s(?=\.mp4$)")
|
| 33 |
+
DIRECTION_RE = re.compile(r"_(delay|early)_(\d+(?:\.\d+)?)s\.mp4$")
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def base_of(name): return SYNC_SUFFIX_RE.sub("", name)
|
| 37 |
+
def direction_of(name):
|
| 38 |
+
m = DIRECTION_RE.search(name)
|
| 39 |
+
return m.group(1) if m else None
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def load_jsonl(path):
|
| 43 |
+
with open(path) as f:
|
| 44 |
+
for line in f:
|
| 45 |
+
line = line.strip()
|
| 46 |
+
if line:
|
| 47 |
+
yield json.loads(line)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def real_yes_wrong(jsonl_path):
|
| 51 |
+
"""Wrong rows whose raw_output starts with 'yes' (drops empty/ambiguous artifacts)."""
|
| 52 |
+
out = set()
|
| 53 |
+
for r in load_jsonl(jsonl_path):
|
| 54 |
+
if r.get("correct", True):
|
| 55 |
+
continue
|
| 56 |
+
if (r.get("raw_output") or "").strip().lower().startswith("yes"):
|
| 57 |
+
out.add(r["video"])
|
| 58 |
+
return out
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def sync_is_wrong(row):
|
| 62 |
+
gt_synced = bool(row.get("gt_synced"))
|
| 63 |
+
pred_synced = bool(row.get("pred_synced"))
|
| 64 |
+
if gt_synced != pred_synced:
|
| 65 |
+
return True
|
| 66 |
+
if not gt_synced and row.get("pred_direction") != row.get("gt_direction"):
|
| 67 |
+
return True
|
| 68 |
+
return False
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def collect_sync_intervention_failures(jsonl_path):
|
| 72 |
+
"""{base: {direction: row_summary}} for wrong delay/early variants only."""
|
| 73 |
+
by_base = {}
|
| 74 |
+
for r in load_jsonl(jsonl_path):
|
| 75 |
+
v = r["video"]
|
| 76 |
+
d = direction_of(v)
|
| 77 |
+
if d is None:
|
| 78 |
+
continue
|
| 79 |
+
if sync_is_wrong(r):
|
| 80 |
+
by_base.setdefault(base_of(v), {})[d] = {
|
| 81 |
+
"variant": v,
|
| 82 |
+
"gt": (r.get("gt_direction"), r.get("gt_offset_sec")),
|
| 83 |
+
"pred_synced": r.get("pred_synced"),
|
| 84 |
+
"pred": (r.get("pred_direction"), r.get("pred_offset_sec")),
|
| 85 |
+
}
|
| 86 |
+
return by_base
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def main():
|
| 90 |
+
qwen_mute_yes = real_yes_wrong(QWEN_MUTE)
|
| 91 |
+
qwen_swap_yes = real_yes_wrong(QWEN_SWAP)
|
| 92 |
+
gem_swap_yes = real_yes_wrong(GEMINI_SWAP)
|
| 93 |
+
qwen_sync_fails = collect_sync_intervention_failures(QWEN_SYNC)
|
| 94 |
+
gem_sync_fails = collect_sync_intervention_failures(GEMINI_SYNC)
|
| 95 |
+
|
| 96 |
+
qwen_sync_both = {b for b, d in qwen_sync_fails.items() if "delay" in d and "early" in d}
|
| 97 |
+
gem_sync_both = {b for b, d in gem_sync_fails.items() if "delay" in d and "early" in d}
|
| 98 |
+
|
| 99 |
+
gem_mute_wrong = {r["video"] for r in load_jsonl(GEMINI_MUTE) if not r.get("correct", True)}
|
| 100 |
+
|
| 101 |
+
print("Per-source REAL failure sets:")
|
| 102 |
+
print(f" qwen mute (yes) : {len(qwen_mute_yes)}")
|
| 103 |
+
print(f" qwen swap (yes) : {len(qwen_swap_yes)}")
|
| 104 |
+
print(f" qwen sync (delay&early): {len(qwen_sync_both)}")
|
| 105 |
+
print(f" gemini swap (yes) : {len(gem_swap_yes)}")
|
| 106 |
+
print(f" gemini sync (delay&early): {len(gem_sync_both)}")
|
| 107 |
+
print(f" gemini mute neu (judge wrong): {len(gem_mute_wrong)}")
|
| 108 |
+
|
| 109 |
+
final = (
|
| 110 |
+
qwen_mute_yes
|
| 111 |
+
& qwen_swap_yes
|
| 112 |
+
& qwen_sync_both
|
| 113 |
+
& gem_swap_yes
|
| 114 |
+
& gem_sync_both
|
| 115 |
+
& gem_mute_wrong
|
| 116 |
+
)
|
| 117 |
+
print(f"\nFinal 6-way intersection: {len(final)} videos\n")
|
| 118 |
+
|
| 119 |
+
qwen_swap_rows = {r["video"]: r for r in load_jsonl(QWEN_SWAP) if not r.get("correct", True)}
|
| 120 |
+
gem_swap_rows = {r["video"]: r for r in load_jsonl(GEMINI_SWAP) if not r.get("correct", True)}
|
| 121 |
+
gem_mute_rows = {r["video"]: r for r in load_jsonl(GEMINI_MUTE)}
|
| 122 |
+
|
| 123 |
+
rows = []
|
| 124 |
+
for v in sorted(final):
|
| 125 |
+
gem_mute = gem_mute_rows.get(v, {})
|
| 126 |
+
rec = {
|
| 127 |
+
"video": v,
|
| 128 |
+
"gemini_swap_audio_from": gem_swap_rows.get(v, {}).get("swapped_from"),
|
| 129 |
+
"qwen_swap_audio_from": qwen_swap_rows.get(v, {}).get("swapped_from"),
|
| 130 |
+
"gemini_mute_neutral_raw": (gem_mute.get("raw_output") or "").strip(),
|
| 131 |
+
"gemini_mute_judge": gem_mute.get("judge_explanation"),
|
| 132 |
+
"gemini_sync_failures": [gem_sync_fails[v]["delay"], gem_sync_fails[v]["early"]],
|
| 133 |
+
"qwen_sync_failures": [qwen_sync_fails[v]["delay"], qwen_sync_fails[v]["early"]],
|
| 134 |
+
}
|
| 135 |
+
rows.append(rec)
|
| 136 |
+
print(f"VIDEO: {v}")
|
| 137 |
+
print(f" swap audio source : {rec['gemini_swap_audio_from']}")
|
| 138 |
+
print(f" gemini mute neutral raw : {rec['gemini_mute_neutral_raw'][:140]}")
|
| 139 |
+
for label, fails in (("gemini", rec["gemini_sync_failures"]),
|
| 140 |
+
("qwen ", rec["qwen_sync_failures"])):
|
| 141 |
+
for f in fails:
|
| 142 |
+
tag = f["variant"].rsplit("_", 1)[-1].rstrip(".mp4")
|
| 143 |
+
print(f" [{label}] {tag:<8} gt={f['gt']} pred_synced={f['pred_synced']} pred={f['pred']}")
|
| 144 |
+
print()
|
| 145 |
+
|
| 146 |
+
with open(OUT_PATH, "w") as f:
|
| 147 |
+
for rec in rows:
|
| 148 |
+
f.write(json.dumps(rec, ensure_ascii=False) + "\n")
|
| 149 |
+
print(f"[saved] {len(rows)} rows -> {OUT_PATH}")
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
if __name__ == "__main__":
|
| 153 |
+
main()
|
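The survivors file written above is line-delimited JSON; a small sketch of consuming it (field names taken from the rec dict built in main() above):

    import json

    with open("/home/ubuntu/case_study_candidates.jsonl") as f:
        for line in f:
            rec = json.loads(line)
            # Each record carries the swap-audio sources plus both models' delay/early sync failures.
            print(rec["video"],
                  rec["gemini_swap_audio_from"],
                  len(rec["gemini_sync_failures"]),
                  len(rec["qwen_sync_failures"]))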
find_case_study.py
ADDED
|
@@ -0,0 +1,140 @@
| 1 |
+
"""Find videos where Gemini and Qwen3-Omni both failed across mute/swap/sync.
|
| 2 |
+
|
| 3 |
+
Selection criteria (all must hold for a base video X.mp4):
|
| 4 |
+
1. Gemini failed on swap version of X -> X in swap case_study_explicit.jsonl
|
| 5 |
+
2. Gemini failed on muted version of X -> X in gemini mute jsonl with correct=false
|
| 6 |
+
3. Qwen failed on muted version of X -> X in qwen mute jsonl with correct=false
|
| 7 |
+
4. Qwen failed on swap version of X -> X in qwen swap jsonl with correct=false
|
| 8 |
+
5. Qwen failed on >=1 sync intervention -> X_delay_*.mp4 / X_early_*.mp4 wrong
|
| 9 |
+
(gt_synced=False but pred_synced=True,
|
| 10 |
+
OR pred_direction != gt_direction)
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import json
|
| 14 |
+
import re
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
|
| 17 |
+
GEMINI_SWAP_CASE = "/home/ubuntu/eval_results/gemini_mute_sync_swap/swap/swap_gemini_gemini-3.1-pro-preview__promptDirect/case_study_explicit.jsonl"
|
| 18 |
+
GEMINI_MUTE = "/home/ubuntu/eval_results/gemini_mute_sync_swap/mute/mute_gemini_gemini-3.1-pro-preview__promptDirect/eval_results.jsonl"
|
| 19 |
+
QWEN_MUTE = "/home/ubuntu/eval_results/mute/mute_qwen3omni_vanilla_promptDirect/eval_results.jsonl"
|
| 20 |
+
QWEN_SWAP = "/home/ubuntu/eval_results/swap/swap_Qwen_Qwen3-Omni-30B-A3B-Instruct_promptDirect/eval_results.jsonl"
|
| 21 |
+
QWEN_SYNC = "/home/ubuntu/eval_results/sync/sync_qwen3omni_vanilla/eval_results.jsonl"
|
| 22 |
+
|
| 23 |
+
# Strip `_delay_2.39s` or `_early_1.95s` right before .mp4 to recover the base name.
|
| 24 |
+
SYNC_SUFFIX_RE = re.compile(r"_(delay|early)_\d+(?:\.\d+)?s(?=\.mp4$)")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def base_of(video_name: str) -> str:
|
| 28 |
+
return SYNC_SUFFIX_RE.sub("", video_name)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def load_jsonl(path: str):
|
| 32 |
+
with open(path) as f:
|
| 33 |
+
for line in f:
|
| 34 |
+
line = line.strip()
|
| 35 |
+
if line:
|
| 36 |
+
yield json.loads(line)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def sync_is_wrong(row: dict) -> bool:
|
| 40 |
+
"""Qwen sync correctness: pred must match gt_synced AND (when not synced) direction."""
|
| 41 |
+
gt_synced = bool(row.get("gt_synced"))
|
| 42 |
+
pred_synced = bool(row.get("pred_synced"))
|
| 43 |
+
if gt_synced != pred_synced:
|
| 44 |
+
return True
|
| 45 |
+
if not gt_synced and row.get("pred_direction") != row.get("gt_direction"):
|
| 46 |
+
return True
|
| 47 |
+
return False
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def main():
|
| 51 |
+
# 1. Gemini swap: every row in case_study_explicit is by construction a Gemini failure.
|
| 52 |
+
gemini_swap_wrong = {row["video"] for row in load_jsonl(GEMINI_SWAP_CASE)}
|
| 53 |
+
|
| 54 |
+
# 2-4. Mute & swap with explicit `correct` field.
|
| 55 |
+
gemini_mute_wrong = {r["video"] for r in load_jsonl(GEMINI_MUTE) if not r.get("correct", True)}
|
| 56 |
+
qwen_mute_wrong = {r["video"] for r in load_jsonl(QWEN_MUTE) if not r.get("correct", True)}
|
| 57 |
+
qwen_swap_wrong = {r["video"] for r in load_jsonl(QWEN_SWAP) if not r.get("correct", True)}
|
| 58 |
+
|
| 59 |
+
# 5. Qwen sync: collect, per base video, which delay/early variants were wrong.
|
| 60 |
+
qwen_sync_variants = {}
|
| 61 |
+
for r in load_jsonl(QWEN_SYNC):
|
| 62 |
+
v = r["video"]
|
| 63 |
+
base = base_of(v)
|
| 64 |
+
if v == base:
|
| 65 |
+
continue # skip the synced original; we want intervention failures
|
| 66 |
+
if sync_is_wrong(r):
|
| 67 |
+
qwen_sync_variants.setdefault(base, []).append({
|
| 68 |
+
"variant": v,
|
| 69 |
+
"gt": (r.get("gt_direction"), r.get("gt_offset_sec")),
|
| 70 |
+
"pred_synced": r.get("pred_synced"),
|
| 71 |
+
"pred": (r.get("pred_direction"), r.get("pred_offset_sec")),
|
| 72 |
+
})
|
| 73 |
+
qwen_sync_intervention_wrong = set(qwen_sync_variants.keys())
|
| 74 |
+
|
| 75 |
+
# Intersection.
|
| 76 |
+
candidates = (
|
| 77 |
+
gemini_swap_wrong
|
| 78 |
+
& gemini_mute_wrong
|
| 79 |
+
& qwen_mute_wrong
|
| 80 |
+
& qwen_swap_wrong
|
| 81 |
+
& qwen_sync_intervention_wrong
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
print(f"Per-source wrong sets:")
|
| 85 |
+
print(f" gemini swap (case_study) : {len(gemini_swap_wrong)}")
|
| 86 |
+
print(f" gemini mute (correct=false) : {len(gemini_mute_wrong)}")
|
| 87 |
+
print(f" qwen mute (correct=false) : {len(qwen_mute_wrong)}")
|
| 88 |
+
print(f" qwen swap (correct=false) : {len(qwen_swap_wrong)}")
|
| 89 |
+
print(f" qwen sync (>=1 wrong inter.): {len(qwen_sync_intervention_wrong)}")
|
| 90 |
+
print(f"\nFull intersection (5/5): {len(candidates)} videos")
|
| 91 |
+
print("=" * 60)
|
| 92 |
+
|
| 93 |
+
# Pre-index swap rows so we can show what "swap audio" was used.
|
| 94 |
+
gemini_swap_rows = {r["video"]: r for r in load_jsonl(GEMINI_SWAP_CASE)}
|
| 95 |
+
qwen_swap_rows = {r["video"]: r for r in load_jsonl(QWEN_SWAP)
|
| 96 |
+
if not r.get("correct", True)}
|
| 97 |
+
|
| 98 |
+
out_path = Path("/home/ubuntu/case_study_candidates.jsonl")
|
| 99 |
+
records = []
|
| 100 |
+
for v in sorted(candidates):
|
| 101 |
+
gs = gemini_swap_rows.get(v, {})
|
| 102 |
+
qs = qwen_swap_rows.get(v, {})
|
| 103 |
+
rec = {
|
| 104 |
+
"video": v,
|
| 105 |
+
"gemini_swap_audio_from": gs.get("swapped_from"),
|
| 106 |
+
"qwen_swap_audio_from": qs.get("swapped_from"),
|
| 107 |
+
"qwen_sync_failures": qwen_sync_variants[v],
|
| 108 |
+
}
|
| 109 |
+
records.append(rec)
|
| 110 |
+
print(f"\nVIDEO: {v}")
|
| 111 |
+
print(f" swap audio source (gemini view) : {rec['gemini_swap_audio_from']}")
|
| 112 |
+
print(f" swap audio source (qwen view) : {rec['qwen_swap_audio_from']}")
|
| 113 |
+
print(f" qwen sync intervention failures :")
|
| 114 |
+
for hit in qwen_sync_variants[v]:
|
| 115 |
+
print(f" - {hit['variant']}")
|
| 116 |
+
print(f" gt={hit['gt']} pred_synced={hit['pred_synced']} pred={hit['pred']}")
|
| 117 |
+
|
| 118 |
+
with open(out_path, "w") as f:
|
| 119 |
+
for rec in records:
|
| 120 |
+
f.write(json.dumps(rec, ensure_ascii=False) + "\n")
|
| 121 |
+
print(f"\n[saved] {len(records)} candidates -> {out_path}")
|
| 122 |
+
|
| 123 |
+
# Diagnostic: if intersection is empty, show the largest reachable subset.
|
| 124 |
+
if not candidates:
|
| 125 |
+
print("\nNo full intersection. Pairwise overlap diagnostic:")
|
| 126 |
+
sets = {
|
| 127 |
+
"gemini_swap": gemini_swap_wrong,
|
| 128 |
+
"gemini_mute": gemini_mute_wrong,
|
| 129 |
+
"qwen_mute": qwen_mute_wrong,
|
| 130 |
+
"qwen_swap": qwen_swap_wrong,
|
| 131 |
+
"qwen_sync": qwen_sync_intervention_wrong,
|
| 132 |
+
}
|
| 133 |
+
names = list(sets)
|
| 134 |
+
for i, a in enumerate(names):
|
| 135 |
+
for b in names[i+1:]:
|
| 136 |
+
print(f" {a} & {b}: {len(sets[a] & sets[b])}")
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
if __name__ == "__main__":
|
| 140 |
+
main()
|
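For clarity, the suffix-stripping regex used by both case-study scripts behaves like this (a throwaway check with hypothetical file names, not part of the script):

    import re
    SYNC_SUFFIX_RE = re.compile(r"_(delay|early)_\d+(?:\.\d+)?s(?=\.mp4$)")
    assert SYNC_SUFFIX_RE.sub("", "clip_delay_2.39s.mp4") == "clip.mp4"
    assert SYNC_SUFFIX_RE.sub("", "clip_early_1s.mp4") == "clip.mp4"
    assert SYNC_SUFFIX_RE.sub("", "clip.mp4") == "clip.mp4"  # originals pass through unchanged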
probe_gpt55_audio.py
ADDED
|
@@ -0,0 +1,70 @@
| 1 |
+
"""Probe whether the target model accepts input_audio + image_url.
|
| 2 |
+
|
| 3 |
+
Usage:
|
| 4 |
+
OPENAI_API_KEY=sk-... python3 /home/ubuntu/probe_gpt55_audio.py [model_name]
|
| 5 |
+
"""
|
| 6 |
+
import base64
|
| 7 |
+
import os
|
| 8 |
+
import struct
|
| 9 |
+
import sys
|
| 10 |
+
|
| 11 |
+
from openai import OpenAI
|
| 12 |
+
|
| 13 |
+
model = sys.argv[1] if len(sys.argv) > 1 else "gpt-5.5"
|
| 14 |
+
client = OpenAI()
|
| 15 |
+
|
| 16 |
+
# Build a 0.1s silent 16k mono WAV in memory.
|
| 17 |
+
sr, secs = 16000, 0.1
|
| 18 |
+
n = int(sr * secs)
|
| 19 |
+
header = b"RIFF" + struct.pack("<I", 36 + n*2) + b"WAVEfmt " + struct.pack(
|
| 20 |
+
"<IHHIIHH", 16, 1, 1, sr, sr*2, 2, 16) + b"data" + struct.pack("<I", n*2)
|
| 21 |
+
wav_b64 = base64.b64encode(header + b"\x00" * (n*2)).decode()
|
| 22 |
+
|
| 23 |
+
# Tiny 1x1 white PNG.
|
| 24 |
+
png_b64 = ("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAj"
|
| 25 |
+
"CB0C8AAAAASUVORK5CYII=")
|
| 26 |
+
|
| 27 |
+
print(f"Testing model: {model}")
|
| 28 |
+
|
| 29 |
+
# Test 1: image only
|
| 30 |
+
try:
|
| 31 |
+
r = client.chat.completions.create(
|
| 32 |
+
model=model,
|
| 33 |
+
messages=[{"role": "user", "content": [
|
| 34 |
+
{"type": "text", "text": "ok?"},
|
| 35 |
+
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{png_b64}"}},
|
| 36 |
+
]}],
|
| 37 |
+
max_completion_tokens=8,
|
| 38 |
+
)
|
| 39 |
+
print(f" image_url: OK -> {r.choices[0].message.content!r}")
|
| 40 |
+
except Exception as exc:
|
| 41 |
+
print(f" image_url: FAIL ({exc.__class__.__name__}: {str(exc)[:200]})")
|
| 42 |
+
|
| 43 |
+
# Test 2: audio only
|
| 44 |
+
try:
|
| 45 |
+
r = client.chat.completions.create(
|
| 46 |
+
model=model,
|
| 47 |
+
messages=[{"role": "user", "content": [
|
| 48 |
+
{"type": "text", "text": "What audio is this?"},
|
| 49 |
+
{"type": "input_audio", "input_audio": {"data": wav_b64, "format": "wav"}},
|
| 50 |
+
]}],
|
| 51 |
+
max_completion_tokens=16,
|
| 52 |
+
)
|
| 53 |
+
print(f" input_audio: OK -> {r.choices[0].message.content!r}")
|
| 54 |
+
except Exception as exc:
|
| 55 |
+
print(f" input_audio: FAIL ({exc.__class__.__name__}: {str(exc)[:200]})")
|
| 56 |
+
|
| 57 |
+
# Test 3: both
|
| 58 |
+
try:
|
| 59 |
+
r = client.chat.completions.create(
|
| 60 |
+
model=model,
|
| 61 |
+
messages=[{"role": "user", "content": [
|
| 62 |
+
{"type": "text", "text": "describe"},
|
| 63 |
+
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{png_b64}"}},
|
| 64 |
+
{"type": "input_audio", "input_audio": {"data": wav_b64, "format": "wav"}},
|
| 65 |
+
]}],
|
| 66 |
+
max_completion_tokens=16,
|
| 67 |
+
)
|
| 68 |
+
print(f" image+audio: OK -> {r.choices[0].message.content!r}")
|
| 69 |
+
except Exception as exc:
|
| 70 |
+
print(f" image+audio: FAIL ({exc.__class__.__name__}: {str(exc)[:200]})")
|
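The probe's in-memory WAV is the canonical 44-byte PCM header followed by n*2 zero bytes; a quick check of that layout, using the same struct fields as the script:

    import struct
    sr, secs = 16000, 0.1
    n = int(sr * secs)
    header = b"RIFF" + struct.pack("<I", 36 + n*2) + b"WAVEfmt " + struct.pack(
        "<IHHIIHH", 16, 1, 1, sr, sr*2, 2, 16) + b"data" + struct.pack("<I", n*2)
    assert len(header) == 44              # RIFF/WAVE preamble (12) + fmt chunk (24) + data header (8)
    wav = header + b"\x00" * (n * 2)
    assert len(wav) == 44 + n * 2         # total bytes base64-encoded for the API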
probe_gpt55_one_sample.py
ADDED
|
@@ -0,0 +1,30 @@
| 1 |
+
"""Probe GPT-5.5 on a single real video frame batch — see if we get non-empty content.
|
| 2 |
+
|
| 3 |
+
Usage: OPENAI_API_KEY=sk-... python3 /home/ubuntu/probe_gpt55_one_sample.py
|
| 4 |
+
"""
|
| 5 |
+
import os, sys
|
| 6 |
+
sys.path.insert(0, "/home/ubuntu")
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from case_study_gpt55 import (
|
| 9 |
+
call_gpt_frames, extract_frames_b64, _client,
|
| 10 |
+
SYNC_PROMPT, ORIG_DIR,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
if not os.environ.get("OPENAI_API_KEY"):
|
| 14 |
+
sys.exit("[error] need OPENAI_API_KEY")
|
| 15 |
+
|
| 16 |
+
client = _client(os.environ["OPENAI_API_KEY"])
|
| 17 |
+
sample = ORIG_DIR / "Anger Management - Fails of the Week (January 2019) _ FailArmy11.mp4"
|
| 18 |
+
print(f"sample: {sample} exists={sample.exists()}")
|
| 19 |
+
|
| 20 |
+
frames = extract_frames_b64(sample, n_frames=8)
|
| 21 |
+
print(f"extracted {len(frames)} frames")
|
| 22 |
+
|
| 23 |
+
for budget, effort in [(4000, "minimal"), (2000, "low"), (8000, "minimal")]:
|
| 24 |
+
print(f"\n--- max_completion_tokens={budget} reasoning_effort={effort} ---")
|
| 25 |
+
raw = call_gpt_frames(client, "gpt-5.5", SYNC_PROMPT, frames,
|
| 26 |
+
max_tokens=budget, reasoning_effort=effort)
|
| 27 |
+
print(f"raw_output ({len(raw)} chars): {raw!r}")
|
| 28 |
+
if raw:
|
| 29 |
+
print("OK -- got content; you can re-run the full eval")
|
| 30 |
+
break
|
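The (budget, effort) loop above is a simple retry ladder; a sketch of the same idea as a reusable helper, assuming the call_gpt_frames signature the probe already imports:

    def first_nonempty(client, model, prompt, frames, ladder):
        # Try (max_completion_tokens, reasoning_effort) pairs in order; stop at the first non-empty reply.
        for budget, effort in ladder:
            raw = call_gpt_frames(client, model, prompt, frames,
                                  max_tokens=budget, reasoning_effort=effort)
            if raw:
                return raw
        return ""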
prune_broken_rows.py
ADDED
|
@@ -0,0 +1,65 @@
| 1 |
+
"""Prune broken rows (empty raw_output / judge_fail) from eval_results.jsonl
|
| 2 |
+
files in-place so the next eval run will re-process them.
|
| 3 |
+
|
| 4 |
+
A backup .bak is written next to each modified file.
|
| 5 |
+
|
| 6 |
+
Usage:
|
| 7 |
+
python3 /home/ubuntu/prune_broken_rows.py <jsonl_or_dir> [more...]
|
| 8 |
+
python3 /home/ubuntu/prune_broken_rows.py /home/ubuntu/eval_results/sync/sync_xiaomi_mimo-v2.5_audioMuxed
|
| 9 |
+
python3 /home/ubuntu/prune_broken_rows.py /home/ubuntu/eval_results/{sync,swap,mute,swap_original,mute_original}/*xiaomi*
|
| 10 |
+
"""
|
| 11 |
+
import json
|
| 12 |
+
import shutil
|
| 13 |
+
import sys
|
| 14 |
+
from pathlib import Path
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def is_broken(row: dict) -> str:
|
| 18 |
+
raw = (row.get("raw_output") or "").strip()
|
| 19 |
+
if not raw:
|
| 20 |
+
return "empty_raw"
|
| 21 |
+
if row.get("parse_method") == "judge_fail":
|
| 22 |
+
return "judge_fail"
|
| 23 |
+
return ""
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def prune(jsonl: Path) -> None:
|
| 27 |
+
if jsonl.is_dir():
|
| 28 |
+
jsonl = jsonl / "eval_results.jsonl"
|
| 29 |
+
if not jsonl.exists():
|
| 30 |
+
print(f"[skip] {jsonl} does not exist")
|
| 31 |
+
return
|
| 32 |
+
rows = [json.loads(l) for l in open(jsonl) if l.strip()]
|
| 33 |
+
keep, drop = [], []
|
| 34 |
+
for r in rows:
|
| 35 |
+
why = is_broken(r)
|
| 36 |
+
(drop if why else keep).append((r, why))
|
| 37 |
+
if not drop:
|
| 38 |
+
print(f"[ok] {jsonl.parent.name}: {len(rows)} rows, none broken")
|
| 39 |
+
return
|
| 40 |
+
|
| 41 |
+
bak = jsonl.with_suffix(".jsonl.bak")
|
| 42 |
+
if not bak.exists():
|
| 43 |
+
shutil.copy2(jsonl, bak)
|
| 44 |
+
with open(jsonl, "w") as f:
|
| 45 |
+
for r, _ in keep:
|
| 46 |
+
f.write(json.dumps(r, ensure_ascii=False) + "\n")
|
| 47 |
+
|
| 48 |
+
by_reason = {}
|
| 49 |
+
for r, why in drop:
|
| 50 |
+
by_reason[why] = by_reason.get(why, 0) + 1
|
| 51 |
+
reasons = ", ".join(f"{k}={v}" for k, v in by_reason.items())
|
| 52 |
+
print(f"[pruned] {jsonl.parent.name}: kept {len(keep)} / dropped {len(drop)} ({reasons}) -> backup: {bak.name}")
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def main():
|
| 56 |
+
args = sys.argv[1:]
|
| 57 |
+
if not args:
|
| 58 |
+
print(__doc__)
|
| 59 |
+
sys.exit(1)
|
| 60 |
+
for a in args:
|
| 61 |
+
prune(Path(a).expanduser())
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
if __name__ == "__main__":
|
| 65 |
+
main()
|
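For reference, how is_broken() classifies rows (the parse_method value in the kept row is illustrative only):

    assert is_broken({"raw_output": "   "}) == "empty_raw"
    assert is_broken({"raw_output": "yes", "parse_method": "judge_fail"}) == "judge_fail"
    assert is_broken({"raw_output": "yes", "parse_method": "regex"}) == ""  # kept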
rejudge_mute_neutral.py
ADDED
|
@@ -0,0 +1,209 @@
| 1 |
+
"""Re-classify existing mute-neutral eval_results.jsonl with a 3-way GT-agnostic judge.
|
| 2 |
+
|
| 3 |
+
The original mute-neutral judge had two bugs:
|
| 4 |
+
1. Its system prompt told GPT "audio has been replaced with silence", which
|
| 5 |
+
biased classifications on CONTROL files (where audio is actually real).
|
| 6 |
+
2. Visual-only responses (model never addressed the audio at all) got
|
| 7 |
+
non-deterministically labeled as 'synced' or 'muted' depending on the
|
| 8 |
+
judge's mood, polluting both hallucination and detection rates.
|
| 9 |
+
|
| 10 |
+
This rejudge:
|
| 11 |
+
* Uses a GT-agnostic prompt: classify what the model CLAIMED about audio.
|
| 12 |
+
* Adds a third category 'visual_only' for responses that describe only
|
| 13 |
+
visuals and never mention sound (neither audio nor silence).
|
| 14 |
+
* Computes metrics with a separate visual_only_rate so they don't pollute
|
| 15 |
+
hallucination_rate or detection_rate.
|
| 16 |
+
|
| 17 |
+
Usage:
|
| 18 |
+
OPENAI_API_KEY=sk-... python3 /home/ubuntu/rejudge_mute_neutral.py \\
|
| 19 |
+
/path/to/eval_results.jsonl [/path/to/another.jsonl ...]
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import json
|
| 23 |
+
import sys
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
|
| 26 |
+
NEW_MUTE_JUDGE_SYSTEM = (
|
| 27 |
+
"You read a model's free-form description of a short video. Classify "
|
| 28 |
+
"what the model claimed about the AUDIO of the video. "
|
| 29 |
+
"\n\nReturn STRICT JSON only:"
|
| 30 |
+
"\n prediction: 'muted' | 'audio_described' | 'visual_only'"
|
| 31 |
+
"\n explanation: one short sentence"
|
| 32 |
+
"\n\nRules:"
|
| 33 |
+
"\n- 'muted': the model EXPLICITLY claims silence — phrases like 'no audio', "
|
| 34 |
+
" 'no audible sound', 'video is silent', 'no speech, music, or sounds', etc."
|
| 35 |
+
"\n- 'audio_described': the model describes any specific audible content — "
|
| 36 |
+
" speech with quoted dialogue, music, sound effects, impact thuds, laughter, "
|
| 37 |
+
" ambient noise, environmental sound, etc. Even one sound counts."
|
| 38 |
+
"\n- 'visual_only': the model only describes visual content and does NOT "
|
| 39 |
+
" address the audio at all (neither claims silence nor describes any sound). "
|
| 40 |
+
" Pure scene/action descriptions go here."
|
| 41 |
+
"\n\nBe objective: do NOT assume any prior about whether the audio is "
|
| 42 |
+
"actually silent or real — base the call only on what the model said."
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def call_judge(client, judge_model, raw):
|
| 47 |
+
try:
|
| 48 |
+
resp = client.chat.completions.create(
|
| 49 |
+
model=judge_model,
|
| 50 |
+
messages=[
|
| 51 |
+
{"role": "system", "content": NEW_MUTE_JUDGE_SYSTEM},
|
| 52 |
+
{"role": "user", "content": raw},
|
| 53 |
+
],
|
| 54 |
+
response_format={"type": "json_object"},
|
| 55 |
+
max_completion_tokens=300,
|
| 56 |
+
)
|
| 57 |
+
obj = json.loads(resp.choices[0].message.content or "")
|
| 58 |
+
pred = str(obj.get("prediction", "visual_only")).lower().strip()
|
| 59 |
+
if pred not in ("muted", "audio_described", "visual_only"):
|
| 60 |
+
pred = "visual_only"
|
| 61 |
+
return {
|
| 62 |
+
"pred_label": pred,
|
| 63 |
+
"judge_explanation": str(obj.get("explanation", "")),
|
| 64 |
+
"parse_method": "openai_judge_3way",
|
| 65 |
+
}
|
| 66 |
+
except Exception as exc:
|
| 67 |
+
return {
|
| 68 |
+
"pred_label": "visual_only",
|
| 69 |
+
"judge_explanation": f"judge_error: {exc}",
|
| 70 |
+
"parse_method": "judge_error",
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def write_metrics(rows, out_path: Path, gt_label: str):
|
| 75 |
+
"""Compute metrics per dataset GT.
|
| 76 |
+
|
| 77 |
+
intervention (gt=muted): correct iff pred=='muted'
|
| 78 |
+
- mute_detection_rate = muted / n
|
| 79 |
+
- hallucination_rate = audio_described / n (real hallucination)
|
| 80 |
+
- visual_only_rate = visual_only / n (model dodged audio)
|
| 81 |
+
control (gt=synced): correct iff pred=='audio_described'
|
| 82 |
+
- audio_present_detection_rate = audio_described / n
|
| 83 |
+
- false_silent_rate = muted / n
|
| 84 |
+
- visual_only_rate = visual_only / n
|
| 85 |
+
"""
|
| 86 |
+
breakdown = {"muted": 0, "audio_described": 0, "visual_only": 0}
|
| 87 |
+
parse_stats = {}
|
| 88 |
+
for r in rows:
|
| 89 |
+
breakdown[r["pred_label"]] = breakdown.get(r["pred_label"], 0) + 1
|
| 90 |
+
pm = r.get("parse_method", "")
|
| 91 |
+
parse_stats[pm] = parse_stats.get(pm, 0) + 1
|
| 92 |
+
n = len(rows)
|
| 93 |
+
|
| 94 |
+
if gt_label == "muted":
|
| 95 |
+
correct_pred = "muted"
|
| 96 |
+
task_kind = "mute_intervention"
|
| 97 |
+
else: # 'synced'
|
| 98 |
+
correct_pred = "audio_described"
|
| 99 |
+
task_kind = "mute_control"
|
| 100 |
+
|
| 101 |
+
correct = breakdown[correct_pred]
|
| 102 |
+
|
| 103 |
+
metrics = {
|
| 104 |
+
"total_samples": n,
|
| 105 |
+
"gt_label": gt_label,
|
| 106 |
+
"task_kind": task_kind,
|
| 107 |
+
"accuracy": round(correct / n, 4) if n else 0.0,
|
| 108 |
+
"prediction_breakdown": breakdown,
|
| 109 |
+
"visual_only_rate": round(breakdown["visual_only"] / n, 4) if n else 0.0,
|
| 110 |
+
"parse_stats": parse_stats,
|
| 111 |
+
"judge": "openai_judge_3way",
|
| 112 |
+
}
|
| 113 |
+
if gt_label == "muted":
|
| 114 |
+
metrics["mute_detection_rate"] = round(breakdown["muted"] / n, 4) if n else 0.0
|
| 115 |
+
metrics["hallucination_rate"] = round(breakdown["audio_described"] / n, 4) if n else 0.0
|
| 116 |
+
else:
|
| 117 |
+
metrics["audio_present_detection_rate"] = round(breakdown["audio_described"] / n, 4) if n else 0.0
|
| 118 |
+
metrics["false_silent_rate"] = round(breakdown["muted"] / n, 4) if n else 0.0
|
| 119 |
+
with open(out_path, "w") as f:
|
| 120 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _normalize_gt_label(rows):
|
| 124 |
+
"""Each dataset has exactly one GT. Pick from first row, normalize.
|
| 125 |
+
|
| 126 |
+
Existing files use gt_label='muted' for intervention and 'synced' for
|
| 127 |
+
control. We keep that convention for downstream compatibility.
|
| 128 |
+
"""
|
| 129 |
+
gt = (rows[0].get("gt_label") or "").lower()
|
| 130 |
+
if gt in ("muted", "mute"):
|
| 131 |
+
return "muted"
|
| 132 |
+
if gt in ("synced", "sync", "audio_present", "matched"):
|
| 133 |
+
return "synced"
|
| 134 |
+
return "muted" # default: assume intervention
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def main():
|
| 138 |
+
import argparse, os
|
| 139 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 140 |
+
p = argparse.ArgumentParser()
|
| 141 |
+
p.add_argument("paths", nargs="+", help="eval_results.jsonl files to re-judge")
|
| 142 |
+
p.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
|
| 143 |
+
p.add_argument("--judge-model", default="gpt-5.4")
|
| 144 |
+
p.add_argument("--workers", type=int, default=8)
|
| 145 |
+
args = p.parse_args()
|
| 146 |
+
|
| 147 |
+
if not args.openai_key:
|
| 148 |
+
sys.exit("[error] need --openai-key or OPENAI_API_KEY")
|
| 149 |
+
|
| 150 |
+
from openai import OpenAI
|
| 151 |
+
client = OpenAI(api_key=args.openai_key)
|
| 152 |
+
|
| 153 |
+
print("[preflight] testing judge API ...", flush=True)
|
| 154 |
+
probe = call_judge(client, args.judge_model, "I hear a loud thud and laughter.")
|
| 155 |
+
if probe["parse_method"] == "judge_error":
|
| 156 |
+
sys.exit(f"[error] judge API broken; aborting. detail: {probe['judge_explanation']}")
|
| 157 |
+
print(f"[preflight] OK (model={args.judge_model})", flush=True)
|
| 158 |
+
|
| 159 |
+
for path in args.paths:
|
| 160 |
+
path = Path(path)
|
| 161 |
+
rows_in = [json.loads(l) for l in open(path) if l.strip()]
|
| 162 |
+
if not rows_in:
|
| 163 |
+
print(f"[{path}] empty, skipping")
|
| 164 |
+
continue
|
| 165 |
+
gt_label = _normalize_gt_label(rows_in)
|
| 166 |
+
correct_pred = "muted" if gt_label == "muted" else "audio_described"
|
| 167 |
+
print(f"\n[{path}] re-judging {len(rows_in)} rows (gt_label={gt_label})")
|
| 168 |
+
|
| 169 |
+
def _go(r):
|
| 170 |
+
judge = call_judge(client, args.judge_model, r.get("raw_output", "") or "")
|
| 171 |
+
new = dict(r)
|
| 172 |
+
new["orig_pred_label"] = r.get("pred_label")
|
| 173 |
+
new["orig_judge_explanation"] = r.get("judge_explanation")
|
| 174 |
+
new["pred_label"] = judge["pred_label"]
|
| 175 |
+
new["judge_explanation"] = judge["judge_explanation"]
|
| 176 |
+
new["parse_method"] = judge["parse_method"]
|
| 177 |
+
new["correct"] = (judge["pred_label"] == correct_pred)
|
| 178 |
+
return new
|
| 179 |
+
|
| 180 |
+
results = []
|
| 181 |
+
with ThreadPoolExecutor(max_workers=args.workers) as ex:
|
| 182 |
+
futures = {ex.submit(_go, r): r for r in rows_in}
|
| 183 |
+
for i, fut in enumerate(as_completed(futures), 1):
|
| 184 |
+
results.append(fut.result())
|
| 185 |
+
if i % 20 == 0 or i == len(rows_in):
|
| 186 |
+
print(f" {i}/{len(rows_in)}", flush=True)
|
| 187 |
+
|
| 188 |
+
out_jsonl = path.with_name(path.stem.replace("eval_results", "eval_results.rejudged") + path.suffix)
|
| 189 |
+
out_metrics = path.parent / "metrics.rejudged.json"
|
| 190 |
+
with open(out_jsonl, "w") as f:
|
| 191 |
+
for r in results:
|
| 192 |
+
f.write(json.dumps(r, ensure_ascii=False) + "\n")
|
| 193 |
+
write_metrics(results, out_metrics, gt_label)
|
| 194 |
+
|
| 195 |
+
flips = sum(1 for r in results if r["pred_label"] != r["orig_pred_label"])
|
| 196 |
+
old_correct = sum(1 for r in results
|
| 197 |
+
if r["orig_pred_label"] == correct_pred
|
| 198 |
+
or (correct_pred == "audio_described" and r["orig_pred_label"] == "synced"))
|
| 199 |
+
new_correct = sum(1 for r in results if r["pred_label"] == correct_pred)
|
| 200 |
+
new_visual = sum(1 for r in results if r["pred_label"] == "visual_only")
|
| 201 |
+
print(f"[{path.name}] flipped {flips}/{len(results)}; "
|
| 202 |
+
f"correct (gt={gt_label}): {old_correct} -> {new_correct}; "
|
| 203 |
+
f"visual_only: {new_visual}")
|
| 204 |
+
print(f" saved -> {out_jsonl}")
|
| 205 |
+
print(f" saved -> {out_metrics}")
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
if __name__ == "__main__":
|
| 209 |
+
main()
|
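A worked example of the 3-way split above, with hypothetical counts for a 100-row intervention file (gt_label='muted'):

    breakdown = {"muted": 72, "audio_described": 21, "visual_only": 7}
    n = sum(breakdown.values())
    mute_detection_rate = breakdown["muted"] / n            # 0.72 (also the accuracy here)
    hallucination_rate  = breakdown["audio_described"] / n  # 0.21, real hallucinated audio
    visual_only_rate    = breakdown["visual_only"] / n      # 0.07, model dodged the audio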
rejudge_swap_neutral.py
ADDED
|
@@ -0,0 +1,191 @@
| 1 |
+
"""Re-classify existing swap-neutral eval_results.jsonl with a SMARTER judge.
|
| 2 |
+
|
| 3 |
+
The original judge only marked 'mismatched' when the model EXPLICITLY said the
|
| 4 |
+
audio doesn't match. Neutral describe-prompts rarely do that — the model just
|
| 5 |
+
describes what it sees and what it hears, leaving incongruity implicit.
|
| 6 |
+
|
| 7 |
+
This re-judge does plausibility checking on the (visual, audio) description
|
| 8 |
+
pair: given the visual scene, would the described audio reasonably occur?
|
| 9 |
+
|
| 10 |
+
Usage:
|
| 11 |
+
OPENAI_API_KEY=sk-... python3 /home/ubuntu/rejudge_swap_neutral.py \\
|
| 12 |
+
/path/to/eval_results.jsonl [/path/to/another.jsonl ...]
|
| 13 |
+
|
| 14 |
+
For each input file, writes alongside:
|
| 15 |
+
- eval_results.rejudged.jsonl (with new pred_label / judge_explanation /
|
| 16 |
+
parse_method, plus the originals preserved
|
| 17 |
+
as orig_pred_label / orig_judge_explanation)
|
| 18 |
+
- metrics.rejudged.json
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
import json
|
| 22 |
+
import sys
|
| 23 |
+
from pathlib import Path
|
| 24 |
+
|
| 25 |
+
NEW_SWAP_JUDGE_SYSTEM = (
|
| 26 |
+
"You read a model's free-form description of a short video — both the "
|
| 27 |
+
"visual content and the audio content. Your task is to determine, from "
|
| 28 |
+
"the model's own description, whether the described audio is plausibly "
|
| 29 |
+
"the natural soundtrack of the described visuals (regardless of whether "
|
| 30 |
+
"the model itself raises any concern). "
|
| 31 |
+
"\n\nReturn STRICT JSON only with keys:"
|
| 32 |
+
"\n prediction: 'mismatched' | 'synced'"
|
| 33 |
+
"\n explanation: one short sentence"
|
| 34 |
+
"\n\nClassification rules:"
|
| 35 |
+
"\n- 'mismatched' if the audio described would NOT plausibly arise from "
|
| 36 |
+
"the visual scene described. Treat it as mismatched even when the model "
|
| 37 |
+
"doesn't explicitly say so. Examples:"
|
| 38 |
+
"\n * visual=person flipping a wooden plank, audio=power-tool whirring → mismatched"
|
| 39 |
+
"\n * visual=baseball field outdoor, audio=woman laughing in a quiet room → mismatched"
|
| 40 |
+
"\n * visual=ocean waves, audio=engine revving + a clear phone-call dialogue → mismatched"
|
| 41 |
+
"\n * audio described as 'voiceover', 'dubbed', 'unrelated', 'sped-up cartoon voice' → mismatched"
|
| 42 |
+
"\n- 'synced' if the audio described is plausibly the natural soundtrack "
|
| 43 |
+
"of the visuals (impact thuds, crowd reactions, ambient noise that fits, "
|
| 44 |
+
"speech consistent with what's happening on screen)."
|
| 45 |
+
"\n- If the model only describes visuals and gives essentially NO audio "
|
| 46 |
+
"description, default to 'synced'."
|
| 47 |
+
"\n- If you genuinely cannot tell, default to 'synced'."
|
| 48 |
+
"\n- Be objective: do NOT assume any prior about whether the audio is "
|
| 49 |
+
"real or swapped — base the call only on whether the descriptions fit "
|
| 50 |
+
"each other."
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def call_judge(client, judge_model, raw):
|
| 55 |
+
try:
|
| 56 |
+
resp = client.chat.completions.create(
|
| 57 |
+
model=judge_model,
|
| 58 |
+
messages=[
|
| 59 |
+
{"role": "system", "content": NEW_SWAP_JUDGE_SYSTEM},
|
| 60 |
+
{"role": "user", "content": raw},
|
| 61 |
+
],
|
| 62 |
+
response_format={"type": "json_object"},
|
| 63 |
+
max_completion_tokens=300,
|
| 64 |
+
)
|
| 65 |
+
obj = json.loads(resp.choices[0].message.content or "")
|
| 66 |
+
pred = str(obj.get("prediction", "synced")).lower().strip()
|
| 67 |
+
if pred not in ("mismatched", "synced"):
|
| 68 |
+
pred = "synced"
|
| 69 |
+
return {
|
| 70 |
+
"pred_label": pred,
|
| 71 |
+
"judge_explanation": str(obj.get("explanation", "")),
|
| 72 |
+
"parse_method": "openai_judge_plausibility",
|
| 73 |
+
}
|
| 74 |
+
except Exception as exc:
|
| 75 |
+
return {
|
| 76 |
+
"pred_label": "synced",
|
| 77 |
+
"judge_explanation": f"judge_error: {exc}",
|
| 78 |
+
"parse_method": "judge_error",
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def write_metrics(rows, out_path: Path, gt_label: str):
|
| 83 |
+
"""Pick metric semantics based on the dataset's GT.
|
| 84 |
+
|
| 85 |
+
intervention (gt=mismatched): we want the model to say 'mismatched'.
|
| 86 |
+
- mismatch_detection_rate = correct / n
|
| 87 |
+
- hallucination_rate = (model said 'synced') / n
|
| 88 |
+
control (gt=synced): we want the model to say 'synced'.
|
| 89 |
+
- match_detection_rate = correct / n
|
| 90 |
+
- false_mismatch_rate = (model said 'mismatched') / n
|
| 91 |
+
"""
|
| 92 |
+
breakdown = {"mismatched": 0, "synced": 0, "delay": 0, "early": 0}
|
| 93 |
+
parse_stats = {}
|
| 94 |
+
correct = 0
|
| 95 |
+
for r in rows:
|
| 96 |
+
breakdown[r["pred_label"]] = breakdown.get(r["pred_label"], 0) + 1
|
| 97 |
+
pm = r.get("parse_method", "")
|
| 98 |
+
parse_stats[pm] = parse_stats.get(pm, 0) + 1
|
| 99 |
+
if r["pred_label"] == gt_label:
|
| 100 |
+
correct += 1
|
| 101 |
+
n = len(rows)
|
| 102 |
+
metrics = {
|
| 103 |
+
"total_samples": n,
|
| 104 |
+
"gt_label": gt_label,
|
| 105 |
+
"accuracy": round(correct / n, 4) if n else 0.0,
|
| 106 |
+
"prediction_breakdown": breakdown,
|
| 107 |
+
"parse_stats": parse_stats,
|
| 108 |
+
"judge": "openai_judge_plausibility",
|
| 109 |
+
}
|
| 110 |
+
if gt_label == "mismatched":
|
| 111 |
+
metrics["mismatch_detection_rate"] = round(breakdown["mismatched"] / n, 4) if n else 0.0
|
| 112 |
+
metrics["hallucination_rate"] = round(breakdown["synced"] / n, 4) if n else 0.0
|
| 113 |
+
metrics["task_kind"] = "swap_intervention"
|
| 114 |
+
elif gt_label == "synced":
|
| 115 |
+
metrics["match_detection_rate"] = round(breakdown["synced"] / n, 4) if n else 0.0
|
| 116 |
+
metrics["false_mismatch_rate"] = round(breakdown["mismatched"] / n, 4) if n else 0.0
|
| 117 |
+
metrics["task_kind"] = "swap_control"
|
| 118 |
+
with open(out_path, "w") as f:
|
| 119 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def main():
|
| 123 |
+
import argparse, os
|
| 124 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 125 |
+
p = argparse.ArgumentParser()
|
| 126 |
+
p.add_argument("paths", nargs="+", help="eval_results.jsonl files to re-judge")
|
| 127 |
+
p.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
|
| 128 |
+
p.add_argument("--judge-model", default="gpt-5.4")
|
| 129 |
+
p.add_argument("--workers", type=int, default=8)
|
| 130 |
+
args = p.parse_args()
|
| 131 |
+
|
| 132 |
+
if not args.openai_key:
|
| 133 |
+
sys.exit("[error] need --openai-key or OPENAI_API_KEY")
|
| 134 |
+
|
| 135 |
+
from openai import OpenAI
|
| 136 |
+
client = OpenAI(api_key=args.openai_key)
|
| 137 |
+
|
| 138 |
+
# Fail-fast preflight: a single judge call to surface auth/quota errors
|
| 139 |
+
# before we write garbage metrics across many files.
|
| 140 |
+
print("[preflight] testing judge API ...", flush=True)
|
| 141 |
+
probe = call_judge(client, args.judge_model, "video shows a dog. audio is silent.")
|
| 142 |
+
if probe["parse_method"] == "judge_error":
|
| 143 |
+
sys.exit(f"[error] judge API broken; aborting. detail: {probe['judge_explanation']}")
|
| 144 |
+
print(f"[preflight] OK (model={args.judge_model})", flush=True)
|
| 145 |
+
|
| 146 |
+
for path in args.paths:
|
| 147 |
+
path = Path(path)
|
| 148 |
+
rows_in = [json.loads(l) for l in open(path) if l.strip()]
|
| 149 |
+
if not rows_in:
|
| 150 |
+
print(f"[{path}] empty, skipping")
|
| 151 |
+
continue
|
| 152 |
+
gt_label = rows_in[0].get("gt_label") or "mismatched"
|
| 153 |
+
print(f"\n[{path}] re-judging {len(rows_in)} rows (gt_label={gt_label})")
|
| 154 |
+
|
| 155 |
+
def _go(r):
|
| 156 |
+
judge = call_judge(client, args.judge_model, r.get("raw_output", "") or "")
|
| 157 |
+
new = dict(r)
|
| 158 |
+
new["orig_pred_label"] = r.get("pred_label")
|
| 159 |
+
new["orig_judge_explanation"] = r.get("judge_explanation")
|
| 160 |
+
new["pred_label"] = judge["pred_label"]
|
| 161 |
+
new["judge_explanation"] = judge["judge_explanation"]
|
| 162 |
+
new["parse_method"] = judge["parse_method"]
|
| 163 |
+
new["correct"] = (judge["pred_label"] == gt_label)
|
| 164 |
+
return new
|
| 165 |
+
|
| 166 |
+
results = []
|
| 167 |
+
with ThreadPoolExecutor(max_workers=args.workers) as ex:
|
| 168 |
+
futures = {ex.submit(_go, r): r for r in rows_in}
|
| 169 |
+
for i, fut in enumerate(as_completed(futures), 1):
|
| 170 |
+
results.append(fut.result())
|
| 171 |
+
if i % 20 == 0 or i == len(rows_in):
|
| 172 |
+
print(f" {i}/{len(rows_in)}", flush=True)
|
| 173 |
+
|
| 174 |
+
out_jsonl = path.with_name(path.stem.replace("eval_results", "eval_results.rejudged") + path.suffix)
|
| 175 |
+
out_metrics = path.parent / "metrics.rejudged.json"
|
| 176 |
+
with open(out_jsonl, "w") as f:
|
| 177 |
+
for r in results:
|
| 178 |
+
f.write(json.dumps(r, ensure_ascii=False) + "\n")
|
| 179 |
+
write_metrics(results, out_metrics, gt_label)
|
| 180 |
+
|
| 181 |
+
flips = sum(1 for r in results if r["pred_label"] != r["orig_pred_label"])
|
| 182 |
+
old_correct = sum(1 for r in results if r["orig_pred_label"] == gt_label)
|
| 183 |
+
new_correct = sum(1 for r in results if r["pred_label"] == gt_label)
|
| 184 |
+
print(f"[{path.name}] flipped {flips}/{len(results)} predictions; "
|
| 185 |
+
f"correct (gt={gt_label}): {old_correct} -> {new_correct}")
|
| 186 |
+
print(f" saved -> {out_jsonl}")
|
| 187 |
+
print(f" saved -> {out_metrics}")
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
if __name__ == "__main__":
|
| 191 |
+
main()
|
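The swap re-judge keys its metric names off the file's single GT label; hypothetical counts to illustrate both branches of write_metrics():

    # intervention file (gt_label="mismatched"): we want the judge to say 'mismatched'
    b = {"mismatched": 64, "synced": 36}
    mismatch_detection_rate = b["mismatched"] / 100  # 0.64
    hallucination_rate      = b["synced"] / 100      # 0.36

    # control file (gt_label="synced"): we want the judge to say 'synced'
    b = {"mismatched": 9, "synced": 91}
    match_detection_rate = b["synced"] / 100      # 0.91
    false_mismatch_rate  = b["mismatched"] / 100  # 0.09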
supp/eval_daily_omni.py
ADDED
|
@@ -0,0 +1,586 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import argparse
|
| 5 |
+
import ctypes
|
| 6 |
+
import gc
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import site
|
| 11 |
+
import tempfile
|
| 12 |
+
import shutil
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from typing import Any, Dict, List, Optional
|
| 15 |
+
|
| 16 |
+
_npp_lib = Path(site.getsitepackages()[0]) / "nvidia" / "npp" / "lib"
|
| 17 |
+
_npp_so = _npp_lib / "libnppicc.so.12"
|
| 18 |
+
if _npp_so.is_file():
|
| 19 |
+
ctypes.CDLL(str(_npp_so), mode=ctypes.RTLD_GLOBAL)
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
|
| 24 |
+
DEFAULT_DATA_DIR = Path("./data/daily_omni")
|
| 25 |
+
DEFAULT_OUTPUT_DIR = Path("./eval_results/daily_omni")
|
| 26 |
+
|
| 27 |
+
QUESTION_TYPES = [
|
| 28 |
+
"AV Event Alignment", "Event Sequence", "Inference",
|
| 29 |
+
"Reasoning", "Comparative", "Context understanding",
|
| 30 |
+
]
|
| 31 |
+
CONTENT_PARENT_CATEGORIES = [
|
| 32 |
+
"Lifestyle", "Hobbies & Interests", "Education", "Entertainment",
|
| 33 |
+
"Sports", "Science & Technology", "Art & Creativity", "News & Politics",
|
| 34 |
+
"Automotive",
|
| 35 |
+
]
|
| 36 |
+
VIDEO_CATEGORIES = [
|
| 37 |
+
"Howto & Style", "Entertainment", "Education", "Science & Technology",
|
| 38 |
+
"People & Blogs", "News & Politics", "Sports", "Music",
|
| 39 |
+
"Film & Animation", "Gaming", "Comedy", "Autos & Vehicles",
|
| 40 |
+
"Nonprofits & Activism",
|
| 41 |
+
]
|
| 42 |
+
|
| 43 |
+
MCQ_PROMPT = (
|
| 44 |
+
"Select the best answer to the following multiple-choice question "
|
| 45 |
+
"based on the video. Respond with only the letter (A, B, C, or D) "
|
| 46 |
+
"of the correct option.\n"
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def parse_args() -> argparse.Namespace:
|
| 51 |
+
p = argparse.ArgumentParser(description="Evaluate on Daily-Omni benchmark.")
|
| 52 |
+
p.add_argument("--base-model", type=str,
|
| 53 |
+
default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
|
| 54 |
+
p.add_argument("--adapter", type=str, default=None)
|
| 55 |
+
p.add_argument("--data-dir", type=Path, default=DEFAULT_DATA_DIR)
|
| 56 |
+
p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
|
| 57 |
+
p.add_argument("--max-samples", type=int, default=-1)
|
| 58 |
+
p.add_argument("--max-new-tokens", type=int, default=32)
|
| 59 |
+
p.add_argument("--temperature", type=float, default=0.0)
|
| 60 |
+
p.add_argument("--label", type=str, default=None)
|
| 61 |
+
p.add_argument("--vllm", action="store_true", default=False)
|
| 62 |
+
p.add_argument("--tp", type=int, default=None)
|
| 63 |
+
p.add_argument("--batch-size", type=int, default=32)
|
| 64 |
+
p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
|
| 65 |
+
p.add_argument("--max-model-len", type=int, default=65536)
|
| 66 |
+
return p.parse_args()
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def load_model(base_model: str, adapter: Optional[str]):
|
| 70 |
+
from omni_model_loading import load_qwen_omni_model
|
| 71 |
+
|
| 72 |
+
model, processor, _ = load_qwen_omni_model(base_model, adapter)
|
| 73 |
+
return model, processor
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def run_inference(model, processor, video_path: str, prompt: str,
|
| 77 |
+
max_new_tokens: int, temperature: float) -> str:
|
| 78 |
+
from qwen_omni_utils import process_mm_info
|
| 79 |
+
|
| 80 |
+
tmp_dir = tempfile.mkdtemp(prefix="eval_do_")
|
| 81 |
+
masked_video = os.path.join(tmp_dir, "clip.mp4")
|
| 82 |
+
os.symlink(os.path.abspath(video_path), masked_video)
|
| 83 |
+
|
| 84 |
+
conversation = [
|
| 85 |
+
{
|
| 86 |
+
"role": "user",
|
| 87 |
+
"content": [
|
| 88 |
+
{"type": "video", "video": masked_video},
|
| 89 |
+
{"type": "text", "text": prompt},
|
| 90 |
+
],
|
| 91 |
+
}
|
| 92 |
+
]
|
| 93 |
+
|
| 94 |
+
text = processor.apply_chat_template(
|
| 95 |
+
conversation, add_generation_prompt=True, tokenize=False,
|
| 96 |
+
)
|
| 97 |
+
audios, images, videos = process_mm_info(conversation, use_audio_in_video=True)
|
| 98 |
+
inputs = processor(
|
| 99 |
+
text=text, audio=audios, images=images, videos=videos,
|
| 100 |
+
return_tensors="pt", padding=True, use_audio_in_video=True,
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
model_dtype = next(model.parameters()).dtype
|
| 104 |
+
converted = {}
|
| 105 |
+
for k, v in inputs.items():
|
| 106 |
+
if hasattr(v, "to"):
|
| 107 |
+
v = v.to(model.device)
|
| 108 |
+
if torch.is_floating_point(v):
|
| 109 |
+
v = v.to(model_dtype)
|
| 110 |
+
converted[k] = v
|
| 111 |
+
inputs = converted
|
| 112 |
+
|
| 113 |
+
from omni_model_loading import is_omni_thinker_model
|
| 114 |
+
|
| 115 |
+
is_thinker = is_omni_thinker_model(model)
|
| 116 |
+
if is_thinker:
|
| 117 |
+
gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
|
| 118 |
+
else:
|
| 119 |
+
gen_kwargs = {
|
| 120 |
+
"thinker_max_new_tokens": max_new_tokens,
|
| 121 |
+
"use_audio_in_video": True,
|
| 122 |
+
"return_audio": False,
|
| 123 |
+
"do_sample": temperature > 0,
|
| 124 |
+
}
|
| 125 |
+
if temperature > 0:
|
| 126 |
+
gen_kwargs["temperature"] = temperature
|
| 127 |
+
gen_kwargs["top_p"] = 0.9
|
| 128 |
+
|
| 129 |
+
with torch.inference_mode():
|
| 130 |
+
output_ids = model.generate(**inputs, **gen_kwargs)
|
| 131 |
+
|
| 132 |
+
if isinstance(output_ids, tuple):
|
| 133 |
+
output_ids = output_ids[0]
|
| 134 |
+
|
| 135 |
+
prompt_len = inputs["input_ids"].shape[1]
|
| 136 |
+
response = processor.batch_decode(
|
| 137 |
+
output_ids[:, prompt_len:], skip_special_tokens=True,
|
| 138 |
+
)[0].strip()
|
| 139 |
+
|
| 140 |
+
shutil.rmtree(tmp_dir, ignore_errors=True)
|
| 141 |
+
return response
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def preprocess_video_for_vllm(video_path: str):
|
| 145 |
+
from qwen_omni_utils import process_mm_info
|
| 146 |
+
import numpy as np
|
| 147 |
+
|
| 148 |
+
messages = [{
|
| 149 |
+
"role": "user",
|
| 150 |
+
"content": [
|
| 151 |
+
{"type": "video", "video": video_path, "fps": 2.0, "max_frames": 128},
|
| 152 |
+
{"type": "text", "text": "placeholder"},
|
| 153 |
+
],
|
| 154 |
+
}]
|
| 155 |
+
audios, images, videos = process_mm_info(messages, use_audio_in_video=True)
|
| 156 |
+
video_tensor = videos[0]
|
| 157 |
+
video_np = (video_tensor * 255).byte().numpy()
|
| 158 |
+
audio_tuple = None
|
| 159 |
+
if audios:
|
| 160 |
+
aud = audios[0]
|
| 161 |
+
if isinstance(aud, tuple):
|
| 162 |
+
audio_tuple = (aud[0].numpy() if hasattr(aud[0], "numpy") else np.asarray(aud[0]),
|
| 163 |
+
aud[1])
|
| 164 |
+
elif hasattr(aud, "numpy"):
|
| 165 |
+
audio_tuple = (aud.numpy(), 16000)
|
| 166 |
+
else:
|
| 167 |
+
audio_tuple = (np.asarray(aud), 16000)
|
| 168 |
+
return video_np, audio_tuple
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
SYSTEM_PROMPT = (
|
| 172 |
+
"You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
|
| 173 |
+
"Group, capable of perceiving auditory and visual inputs, as well as "
|
| 174 |
+
"generating text and speech."
|
| 175 |
+
)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def build_vllm_prompt(question: str, base_model: str) -> str:
|
| 179 |
+
from omni_model_loading import vllm_user_mm_prefix
|
| 180 |
+
|
| 181 |
+
mm = vllm_user_mm_prefix(base_model, include_audio=True)
|
| 182 |
+
return (
|
| 183 |
+
f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
|
| 184 |
+
f"<|im_start|>user\n"
|
| 185 |
+
f"{mm}"
|
| 186 |
+
f"{question}<|im_end|>\n"
|
| 187 |
+
f"<|im_start|>assistant\n"
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def extract_answer(text: str) -> str:
|
| 192 |
+
text = text.strip()
|
| 193 |
+
prefixes = [
|
| 194 |
+
"The best answer is", "The correct answer is",
|
| 195 |
+
"The answer is", "The answer", "Best answer:", "Best option:",
|
| 196 |
+
]
|
| 197 |
+
for prefix in prefixes:
|
| 198 |
+
text = text.replace(prefix, "")
|
| 199 |
+
|
| 200 |
+
if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
|
| 201 |
+
return ""
|
| 202 |
+
m = re.search(r"[ABCD]", text)
|
| 203 |
+
return m[0] if m else ""
|
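# Illustration (hypothetical model outputs) of how extract_answer above normalises
# free text to a single option letter; not part of the benchmark data:
#   extract_answer("The best answer is B.")                 -> "B"
#   extract_answer("C")                                     -> "C"
#   extract_answer("the clip mostly shows someone cooking") -> ""   (no A-D letter found)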
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def load_daily_omni(data_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
|
| 207 |
+
qa_path = data_dir / "qa.json"
|
| 208 |
+
videos_dir = data_dir / "Videos"
|
| 209 |
+
|
| 210 |
+
with open(qa_path) as f:
|
| 211 |
+
qa_data = json.load(f)
|
| 212 |
+
|
| 213 |
+
data = []
|
| 214 |
+
skipped = 0
|
| 215 |
+
for idx, row in enumerate(qa_data):
|
| 216 |
+
vid = row["video_id"]
|
| 217 |
+
video_path = videos_dir / vid / f"{vid}_video.mp4"
|
| 218 |
+
if not video_path.exists():
|
| 219 |
+
skipped += 1
|
| 220 |
+
continue
|
| 221 |
+
|
| 222 |
+
choices = row["Choice"]
|
| 223 |
+
options_text = "\n".join(choices)
|
| 224 |
+
prompt = MCQ_PROMPT + row["Question"] + "\n" + options_text + "\nThe best answer is:"
|
| 225 |
+
|
| 226 |
+
data.append({
|
| 227 |
+
"question_id": f"do_{idx}",
|
| 228 |
+
"video_id": vid,
|
| 229 |
+
"video_path": str(video_path),
|
| 230 |
+
"question_type": row.get("Type", ""),
|
| 231 |
+
"content_parent_category": row.get("content_parent_category", ""),
|
| 232 |
+
"content_fine_category": row.get("content_fine_category", ""),
|
| 233 |
+
"video_category": row.get("video_category", ""),
|
| 234 |
+
"video_duration": row.get("video_duration", ""),
|
| 235 |
+
"question": row["Question"],
|
| 236 |
+
"choices": choices,
|
| 237 |
+
"gt_answer": row["Answer"],
|
| 238 |
+
"prompt": prompt,
|
| 239 |
+
})
|
| 240 |
+
if skipped:
|
| 241 |
+
print(f"[data] Skipped {skipped} questions (video not found)")
|
| 242 |
+
if max_samples > 0:
|
| 243 |
+
data = data[:max_samples]
|
| 244 |
+
return data
|
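# Illustration of the qa.json row shape consumed by load_daily_omni above; the keys
# are exactly the ones the function reads, the values are made up:
#   {
#     "video_id": "abc123",
#     "Question": "What happens right after the door closes?",
#     "Choice": ["A. ...", "B. ...", "C. ...", "D. ..."],
#     "Answer": "B",
#     "Type": "Event Sequence",
#     "content_parent_category": "Lifestyle",
#     "content_fine_category": "Cooking",
#     "video_category": "People & Blogs",
#     "video_duration": "30s"
#   }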
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 248 |
+
total = len(results)
|
| 249 |
+
if total == 0:
|
| 250 |
+
return {}
|
| 251 |
+
|
| 252 |
+
correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
|
| 253 |
+
overall_acc = correct / total
|
| 254 |
+
|
| 255 |
+
def acc_for(items):
|
| 256 |
+
if not items:
|
| 257 |
+
return None
|
| 258 |
+
c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
|
| 259 |
+
return round(c / len(items), 4)
|
| 260 |
+
|
| 261 |
+
per_type = {}
|
| 262 |
+
for qt in QUESTION_TYPES:
|
| 263 |
+
subset = [r for r in results if r.get("question_type") == qt]
|
| 264 |
+
if subset:
|
| 265 |
+
per_type[qt] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 266 |
+
remaining = [r for r in results if r.get("question_type") not in QUESTION_TYPES]
|
| 267 |
+
if remaining:
|
| 268 |
+
other_types = set(r.get("question_type", "") for r in remaining)
|
| 269 |
+
for ot in sorted(other_types):
|
| 270 |
+
subset = [r for r in remaining if r.get("question_type") == ot]
|
| 271 |
+
if subset:
|
| 272 |
+
per_type[ot] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 273 |
+
|
| 274 |
+
per_content_cat = {}
|
| 275 |
+
for cat in CONTENT_PARENT_CATEGORIES:
|
| 276 |
+
subset = [r for r in results if r.get("content_parent_category") == cat]
|
| 277 |
+
if subset:
|
| 278 |
+
per_content_cat[cat] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 279 |
+
|
| 280 |
+
per_video_cat = {}
|
| 281 |
+
for vc in VIDEO_CATEGORIES:
|
| 282 |
+
subset = [r for r in results if r.get("video_category") == vc]
|
| 283 |
+
if subset:
|
| 284 |
+
per_video_cat[vc] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 285 |
+
|
| 286 |
+
per_duration = {}
|
| 287 |
+
for dur in sorted(set(r.get("video_duration", "") for r in results)):
|
| 288 |
+
if not dur:
|
| 289 |
+
continue
|
| 290 |
+
subset = [r for r in results if r.get("video_duration") == dur]
|
| 291 |
+
if subset:
|
| 292 |
+
per_duration[dur] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 293 |
+
|
| 294 |
+
return {
|
| 295 |
+
"total_samples": total,
|
| 296 |
+
"overall_accuracy": round(overall_acc, 4),
|
| 297 |
+
"per_question_type": per_type,
|
| 298 |
+
"per_content_category": per_content_cat,
|
| 299 |
+
"per_video_category": per_video_cat,
|
| 300 |
+
"per_duration": per_duration,
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def print_summary(metrics: Dict[str, Any], label: str) -> None:
|
| 305 |
+
print()
|
| 306 |
+
print(f"{'=' * 65}")
|
| 307 |
+
print(f" Daily-Omni Summary: {label}")
|
| 308 |
+
print(f"{'=' * 65}")
|
| 309 |
+
print(f" Total samples: {metrics['total_samples']}")
|
| 310 |
+
print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")
|
| 311 |
+
|
| 312 |
+
print(f" ─── Per Question Type ───")
|
| 313 |
+
for qt, d in metrics.get("per_question_type", {}).items():
|
| 314 |
+
print(f" {qt:30s}: {d['accuracy']:.1%} ({d['count']})")
|
| 315 |
+
|
| 316 |
+
print(f" ─── Per Content Category ───")
|
| 317 |
+
for cat, d in metrics.get("per_content_category", {}).items():
|
| 318 |
+
print(f" {cat:30s}: {d['accuracy']:.1%} ({d['count']})")
|
| 319 |
+
|
| 320 |
+
print(f" ─── Per Video Category ───")
|
| 321 |
+
for vc, d in metrics.get("per_video_category", {}).items():
|
| 322 |
+
print(f" {vc:30s}: {d['accuracy']:.1%} ({d['count']})")
|
| 323 |
+
|
| 324 |
+
if metrics.get("per_duration"):
|
| 325 |
+
print(f" ─── Per Duration ───")
|
| 326 |
+
for dur, d in metrics["per_duration"].items():
|
| 327 |
+
print(f" {dur:10s}: {d['accuracy']:.1%} ({d['count']})")
|
| 328 |
+
|
| 329 |
+
print(f"{'=' * 65}")
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def main() -> None:
|
| 333 |
+
args = parse_args()
|
| 334 |
+
label = args.label or (
|
| 335 |
+
Path(args.adapter).name if args.adapter
|
| 336 |
+
else Path(args.base_model).name
|
| 337 |
+
)
|
| 338 |
+
|
| 339 |
+
out_dir = args.output_dir / label
|
| 340 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 341 |
+
results_jsonl = out_dir / "eval_results.jsonl"
|
| 342 |
+
metrics_json = out_dir / "metrics.json"
|
| 343 |
+
summary_txt = out_dir / "summary.txt"
|
| 344 |
+
|
| 345 |
+
print("[data] Loading Daily-Omni dataset...")
|
| 346 |
+
test_data = load_daily_omni(args.data_dir, args.max_samples)
|
| 347 |
+
print(f"[data] {len(test_data)} questions ready for evaluation")
|
| 348 |
+
|
| 349 |
+
processed = set()
|
| 350 |
+
if results_jsonl.exists():
|
| 351 |
+
with open(results_jsonl) as f:
|
| 352 |
+
for line in f:
|
| 353 |
+
obj = json.loads(line)
|
| 354 |
+
processed.add(obj["question_id"])
|
| 355 |
+
print(f"[resume] {len(processed)} already processed, skipping")
|
| 356 |
+
|
| 357 |
+
use_vllm = args.vllm
|
| 358 |
+
model = processor = llm = None
|
| 359 |
+
vllm_preprocess_stats: Dict[str, int] | None = None
|
| 360 |
+
|
| 361 |
+
if use_vllm:
|
| 362 |
+
from vllm import LLM, SamplingParams
|
| 363 |
+
tp = args.tp or torch.cuda.device_count()
|
| 364 |
+
model_path = args.base_model
|
| 365 |
+
|
| 366 |
+
print("[vllm] Preprocessing videos (before model load) ...")
|
| 367 |
+
todo = [item for item in test_data if item["question_id"] not in processed]
|
| 368 |
+
unique_videos = list(dict.fromkeys(item["video_path"] for item in todo))
|
| 369 |
+
from omni_model_loading import parallel_preprocess_videos
|
| 370 |
+
preprocessed, preprocessed_audio, preprocess_failed_paths = parallel_preprocess_videos(
|
| 371 |
+
unique_videos, preprocess_video_for_vllm,
|
| 372 |
+
)
|
| 373 |
+
|
| 374 |
+
n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
|
| 375 |
+
if preprocess_failed_paths:
|
| 376 |
+
print(
|
| 377 |
+
f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
|
| 378 |
+
f"{n_pp_skip} question(s) will not use vLLM."
|
| 379 |
+
)
|
| 380 |
+
vllm_preprocess_stats = {
|
| 381 |
+
"preprocess_failed_videos": len(preprocess_failed_paths),
|
| 382 |
+
"preprocess_skipped_questions": n_pp_skip,
|
| 383 |
+
}
|
| 384 |
+
|
| 385 |
+
from omni_model_loading import cap_vllm_max_model_len
|
| 386 |
+
|
| 387 |
+
vllm_max_len = cap_vllm_max_model_len(model_path, args.max_model_len)
|
| 388 |
+
print(f"[vllm] Loading {model_path} with tp={tp} (max_model_len={vllm_max_len}) ...")
|
| 389 |
+
llm = LLM(
|
| 390 |
+
model=model_path,
|
| 391 |
+
tensor_parallel_size=tp,
|
| 392 |
+
max_model_len=vllm_max_len,
|
| 393 |
+
max_num_seqs=4,
|
| 394 |
+
limit_mm_per_prompt={"video": 1, "audio": 1},
|
| 395 |
+
gpu_memory_utilization=args.gpu_memory_utilization,
|
| 396 |
+
dtype="bfloat16",
|
| 397 |
+
trust_remote_code=True,
|
| 398 |
+
)
|
| 399 |
+
sampling_params = SamplingParams(
|
| 400 |
+
temperature=args.temperature if args.temperature > 0 else 0.0,
|
| 401 |
+
top_p=0.9 if args.temperature > 0 else 1.0,
|
| 402 |
+
max_tokens=args.max_new_tokens,
|
| 403 |
+
)
|
| 404 |
+
|
| 405 |
+
vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
|
| 406 |
+
fallback_items = []
|
| 407 |
+
print(f"[vllm] {len(vllm_todo)} questions ready, running inference ...")
|
| 408 |
+
|
| 409 |
+
for i, item in enumerate(vllm_todo):
|
| 410 |
+
if item["question_id"] in processed:
|
| 411 |
+
continue
|
| 412 |
+
inp = {
|
| 413 |
+
"prompt": build_vllm_prompt(item["prompt"], args.base_model),
|
| 414 |
+
"multi_modal_data": {"video": preprocessed[item["video_path"]]},
|
| 415 |
+
}
|
| 416 |
+
if item["video_path"] in preprocessed_audio:
|
| 417 |
+
inp["multi_modal_data"]["audio"] = preprocessed_audio[item["video_path"]]
|
| 418 |
+
try:
|
| 419 |
+
outputs = llm.generate([inp], sampling_params=sampling_params)
|
| 420 |
+
raw_output = outputs[0].outputs[0].text.strip()
|
| 421 |
+
pred = extract_answer(raw_output)
|
| 422 |
+
result = {
|
| 423 |
+
"question_id": item["question_id"],
|
| 424 |
+
"video_id": item["video_id"],
|
| 425 |
+
"question_type": item["question_type"],
|
| 426 |
+
"content_parent_category": item["content_parent_category"],
|
| 427 |
+
"content_fine_category": item["content_fine_category"],
|
| 428 |
+
"video_category": item["video_category"],
|
| 429 |
+
"video_duration": item["video_duration"],
|
| 430 |
+
"gt_answer": item["gt_answer"],
|
| 431 |
+
"pred_answer": pred,
|
| 432 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 433 |
+
"raw_output": raw_output,
|
| 434 |
+
}
|
| 435 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 436 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 437 |
+
processed.add(item["question_id"])
|
| 438 |
+
except (ValueError, RuntimeError) as exc:
|
| 439 |
+
if "longer than the maximum model length" in str(exc):
|
| 440 |
+
print(f" [too long] {item['question_id']} -> fallback")
|
| 441 |
+
fallback_items.append(item)
|
| 442 |
+
else:
|
| 443 |
+
raise
|
| 444 |
+
|
| 445 |
+
if (i + 1) % 100 == 0:
|
| 446 |
+
print(f" [vllm] [{i+1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")
|
| 447 |
+
|
| 448 |
+
preprocessed.clear()
|
| 449 |
+
preprocessed_audio.clear()
|
| 450 |
+
|
| 451 |
+
vllm_results = []
|
| 452 |
+
if results_jsonl.exists():
|
| 453 |
+
with open(results_jsonl) as f:
|
| 454 |
+
for line in f:
|
| 455 |
+
vllm_results.append(json.loads(line))
|
| 456 |
+
if vllm_results:
|
| 457 |
+
vllm_metrics = compute_metrics(vllm_results)
|
| 458 |
+
vllm_metrics_path = out_dir / "metrics_vllm.json"
|
| 459 |
+
with open(vllm_metrics_path, "w", encoding="utf-8") as f:
|
| 460 |
+
json.dump(vllm_metrics, f, indent=2, ensure_ascii=False)
|
| 461 |
+
print(f"[vllm] Intermediate metrics saved to {vllm_metrics_path}")
|
| 462 |
+
print_summary(vllm_metrics, label + " (vllm only)")
|
| 463 |
+
|
| 464 |
+
if fallback_items:
|
| 465 |
+
print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
|
| 466 |
+
del llm
|
| 467 |
+
gc.collect()
|
| 468 |
+
torch.cuda.empty_cache()
|
| 469 |
+
|
| 470 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 471 |
+
for item in tqdm(fallback_items, desc="Fallback", unit="q"):
|
| 472 |
+
if item["question_id"] in processed:
|
| 473 |
+
continue
|
| 474 |
+
try:
|
| 475 |
+
raw_output = run_inference(
|
| 476 |
+
model, processor, item["video_path"], item["prompt"],
|
| 477 |
+
args.max_new_tokens, args.temperature,
|
| 478 |
+
)
|
| 479 |
+
except Exception as exc:
|
| 480 |
+
import traceback
|
| 481 |
+
print(f" [error] {item['question_id']}: {exc}")
|
| 482 |
+
traceback.print_exc()
|
| 483 |
+
raw_output = ""
|
| 484 |
+
|
| 485 |
+
pred = extract_answer(raw_output)
|
| 486 |
+
result = {
|
| 487 |
+
"question_id": item["question_id"],
|
| 488 |
+
"video_id": item["video_id"],
|
| 489 |
+
"question_type": item["question_type"],
|
| 490 |
+
"content_parent_category": item["content_parent_category"],
|
| 491 |
+
"content_fine_category": item["content_fine_category"],
|
| 492 |
+
"video_category": item["video_category"],
|
| 493 |
+
"video_duration": item["video_duration"],
|
| 494 |
+
"gt_answer": item["gt_answer"],
|
| 495 |
+
"pred_answer": pred,
|
| 496 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 497 |
+
"raw_output": raw_output,
|
| 498 |
+
}
|
| 499 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 500 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 501 |
+
processed.add(item["question_id"])
|
| 502 |
+
gc.collect()
|
| 503 |
+
torch.cuda.empty_cache()
|
| 504 |
+
|
| 505 |
+
else:
|
| 506 |
+
print("[model] Loading model...")
|
| 507 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 508 |
+
|
| 509 |
+
for item in tqdm(test_data, desc="Daily-Omni", unit="q"):
|
| 510 |
+
if item["question_id"] in processed:
|
| 511 |
+
continue
|
| 512 |
+
|
| 513 |
+
try:
|
| 514 |
+
raw_output = run_inference(
|
| 515 |
+
model, processor, item["video_path"], item["prompt"],
|
| 516 |
+
args.max_new_tokens, args.temperature,
|
| 517 |
+
)
|
| 518 |
+
except Exception as exc:
|
| 519 |
+
import traceback
|
| 520 |
+
print(f" [error] {item['question_id']}: {exc}")
|
| 521 |
+
traceback.print_exc()
|
| 522 |
+
raw_output = ""
|
| 523 |
+
|
| 524 |
+
pred = extract_answer(raw_output)
|
| 525 |
+
|
| 526 |
+
result = {
|
| 527 |
+
"question_id": item["question_id"],
|
| 528 |
+
"video_id": item["video_id"],
|
| 529 |
+
"question_type": item["question_type"],
|
| 530 |
+
"content_parent_category": item["content_parent_category"],
|
| 531 |
+
"content_fine_category": item["content_fine_category"],
|
| 532 |
+
"video_category": item["video_category"],
|
| 533 |
+
"video_duration": item["video_duration"],
|
| 534 |
+
"gt_answer": item["gt_answer"],
|
| 535 |
+
"pred_answer": pred,
|
| 536 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 537 |
+
"raw_output": raw_output,
|
| 538 |
+
}
|
| 539 |
+
|
| 540 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 541 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 542 |
+
|
| 543 |
+
processed.add(item["question_id"])
|
| 544 |
+
gc.collect()
|
| 545 |
+
torch.cuda.empty_cache()
|
| 546 |
+
|
| 547 |
+
all_results = []
|
| 548 |
+
if results_jsonl.exists():
|
| 549 |
+
with open(results_jsonl) as f:
|
| 550 |
+
for line in f:
|
| 551 |
+
all_results.append(json.loads(line))
|
| 552 |
+
|
| 553 |
+
if not all_results:
|
| 554 |
+
print("[warn] No results to compute metrics from.")
|
| 555 |
+
return
|
| 556 |
+
|
| 557 |
+
metrics = compute_metrics(all_results)
|
| 558 |
+
metrics["eval_config"] = {
|
| 559 |
+
"base_model": args.base_model,
|
| 560 |
+
"adapter": args.adapter,
|
| 561 |
+
"data_dir": str(args.data_dir),
|
| 562 |
+
"max_new_tokens": args.max_new_tokens,
|
| 563 |
+
"temperature": args.temperature,
|
| 564 |
+
}
|
| 565 |
+
if vllm_preprocess_stats is not None:
|
| 566 |
+
metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats
|
| 567 |
+
|
| 568 |
+
with open(metrics_json, "w", encoding="utf-8") as f:
|
| 569 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 570 |
+
|
| 571 |
+
print_summary(metrics, label)
|
| 572 |
+
|
| 573 |
+
with open(summary_txt, "w", encoding="utf-8") as f:
|
| 574 |
+
import io, contextlib
|
| 575 |
+
buf = io.StringIO()
|
| 576 |
+
with contextlib.redirect_stdout(buf):
|
| 577 |
+
print_summary(metrics, label)
|
| 578 |
+
f.write(buf.getvalue())
|
| 579 |
+
|
| 580 |
+
print(f"\n[output] Results: {results_jsonl}")
|
| 581 |
+
print(f"[output] Metrics: {metrics_json}")
|
| 582 |
+
print(f"[output] Summary: {summary_txt}")
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
if __name__ == "__main__":
|
| 586 |
+
main()
|
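For reference, a minimal sketch of reading back the files this script writes. Paths follow DEFAULT_OUTPUT_DIR and the default label derived from the base-model name; the invocation in the comment and the model/GPU choices are illustrative, not prescribed.

# Example invocation (flags correspond to parse_args above):
#   python supp/eval_daily_omni.py --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct --vllm --tp 4
import json
from pathlib import Path

out_dir = Path("./eval_results/daily_omni") / "Qwen3-Omni-30B-A3B-Instruct"
metrics = json.loads((out_dir / "metrics.json").read_text())
print(metrics["overall_accuracy"], metrics["total_samples"])
# Per-question-type breakdown mirrors compute_metrics() above:
for qt, d in metrics["per_question_type"].items():
    print(f"{qt}: {d['accuracy']:.1%} ({d['count']})")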
supp/eval_dpo_sync.py
ADDED
|
@@ -0,0 +1,826 @@
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import argparse
|
| 5 |
+
import ctypes
|
| 6 |
+
import gc
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import site
|
| 11 |
+
import time
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
from statistics import mean, median
|
| 14 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 15 |
+
|
| 16 |
+
_npp_lib = Path(site.getsitepackages()[0]) / "nvidia" / "npp" / "lib"
|
| 17 |
+
_npp_so = _npp_lib / "libnppicc.so.12"
|
| 18 |
+
if _npp_so.is_file():
|
| 19 |
+
ctypes.CDLL(str(_npp_so), mode=ctypes.RTLD_GLOBAL)
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
|
| 24 |
+
_openai_client = None
|
| 25 |
+
|
| 26 |
+
GPT_JUDGE_SYSTEM = """\
|
| 27 |
+
You are a structured-output extractor. The user will give you a model's free-text \
|
| 28 |
+
response about audio-video synchronization. Extract the following fields and return \
|
| 29 |
+
ONLY valid JSON (no markdown, no explanation):
|
| 30 |
+
|
| 31 |
+
{"synced": <bool>, "direction": "none"|"delay"|"early", "offset_sec": <float>, "t_v": <float or null>, "t_a": <float or null>, "explanation": "<one sentence>"}
|
| 32 |
+
|
| 33 |
+
Rules:
|
| 34 |
+
- synced: true if the model says audio and video are synchronized, false otherwise.
|
| 35 |
+
- direction: "delay" means audio comes AFTER the visual event; "early" means audio \
|
| 36 |
+
comes BEFORE the visual event; "none" if synced is true.
|
| 37 |
+
- offset_sec: estimated time gap in seconds. 0.0 if synced.
|
| 38 |
+
- t_v: the timestamp (in seconds) the model attributes to the VISUAL event. null if not mentioned.
|
| 39 |
+
- t_a: the timestamp (in seconds) the model attributes to the AUDIO event. null if not mentioned.
|
| 40 |
+
- If you cannot determine a field, use the default (true / "none" / 0.0 / null / "").
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _get_openai_client(api_key: Optional[str] = None):
|
| 45 |
+
global _openai_client
|
| 46 |
+
if _openai_client is not None:
|
| 47 |
+
return _openai_client
|
| 48 |
+
key = api_key or os.environ.get("OPENAI_API_KEY")
|
| 49 |
+
if not key:
|
| 50 |
+
return None
|
| 51 |
+
from openai import OpenAI
|
| 52 |
+
_openai_client = OpenAI(api_key=key)
|
| 53 |
+
return _openai_client
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def gpt_extract_prediction(
|
| 57 |
+
raw_output: str,
|
| 58 |
+
api_key: Optional[str] = None,
|
| 59 |
+
model: str = "gpt-5.4",
|
| 60 |
+
) -> Optional[Dict[str, Any]]:
|
| 61 |
+
client = _get_openai_client(api_key)
|
| 62 |
+
if client is None:
|
| 63 |
+
return None
|
| 64 |
+
try:
|
| 65 |
+
resp = client.chat.completions.create(
|
| 66 |
+
model=model,
|
| 67 |
+
messages=[
|
| 68 |
+
{"role": "system", "content": GPT_JUDGE_SYSTEM},
|
| 69 |
+
{"role": "user", "content": raw_output},
|
| 70 |
+
],
|
| 71 |
+
temperature=0.0,
|
| 72 |
+
max_completion_tokens=200,
|
| 73 |
+
)
|
| 74 |
+
text = resp.choices[0].message.content.strip()
|
| 75 |
+
for pat in [
|
| 76 |
+
re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
|
| 77 |
+
re.compile(r"(\{.*?\})", re.DOTALL),
|
| 78 |
+
]:
|
| 79 |
+
m = pat.search(text)
|
| 80 |
+
if m:
|
| 81 |
+
obj = json.loads(m.group(1))
|
| 82 |
+
synced = obj.get("synced")
|
| 83 |
+
if isinstance(synced, str):
|
| 84 |
+
synced = synced.lower() in ("true", "yes", "1")
|
| 85 |
+
direction = str(obj.get("direction", "none")).lower().strip()
|
| 86 |
+
if direction not in ("delay", "early", "none"):
|
| 87 |
+
direction = "none"
|
| 88 |
+
t_v_raw = obj.get("t_v")
|
| 89 |
+
t_a_raw = obj.get("t_a")
|
| 90 |
+
pred_t_v = float(t_v_raw) if t_v_raw is not None else None
|
| 91 |
+
pred_t_a = float(t_a_raw) if t_a_raw is not None else None
|
| 92 |
+
return {
|
| 93 |
+
"pred_synced": bool(synced),
|
| 94 |
+
"pred_direction": direction,
|
| 95 |
+
"pred_offset_sec": float(obj.get("offset_sec", 0.0)),
|
| 96 |
+
"pred_t_v": pred_t_v,
|
| 97 |
+
"pred_t_a": pred_t_a,
|
| 98 |
+
"pred_explanation": str(obj.get("explanation", "")),
|
| 99 |
+
"parse_method": "gpt_judge",
|
| 100 |
+
}
|
| 101 |
+
except Exception as exc:
|
| 102 |
+
print(f" [gpt-judge] API error: {exc}", flush=True)
|
| 103 |
+
return None
|
| 104 |
+
|
| 105 |
+
DATA_ROOT = Path("./data/video_source")
|
| 106 |
+
ORIGINAL_ROOT = DATA_ROOT / "original"
|
| 107 |
+
AUDIO_ROOT = DATA_ROOT / "extracted_audio" / "original"
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def set_data_root(root: Path) -> None:
|
| 111 |
+
global DATA_ROOT, ORIGINAL_ROOT, AUDIO_ROOT
|
| 112 |
+
DATA_ROOT = root.resolve()
|
| 113 |
+
ORIGINAL_ROOT = DATA_ROOT / "original"
|
| 114 |
+
AUDIO_ROOT = DATA_ROOT / "extracted_audio" / "original"
|
| 115 |
+
|
| 116 |
+
EVAL_PROMPT = """\
|
| 117 |
+
Watch this video and listen to its audio carefully. \
|
| 118 |
+
Determine whether the audio and video tracks are synchronized. \
|
| 119 |
+
If they are not synchronized, identify the direction of the offset \
|
| 120 |
+
(audio delayed or audio early relative to video) and estimate the offset in seconds. \
|
| 121 |
+
Explain your reasoning."""
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def parse_args() -> argparse.Namespace:
|
| 125 |
+
p = argparse.ArgumentParser(description="Evaluate sync model on test set.")
|
| 126 |
+
p.add_argument("--base-model", type=str, required=True)
|
| 127 |
+
p.add_argument("--adapter", type=str, default=None)
|
| 128 |
+
p.add_argument(
|
| 129 |
+
"--data-root",
|
| 130 |
+
type=Path,
|
| 131 |
+
default=Path("./data/video_source"),
|
| 132 |
+
)
|
| 133 |
+
p.add_argument(
|
| 134 |
+
"--test-jsonl",
|
| 135 |
+
type=Path,
|
| 136 |
+
default=None,
|
| 137 |
+
)
|
| 138 |
+
p.add_argument(
|
| 139 |
+
"--output-dir",
|
| 140 |
+
type=Path,
|
| 141 |
+
default=None,
|
| 142 |
+
)
|
| 143 |
+
p.add_argument("--max-samples", type=int, default=-1)
|
| 144 |
+
p.add_argument("--max-new-tokens", type=int, default=256)
|
| 145 |
+
p.add_argument("--temperature", type=float, default=0.0)
|
| 146 |
+
p.add_argument("--batch-size", type=int, default=1)
|
| 147 |
+
p.add_argument("--label", type=str, default=None)
|
| 148 |
+
p.add_argument("--gpt-judge", action="store_true", default=False)
|
| 149 |
+
p.add_argument("--openai-api-key", type=str, default=None)
|
| 150 |
+
p.add_argument("--gpt-model", type=str, default="gpt-5.4")
|
| 151 |
+
p.add_argument("--vllm", action="store_true", default=False)
|
| 152 |
+
p.add_argument("--tp", type=int, default=None)
|
| 153 |
+
p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
|
| 154 |
+
p.add_argument("--max-model-len", type=int, default=65536)
|
| 155 |
+
return p.parse_args()
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def parse_ground_truth(video_field: str) -> Dict[str, Any]:
|
| 159 |
+
m_delay = re.search(r"_delay_([\d.]+)s\.mp4", video_field)
|
| 160 |
+
m_early = re.search(r"_early_([\d.]+)s\.mp4", video_field)
|
| 161 |
+
if m_delay:
|
| 162 |
+
return {"synced": False, "direction": "delay", "offset_sec": float(m_delay.group(1))}
|
| 163 |
+
elif m_early:
|
| 164 |
+
return {"synced": False, "direction": "early", "offset_sec": float(m_early.group(1))}
|
| 165 |
+
else:
|
| 166 |
+
return {"synced": True, "direction": "none", "offset_sec": 0.0}
|
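# Illustration (hypothetical filenames) of the ground-truth label parse_ground_truth
# derives from the filename suffix alone:
#   "clip_0001_delay_0.8s.mp4" -> {"synced": False, "direction": "delay", "offset_sec": 0.8}
#   "clip_0001_early_1.2s.mp4" -> {"synced": False, "direction": "early", "offset_sec": 1.2}
#   "clip_0001.mp4"            -> {"synced": True,  "direction": "none",  "offset_sec": 0.0}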
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def resolve_video_path(video_field: str) -> str:
|
| 170 |
+
if os.path.isabs(video_field) and os.path.exists(video_field):
|
| 171 |
+
return video_field
|
| 172 |
+
candidate_dirs = [
|
| 173 |
+
ORIGINAL_ROOT / "uag_oops",
|
| 174 |
+
DATA_ROOT / "random_shift_video" / "delay",
|
| 175 |
+
DATA_ROOT / "random_shift_video" / "early",
|
| 176 |
+
ORIGINAL_ROOT,
|
| 177 |
+
]
|
| 178 |
+
for d in candidate_dirs:
|
| 179 |
+
c = d / video_field
|
| 180 |
+
if c.exists():
|
| 181 |
+
return str(c)
|
| 182 |
+
return str(ORIGINAL_ROOT / "uag_oops" / video_field)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def resolve_audio_path(video_path: str) -> str:
|
| 186 |
+
video_p = Path(video_path)
|
| 187 |
+
try:
|
| 188 |
+
rel = video_p.relative_to(DATA_ROOT)
|
| 189 |
+
except ValueError:
|
| 190 |
+
rel = Path(video_p.name)
|
| 191 |
+
audio_path = DATA_ROOT / "extracted_audio" / rel.with_suffix(".wav")
|
| 192 |
+
if audio_path.exists():
|
| 193 |
+
return str(audio_path)
|
| 194 |
+
base_stem = re.sub(r"_(delay|early)_[\d.]+s$", "", video_p.stem)
|
| 195 |
+
fallback = DATA_ROOT / "extracted_audio" / "original" / "uag_oops" / (base_stem + ".wav")
|
| 196 |
+
if fallback.exists():
|
| 197 |
+
return str(fallback)
|
| 198 |
+
return str(audio_path)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def extract_timestamps(text: str) -> Tuple[Optional[float], Optional[float]]:
|
| 202 |
+
text_lower = text.lower()
|
| 203 |
+
all_times = [(m.start(), float(m.group(1)))
|
| 204 |
+
for m in re.finditer(r"(?:at|around|about)\s+([\d]+\.?\d*)\s*s", text_lower)]
|
| 205 |
+
if len(all_times) >= 2:
|
| 206 |
+
return (all_times[0][1], all_times[1][1])
|
| 207 |
+
if len(all_times) == 1:
|
| 208 |
+
return (all_times[0][1], all_times[0][1])
|
| 209 |
+
return (None, None)
|
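# Illustration (made-up sentence): the first "at/around/about <N>s" mention is taken
# as the visual timestamp, the second as the audio timestamp.
#   extract_timestamps("the door slams at 3.2s and the bang is heard around 4.0 s")
#   -> (3.2, 4.0)        # a single mention would be returned as both t_v and t_a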
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def load_test_data(path: Path, max_samples: int) -> List[Dict[str, Any]]:
|
| 213 |
+
data = []
|
| 214 |
+
with open(path) as f:
|
| 215 |
+
for line in f:
|
| 216 |
+
line = line.strip()
|
| 217 |
+
if not line:
|
| 218 |
+
continue
|
| 219 |
+
obj = json.loads(line)
|
| 220 |
+
video_path = resolve_video_path(obj["video"])
|
| 221 |
+
audio_path = resolve_audio_path(video_path)
|
| 222 |
+
gt = parse_ground_truth(obj["video"])
|
| 223 |
+
gt_t_v, gt_t_a = extract_timestamps(obj.get("chosen", ""))
|
| 224 |
+
data.append({
|
| 225 |
+
"video": obj["video"],
|
| 226 |
+
"video_path": video_path,
|
| 227 |
+
"audio_path": audio_path,
|
| 228 |
+
"prompt": obj["prompt"],
|
| 229 |
+
"chosen": obj["chosen"],
|
| 230 |
+
"rejected": obj["rejected"],
|
| 231 |
+
"gt_synced": gt["synced"],
|
| 232 |
+
"gt_direction": gt["direction"],
|
| 233 |
+
"gt_offset_sec": gt["offset_sec"],
|
| 234 |
+
"gt_t_v": gt_t_v,
|
| 235 |
+
"gt_t_a": gt_t_a,
|
| 236 |
+
})
|
| 237 |
+
if max_samples > 0:
|
| 238 |
+
data = data[:max_samples]
|
| 239 |
+
return data
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def extract_prediction(text: str) -> Dict[str, Any]:
|
| 243 |
+
text = text.strip()
|
| 244 |
+
|
| 245 |
+
for pattern in [
|
| 246 |
+
re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
|
| 247 |
+
re.compile(r"(\{[^{}]*\"synced\"[^{}]*\})", re.DOTALL),
|
| 248 |
+
re.compile(r"(\{.*?\})", re.DOTALL),
|
| 249 |
+
]:
|
| 250 |
+
m = pattern.search(text)
|
| 251 |
+
if m:
|
| 252 |
+
try:
|
| 253 |
+
obj = json.loads(m.group(1))
|
| 254 |
+
synced = obj.get("synced")
|
| 255 |
+
if isinstance(synced, str):
|
| 256 |
+
synced = synced.lower() in ("true", "yes", "1")
|
| 257 |
+
direction = str(obj.get("direction", "none")).lower().strip()
|
| 258 |
+
if direction not in ("delay", "early", "none"):
|
| 259 |
+
direction = "none"
|
| 260 |
+
offset = float(obj.get("offset_sec", 0.0))
|
| 261 |
+
explanation = str(obj.get("explanation", ""))
|
| 262 |
+
t_v_raw = obj.get("t_v")
|
| 263 |
+
t_a_raw = obj.get("t_a")
|
| 264 |
+
return {
|
| 265 |
+
"pred_synced": bool(synced),
|
| 266 |
+
"pred_direction": direction,
|
| 267 |
+
"pred_offset_sec": offset,
|
| 268 |
+
"pred_t_v": float(t_v_raw) if t_v_raw is not None else None,
|
| 269 |
+
"pred_t_a": float(t_a_raw) if t_a_raw is not None else None,
|
| 270 |
+
"pred_explanation": explanation,
|
| 271 |
+
"parse_method": "json",
|
| 272 |
+
}
|
| 273 |
+
except (json.JSONDecodeError, ValueError, TypeError):
|
| 274 |
+
continue
|
| 275 |
+
|
| 276 |
+
text_lower = text.lower()
|
| 277 |
+
synced = None
|
| 278 |
+
direction = "none"
|
| 279 |
+
offset = 0.0
|
| 280 |
+
|
| 281 |
+
pred_t_v, pred_t_a = extract_timestamps(text)
|
| 282 |
+
|
| 283 |
+
desync_kws = [
|
| 284 |
+
"not synchronized", "not aligned", "desync", "mismatch", "misalign",
|
| 285 |
+
"not in sync", "out of sync", "clearly not", "not well aligned",
|
| 286 |
+
"are not aligned", "audio and visual event are clearly not",
|
| 287 |
+
]
|
| 288 |
+
sync_kws = [
|
| 289 |
+
"synchronized", "well aligned", "well-aligned", "in sync",
|
| 290 |
+
"appear synchronized", "appears synchronized", "closely aligned",
|
| 291 |
+
"audio and video are aligned", "matches closely",
|
| 292 |
+
]
|
| 293 |
+
if any(kw in text_lower for kw in desync_kws):
|
| 294 |
+
synced = False
|
| 295 |
+
elif any(kw in text_lower for kw in sync_kws):
|
| 296 |
+
synced = True
|
| 297 |
+
|
| 298 |
+
if synced is False:
|
| 299 |
+
delay_kws = ["audio delayed", "audio lags", "audio comes after", "sound comes after",
|
| 300 |
+
"sound is heard later", "audio is delayed", "sound follows"]
|
| 301 |
+
early_kws = ["audio early", "audio leads", "audio comes before", "sound comes before",
|
| 302 |
+
"audio precedes", "sound is heard before", "sound precedes", "audio is early"]
|
| 303 |
+
if any(kw in text_lower for kw in delay_kws):
|
| 304 |
+
direction = "delay"
|
| 305 |
+
elif any(kw in text_lower for kw in early_kws):
|
| 306 |
+
direction = "early"
|
| 307 |
+
|
| 308 |
+
if direction == "none" and pred_t_v is not None and pred_t_a is not None and pred_t_v != pred_t_a:
|
| 309 |
+
if pred_t_a > pred_t_v:
|
| 310 |
+
direction = "delay"
|
| 311 |
+
else:
|
| 312 |
+
direction = "early"
|
| 313 |
+
offset = abs(pred_t_a - pred_t_v)
|
| 314 |
+
|
| 315 |
+
if offset == 0.0:
|
| 316 |
+
offset_match = re.search(
|
| 317 |
+
r"(?:gap|separation|offset|mismatch|differ\w*)\s*(?:of\s+)?(?:about\s+|roughly\s+|approximately\s+)?"
|
| 318 |
+
r"([\d]+\.?\d*)\s*s",
|
| 319 |
+
text_lower,
|
| 320 |
+
)
|
| 321 |
+
if not offset_match:
|
| 322 |
+
offset_match = re.search(
|
| 323 |
+
r"(?:about\s+|roughly\s+|approximately\s+)?([\d]+\.?\d*)\s*s\s*"
|
| 324 |
+
r"(?:gap|separation|offset|mismatch|differ)",
|
| 325 |
+
text_lower,
|
| 326 |
+
)
|
| 327 |
+
if offset_match:
|
| 328 |
+
offset = float(offset_match.group(1))
|
| 329 |
+
|
| 330 |
+
if synced is None:
|
| 331 |
+
synced = True
|
| 332 |
+
|
| 333 |
+
return {
|
| 334 |
+
"pred_synced": synced,
|
| 335 |
+
"pred_direction": direction,
|
| 336 |
+
"pred_offset_sec": offset,
|
| 337 |
+
"pred_t_v": pred_t_v,
|
| 338 |
+
"pred_t_a": pred_t_a,
|
| 339 |
+
"pred_explanation": "",
|
| 340 |
+
"parse_method": "regex_fallback",
|
| 341 |
+
}
|
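# Illustration (hypothetical free-text answer) of the keyword/regex fallback path in
# extract_prediction above:
#   extract_prediction("The audio is out of sync; the sound comes after the impact, "
#                      "with a gap of about 1.5 s.")
#   -> pred_synced=False, pred_direction="delay", pred_offset_sec=1.5,
#      parse_method="regex_fallback"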
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def load_model(base_model: str, adapter: Optional[str]):
|
| 345 |
+
from multi_omni_adapter import get_adapter
|
| 346 |
+
|
| 347 |
+
omni = get_adapter(base_model, adapter)
|
| 348 |
+
omni.load()
|
| 349 |
+
return omni
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def run_inference(omni, video_path: str, audio_path: str,
|
| 353 |
+
max_new_tokens: int, temperature: float) -> str:
|
| 354 |
+
return omni.infer(video_path, audio_path, EVAL_PROMPT, max_new_tokens, temperature)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
SYSTEM_PROMPT = (
|
| 358 |
+
"You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
|
| 359 |
+
"Group, capable of perceiving auditory and visual inputs, as well as "
|
| 360 |
+
"generating text and speech."
|
| 361 |
+
)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def preprocess_video_for_vllm(video_path: str):
|
| 365 |
+
from qwen_omni_utils import process_mm_info
|
| 366 |
+
import numpy as np
|
| 367 |
+
|
| 368 |
+
messages = [{
|
| 369 |
+
"role": "user",
|
| 370 |
+
"content": [
|
| 371 |
+
{"type": "video", "video": video_path, "fps": 2.0, "max_frames": 128},
|
| 372 |
+
{"type": "text", "text": "placeholder"},
|
| 373 |
+
],
|
| 374 |
+
}]
|
| 375 |
+
audios, images, videos = process_mm_info(messages, use_audio_in_video=True)
|
| 376 |
+
video_tensor = videos[0]
|
| 377 |
+
return (video_tensor * 255).byte().numpy()
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def preprocess_audio_for_vllm(audio_path: str, target_sr: int = 16000):
|
| 381 |
+
import numpy as np
|
| 382 |
+
import wave
|
| 383 |
+
|
| 384 |
+
with wave.open(audio_path, "rb") as w:
|
| 385 |
+
sr = w.getframerate()
|
| 386 |
+
n = w.getnframes()
|
| 387 |
+
raw = w.readframes(n)
|
| 388 |
+
x = np.frombuffer(raw, dtype=np.int16).astype(np.float32) / 32768.0
|
| 389 |
+
if sr != target_sr:
|
| 390 |
+
duration = len(x) / sr
|
| 391 |
+
new_len = int(duration * target_sr)
|
| 392 |
+
x = np.interp(
|
| 393 |
+
np.linspace(0, len(x) - 1, new_len),
|
| 394 |
+
np.arange(len(x)),
|
| 395 |
+
x,
|
| 396 |
+
)
|
| 397 |
+
return x, target_sr
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def build_vllm_prompt(question: str, base_model: str) -> str:
|
| 401 |
+
from omni_model_loading import vllm_user_mm_prefix
|
| 402 |
+
|
| 403 |
+
mm = vllm_user_mm_prefix(base_model, include_audio=True)
|
| 404 |
+
return (
|
| 405 |
+
f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
|
| 406 |
+
f"<|im_start|>user\n"
|
| 407 |
+
f"{mm}"
|
| 408 |
+
f"{question}<|im_end|>\n"
|
| 409 |
+
f"<|im_start|>assistant\n"
|
| 410 |
+
)
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 414 |
+
total = len(results)
|
| 415 |
+
if total == 0:
|
| 416 |
+
return {}
|
| 417 |
+
|
| 418 |
+
sync_correct = sum(1 for r in results if r["pred_synced"] == r["gt_synced"])
|
| 419 |
+
sync_acc = sync_correct / total
|
| 420 |
+
|
| 421 |
+
desync_samples = [r for r in results if not r["gt_synced"]]
|
| 422 |
+
if desync_samples:
|
| 423 |
+
dir_correct = sum(1 for r in desync_samples if r["pred_direction"] == r["gt_direction"])
|
| 424 |
+
dir_acc = dir_correct / len(desync_samples)
|
| 425 |
+
else:
|
| 426 |
+
dir_acc = None
|
| 427 |
+
|
| 428 |
+
def label(r, prefix):
|
| 429 |
+
if r[f"{prefix}synced"]:
|
| 430 |
+
return "synced"
|
| 431 |
+
return r[f"{prefix}direction"]
|
| 432 |
+
three_class_correct = sum(1 for r in results if label(r, "pred_") == label(r, "gt_"))
|
| 433 |
+
three_class_acc = three_class_correct / total
|
| 434 |
+
|
| 435 |
+
offset_errors = []
|
| 436 |
+
for r in desync_samples:
|
| 437 |
+
if not r["pred_synced"] and r["pred_offset_sec"] > 0:
|
| 438 |
+
offset_errors.append(abs(r["pred_offset_sec"] - r["gt_offset_sec"]))
|
| 439 |
+
offset_mae = mean(offset_errors) if offset_errors else None
|
| 440 |
+
offset_median = median(offset_errors) if offset_errors else None
|
| 441 |
+
|
| 442 |
+
synced_samples = [r for r in results if r["gt_synced"]]
|
| 443 |
+
delay_samples = [r for r in results if r["gt_direction"] == "delay"]
|
| 444 |
+
early_samples = [r for r in results if r["gt_direction"] == "early"]
|
| 445 |
+
|
| 446 |
+
synced_acc = (sum(1 for r in synced_samples if r["pred_synced"]) / len(synced_samples)) if synced_samples else None
|
| 447 |
+
delay_acc = (sum(1 for r in delay_samples if not r["pred_synced"] and r["pred_direction"] == "delay") / len(delay_samples)) if delay_samples else None
|
| 448 |
+
early_acc = (sum(1 for r in early_samples if not r["pred_synced"] and r["pred_direction"] == "early") / len(early_samples)) if early_samples else None
|
| 449 |
+
|
| 450 |
+
within_05 = sum(1 for e in offset_errors if e <= 0.5) if offset_errors else 0
|
| 451 |
+
within_10 = sum(1 for e in offset_errors if e <= 1.0) if offset_errors else 0
|
| 452 |
+
|
| 453 |
+
json_parsed = sum(1 for r in results if r.get("parse_method") == "json")
|
| 454 |
+
regex_parsed = sum(1 for r in results if r.get("parse_method") == "regex_fallback")
|
| 455 |
+
gpt_parsed = sum(1 for r in results if r.get("parse_method") == "gpt_judge")
|
| 456 |
+
|
| 457 |
+
tv_errors = []
|
| 458 |
+
ta_errors = []
|
| 459 |
+
for r in results:
|
| 460 |
+
gt_tv = r.get("gt_t_v")
|
| 461 |
+
gt_ta = r.get("gt_t_a")
|
| 462 |
+
pred_tv = r.get("pred_t_v")
|
| 463 |
+
pred_ta = r.get("pred_t_a")
|
| 464 |
+
if gt_tv is not None and pred_tv is not None:
|
| 465 |
+
tv_errors.append(abs(pred_tv - gt_tv))
|
| 466 |
+
if gt_ta is not None and pred_ta is not None:
|
| 467 |
+
ta_errors.append(abs(pred_ta - gt_ta))
|
| 468 |
+
tv_mae = round(mean(tv_errors), 4) if tv_errors else None
|
| 469 |
+
ta_mae = round(mean(ta_errors), 4) if ta_errors else None
|
| 470 |
+
tv_median = round(median(tv_errors), 4) if tv_errors else None
|
| 471 |
+
ta_median = round(median(ta_errors), 4) if ta_errors else None
|
| 472 |
+
|
| 473 |
+
return {
|
| 474 |
+
"total_samples": total,
|
| 475 |
+
"sync_desync_accuracy": round(sync_acc, 4),
|
| 476 |
+
"three_class_accuracy": round(three_class_acc, 4),
|
| 477 |
+
"direction_accuracy_on_desync": round(dir_acc, 4) if dir_acc is not None else None,
|
| 478 |
+
"per_category": {
|
| 479 |
+
"synced_accuracy": round(synced_acc, 4) if synced_acc is not None else None,
|
| 480 |
+
"delay_accuracy": round(delay_acc, 4) if delay_acc is not None else None,
|
| 481 |
+
"early_accuracy": round(early_acc, 4) if early_acc is not None else None,
|
| 482 |
+
"synced_count": len(synced_samples),
|
| 483 |
+
"delay_count": len(delay_samples),
|
| 484 |
+
"early_count": len(early_samples),
|
| 485 |
+
},
|
| 486 |
+
"offset_mae_sec": round(offset_mae, 4) if offset_mae is not None else None,
|
| 487 |
+
"offset_median_sec": round(offset_median, 4) if offset_median is not None else None,
|
| 488 |
+
"offset_within_0.5s": within_05,
|
| 489 |
+
"offset_within_1.0s": within_10,
|
| 490 |
+
"offset_evaluated_count": len(offset_errors),
|
| 491 |
+
"timestamp_tv_mae_sec": tv_mae,
|
| 492 |
+
"timestamp_ta_mae_sec": ta_mae,
|
| 493 |
+
"timestamp_tv_median_sec": tv_median,
|
| 494 |
+
"timestamp_ta_median_sec": ta_median,
|
| 495 |
+
"timestamp_evaluated_tv": len(tv_errors),
|
| 496 |
+
"timestamp_evaluated_ta": len(ta_errors),
|
| 497 |
+
"parse_stats": {"json": json_parsed, "regex_fallback": regex_parsed, "gpt_judge": gpt_parsed},
|
| 498 |
+
}
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
def print_summary(metrics: Dict[str, Any], label: str) -> None:
|
| 502 |
+
print()
|
| 503 |
+
print(f"{'=' * 60}")
|
| 504 |
+
print(f" Eval Summary: {label}")
|
| 505 |
+
print(f"{'=' * 60}")
|
| 506 |
+
print(f" Total samples: {metrics['total_samples']}")
|
| 507 |
+
print(f" Sync/Desync Accuracy: {metrics['sync_desync_accuracy']:.1%}")
|
| 508 |
+
print(f" 3-Class Accuracy: {metrics['three_class_accuracy']:.1%}")
|
| 509 |
+
if metrics["direction_accuracy_on_desync"] is not None:
|
| 510 |
+
print(f" Direction Acc (desync): {metrics['direction_accuracy_on_desync']:.1%}")
|
| 511 |
+
print(f" ─── Per Category ───")
|
| 512 |
+
pc = metrics["per_category"]
|
| 513 |
+
if pc["synced_accuracy"] is not None:
|
| 514 |
+
print(f" Synced correct: {pc['synced_accuracy']:.1%} ({pc['synced_count']} samples)")
|
| 515 |
+
if pc["delay_accuracy"] is not None:
|
| 516 |
+
print(f" Delay correct: {pc['delay_accuracy']:.1%} ({pc['delay_count']} samples)")
|
| 517 |
+
if pc["early_accuracy"] is not None:
|
| 518 |
+
print(f" Early correct: {pc['early_accuracy']:.1%} ({pc['early_count']} samples)")
|
| 519 |
+
print(f" ─── Offset Estimation ───")
|
| 520 |
+
if metrics["offset_mae_sec"] is not None:
|
| 521 |
+
print(f" MAE: {metrics['offset_mae_sec']:.3f}s")
|
| 522 |
+
print(f" Median Error: {metrics['offset_median_sec']:.3f}s")
|
| 523 |
+
print(f" Within 0.5s: {metrics['offset_within_0.5s']} / {metrics['offset_evaluated_count']}")
|
| 524 |
+
print(f" Within 1.0s: {metrics['offset_within_1.0s']} / {metrics['offset_evaluated_count']}")
|
| 525 |
+
else:
|
| 526 |
+
print(f" (no valid offset predictions)")
|
| 527 |
+
print(f" ─── Timestamp Estimation ───")
|
| 528 |
+
if metrics.get("timestamp_tv_mae_sec") is not None:
|
| 529 |
+
print(f" t_v MAE: {metrics['timestamp_tv_mae_sec']:.3f}s ({metrics['timestamp_evaluated_tv']} samples)")
|
| 530 |
+
print(f" t_v Median Error: {metrics['timestamp_tv_median_sec']:.3f}s")
|
| 531 |
+
else:
|
| 532 |
+
print(f" t_v: (no valid pairs)")
|
| 533 |
+
if metrics.get("timestamp_ta_mae_sec") is not None:
|
| 534 |
+
print(f" t_a MAE: {metrics['timestamp_ta_mae_sec']:.3f}s ({metrics['timestamp_evaluated_ta']} samples)")
|
| 535 |
+
print(f" t_a Median Error: {metrics['timestamp_ta_median_sec']:.3f}s")
|
| 536 |
+
else:
|
| 537 |
+
print(f" t_a: (no valid pairs)")
|
| 538 |
+
print(f" ─── Parse Stats ───")
|
| 539 |
+
ps = metrics["parse_stats"]
|
| 540 |
+
print(f" JSON parsed: {ps['json']}")
|
| 541 |
+
print(f" GPT judge: {ps.get('gpt_judge', 0)}")
|
| 542 |
+
print(f" Regex fallback: {ps['regex_fallback']}")
|
| 543 |
+
print(f"{'=' * 60}")
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def main() -> None:
|
| 547 |
+
args = parse_args()
|
| 548 |
+
set_data_root(args.data_root)
|
| 549 |
+
test_jsonl = args.test_jsonl or (DATA_ROOT / "test.jsonl")
|
| 550 |
+
output_dir = args.output_dir or Path("./eval_results/sync")
|
| 551 |
+
|
| 552 |
+
if args.gpt_judge:
|
| 553 |
+
client = _get_openai_client(args.openai_api_key)
|
| 554 |
+
if client is None:
|
| 555 |
+
print("[ERROR] --gpt-judge requires OPENAI_API_KEY env var or --openai-api-key argument.")
|
| 556 |
+
raise SystemExit(1)
|
| 557 |
+
try:
|
| 558 |
+
test_resp = client.chat.completions.create(
|
| 559 |
+
model=args.gpt_model,
|
| 560 |
+
messages=[{"role": "user", "content": "Say OK"}],
|
| 561 |
+
max_completion_tokens=5,
|
| 562 |
+
)
|
| 563 |
+
print(f"[gpt-judge] API verified. Model: {args.gpt_model}")
|
| 564 |
+
except Exception as exc:
|
| 565 |
+
print(f"[ERROR] GPT API check failed: {exc}")
|
| 566 |
+
raise SystemExit(1)
|
| 567 |
+
|
| 568 |
+
label = args.label or (Path(args.adapter).name if args.adapter else Path(args.base_model).name)
|
| 569 |
+
|
| 570 |
+
out_dir = output_dir / label
|
| 571 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 572 |
+
results_jsonl = out_dir / "eval_results.jsonl"
|
| 573 |
+
metrics_json = out_dir / "metrics.json"
|
| 574 |
+
summary_txt = out_dir / "summary.txt"
|
| 575 |
+
|
| 576 |
+
test_data = load_test_data(test_jsonl, args.max_samples)
|
| 577 |
+
print(f"[data] Loaded {len(test_data)} test samples")
|
| 578 |
+
|
| 579 |
+
processed = set()
|
| 580 |
+
if results_jsonl.exists():
|
| 581 |
+
with open(results_jsonl) as f:
|
| 582 |
+
for line in f:
|
| 583 |
+
obj = json.loads(line)
|
| 584 |
+
processed.add(obj["video"])
|
| 585 |
+
print(f"[resume] {len(processed)} already processed, skipping")
|
| 586 |
+
|
| 587 |
+
def _do_extract(raw_output: str) -> Dict[str, Any]:
|
| 588 |
+
if args.gpt_judge and raw_output:
|
| 589 |
+
gpt_pred = gpt_extract_prediction(
|
| 590 |
+
raw_output, api_key=args.openai_api_key, model=args.gpt_model,
|
| 591 |
+
)
|
| 592 |
+
if gpt_pred is not None:
|
| 593 |
+
return gpt_pred
|
| 594 |
+
return extract_prediction(raw_output)
|
| 595 |
+
|
| 596 |
+
def _build_result(item: Dict, pred: Dict, raw_output: str) -> Dict:
|
| 597 |
+
return {
|
| 598 |
+
"video": item["video"],
|
| 599 |
+
"video_path": item["video_path"],
|
| 600 |
+
"gt_synced": item["gt_synced"],
|
| 601 |
+
"gt_direction": item["gt_direction"],
|
| 602 |
+
"gt_offset_sec": item["gt_offset_sec"],
|
| 603 |
+
"gt_t_v": item["gt_t_v"],
|
| 604 |
+
"gt_t_a": item["gt_t_a"],
|
| 605 |
+
"pred_synced": pred["pred_synced"],
|
| 606 |
+
"pred_direction": pred["pred_direction"],
|
| 607 |
+
"pred_offset_sec": pred["pred_offset_sec"],
|
| 608 |
+
"pred_t_v": pred.get("pred_t_v"),
|
| 609 |
+
"pred_t_a": pred.get("pred_t_a"),
|
| 610 |
+
"pred_explanation": pred.get("pred_explanation", ""),
|
| 611 |
+
"parse_method": pred["parse_method"],
|
| 612 |
+
"raw_output": raw_output,
|
| 613 |
+
}
|
| 614 |
+
|
| 615 |
+
use_vllm = args.vllm
|
| 616 |
+
|
| 617 |
+
if use_vllm:
|
| 618 |
+
from vllm import LLM, SamplingParams
|
| 619 |
+
|
| 620 |
+
tp = args.tp or torch.cuda.device_count()
|
| 621 |
+
todo = [item for item in test_data if item["video"] not in processed]
|
| 622 |
+
|
| 623 |
+
print(f"[vllm] Preprocessing {len(todo)} samples (video + audio) ...")
|
| 624 |
+
preprocessed_v: Dict[str, Any] = {}
|
| 625 |
+
preprocessed_a: Dict[str, Any] = {}
|
| 626 |
+
failed_paths: set = set()
|
| 627 |
+
|
| 628 |
+
unique_videos = list(dict.fromkeys(item["video_path"] for item in todo))
|
| 629 |
+
unique_audios = list(dict.fromkeys(item["audio_path"] for item in todo))
|
| 630 |
+
|
| 631 |
+
for vp in tqdm(unique_videos, desc="Preprocess video", unit="video"):
|
| 632 |
+
if vp in failed_paths:
|
| 633 |
+
continue
|
| 634 |
+
try:
|
| 635 |
+
preprocessed_v[vp] = preprocess_video_for_vllm(vp)
|
| 636 |
+
except Exception as e:
|
| 637 |
+
failed_paths.add(vp)
|
| 638 |
+
print(f" [skip] video preprocess error: {Path(vp).name}: {e}")
|
| 639 |
+
|
| 640 |
+
for ap in tqdm(unique_audios, desc="Preprocess audio", unit="audio"):
|
| 641 |
+
if ap in failed_paths:
|
| 642 |
+
continue
|
| 643 |
+
try:
|
| 644 |
+
preprocessed_a[ap] = preprocess_audio_for_vllm(ap)
|
| 645 |
+
except Exception as e:
|
| 646 |
+
failed_paths.add(ap)
|
| 647 |
+
print(f" [skip] audio preprocess error: {Path(ap).name}: {e}")
|
| 648 |
+
|
| 649 |
+
n_skip = sum(1 for item in todo
|
| 650 |
+
if item["video_path"] in failed_paths or item["audio_path"] in failed_paths)
|
| 651 |
+
if failed_paths:
|
| 652 |
+
print(f"[vllm] Preprocess failed for {len(failed_paths)} path(s), "
|
| 653 |
+
f"{n_skip} sample(s) will be skipped.")
|
| 654 |
+
|
| 655 |
+
from omni_model_loading import cap_vllm_max_model_len
|
| 656 |
+
|
| 657 |
+
vllm_max_len = cap_vllm_max_model_len(args.base_model, args.max_model_len)
|
| 658 |
+
print(f"[vllm] Loading {args.base_model} with tp={tp} (max_model_len={vllm_max_len}) ...")
|
| 659 |
+
llm = LLM(
|
| 660 |
+
model=args.base_model,
|
| 661 |
+
tensor_parallel_size=tp,
|
| 662 |
+
max_model_len=vllm_max_len,
|
| 663 |
+
max_num_seqs=4,
|
| 664 |
+
limit_mm_per_prompt={"video": 1, "audio": 1},
|
| 665 |
+
gpu_memory_utilization=args.gpu_memory_utilization,
|
| 666 |
+
dtype="bfloat16",
|
| 667 |
+
trust_remote_code=True,
|
| 668 |
+
)
|
| 669 |
+
sampling_params = SamplingParams(
|
| 670 |
+
temperature=args.temperature if args.temperature > 0 else 0.0,
|
| 671 |
+
top_p=0.9 if args.temperature > 0 else 1.0,
|
| 672 |
+
max_tokens=args.max_new_tokens,
|
| 673 |
+
)
|
| 674 |
+
|
| 675 |
+
vllm_todo = [item for item in todo
|
| 676 |
+
if item["video_path"] not in failed_paths
|
| 677 |
+
and item["audio_path"] not in failed_paths]
|
| 678 |
+
fallback_items = [item for item in todo
|
| 679 |
+
if item["video_path"] in failed_paths
|
| 680 |
+
or item["audio_path"] in failed_paths]
|
| 681 |
+
print(f"[vllm] {len(vllm_todo)} samples ready, {len(fallback_items)} deferred to transformers ...")
|
| 682 |
+
|
| 683 |
+
for i, item in enumerate(vllm_todo):
|
| 684 |
+
if item["video"] in processed:
|
| 685 |
+
continue
|
| 686 |
+
inp = {
|
| 687 |
+
"prompt": build_vllm_prompt(EVAL_PROMPT, args.base_model),
|
| 688 |
+
"multi_modal_data": {
|
| 689 |
+
"video": preprocessed_v[item["video_path"]],
|
| 690 |
+
"audio": preprocessed_a[item["audio_path"]],
|
| 691 |
+
},
|
| 692 |
+
}
|
| 693 |
+
try:
|
| 694 |
+
outputs = llm.generate([inp], sampling_params=sampling_params)
|
| 695 |
+
raw_output = outputs[0].outputs[0].text.strip()
|
| 696 |
+
except (ValueError, RuntimeError) as exc:
|
| 697 |
+
if "longer than the maximum model length" in str(exc):
|
| 698 |
+
print(f" [too long] {item['video']} -> fallback")
|
| 699 |
+
fallback_items.append(item)
|
| 700 |
+
continue
|
| 701 |
+
else:
|
| 702 |
+
print(f" [error] {item['video']}: {exc}")
|
| 703 |
+
raw_output = ""
|
| 704 |
+
|
| 705 |
+
pred = _do_extract(raw_output)
|
| 706 |
+
result = _build_result(item, pred, raw_output)
|
| 707 |
+
|
| 708 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 709 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 710 |
+
processed.add(item["video"])
|
| 711 |
+
|
| 712 |
+
if (i + 1) % 100 == 0:
|
| 713 |
+
print(f" [vllm] [{i+1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")
|
| 714 |
+
|
| 715 |
+
preprocessed_v.clear()
|
| 716 |
+
preprocessed_a.clear()
|
| 717 |
+
|
| 718 |
+
if fallback_items:
|
| 719 |
+
print(f"[fallback] Running {len(fallback_items)} samples with transformers ...")
|
| 720 |
+
del llm
|
| 721 |
+
gc.collect()
|
| 722 |
+
torch.cuda.empty_cache()
|
| 723 |
+
|
| 724 |
+
omni = load_model(args.base_model, args.adapter)
|
| 725 |
+
for item in tqdm(fallback_items, desc="Fallback", unit="q"):
|
| 726 |
+
if item["video"] in processed:
|
| 727 |
+
continue
|
| 728 |
+
try:
|
| 729 |
+
raw_output = run_inference(
|
| 730 |
+
omni, item["video_path"], item["audio_path"],
|
| 731 |
+
args.max_new_tokens, args.temperature,
|
| 732 |
+
)
|
| 733 |
+
except Exception as exc:
|
| 734 |
+
import traceback
|
| 735 |
+
print(f" [error] {item['video']}: {exc}")
|
| 736 |
+
traceback.print_exc()
|
| 737 |
+
raw_output = ""
|
| 738 |
+
|
| 739 |
+
pred = _do_extract(raw_output)
|
| 740 |
+
result = _build_result(item, pred, raw_output)
|
| 741 |
+
|
| 742 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 743 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 744 |
+
processed.add(item["video"])
|
| 745 |
+
gc.collect()
|
| 746 |
+
torch.cuda.empty_cache()
|
| 747 |
+
|
| 748 |
+
else:
|
| 749 |
+
todo = [it for it in test_data if it["video"] not in processed]
|
| 750 |
+
if not todo:
|
| 751 |
+
print(f"[resume] all {len(test_data)} samples already done — skipping model load")
|
| 752 |
+
omni = None
|
| 753 |
+
else:
|
| 754 |
+
omni = load_model(args.base_model, args.adapter)
|
| 755 |
+
|
| 756 |
+
for item in tqdm(test_data, desc="Evaluating", unit="sample"):
|
| 757 |
+
if item["video"] in processed:
|
| 758 |
+
continue
|
| 759 |
+
|
| 760 |
+
if not os.path.exists(item["video_path"]):
|
| 761 |
+
print(f" [skip] Video not found: {item['video_path']}")
|
| 762 |
+
continue
|
| 763 |
+
|
| 764 |
+
try:
|
| 765 |
+
raw_output = run_inference(
|
| 766 |
+
omni, item["video_path"], item["audio_path"],
|
| 767 |
+
args.max_new_tokens, args.temperature,
|
| 768 |
+
)
|
| 769 |
+
except Exception as exc:
|
| 770 |
+
import traceback
|
| 771 |
+
print(f" [error] {item['video']}: {exc}")
|
| 772 |
+
traceback.print_exc()
|
| 773 |
+
continue
|
| 774 |
+
if not raw_output:
|
| 775 |
+
print(f" [skip] empty output for {item['video']}; will retry next run")
|
| 776 |
+
continue
|
| 777 |
+
|
| 778 |
+
pred = _do_extract(raw_output)
|
| 779 |
+
result = _build_result(item, pred, raw_output)
|
| 780 |
+
|
| 781 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 782 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 783 |
+
|
| 784 |
+
processed.add(item["video"])
|
| 785 |
+
gc.collect()
|
| 786 |
+
torch.cuda.empty_cache()
|
| 787 |
+
|
| 788 |
+
all_results = []
|
| 789 |
+
if results_jsonl.exists():
|
| 790 |
+
with open(results_jsonl) as f:
|
| 791 |
+
for line in f:
|
| 792 |
+
all_results.append(json.loads(line))
|
| 793 |
+
|
| 794 |
+
metrics = compute_metrics(all_results)
|
| 795 |
+
metrics["eval_config"] = {
|
| 796 |
+
"base_model": args.base_model,
|
| 797 |
+
"adapter": args.adapter,
|
| 798 |
+
"data_root": str(args.data_root),
|
| 799 |
+
"test_jsonl": str(test_jsonl),
|
| 800 |
+
"total_test_samples": len(test_data),
|
| 801 |
+
"max_new_tokens": args.max_new_tokens,
|
| 802 |
+
"temperature": args.temperature,
|
| 803 |
+
"gpt_judge": args.gpt_judge,
|
| 804 |
+
"gpt_model": args.gpt_model if args.gpt_judge else None,
|
| 805 |
+
"vllm": args.vllm,
|
| 806 |
+
}
|
| 807 |
+
|
| 808 |
+
with open(metrics_json, "w", encoding="utf-8") as f:
|
| 809 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 810 |
+
|
| 811 |
+
print_summary(metrics, label)
|
| 812 |
+
|
| 813 |
+
with open(summary_txt, "w", encoding="utf-8") as f:
|
| 814 |
+
import io, contextlib
|
| 815 |
+
buf = io.StringIO()
|
| 816 |
+
with contextlib.redirect_stdout(buf):
|
| 817 |
+
print_summary(metrics, label)
|
| 818 |
+
f.write(buf.getvalue())
|
| 819 |
+
|
| 820 |
+
print(f"\n[output] Results JSONL: {results_jsonl}")
|
| 821 |
+
print(f"[output] Metrics JSON: {metrics_json}")
|
| 822 |
+
print(f"[output] Summary: {summary_txt}")
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
if __name__ == "__main__":
|
| 826 |
+
main()
|
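Note (illustrative, not part of the uploaded diff): the sync eval above appends one JSON object per line to eval_results.jsonl and skips already-processed videos on restart. A minimal standalone sketch of that resume pattern, assuming the same file layout and the "video" key used by the script, is:

# Sketch only: resume-from-JSONL pattern mirrored from eval_dpo_sync.py.
import json
from pathlib import Path

def load_processed(results_jsonl: Path) -> set:
    # Collect the video ids that already have a result row.
    done = set()
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                done.add(json.loads(line)["video"])
    return done

def append_result(results_jsonl: Path, result: dict) -> None:
    # One JSON object per line, appended immediately, so a crash
    # never loses or corrupts earlier rows.
    with open(results_jsonl, "a", encoding="utf-8") as f:
        f.write(json.dumps(result, ensure_ascii=False) + "\n")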
supp/eval_lvbench.py
ADDED
@@ -0,0 +1,632 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import argparse
|
| 5 |
+
import ctypes
|
| 6 |
+
import gc
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import site
|
| 11 |
+
import tempfile
|
| 12 |
+
import shutil
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from typing import Any, Dict, List, Optional
|
| 15 |
+
|
| 16 |
+
_npp_lib = Path(site.getsitepackages()[0]) / "nvidia" / "npp" / "lib"
|
| 17 |
+
_npp_so = _npp_lib / "libnppicc.so.12"
|
| 18 |
+
if _npp_so.is_file():
|
| 19 |
+
ctypes.CDLL(str(_npp_so), mode=ctypes.RTLD_GLOBAL)
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
|
| 24 |
+
DEFAULT_VIDEO_DIR = Path("./data/lvbench")
|
| 25 |
+
DEFAULT_OUTPUT_DIR = Path("./eval_results/lvbench")
|
| 26 |
+
|
| 27 |
+
VIDEO_TYPES = ["cartoon", "documentary", "live", "selfmedia", "sport", "tv"]
|
| 28 |
+
|
| 29 |
+
MCQ_PROMPT = (
|
| 30 |
+
"Select the best answer to the following multiple-choice question "
|
| 31 |
+
"based on the video. Respond with only the letter (A, B, C, or D) "
|
| 32 |
+
"of the correct option.\n"
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def parse_args() -> argparse.Namespace:
|
| 37 |
+
p = argparse.ArgumentParser(description="Evaluate on LVBench benchmark.")
|
| 38 |
+
p.add_argument("--base-model", type=str,
|
| 39 |
+
default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
|
| 40 |
+
p.add_argument("--adapter", type=str, default=None)
|
| 41 |
+
p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
|
| 42 |
+
p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
|
| 43 |
+
p.add_argument("--max-samples", type=int, default=-1)
|
| 44 |
+
p.add_argument("--max-new-tokens", type=int, default=32)
|
| 45 |
+
p.add_argument("--temperature", type=float, default=0.0)
|
| 46 |
+
p.add_argument("--label", type=str, default=None)
|
| 47 |
+
p.add_argument("--vllm", action="store_true", default=False)
|
| 48 |
+
p.add_argument("--tp", type=int, default=None)
|
| 49 |
+
p.add_argument("--batch-size", type=int, default=16)
|
| 50 |
+
p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
|
| 51 |
+
p.add_argument("--max-model-len", type=int, default=65536)
|
| 52 |
+
p.add_argument("--max-num-seqs", type=int, default=4)
|
| 53 |
+
p.add_argument("--vllm-batch-size", type=int, default=1)
|
| 54 |
+
p.add_argument("--enforce-eager", action="store_true", default=False)
|
| 55 |
+
p.add_argument("--shard", type=int, default=0)
|
| 56 |
+
p.add_argument("--num-shards", type=int, default=1)
|
| 57 |
+
p.add_argument("--merge-only", action="store_true", default=False)
|
| 58 |
+
return p.parse_args()
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def load_model(base_model: str, adapter: Optional[str]):
|
| 62 |
+
from omni_model_loading import load_qwen_omni_model
|
| 63 |
+
|
| 64 |
+
model, processor, _ = load_qwen_omni_model(base_model, adapter)
|
| 65 |
+
return model, processor
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def run_inference(model, processor, video_path: str, prompt: str,
|
| 69 |
+
max_new_tokens: int, temperature: float,
|
| 70 |
+
cached_mm: Optional[Dict[str, Any]] = None) -> str:
|
| 71 |
+
from qwen_omni_utils import process_mm_info
|
| 72 |
+
|
| 73 |
+
tmp_dir = tempfile.mkdtemp(prefix="eval_lvb_")
|
| 74 |
+
masked_video = os.path.join(tmp_dir, "clip.mp4")
|
| 75 |
+
os.symlink(os.path.abspath(video_path), masked_video)
|
| 76 |
+
|
| 77 |
+
conversation = [
|
| 78 |
+
{
|
| 79 |
+
"role": "user",
|
| 80 |
+
"content": [
|
| 81 |
+
{"type": "video", "video": masked_video},
|
| 82 |
+
{"type": "text", "text": prompt},
|
| 83 |
+
],
|
| 84 |
+
}
|
| 85 |
+
]
|
| 86 |
+
|
| 87 |
+
text = processor.apply_chat_template(
|
| 88 |
+
conversation, add_generation_prompt=True, tokenize=False,
|
| 89 |
+
)
|
| 90 |
+
if cached_mm is not None:
|
| 91 |
+
audios, images, videos = cached_mm["audios"], cached_mm["images"], cached_mm["videos"]
|
| 92 |
+
else:
|
| 93 |
+
audios, images, videos = process_mm_info(conversation, use_audio_in_video=True)
|
| 94 |
+
inputs = processor(
|
| 95 |
+
text=text, audio=audios, images=images, videos=videos,
|
| 96 |
+
return_tensors="pt", padding=True, use_audio_in_video=True,
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
model_dtype = next(model.parameters()).dtype
|
| 100 |
+
converted = {}
|
| 101 |
+
for k, v in inputs.items():
|
| 102 |
+
if hasattr(v, "to"):
|
| 103 |
+
v = v.to(model.device)
|
| 104 |
+
if torch.is_floating_point(v):
|
| 105 |
+
v = v.to(model_dtype)
|
| 106 |
+
converted[k] = v
|
| 107 |
+
inputs = converted
|
| 108 |
+
|
| 109 |
+
from omni_model_loading import is_omni_thinker_model
|
| 110 |
+
|
| 111 |
+
is_thinker = is_omni_thinker_model(model)
|
| 112 |
+
if is_thinker:
|
| 113 |
+
gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
|
| 114 |
+
else:
|
| 115 |
+
gen_kwargs = {
|
| 116 |
+
"thinker_max_new_tokens": max_new_tokens,
|
| 117 |
+
"use_audio_in_video": True,
|
| 118 |
+
"return_audio": False,
|
| 119 |
+
"do_sample": temperature > 0,
|
| 120 |
+
}
|
| 121 |
+
if temperature > 0:
|
| 122 |
+
gen_kwargs["temperature"] = temperature
|
| 123 |
+
gen_kwargs["top_p"] = 0.9
|
| 124 |
+
|
| 125 |
+
with torch.inference_mode():
|
| 126 |
+
output_ids = model.generate(**inputs, **gen_kwargs)
|
| 127 |
+
|
| 128 |
+
if isinstance(output_ids, tuple):
|
| 129 |
+
output_ids = output_ids[0]
|
| 130 |
+
|
| 131 |
+
prompt_len = inputs["input_ids"].shape[1]
|
| 132 |
+
response = processor.batch_decode(
|
| 133 |
+
output_ids[:, prompt_len:], skip_special_tokens=True,
|
| 134 |
+
)[0].strip()
|
| 135 |
+
|
| 136 |
+
shutil.rmtree(tmp_dir, ignore_errors=True)
|
| 137 |
+
return response
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def preprocess_video_for_vllm(video_path: str):
|
| 141 |
+
from qwen_omni_utils import process_mm_info
|
| 142 |
+
import numpy as np
|
| 143 |
+
|
| 144 |
+
messages = [{
|
| 145 |
+
"role": "user",
|
| 146 |
+
"content": [
|
| 147 |
+
{"type": "video", "video": video_path, "fps": 2.0, "max_frames": 128},
|
| 148 |
+
{"type": "text", "text": "placeholder"},
|
| 149 |
+
],
|
| 150 |
+
}]
|
| 151 |
+
audios, images, videos = process_mm_info(messages, use_audio_in_video=True)
|
| 152 |
+
video_tensor = videos[0]
|
| 153 |
+
video_np = (video_tensor * 255).byte().numpy()
|
| 154 |
+
audio_tuple = None
|
| 155 |
+
if audios:
|
| 156 |
+
aud = audios[0]
|
| 157 |
+
if isinstance(aud, tuple):
|
| 158 |
+
audio_tuple = (aud[0].numpy() if hasattr(aud[0], "numpy") else np.asarray(aud[0]),
|
| 159 |
+
aud[1])
|
| 160 |
+
elif hasattr(aud, "numpy"):
|
| 161 |
+
audio_tuple = (aud.numpy(), 16000)
|
| 162 |
+
else:
|
| 163 |
+
audio_tuple = (np.asarray(aud), 16000)
|
| 164 |
+
return video_np, audio_tuple
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
SYSTEM_PROMPT = (
|
| 168 |
+
"You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
|
| 169 |
+
"Group, capable of perceiving auditory and visual inputs, as well as "
|
| 170 |
+
"generating text and speech."
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def build_vllm_prompt(question: str, base_model: str) -> str:
|
| 175 |
+
from omni_model_loading import vllm_user_mm_prefix
|
| 176 |
+
|
| 177 |
+
mm = vllm_user_mm_prefix(base_model, include_audio=True)
|
| 178 |
+
return (
|
| 179 |
+
f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
|
| 180 |
+
f"<|im_start|>user\n"
|
| 181 |
+
f"{mm}"
|
| 182 |
+
f"{question}<|im_end|>\n"
|
| 183 |
+
f"<|im_start|>assistant\n"
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def extract_answer(text: str) -> str:
|
| 188 |
+
text = text.strip()
|
| 189 |
+
prefixes = [
|
| 190 |
+
"The best answer is", "The correct answer is",
|
| 191 |
+
"The answer is", "The answer", "Best answer:", "Best option:",
|
| 192 |
+
]
|
| 193 |
+
for prefix in prefixes:
|
| 194 |
+
text = text.replace(prefix, "")
|
| 195 |
+
|
| 196 |
+
if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
|
| 197 |
+
return ""
|
| 198 |
+
m = re.search(r"[ABCD]", text)
|
| 199 |
+
return m[0] if m else ""
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def load_lvbench(video_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
|
| 203 |
+
from datasets import load_dataset
|
| 204 |
+
ds = load_dataset("lmms-lab/LVBench", split="train")
|
| 205 |
+
data = []
|
| 206 |
+
skipped = 0
|
| 207 |
+
for row in ds:
|
| 208 |
+
vid = row["key"]
|
| 209 |
+
video_path = video_dir / f"{vid}.mp4"
|
| 210 |
+
if not video_path.exists():
|
| 211 |
+
skipped += 1
|
| 212 |
+
continue
|
| 213 |
+
|
| 214 |
+
prompt = MCQ_PROMPT + row["question"] + "\nThe best answer is:"
|
| 215 |
+
|
| 216 |
+
data.append({
|
| 217 |
+
"uid": row["uid"],
|
| 218 |
+
"video_id": vid,
|
| 219 |
+
"video_path": str(video_path),
|
| 220 |
+
"video_type": row["type"],
|
| 221 |
+
"question_type": row["question_type"],
|
| 222 |
+
"question": row["question"],
|
| 223 |
+
"gt_answer": row["answer"],
|
| 224 |
+
"time_reference": row.get("time_reference", ""),
|
| 225 |
+
"prompt": prompt,
|
| 226 |
+
})
|
| 227 |
+
if skipped:
|
| 228 |
+
print(f"[data] Skipped {skipped} questions (video not found)")
|
| 229 |
+
if max_samples > 0:
|
| 230 |
+
data = data[:max_samples]
|
| 231 |
+
return data
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 235 |
+
total = len(results)
|
| 236 |
+
if total == 0:
|
| 237 |
+
return {}
|
| 238 |
+
|
| 239 |
+
correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
|
| 240 |
+
overall_acc = correct / total
|
| 241 |
+
|
| 242 |
+
def acc_for(items):
|
| 243 |
+
if not items:
|
| 244 |
+
return None
|
| 245 |
+
c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
|
| 246 |
+
return round(c / len(items), 4)
|
| 247 |
+
|
| 248 |
+
per_type = {}
|
| 249 |
+
for vt in VIDEO_TYPES:
|
| 250 |
+
subset = [r for r in results if r["video_type"] == vt]
|
| 251 |
+
if subset:
|
| 252 |
+
per_type[vt] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 253 |
+
|
| 254 |
+
q_types = set()
|
| 255 |
+
for r in results:
|
| 256 |
+
if isinstance(r.get("question_type"), list):
|
| 257 |
+
q_types.update(r["question_type"])
|
| 258 |
+
elif r.get("question_type"):
|
| 259 |
+
q_types.add(r["question_type"])
|
| 260 |
+
|
| 261 |
+
per_qtype = {}
|
| 262 |
+
for qt in sorted(q_types):
|
| 263 |
+
subset = [r for r in results if qt in (r.get("question_type", [])
|
| 264 |
+
if isinstance(r.get("question_type"), list) else [r.get("question_type")])]
|
| 265 |
+
if subset:
|
| 266 |
+
per_qtype[qt] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 267 |
+
|
| 268 |
+
return {
|
| 269 |
+
"total_samples": total,
|
| 270 |
+
"overall_accuracy": round(overall_acc, 4),
|
| 271 |
+
"per_video_type": per_type,
|
| 272 |
+
"per_question_type": per_qtype,
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def print_summary(metrics: Dict[str, Any], label: str) -> None:
|
| 277 |
+
print()
|
| 278 |
+
print(f"{'=' * 65}")
|
| 279 |
+
print(f" LVBench Summary: {label}")
|
| 280 |
+
print(f"{'=' * 65}")
|
| 281 |
+
print(f" Total samples: {metrics['total_samples']}")
|
| 282 |
+
print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")
|
| 283 |
+
|
| 284 |
+
print(f" ─── Per Video Type ───")
|
| 285 |
+
for vt in VIDEO_TYPES:
|
| 286 |
+
if vt in metrics.get("per_video_type", {}):
|
| 287 |
+
d = metrics["per_video_type"][vt]
|
| 288 |
+
print(f" {vt:15s}: {d['accuracy']:.1%} ({d['count']} questions)")
|
| 289 |
+
|
| 290 |
+
print(f" ─── Per Question Type ───")
|
| 291 |
+
for qt, d in sorted(metrics.get("per_question_type", {}).items()):
|
| 292 |
+
print(f" {qt:30s}: {d['accuracy']:.1%} ({d['count']})")
|
| 293 |
+
|
| 294 |
+
print(f"{'=' * 65}")
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def _load_processed_uids(out_dir: Path) -> set:
|
| 298 |
+
processed: set = set()
|
| 299 |
+
for p in sorted(out_dir.glob("eval_results*.jsonl")):
|
| 300 |
+
try:
|
| 301 |
+
with open(p) as f:
|
| 302 |
+
for line in f:
|
| 303 |
+
try:
|
| 304 |
+
processed.add(json.loads(line)["uid"])
|
| 305 |
+
except Exception:
|
| 306 |
+
continue
|
| 307 |
+
except FileNotFoundError:
|
| 308 |
+
continue
|
| 309 |
+
return processed
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def _finalize_metrics(out_dir: Path, label: str, args: argparse.Namespace,
|
| 313 |
+
vllm_preprocess_stats: Optional[Dict[str, int]] = None) -> None:
|
| 314 |
+
results_by_uid: Dict[str, Dict[str, Any]] = {}
|
| 315 |
+
source_files = sorted(out_dir.glob("eval_results*.jsonl"))
|
| 316 |
+
for p in source_files:
|
| 317 |
+
with open(p) as f:
|
| 318 |
+
for line in f:
|
| 319 |
+
try:
|
| 320 |
+
obj = json.loads(line)
|
| 321 |
+
except Exception:
|
| 322 |
+
continue
|
| 323 |
+
results_by_uid[obj["uid"]] = obj
|
| 324 |
+
|
| 325 |
+
if not results_by_uid:
|
| 326 |
+
print("[warn] No results to compute metrics from.")
|
| 327 |
+
return
|
| 328 |
+
|
| 329 |
+
all_results = list(results_by_uid.values())
|
| 330 |
+
merged_jsonl = out_dir / "eval_results.jsonl"
|
| 331 |
+
with open(merged_jsonl, "w", encoding="utf-8") as f:
|
| 332 |
+
for r in all_results:
|
| 333 |
+
f.write(json.dumps(r, ensure_ascii=False) + "\n")
|
| 334 |
+
print(f"[merge] Wrote {len(all_results)} unique results to {merged_jsonl} "
|
| 335 |
+
f"(merged {len(source_files)} source file(s)).")
|
| 336 |
+
|
| 337 |
+
metrics = compute_metrics(all_results)
|
| 338 |
+
metrics["eval_config"] = {
|
| 339 |
+
"base_model": args.base_model,
|
| 340 |
+
"adapter": args.adapter,
|
| 341 |
+
"video_dir": str(args.video_dir),
|
| 342 |
+
"max_new_tokens": args.max_new_tokens,
|
| 343 |
+
"temperature": args.temperature,
|
| 344 |
+
"vllm": bool(args.vllm),
|
| 345 |
+
"max_num_seqs": args.max_num_seqs,
|
| 346 |
+
"vllm_batch_size": args.vllm_batch_size,
|
| 347 |
+
"max_model_len": args.max_model_len,
|
| 348 |
+
"num_shards": args.num_shards,
|
| 349 |
+
}
|
| 350 |
+
if vllm_preprocess_stats is not None:
|
| 351 |
+
metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats
|
| 352 |
+
|
| 353 |
+
metrics_json = out_dir / "metrics.json"
|
| 354 |
+
summary_txt = out_dir / "summary.txt"
|
| 355 |
+
with open(metrics_json, "w", encoding="utf-8") as f:
|
| 356 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 357 |
+
|
| 358 |
+
print_summary(metrics, label)
|
| 359 |
+
|
| 360 |
+
with open(summary_txt, "w", encoding="utf-8") as f:
|
| 361 |
+
import io, contextlib
|
| 362 |
+
buf = io.StringIO()
|
| 363 |
+
with contextlib.redirect_stdout(buf):
|
| 364 |
+
print_summary(metrics, label)
|
| 365 |
+
f.write(buf.getvalue())
|
| 366 |
+
|
| 367 |
+
print(f"\n[output] Results: {merged_jsonl}")
|
| 368 |
+
print(f"[output] Metrics: {metrics_json}")
|
| 369 |
+
print(f"[output] Summary: {summary_txt}")
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def main() -> None:
|
| 373 |
+
args = parse_args()
|
| 374 |
+
label = args.label or (
|
| 375 |
+
Path(args.adapter).name if args.adapter
|
| 376 |
+
else Path(args.base_model).name
|
| 377 |
+
)
|
| 378 |
+
|
| 379 |
+
out_dir = args.output_dir / label
|
| 380 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 381 |
+
|
| 382 |
+
if args.num_shards < 1 or args.shard < 0 or args.shard >= args.num_shards:
|
| 383 |
+
raise SystemExit(f"Invalid --shard {args.shard} / --num-shards {args.num_shards}")
|
| 384 |
+
is_sharded = args.num_shards > 1
|
| 385 |
+
shard_tag = f".shard{args.shard}of{args.num_shards}" if is_sharded else ""
|
| 386 |
+
results_jsonl = out_dir / f"eval_results{shard_tag}.jsonl"
|
| 387 |
+
|
| 388 |
+
if args.merge_only:
|
| 389 |
+
print(f"[merge-only] out_dir={out_dir}")
|
| 390 |
+
_finalize_metrics(out_dir, label, args)
|
| 391 |
+
return
|
| 392 |
+
|
| 393 |
+
print("[data] Loading LVBench dataset...")
|
| 394 |
+
test_data = load_lvbench(args.video_dir, args.max_samples)
|
| 395 |
+
print(f"[data] {len(test_data)} total questions")
|
| 396 |
+
|
| 397 |
+
if is_sharded:
|
| 398 |
+
shard_data = [x for i, x in enumerate(test_data)
|
| 399 |
+
if i % args.num_shards == args.shard]
|
| 400 |
+
print(f"[shard] shard={args.shard}/{args.num_shards} -> "
|
| 401 |
+
f"{len(shard_data)} questions in this shard")
|
| 402 |
+
test_data = shard_data
|
| 403 |
+
|
| 404 |
+
processed = _load_processed_uids(out_dir)
|
| 405 |
+
if processed:
|
| 406 |
+
print(f"[resume] {len(processed)} uids already processed across all "
|
| 407 |
+
f"eval_results*.jsonl under {out_dir}")
|
| 408 |
+
|
| 409 |
+
use_vllm = args.vllm
|
| 410 |
+
model = processor = llm = None
|
| 411 |
+
vllm_preprocess_stats: Dict[str, int] | None = None
|
| 412 |
+
|
| 413 |
+
if use_vllm:
|
| 414 |
+
from vllm import LLM, SamplingParams
|
| 415 |
+
tp = args.tp or torch.cuda.device_count()
|
| 416 |
+
model_path = args.base_model
|
| 417 |
+
|
| 418 |
+
print(f"[vllm] Preprocessing videos (before model load) ...")
|
| 419 |
+
todo = [item for item in test_data if item["uid"] not in processed]
|
| 420 |
+
unique_videos = list(dict.fromkeys(item["video_path"] for item in todo))
|
| 421 |
+
from omni_model_loading import parallel_preprocess_videos
|
| 422 |
+
preprocessed, preprocessed_audio, preprocess_failed_paths = parallel_preprocess_videos(
|
| 423 |
+
unique_videos, preprocess_video_for_vllm,
|
| 424 |
+
)
|
| 425 |
+
|
| 426 |
+
n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
|
| 427 |
+
if preprocess_failed_paths:
|
| 428 |
+
print(
|
| 429 |
+
f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
|
| 430 |
+
f"{n_pp_skip} question(s) will not use vLLM (run continues)."
|
| 431 |
+
)
|
| 432 |
+
vllm_preprocess_stats = {
|
| 433 |
+
"preprocess_failed_videos": len(preprocess_failed_paths),
|
| 434 |
+
"preprocess_skipped_questions": n_pp_skip,
|
| 435 |
+
}
|
| 436 |
+
|
| 437 |
+
from omni_model_loading import cap_vllm_max_model_len
|
| 438 |
+
|
| 439 |
+
vllm_max_len = cap_vllm_max_model_len(model_path, args.max_model_len)
|
| 440 |
+
print(f"[vllm] Loading {model_path} with tp={tp} "
|
| 441 |
+
f"(max_num_seqs={args.max_num_seqs}, max_model_len={vllm_max_len}) ...")
|
| 442 |
+
llm_kwargs = dict(
|
| 443 |
+
model=model_path,
|
| 444 |
+
tensor_parallel_size=tp,
|
| 445 |
+
max_model_len=vllm_max_len,
|
| 446 |
+
max_num_seqs=args.max_num_seqs,
|
| 447 |
+
gpu_memory_utilization=args.gpu_memory_utilization,
|
| 448 |
+
dtype="bfloat16",
|
| 449 |
+
trust_remote_code=True,
|
| 450 |
+
limit_mm_per_prompt={"video": 1, "audio": 1},
|
| 451 |
+
enforce_eager=args.enforce_eager,
|
| 452 |
+
)
|
| 453 |
+
llm = LLM(**llm_kwargs)
|
| 454 |
+
sampling_params = SamplingParams(
|
| 455 |
+
temperature=args.temperature if args.temperature > 0 else 0.0,
|
| 456 |
+
top_p=0.9 if args.temperature > 0 else 1.0,
|
| 457 |
+
max_tokens=args.max_new_tokens,
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
|
| 461 |
+
fallback_items = []
|
| 462 |
+
print(f"[vllm] {len(vllm_todo)} questions ready, running inference "
|
| 463 |
+
f"(batch={args.vllm_batch_size}) ...")
|
| 464 |
+
|
| 465 |
+
def _write_result(item: Dict[str, Any], raw_output: str) -> None:
|
| 466 |
+
pred = extract_answer(raw_output)
|
| 467 |
+
result = {
|
| 468 |
+
"uid": item["uid"],
|
| 469 |
+
"video_id": item["video_id"],
|
| 470 |
+
"video_type": item["video_type"],
|
| 471 |
+
"question_type": item["question_type"],
|
| 472 |
+
"gt_answer": item["gt_answer"],
|
| 473 |
+
"pred_answer": pred,
|
| 474 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 475 |
+
"raw_output": raw_output,
|
| 476 |
+
}
|
| 477 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 478 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 479 |
+
processed.add(item["uid"])
|
| 480 |
+
|
| 481 |
+
def _build_inp(item: Dict[str, Any]) -> Dict[str, Any]:
|
| 482 |
+
inp = {
|
| 483 |
+
"prompt": build_vllm_prompt(item["prompt"], args.base_model),
|
| 484 |
+
"multi_modal_data": {"video": preprocessed[item["video_path"]]},
|
| 485 |
+
}
|
| 486 |
+
if item["video_path"] in preprocessed_audio:
|
| 487 |
+
inp["multi_modal_data"]["audio"] = preprocessed_audio[item["video_path"]]
|
| 488 |
+
return inp
|
| 489 |
+
|
| 490 |
+
def _flush(batch: List[Dict[str, Any]]) -> None:
|
| 491 |
+
if not batch:
|
| 492 |
+
return
|
| 493 |
+
inps = [b["inp"] for b in batch]
|
| 494 |
+
try:
|
| 495 |
+
outs = llm.generate(inps, sampling_params=sampling_params)
|
| 496 |
+
for b, o in zip(batch, outs):
|
| 497 |
+
_write_result(b["item"], o.outputs[0].text.strip())
|
| 498 |
+
return
|
| 499 |
+
except (ValueError, RuntimeError) as exc:
|
| 500 |
+
if "longer than the maximum model length" not in str(exc):
|
| 501 |
+
raise
|
| 502 |
+
for b in batch:
|
| 503 |
+
try:
|
| 504 |
+
outs = llm.generate([b["inp"]], sampling_params=sampling_params)
|
| 505 |
+
_write_result(b["item"], outs[0].outputs[0].text.strip())
|
| 506 |
+
except (ValueError, RuntimeError) as exc2:
|
| 507 |
+
if "longer than the maximum model length" in str(exc2):
|
| 508 |
+
print(f" [too long] {b['item']['uid']} -> fallback")
|
| 509 |
+
fallback_items.append(b["item"])
|
| 510 |
+
else:
|
| 511 |
+
raise
|
| 512 |
+
|
| 513 |
+
batch: List[Dict[str, Any]] = []
|
| 514 |
+
for i, item in enumerate(vllm_todo):
|
| 515 |
+
if item["uid"] in processed:
|
| 516 |
+
continue
|
| 517 |
+
batch.append({"inp": _build_inp(item), "item": item})
|
| 518 |
+
if len(batch) >= max(1, args.vllm_batch_size):
|
| 519 |
+
_flush(batch)
|
| 520 |
+
batch = []
|
| 521 |
+
if (i + 1) % 50 == 0:
|
| 522 |
+
print(f" [vllm] [{i+1}/{len(vllm_todo)}] submitted, "
|
| 523 |
+
f"{len(fallback_items)} deferred")
|
| 524 |
+
_flush(batch)
|
| 525 |
+
|
| 526 |
+
preprocessed.clear()
|
| 527 |
+
preprocessed_audio.clear()
|
| 528 |
+
|
| 529 |
+
if fallback_items:
|
| 530 |
+
print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
|
| 531 |
+
del llm
|
| 532 |
+
gc.collect()
|
| 533 |
+
torch.cuda.empty_cache()
|
| 534 |
+
|
| 535 |
+
fallback_items.sort(key=lambda it: it["video_path"])
|
| 536 |
+
|
| 537 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 538 |
+
last_vp: Optional[str] = None
|
| 539 |
+
cached_mm: Optional[Dict[str, Any]] = None
|
| 540 |
+
for item in tqdm(fallback_items, desc="Fallback", unit="q"):
|
| 541 |
+
if item["uid"] in processed:
|
| 542 |
+
continue
|
| 543 |
+
if item["video_path"] != last_vp:
|
| 544 |
+
cached_mm = None
|
| 545 |
+
last_vp = item["video_path"]
|
| 546 |
+
try:
|
| 547 |
+
if cached_mm is None:
|
| 548 |
+
from qwen_omni_utils import process_mm_info as _pmi
|
| 549 |
+
tmp_conv = [{"role": "user", "content": [
|
| 550 |
+
{"type": "video", "video": item["video_path"]},
|
| 551 |
+
{"type": "text", "text": item["prompt"]},
|
| 552 |
+
]}]
|
| 553 |
+
a, im, v = _pmi(tmp_conv, use_audio_in_video=True)
|
| 554 |
+
cached_mm = {"audios": a, "images": im, "videos": v}
|
| 555 |
+
raw_output = run_inference(
|
| 556 |
+
model, processor, item["video_path"], item["prompt"],
|
| 557 |
+
args.max_new_tokens, args.temperature,
|
| 558 |
+
cached_mm=cached_mm,
|
| 559 |
+
)
|
| 560 |
+
except Exception as exc:
|
| 561 |
+
import traceback
|
| 562 |
+
print(f" [error] {item['uid']}: {exc}")
|
| 563 |
+
traceback.print_exc()
|
| 564 |
+
raw_output = ""
|
| 565 |
+
cached_mm = None
|
| 566 |
+
|
| 567 |
+
pred = extract_answer(raw_output)
|
| 568 |
+
result = {
|
| 569 |
+
"uid": item["uid"],
|
| 570 |
+
"video_id": item["video_id"],
|
| 571 |
+
"video_type": item["video_type"],
|
| 572 |
+
"question_type": item["question_type"],
|
| 573 |
+
"gt_answer": item["gt_answer"],
|
| 574 |
+
"pred_answer": pred,
|
| 575 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 576 |
+
"raw_output": raw_output,
|
| 577 |
+
}
|
| 578 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 579 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 580 |
+
processed.add(item["uid"])
|
| 581 |
+
gc.collect()
|
| 582 |
+
torch.cuda.empty_cache()
|
| 583 |
+
|
| 584 |
+
else:
|
| 585 |
+
print("[model] Loading model...")
|
| 586 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 587 |
+
|
| 588 |
+
for item in tqdm(test_data, desc="LVBench", unit="q"):
|
| 589 |
+
if item["uid"] in processed:
|
| 590 |
+
continue
|
| 591 |
+
|
| 592 |
+
try:
|
| 593 |
+
raw_output = run_inference(
|
| 594 |
+
model, processor, item["video_path"], item["prompt"],
|
| 595 |
+
args.max_new_tokens, args.temperature,
|
| 596 |
+
)
|
| 597 |
+
except Exception as exc:
|
| 598 |
+
import traceback
|
| 599 |
+
print(f" [error] {item['uid']}: {exc}")
|
| 600 |
+
traceback.print_exc()
|
| 601 |
+
raw_output = ""
|
| 602 |
+
|
| 603 |
+
pred = extract_answer(raw_output)
|
| 604 |
+
|
| 605 |
+
result = {
|
| 606 |
+
"uid": item["uid"],
|
| 607 |
+
"video_id": item["video_id"],
|
| 608 |
+
"video_type": item["video_type"],
|
| 609 |
+
"question_type": item["question_type"],
|
| 610 |
+
"gt_answer": item["gt_answer"],
|
| 611 |
+
"pred_answer": pred,
|
| 612 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 613 |
+
"raw_output": raw_output,
|
| 614 |
+
}
|
| 615 |
+
|
| 616 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 617 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 618 |
+
|
| 619 |
+
processed.add(item["uid"])
|
| 620 |
+
gc.collect()
|
| 621 |
+
torch.cuda.empty_cache()
|
| 622 |
+
|
| 623 |
+
if is_sharded:
|
| 624 |
+
print(f"[shard {args.shard}/{args.num_shards}] finished inference. "
|
| 625 |
+
f"Run `--merge-only` after all shards complete to produce final metrics.")
|
| 626 |
+
return
|
| 627 |
+
|
| 628 |
+
_finalize_metrics(out_dir, label, args, vllm_preprocess_stats)
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
if __name__ == "__main__":
|
| 632 |
+
main()
|
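Note (illustrative, not part of the uploaded diff): eval_lvbench.py splits questions across shards with a round-robin rule and later merges the per-shard JSONL files by uid under --merge-only. A minimal sketch of both steps, assuming the eval_results*.jsonl naming and "uid" key used above, is:

# Sketch only: shard selection and uid-keyed merge mirrored from eval_lvbench.py.
import json
from pathlib import Path

def shard_items(items, shard: int, num_shards: int):
    # Round-robin split, matching the i % num_shards == shard rule.
    return [x for i, x in enumerate(items) if i % num_shards == shard]

def merge_shards(out_dir: Path) -> list:
    # Later files win on duplicate uids, as in _finalize_metrics().
    by_uid = {}
    for p in sorted(out_dir.glob("eval_results*.jsonl")):
        with open(p) as f:
            for line in f:
                obj = json.loads(line)
                by_uid[obj["uid"]] = obj
    return list(by_uid.values())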
supp/eval_vggsoundsync.py
ADDED
@@ -0,0 +1,777 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import argparse
|
| 5 |
+
import ctypes
|
| 6 |
+
import gc
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
|
| 11 |
+
if not (os.environ.get("FORCE_QWENVL_VIDEO_READER") or "").strip():
|
| 12 |
+
os.environ["FORCE_QWENVL_VIDEO_READER"] = "torchvision"
|
| 13 |
+
if not (os.environ.get("AV_LOG_LEVEL") or "").strip():
|
| 14 |
+
os.environ["AV_LOG_LEVEL"] = "quiet"
|
| 15 |
+
import site
|
| 16 |
+
import tempfile
|
| 17 |
+
import shutil
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from statistics import mean, median
|
| 20 |
+
from typing import Any, Dict, List, Optional
|
| 21 |
+
|
| 22 |
+
_npp_lib = Path(site.getsitepackages()[0]) / "nvidia" / "npp" / "lib"
|
| 23 |
+
_npp_so = _npp_lib / "libnppicc.so.12"
|
| 24 |
+
if _npp_so.is_file():
|
| 25 |
+
ctypes.CDLL(str(_npp_so), mode=ctypes.RTLD_GLOBAL)
|
| 26 |
+
|
| 27 |
+
import torch
|
| 28 |
+
from tqdm import tqdm
|
| 29 |
+
|
| 30 |
+
DEFAULT_OUTPUT_DIR = Path("./eval_results/vggsoundsync")
|
| 31 |
+
|
| 32 |
+
_openai_client = None
|
| 33 |
+
|
| 34 |
+
GPT_JUDGE_SYSTEM = """\
|
| 35 |
+
You are a structured-output extractor. The user will give you a model's free-text \
|
| 36 |
+
response about audio-video synchronization. Extract the following fields and return \
|
| 37 |
+
ONLY valid JSON (no markdown, no explanation):
|
| 38 |
+
|
| 39 |
+
{"synced": <bool>, "direction": "none"|"delay"|"early", "offset_sec": <float>}
|
| 40 |
+
|
| 41 |
+
Rules:
|
| 42 |
+
- synced: true if the model says audio and video are synchronized, false otherwise.
|
| 43 |
+
- direction: "delay" means audio comes AFTER the visual event; "early" means audio \
|
| 44 |
+
comes BEFORE the visual event; "none" if synced is true.
|
| 45 |
+
- offset_sec: estimated time gap in seconds. 0.0 if synced.
|
| 46 |
+
- If you cannot determine a field, use the default (true / "none" / 0.0).
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _get_openai_client(api_key: Optional[str] = None):
|
| 51 |
+
global _openai_client
|
| 52 |
+
if _openai_client is not None:
|
| 53 |
+
return _openai_client
|
| 54 |
+
key = api_key or os.environ.get("OPENAI_API_KEY")
|
| 55 |
+
if not key:
|
| 56 |
+
return None
|
| 57 |
+
from openai import OpenAI
|
| 58 |
+
_openai_client = OpenAI(api_key=key)
|
| 59 |
+
return _openai_client
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def gpt_extract_prediction(raw_output: str, api_key: Optional[str] = None,
|
| 63 |
+
model: str = "gpt-5.4") -> Optional[Dict[str, Any]]:
|
| 64 |
+
client = _get_openai_client(api_key)
|
| 65 |
+
if client is None:
|
| 66 |
+
return None
|
| 67 |
+
try:
|
| 68 |
+
resp = client.chat.completions.create(
|
| 69 |
+
model=model,
|
| 70 |
+
messages=[
|
| 71 |
+
{"role": "system", "content": GPT_JUDGE_SYSTEM},
|
| 72 |
+
{"role": "user", "content": raw_output},
|
| 73 |
+
],
|
| 74 |
+
temperature=0.0,
|
| 75 |
+
max_completion_tokens=200,
|
| 76 |
+
)
|
| 77 |
+
text = resp.choices[0].message.content.strip()
|
| 78 |
+
for pat in [
|
| 79 |
+
re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
|
| 80 |
+
re.compile(r"(\{.*?\})", re.DOTALL),
|
| 81 |
+
]:
|
| 82 |
+
m = pat.search(text)
|
| 83 |
+
if m:
|
| 84 |
+
obj = json.loads(m.group(1))
|
| 85 |
+
synced = obj.get("synced")
|
| 86 |
+
if isinstance(synced, str):
|
| 87 |
+
synced = synced.lower() in ("true", "yes", "1")
|
| 88 |
+
direction = str(obj.get("direction", "none")).lower().strip()
|
| 89 |
+
if direction not in ("delay", "early", "none"):
|
| 90 |
+
direction = "none"
|
| 91 |
+
return {
|
| 92 |
+
"pred_synced": bool(synced),
|
| 93 |
+
"pred_direction": direction,
|
| 94 |
+
"pred_offset_sec": float(obj.get("offset_sec", 0.0)),
|
| 95 |
+
"parse_method": "gpt_judge",
|
| 96 |
+
}
|
| 97 |
+
except Exception as exc:
|
| 98 |
+
print(f" [gpt-judge] API error: {exc}", flush=True)
|
| 99 |
+
return None
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
MCQ_PROMPT = """\
|
| 103 |
+
Watch this video and listen to its audio carefully.
|
| 104 |
+
Determine the synchronization status between the audio and video tracks.
|
| 105 |
+
Select the best answer:
|
| 106 |
+
|
| 107 |
+
A) The audio and video are synchronized.
|
| 108 |
+
B) The audio is delayed (comes after the visual event).
|
| 109 |
+
C) The audio is early (comes before the visual event).
|
| 110 |
+
|
| 111 |
+
Answer with only the letter (A, B, or C)."""
|
| 112 |
+
|
| 113 |
+
MCQ_PROMPT_SHUFFLED = """\
|
| 114 |
+
Watch this video and listen to its audio carefully.
|
| 115 |
+
Determine the synchronization status between the audio and video tracks.
|
| 116 |
+
Select the best answer:
|
| 117 |
+
|
| 118 |
+
A) The audio is early (comes before the visual event).
|
| 119 |
+
B) The audio and video are synchronized.
|
| 120 |
+
C) The audio is delayed (comes after the visual event).
|
| 121 |
+
|
| 122 |
+
Answer with only the letter (A, B, or C)."""
|
| 123 |
+
|
| 124 |
+
FREETEXT_PROMPT = """\
|
| 125 |
+
Watch this video and listen to its audio carefully. \
|
| 126 |
+
Determine whether the audio and video tracks are synchronized. \
|
| 127 |
+
If they are not synchronized, identify the direction of the offset \
|
| 128 |
+
(audio delayed or audio early relative to video) and estimate the offset in seconds. \
|
| 129 |
+
Explain your reasoning."""
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def parse_args() -> argparse.Namespace:
|
| 133 |
+
p = argparse.ArgumentParser(description="Evaluate on VGG-Sound Sync (out-of-domain sync).")
|
| 134 |
+
p.add_argument("--base-model", type=str, required=True)
|
| 135 |
+
p.add_argument("--adapter", type=str, default=None)
|
| 136 |
+
p.add_argument("--test-jsonl", type=Path, required=True)
|
| 137 |
+
p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
|
| 138 |
+
p.add_argument("--mode", choices=["mcq", "freetext"], default="mcq")
|
| 139 |
+
p.add_argument("--max-samples", type=int, default=-1)
|
| 140 |
+
p.add_argument("--max-new-tokens", type=int, default=64)
|
| 141 |
+
p.add_argument("--temperature", type=float, default=0.0)
|
| 142 |
+
p.add_argument("--label", type=str, default=None)
|
| 143 |
+
p.add_argument("--gpt-judge", action="store_true", default=False)
|
| 144 |
+
p.add_argument("--openai-api-key", type=str, default=None)
|
| 145 |
+
p.add_argument("--gpt-model", type=str, default="gpt-5.4")
|
| 146 |
+
p.add_argument("--shuffle-mcq", action="store_true", default=False)
|
| 147 |
+
p.add_argument("--vllm", action="store_true", default=False)
|
| 148 |
+
p.add_argument("--tp", type=int, default=None)
|
| 149 |
+
p.add_argument("--batch-size", type=int, default=16)
|
| 150 |
+
p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
|
| 151 |
+
p.add_argument("--max-model-len", type=int, default=65536)
|
| 152 |
+
return p.parse_args()
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def load_test_data(path: Path, max_samples: int) -> List[Dict[str, Any]]:
|
| 156 |
+
base = path.parent.resolve()
|
| 157 |
+
data = []
|
| 158 |
+
with open(path) as f:
|
| 159 |
+
for line in f:
|
| 160 |
+
line = line.strip()
|
| 161 |
+
if not line:
|
| 162 |
+
continue
|
| 163 |
+
obj = json.loads(line)
|
| 164 |
+
for key in ("video_path", "audio_path"):
|
| 165 |
+
if key not in obj or not obj[key]:
|
| 166 |
+
continue
|
| 167 |
+
p = Path(obj[key])
|
| 168 |
+
if not p.is_absolute():
|
| 169 |
+
obj[key] = str((base / p).resolve())
|
| 170 |
+
data.append(obj)
|
| 171 |
+
if max_samples > 0:
|
| 172 |
+
data = data[:max_samples]
|
| 173 |
+
return data
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
SYSTEM_PROMPT = (
|
| 177 |
+
"You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
|
| 178 |
+
"Group, capable of perceiving auditory and visual inputs, as well as "
|
| 179 |
+
"generating text and speech."
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def preprocess_video_for_vllm(video_path: str):
|
| 184 |
+
from qwen_omni_utils import process_mm_info
|
| 185 |
+
import numpy as np
|
| 186 |
+
|
| 187 |
+
messages = [{
|
| 188 |
+
"role": "user",
|
| 189 |
+
"content": [
|
| 190 |
+
{"type": "video", "video": video_path, "fps": 2.0, "max_frames": 128},
|
| 191 |
+
{"type": "text", "text": "placeholder"},
|
| 192 |
+
],
|
| 193 |
+
}]
|
| 194 |
+
audios, images, videos = process_mm_info(messages, use_audio_in_video=True)
|
| 195 |
+
video_tensor = videos[0]
|
| 196 |
+
return (video_tensor * 255).byte().numpy()
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def preprocess_audio_for_vllm(audio_path: str, target_sr: int = 16000):
|
| 200 |
+
import numpy as np
|
| 201 |
+
import wave
|
| 202 |
+
|
| 203 |
+
with wave.open(audio_path, "rb") as w:
|
| 204 |
+
sr = w.getframerate()
|
| 205 |
+
n = w.getnframes()
|
| 206 |
+
raw = w.readframes(n)
|
| 207 |
+
x = np.frombuffer(raw, dtype=np.int16).astype(np.float32) / 32768.0
|
| 208 |
+
if sr != target_sr:
|
| 209 |
+
duration = len(x) / sr
|
| 210 |
+
new_len = int(duration * target_sr)
|
| 211 |
+
x = np.interp(
|
| 212 |
+
np.linspace(0, len(x) - 1, new_len),
|
| 213 |
+
np.arange(len(x)),
|
| 214 |
+
x,
|
| 215 |
+
)
|
| 216 |
+
return x, target_sr
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def build_vllm_prompt(question: str, base_model: str, include_audio: bool = True) -> str:
|
| 220 |
+
from omni_model_loading import vllm_user_mm_prefix
|
| 221 |
+
|
| 222 |
+
mm = vllm_user_mm_prefix(base_model, include_audio=include_audio)
|
| 223 |
+
return (
|
| 224 |
+
f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
|
| 225 |
+
f"<|im_start|>user\n"
|
| 226 |
+
f"{mm}"
|
| 227 |
+
f"{question}<|im_end|>\n"
|
| 228 |
+
f"<|im_start|>assistant\n"
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def load_model(base_model: str, adapter: Optional[str]):
|
| 233 |
+
from omni_model_loading import load_qwen_omni_model
|
| 234 |
+
|
| 235 |
+
model, processor, _ = load_qwen_omni_model(base_model, adapter)
|
| 236 |
+
return model, processor
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def run_inference(model, processor, video_path: str, audio_path: str,
|
| 240 |
+
prompt: str, max_new_tokens: int, temperature: float) -> str:
|
| 241 |
+
from qwen_omni_utils import process_mm_info
|
| 242 |
+
|
| 243 |
+
tmp_dir = tempfile.mkdtemp(prefix="eval_vggsync_")
|
| 244 |
+
masked_video = os.path.join(tmp_dir, "clip.mp4")
|
| 245 |
+
masked_audio = os.path.join(tmp_dir, "clip.wav")
|
| 246 |
+
os.symlink(os.path.abspath(video_path), masked_video)
|
| 247 |
+
os.symlink(os.path.abspath(audio_path), masked_audio)
|
| 248 |
+
|
| 249 |
+
conversation = [{
|
| 250 |
+
"role": "user",
|
| 251 |
+
"content": [
|
| 252 |
+
{"type": "video", "video": masked_video},
|
| 253 |
+
{"type": "audio", "audio": masked_audio},
|
| 254 |
+
{"type": "text", "text": prompt},
|
| 255 |
+
],
|
| 256 |
+
}]
|
| 257 |
+
|
| 258 |
+
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
|
| 259 |
+
audios, images, videos = process_mm_info(conversation, use_audio_in_video=True)
|
| 260 |
+
inputs = processor(
|
| 261 |
+
text=text, audio=audios, images=images, videos=videos,
|
| 262 |
+
return_tensors="pt", padding=True, use_audio_in_video=True,
|
| 263 |
+
)
|
| 264 |
+
|
| 265 |
+
model_dtype = next(model.parameters()).dtype
|
| 266 |
+
converted = {}
|
| 267 |
+
for k, v in inputs.items():
|
| 268 |
+
if hasattr(v, "to"):
|
| 269 |
+
v = v.to(model.device)
|
| 270 |
+
if torch.is_floating_point(v):
|
| 271 |
+
v = v.to(model_dtype)
|
| 272 |
+
converted[k] = v
|
| 273 |
+
inputs = converted
|
| 274 |
+
|
| 275 |
+
from omni_model_loading import is_omni_thinker_model
|
| 276 |
+
|
| 277 |
+
is_thinker = is_omni_thinker_model(model)
|
| 278 |
+
if is_thinker:
|
| 279 |
+
gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
|
| 280 |
+
else:
|
| 281 |
+
gen_kwargs = {
|
| 282 |
+
"thinker_max_new_tokens": max_new_tokens,
|
| 283 |
+
"use_audio_in_video": True,
|
| 284 |
+
"return_audio": False,
|
| 285 |
+
"do_sample": temperature > 0,
|
| 286 |
+
}
|
| 287 |
+
if temperature > 0:
|
| 288 |
+
gen_kwargs["temperature"] = temperature
|
| 289 |
+
gen_kwargs["top_p"] = 0.9
|
| 290 |
+
|
| 291 |
+
with torch.inference_mode():
|
| 292 |
+
output_ids = model.generate(**inputs, **gen_kwargs)
|
| 293 |
+
if isinstance(output_ids, tuple):
|
| 294 |
+
output_ids = output_ids[0]
|
| 295 |
+
|
| 296 |
+
prompt_len = inputs["input_ids"].shape[1]
|
| 297 |
+
response = processor.batch_decode(
|
| 298 |
+
output_ids[:, prompt_len:], skip_special_tokens=True,
|
| 299 |
+
)[0].strip()
|
| 300 |
+
|
| 301 |
+
shutil.rmtree(tmp_dir, ignore_errors=True)
|
| 302 |
+
return response
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def extract_mcq_answer(text: str, answer_map: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
|
| 306 |
+
text = text.strip().upper()
|
| 307 |
+
m = re.search(r"[ABC]", text)
|
| 308 |
+
letter = m[0] if m else ""
|
| 309 |
+
|
| 310 |
+
if answer_map is None:
|
| 311 |
+
answer_map = {"A": "synced", "B": "delay", "C": "early"}
|
| 312 |
+
|
| 313 |
+
key_to_pred = {
|
| 314 |
+
"synced": {"pred_synced": True, "pred_direction": "none"},
|
| 315 |
+
"delay": {"pred_synced": False, "pred_direction": "delay"},
|
| 316 |
+
"early": {"pred_synced": False, "pred_direction": "early"},
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
if letter in answer_map and answer_map[letter] in key_to_pred:
|
| 320 |
+
return {**key_to_pred[answer_map[letter]], "pred_offset_sec": 0.0,
|
| 321 |
+
"pred_letter": letter, "parse_method": "mcq"}
|
| 322 |
+
return {"pred_synced": True, "pred_direction": "none", "pred_offset_sec": 0.0,
|
| 323 |
+
"pred_letter": "", "parse_method": "mcq_failed"}
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def extract_freetext_prediction(text: str) -> Dict[str, Any]:
|
| 327 |
+
text_stripped = text.strip()
|
| 328 |
+
for pattern in [
|
| 329 |
+
re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
|
| 330 |
+
re.compile(r"(\{[^{}]*\"synced\"[^{}]*\})", re.DOTALL),
|
| 331 |
+
re.compile(r"(\{.*?\})", re.DOTALL),
|
| 332 |
+
]:
|
| 333 |
+
m = pattern.search(text_stripped)
|
| 334 |
+
if m:
|
| 335 |
+
try:
|
| 336 |
+
obj = json.loads(m.group(1))
|
| 337 |
+
synced = obj.get("synced")
|
| 338 |
+
if isinstance(synced, str):
|
| 339 |
+
synced = synced.lower() in ("true", "yes", "1")
|
| 340 |
+
direction = str(obj.get("direction", "none")).lower().strip()
|
| 341 |
+
if direction not in ("delay", "early", "none"):
|
| 342 |
+
direction = "none"
|
| 343 |
+
return {
|
| 344 |
+
"pred_synced": bool(synced),
|
| 345 |
+
"pred_direction": direction,
|
| 346 |
+
"pred_offset_sec": float(obj.get("offset_sec", 0.0)),
|
| 347 |
+
"parse_method": "json",
|
| 348 |
+
}
|
| 349 |
+
except (json.JSONDecodeError, ValueError, TypeError):
|
| 350 |
+
continue
|
| 351 |
+
|
| 352 |
+
text_lower = text_stripped.lower()
|
| 353 |
+
synced = None
|
| 354 |
+
direction = "none"
|
| 355 |
+
offset = 0.0
|
| 356 |
+
|
| 357 |
+
desync_kws = ["not synchronized", "not aligned", "desync", "mismatch",
|
| 358 |
+
"not in sync", "out of sync", "not well aligned"]
|
| 359 |
+
sync_kws = ["synchronized", "well aligned", "well-aligned", "in sync",
|
| 360 |
+
"closely aligned", "matches closely"]
|
| 361 |
+
if any(kw in text_lower for kw in desync_kws):
|
| 362 |
+
synced = False
|
| 363 |
+
elif any(kw in text_lower for kw in sync_kws):
|
| 364 |
+
synced = True
|
| 365 |
+
|
| 366 |
+
if synced is False:
|
| 367 |
+
delay_kws = ["audio delayed", "audio lags", "audio comes after",
|
| 368 |
+
"sound comes after", "audio is delayed", "sound follows"]
|
| 369 |
+
early_kws = ["audio early", "audio leads", "audio comes before",
|
| 370 |
+
"sound comes before", "audio precedes", "audio is early"]
|
| 371 |
+
if any(kw in text_lower for kw in delay_kws):
|
| 372 |
+
direction = "delay"
|
| 373 |
+
elif any(kw in text_lower for kw in early_kws):
|
| 374 |
+
direction = "early"
|
| 375 |
+
|
| 376 |
+
offset_match = re.search(
|
| 377 |
+
r"(?:gap|offset|mismatch|differ\w*)\s*(?:of\s+)?(?:about\s+|roughly\s+|approximately\s+)?"
|
| 378 |
+
r"([\d]+\.?\d*)\s*s", text_lower)
|
| 379 |
+
if offset_match:
|
| 380 |
+
offset = float(offset_match.group(1))
|
| 381 |
+
|
| 382 |
+
if synced is None:
|
| 383 |
+
synced = True
|
| 384 |
+
|
| 385 |
+
return {
|
| 386 |
+
"pred_synced": synced,
|
| 387 |
+
"pred_direction": direction,
|
| 388 |
+
"pred_offset_sec": offset,
|
| 389 |
+
"parse_method": "regex_fallback",
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
DIFFICULTY_ORDER = ["synced", "hard", "medium", "easy", "very_easy"]
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 397 |
+
total = len(results)
|
| 398 |
+
if total == 0:
|
| 399 |
+
return {}
|
| 400 |
+
|
| 401 |
+
sync_correct = sum(1 for r in results if r["pred_synced"] == r["gt_synced"])
|
| 402 |
+
sync_acc = sync_correct / total
|
| 403 |
+
|
| 404 |
+
def _label(r, prefix):
|
| 405 |
+
if r[f"{prefix}synced"]:
|
| 406 |
+
return "synced"
|
| 407 |
+
return r[f"{prefix}direction"]
|
| 408 |
+
|
| 409 |
+
three_class_correct = sum(1 for r in results if _label(r, "pred_") == _label(r, "gt_"))
|
| 410 |
+
three_class_acc = three_class_correct / total
|
| 411 |
+
|
| 412 |
+
desync = [r for r in results if not r["gt_synced"]]
|
| 413 |
+
if desync:
|
| 414 |
+
dir_correct = sum(1 for r in desync if r["pred_direction"] == r["gt_direction"])
|
| 415 |
+
dir_acc = dir_correct / len(desync)
|
| 416 |
+
else:
|
| 417 |
+
dir_acc = None
|
| 418 |
+
|
| 419 |
+
per_difficulty = {}
|
| 420 |
+
for d in DIFFICULTY_ORDER:
|
| 421 |
+
subset = [r for r in results if r["difficulty"] == d]
|
| 422 |
+
if not subset:
|
| 423 |
+
continue
|
| 424 |
+
if d == "synced":
|
| 425 |
+
acc = sum(1 for r in subset if r["pred_synced"]) / len(subset)
|
| 426 |
+
else:
|
| 427 |
+
acc = sum(1 for r in subset if _label(r, "pred_") == _label(r, "gt_")) / len(subset)
|
| 428 |
+
per_difficulty[d] = {"accuracy": round(acc, 4), "count": len(subset)}
|
| 429 |
+
|
| 430 |
+
per_class: Dict[str, Dict] = {}
|
| 431 |
+
classes = sorted(set(r.get("label", "") for r in results))
|
| 432 |
+
for cls in classes:
|
| 433 |
+
subset = [r for r in results if r.get("label") == cls]
|
| 434 |
+
if not subset:
|
| 435 |
+
continue
|
| 436 |
+
acc = sum(1 for r in subset if _label(r, "pred_") == _label(r, "gt_")) / len(subset)
|
| 437 |
+
per_class[cls] = {"accuracy": round(acc, 4), "count": len(subset)}
|
| 438 |
+
|
| 439 |
+
offset_errors = []
|
| 440 |
+
for r in desync:
|
| 441 |
+
if not r["pred_synced"] and r["pred_offset_sec"] > 0:
|
| 442 |
+
offset_errors.append(abs(r["pred_offset_sec"] - r["gt_offset_sec"]))
|
| 443 |
+
|
| 444 |
+
parse_stats = {}
|
| 445 |
+
for r in results:
|
| 446 |
+
m = r.get("parse_method", "unknown")
|
| 447 |
+
parse_stats[m] = parse_stats.get(m, 0) + 1
|
| 448 |
+
|
| 449 |
+
metrics = {
|
| 450 |
+
"total_samples": total,
|
| 451 |
+
"sync_desync_accuracy": round(sync_acc, 4),
|
| 452 |
+
"three_class_accuracy": round(three_class_acc, 4),
|
| 453 |
+
"direction_accuracy_on_desync": round(dir_acc, 4) if dir_acc is not None else None,
|
| 454 |
+
"per_difficulty": per_difficulty,
|
| 455 |
+
"per_class": per_class,
|
| 456 |
+
"parse_stats": parse_stats,
|
| 457 |
+
}
|
| 458 |
+
if offset_errors:
|
| 459 |
+
metrics["offset_mae_sec"] = round(mean(offset_errors), 4)
|
| 460 |
+
metrics["offset_median_sec"] = round(median(offset_errors), 4)
|
| 461 |
+
within_02 = sum(1 for e in offset_errors if e <= 0.2)
|
| 462 |
+
within_05 = sum(1 for e in offset_errors if e <= 0.5)
|
| 463 |
+
metrics["offset_within_0.2s"] = within_02
|
| 464 |
+
metrics["offset_within_0.5s"] = within_05
|
| 465 |
+
metrics["offset_evaluated_count"] = len(offset_errors)
|
| 466 |
+
|
| 467 |
+
return metrics
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def print_summary(metrics: Dict[str, Any], label: str) -> None:
|
| 471 |
+
print()
|
| 472 |
+
print(f"{'=' * 65}")
|
| 473 |
+
print(f" VGG-Sound Sync Eval: {label}")
|
| 474 |
+
print(f"{'=' * 65}")
|
| 475 |
+
print(f" Total samples: {metrics['total_samples']}")
|
| 476 |
+
print(f" Sync/Desync Accuracy: {metrics['sync_desync_accuracy']:.1%}")
|
| 477 |
+
print(f" 3-Class Accuracy: {metrics['three_class_accuracy']:.1%}")
|
| 478 |
+
if metrics.get("direction_accuracy_on_desync") is not None:
|
| 479 |
+
print(f" Direction Acc (desync): {metrics['direction_accuracy_on_desync']:.1%}")
|
| 480 |
+
print(f" ─── Per Difficulty ───")
|
| 481 |
+
for d in DIFFICULTY_ORDER:
|
| 482 |
+
if d in metrics.get("per_difficulty", {}):
|
| 483 |
+
info = metrics["per_difficulty"][d]
|
| 484 |
+
print(f" {d:10s}: {info['accuracy']:.1%} ({info['count']} samples)")
|
| 485 |
+
if metrics.get("offset_mae_sec") is not None:
|
| 486 |
+
print(f" ─── Offset Estimation (freetext only) ───")
|
| 487 |
+
print(f" MAE: {metrics['offset_mae_sec']:.3f}s")
|
| 488 |
+
print(f" Median Error: {metrics['offset_median_sec']:.3f}s")
|
| 489 |
+
print(f" Within 0.2s: {metrics['offset_within_0.2s']} / {metrics['offset_evaluated_count']}")
|
| 490 |
+
print(f" Within 0.5s: {metrics['offset_within_0.5s']} / {metrics['offset_evaluated_count']}")
|
| 491 |
+
print(f" ─── Parse Stats ───")
|
| 492 |
+
for method, count in sorted(metrics.get("parse_stats", {}).items()):
|
| 493 |
+
print(f" {method}: {count}")
|
| 494 |
+
print(f"{'=' * 65}")
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def _extract_pred(raw_output: str, mode: str, gpt_judge: bool,
|
| 498 |
+
openai_api_key: Optional[str], gpt_model: str,
|
| 499 |
+
answer_map: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
|
| 500 |
+
if mode == "mcq":
|
| 501 |
+
return extract_mcq_answer(raw_output, answer_map=answer_map)
|
| 502 |
+
if gpt_judge and raw_output:
|
| 503 |
+
gpt_pred = gpt_extract_prediction(raw_output, api_key=openai_api_key, model=gpt_model)
|
| 504 |
+
if gpt_pred is not None:
|
| 505 |
+
return gpt_pred
|
| 506 |
+
return extract_freetext_prediction(raw_output)
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
def _build_result(item: Dict, pred: Dict, raw_output: str, mode: str) -> Dict:
|
| 510 |
+
result = {
|
| 511 |
+
"uid": item["uid"],
|
| 512 |
+
"ytid": item["ytid"],
|
| 513 |
+
"label": item.get("label", ""),
|
| 514 |
+
"difficulty": item["difficulty"],
|
| 515 |
+
"gt_synced": item["gt_synced"],
|
| 516 |
+
"gt_direction": item["gt_direction"],
|
| 517 |
+
"gt_offset_sec": item["gt_offset_sec"],
|
| 518 |
+
"pred_synced": pred["pred_synced"],
|
| 519 |
+
"pred_direction": pred["pred_direction"],
|
| 520 |
+
"pred_offset_sec": pred.get("pred_offset_sec", 0.0),
|
| 521 |
+
"parse_method": pred["parse_method"],
|
| 522 |
+
"raw_output": raw_output,
|
| 523 |
+
}
|
| 524 |
+
if mode == "mcq":
|
| 525 |
+
result["pred_letter"] = pred.get("pred_letter", "")
|
| 526 |
+
return result
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
def _save_and_finalize(results_jsonl: Path, metrics_json: Path, summary_txt: Path,
|
| 530 |
+
args, label: str):
|
| 531 |
+
all_results = []
|
| 532 |
+
if results_jsonl.exists():
|
| 533 |
+
with open(results_jsonl) as f:
|
| 534 |
+
for line in f:
|
| 535 |
+
all_results.append(json.loads(line))
|
| 536 |
+
|
| 537 |
+
if not all_results:
|
| 538 |
+
print("[warn] No results.")
|
| 539 |
+
return
|
| 540 |
+
|
| 541 |
+
metrics = compute_metrics(all_results)
|
| 542 |
+
metrics["eval_config"] = {
|
| 543 |
+
"base_model": args.base_model,
|
| 544 |
+
"adapter": args.adapter,
|
| 545 |
+
"mode": args.mode,
|
| 546 |
+
"test_jsonl": str(args.test_jsonl),
|
| 547 |
+
"max_new_tokens": args.max_new_tokens,
|
| 548 |
+
"temperature": args.temperature,
|
| 549 |
+
"vllm": args.vllm,
|
| 550 |
+
}
|
| 551 |
+
|
| 552 |
+
with open(metrics_json, "w", encoding="utf-8") as f:
|
| 553 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 554 |
+
|
| 555 |
+
print_summary(metrics, label)
|
| 556 |
+
|
| 557 |
+
with open(summary_txt, "w", encoding="utf-8") as f:
|
| 558 |
+
import io, contextlib
|
| 559 |
+
buf = io.StringIO()
|
| 560 |
+
with contextlib.redirect_stdout(buf):
|
| 561 |
+
print_summary(metrics, label)
|
| 562 |
+
f.write(buf.getvalue())
|
| 563 |
+
|
| 564 |
+
print(f"\n[output] Results: {results_jsonl}")
|
| 565 |
+
print(f"[output] Metrics: {metrics_json}")
|
| 566 |
+
print(f"[output] Summary: {summary_txt}")
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
def main() -> None:
|
| 570 |
+
args = parse_args()
|
| 571 |
+
label = args.label or (Path(args.adapter).name if args.adapter else Path(args.base_model).name)
|
| 572 |
+
default_prompt = MCQ_PROMPT if args.mode == "mcq" else FREETEXT_PROMPT
|
| 573 |
+
|
| 574 |
+
if args.gpt_judge and args.mode == "freetext":
|
| 575 |
+
client = _get_openai_client(args.openai_api_key)
|
| 576 |
+
if client is None:
|
| 577 |
+
print("[ERROR] --gpt-judge requires OPENAI_API_KEY or --openai-api-key.")
|
| 578 |
+
raise SystemExit(1)
|
| 579 |
+
|
| 580 |
+
out_dir = args.output_dir / label
|
| 581 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 582 |
+
results_jsonl = out_dir / "eval_results.jsonl"
|
| 583 |
+
metrics_json = out_dir / "metrics.json"
|
| 584 |
+
summary_txt = out_dir / "summary.txt"
|
| 585 |
+
|
| 586 |
+
test_data = load_test_data(args.test_jsonl, args.max_samples)
|
| 587 |
+
print(f"[data] {len(test_data)} samples loaded (mode={args.mode})")
|
| 588 |
+
|
| 589 |
+
processed = set()
|
| 590 |
+
if results_jsonl.exists():
|
| 591 |
+
with open(results_jsonl) as f:
|
| 592 |
+
for line in f:
|
| 593 |
+
processed.add(json.loads(line)["uid"])
|
| 594 |
+
print(f"[resume] {len(processed)} already done")
|
| 595 |
+
|
| 596 |
+
use_vllm = args.vllm
|
| 597 |
+
|
| 598 |
+
if use_vllm:
|
| 599 |
+
from vllm import LLM, SamplingParams
|
| 600 |
+
|
| 601 |
+
tp = args.tp or torch.cuda.device_count()
|
| 602 |
+
todo = [item for item in test_data if item["uid"] not in processed]
|
| 603 |
+
|
| 604 |
+
uniq_v = list(dict.fromkeys(item["video_path"] for item in todo))
|
| 605 |
+
uniq_a = list(dict.fromkeys(item["audio_path"] for item in todo))
|
| 606 |
+
print(
|
| 607 |
+
f"[vllm] Phase 1 — CPU preprocess: {len(uniq_v)} unique videos, {len(uniq_a)} unique audios "
|
| 608 |
+
f"for {len(todo)} samples (GPUs idle until model load).",
|
| 609 |
+
flush=True,
|
| 610 |
+
)
|
| 611 |
+
preprocessed_v: Dict[str, Any] = {}
|
| 612 |
+
preprocessed_a: Dict[str, Any] = {}
|
| 613 |
+
failed_paths: set = set()
|
| 614 |
+
|
| 615 |
+
for vp in tqdm(uniq_v, desc="Preprocess video", unit="file"):
|
| 616 |
+
if vp in failed_paths:
|
| 617 |
+
continue
|
| 618 |
+
try:
|
| 619 |
+
preprocessed_v[vp] = preprocess_video_for_vllm(vp)
|
| 620 |
+
except Exception as e:
|
| 621 |
+
failed_paths.add(vp)
|
| 622 |
+
print(f" [skip] video preprocess error: {Path(vp).name}: {e}")
|
| 623 |
+
|
| 624 |
+
for ap in tqdm(uniq_a, desc="Preprocess audio", unit="file"):
|
| 625 |
+
if ap in failed_paths:
|
| 626 |
+
continue
|
| 627 |
+
try:
|
| 628 |
+
preprocessed_a[ap] = preprocess_audio_for_vllm(ap)
|
| 629 |
+
except Exception as e:
|
| 630 |
+
failed_paths.add(ap)
|
| 631 |
+
print(f" [skip] audio preprocess error: {Path(ap).name}: {e}")
|
| 632 |
+
|
| 633 |
+
n_skip = sum(1 for item in todo
|
| 634 |
+
if item["video_path"] in failed_paths or item["audio_path"] in failed_paths)
|
| 635 |
+
if failed_paths:
|
| 636 |
+
print(f"[vllm] Preprocess failed for {len(failed_paths)} path(s), "
|
| 637 |
+
f"{n_skip} sample(s) will be skipped.")
|
| 638 |
+
|
| 639 |
+
from omni_model_loading import cap_vllm_max_model_len
|
| 640 |
+
|
| 641 |
+
vllm_max_len = cap_vllm_max_model_len(args.base_model, args.max_model_len)
|
| 642 |
+
print(f"[vllm] Loading {args.base_model} with tp={tp} (max_model_len={vllm_max_len}) ...")
|
| 643 |
+
llm = LLM(
|
| 644 |
+
model=args.base_model,
|
| 645 |
+
tensor_parallel_size=tp,
|
| 646 |
+
max_model_len=vllm_max_len,
|
| 647 |
+
max_num_seqs=4,
|
| 648 |
+
limit_mm_per_prompt={"video": 1, "audio": 1},
|
| 649 |
+
gpu_memory_utilization=args.gpu_memory_utilization,
|
| 650 |
+
dtype="bfloat16",
|
| 651 |
+
trust_remote_code=True,
|
| 652 |
+
)
|
| 653 |
+
sampling_params = SamplingParams(
|
| 654 |
+
temperature=args.temperature if args.temperature > 0 else 0.0,
|
| 655 |
+
top_p=0.9 if args.temperature > 0 else 1.0,
|
| 656 |
+
max_tokens=args.max_new_tokens,
|
| 657 |
+
)
|
| 658 |
+
|
| 659 |
+
vllm_todo = [item for item in todo
|
| 660 |
+
if item["video_path"] not in failed_paths
|
| 661 |
+
and item["audio_path"] not in failed_paths]
|
| 662 |
+
fallback_items = [item for item in todo
|
| 663 |
+
if item["video_path"] in failed_paths
|
| 664 |
+
or item["audio_path"] in failed_paths]
|
| 665 |
+
print(f"[vllm] {len(vllm_todo)} samples ready, {len(fallback_items)} deferred to fallback ...")
|
| 666 |
+
|
| 667 |
+
for i, item in enumerate(vllm_todo):
|
| 668 |
+
if item["uid"] in processed:
|
| 669 |
+
continue
|
| 670 |
+
item_prompt = item.get("mcq_prompt", default_prompt) if args.mode == "mcq" else default_prompt
|
| 671 |
+
item_answer_map = item.get("mcq_answer_map") if args.mode == "mcq" else None
|
| 672 |
+
inp = {
|
| 673 |
+
"prompt": build_vllm_prompt(item_prompt, args.base_model, include_audio=True),
|
| 674 |
+
"multi_modal_data": {
|
| 675 |
+
"video": preprocessed_v[item["video_path"]],
|
| 676 |
+
"audio": preprocessed_a[item["audio_path"]],
|
| 677 |
+
},
|
| 678 |
+
}
|
| 679 |
+
try:
|
| 680 |
+
outputs = llm.generate([inp], sampling_params=sampling_params)
|
| 681 |
+
raw_output = outputs[0].outputs[0].text.strip()
|
| 682 |
+
except (ValueError, RuntimeError) as exc:
|
| 683 |
+
print(f" [error] {item['uid']}: {exc}")
|
| 684 |
+
raw_output = ""
|
| 685 |
+
|
| 686 |
+
pred = _extract_pred(raw_output, args.mode, args.gpt_judge,
|
| 687 |
+
args.openai_api_key, args.gpt_model,
|
| 688 |
+
answer_map=item_answer_map)
|
| 689 |
+
result = _build_result(item, pred, raw_output, args.mode)
|
| 690 |
+
|
| 691 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 692 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 693 |
+
processed.add(item["uid"])
|
| 694 |
+
|
| 695 |
+
if (i + 1) % 100 == 0:
|
| 696 |
+
print(f" [vllm] [{i + 1}/{len(vllm_todo)}] done")
|
| 697 |
+
|
| 698 |
+
preprocessed_v.clear()
|
| 699 |
+
preprocessed_a.clear()
|
| 700 |
+
|
| 701 |
+
if fallback_items:
|
| 702 |
+
print(f"[fallback] Running {len(fallback_items)} samples with transformers ...")
|
| 703 |
+
del llm
|
| 704 |
+
gc.collect()
|
| 705 |
+
torch.cuda.empty_cache()
|
| 706 |
+
|
| 707 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 708 |
+
for item in tqdm(fallback_items, desc="Fallback", unit="q"):
|
| 709 |
+
if item["uid"] in processed:
|
| 710 |
+
continue
|
| 711 |
+
item_prompt = item.get("mcq_prompt", default_prompt) if args.mode == "mcq" else default_prompt
|
| 712 |
+
item_answer_map = item.get("mcq_answer_map") if args.mode == "mcq" else None
|
| 713 |
+
try:
|
| 714 |
+
raw_output = run_inference(
|
| 715 |
+
model, processor, item["video_path"], item["audio_path"],
|
| 716 |
+
item_prompt, args.max_new_tokens, args.temperature,
|
| 717 |
+
)
|
| 718 |
+
except Exception as exc:
|
| 719 |
+
import traceback
|
| 720 |
+
print(f" [error] {item['uid']}: {exc}")
|
| 721 |
+
traceback.print_exc()
|
| 722 |
+
raw_output = ""
|
| 723 |
+
|
| 724 |
+
pred = _extract_pred(raw_output, args.mode, args.gpt_judge,
|
| 725 |
+
args.openai_api_key, args.gpt_model,
|
| 726 |
+
answer_map=item_answer_map)
|
| 727 |
+
result = _build_result(item, pred, raw_output, args.mode)
|
| 728 |
+
|
| 729 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 730 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 731 |
+
processed.add(item["uid"])
|
| 732 |
+
gc.collect()
|
| 733 |
+
torch.cuda.empty_cache()
|
| 734 |
+
|
| 735 |
+
else:
|
| 736 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 737 |
+
|
| 738 |
+
for item in tqdm(test_data, desc="VGGSync", unit="q"):
|
| 739 |
+
if item["uid"] in processed:
|
| 740 |
+
continue
|
| 741 |
+
if not os.path.exists(item["video_path"]):
|
| 742 |
+
print(f" [skip] video not found: {item['video_path']}")
|
| 743 |
+
continue
|
| 744 |
+
if not os.path.exists(item["audio_path"]):
|
| 745 |
+
print(f" [skip] audio not found: {item['audio_path']}")
|
| 746 |
+
continue
|
| 747 |
+
|
| 748 |
+
item_prompt = item.get("mcq_prompt", default_prompt) if args.mode == "mcq" else default_prompt
|
| 749 |
+
item_answer_map = item.get("mcq_answer_map") if args.mode == "mcq" else None
|
| 750 |
+
|
| 751 |
+
try:
|
| 752 |
+
raw_output = run_inference(
|
| 753 |
+
model, processor, item["video_path"], item["audio_path"],
|
| 754 |
+
item_prompt, args.max_new_tokens, args.temperature,
|
| 755 |
+
)
|
| 756 |
+
except Exception as exc:
|
| 757 |
+
import traceback
|
| 758 |
+
print(f" [error] {item['uid']}: {exc}")
|
| 759 |
+
traceback.print_exc()
|
| 760 |
+
raw_output = ""
|
| 761 |
+
|
| 762 |
+
pred = _extract_pred(raw_output, args.mode, args.gpt_judge,
|
| 763 |
+
args.openai_api_key, args.gpt_model,
|
| 764 |
+
answer_map=item_answer_map)
|
| 765 |
+
result = _build_result(item, pred, raw_output, args.mode)
|
| 766 |
+
|
| 767 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 768 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 769 |
+
processed.add(item["uid"])
|
| 770 |
+
gc.collect()
|
| 771 |
+
torch.cuda.empty_cache()
|
| 772 |
+
|
| 773 |
+
_save_and_finalize(results_jsonl, metrics_json, summary_txt, args, label)
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
if __name__ == "__main__":
|
| 777 |
+
main()
|
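A quick check of the answer parsers above: extract_freetext_prediction() looks for a JSON object first and only falls back to keyword matching when none is found, while extract_mcq_answer() takes the first A/B/C letter in the reply and maps it through the per-item answer map. A minimal sketch of the expected behaviour, assuming the script is importable as eval_vggsoundsync (the module name and the sample strings are illustrative, not taken from the repo):

# Minimal sketch; the module name eval_vggsoundsync is an assumption, adjust to your layout.
from eval_vggsoundsync import extract_freetext_prediction, extract_mcq_answer

# 1) A JSON object in the model output is parsed directly (parse_method == "json").
pred = extract_freetext_prediction(
    'I think the clip is off: {"synced": false, "direction": "delay", "offset_sec": 0.4}'
)
# -> pred_synced=False, pred_direction="delay", pred_offset_sec=0.4

# 2) Without JSON, the keyword fallback is used (parse_method == "regex_fallback").
pred = extract_freetext_prediction("The sound comes after the action, so it is out of sync.")
# -> pred_synced=False, pred_direction="delay", pred_offset_sec=0.0

# 3) MCQ mode keeps only the first A/B/C letter it finds.
pred = extract_mcq_answer("B", answer_map={"A": "synced", "B": "delay", "C": "early"})
# -> pred_synced=False, pred_direction="delay", pred_letter="B"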
supp/eval_videomme.py
ADDED
|
@@ -0,0 +1,564 @@
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import argparse
|
| 5 |
+
import ctypes
|
| 6 |
+
import gc
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import site
|
| 11 |
+
import tempfile
|
| 12 |
+
import shutil
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from typing import Any, Dict, List, Optional
|
| 15 |
+
|
| 16 |
+
_npp_lib = Path(site.getsitepackages()[0]) / "nvidia" / "npp" / "lib"
|
| 17 |
+
_npp_so = _npp_lib / "libnppicc.so.12"
|
| 18 |
+
if _npp_so.is_file():
|
| 19 |
+
ctypes.CDLL(str(_npp_so), mode=ctypes.RTLD_GLOBAL)
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
|
| 24 |
+
DEFAULT_VIDEO_DIR = Path("./data/videomme/data")
|
| 25 |
+
DEFAULT_OUTPUT_DIR = Path("./eval_results/videomme")
|
| 26 |
+
|
| 27 |
+
VIDEO_TYPES = ["short", "medium", "long"]
|
| 28 |
+
CATEGORIES = [
|
| 29 |
+
"Knowledge", "Film & Television", "Sports Competition",
|
| 30 |
+
"Artistic Performance", "Life Record", "Multilingual",
|
| 31 |
+
]
|
| 32 |
+
TASK_CATEGORIES = [
|
| 33 |
+
"Temporal Perception", "Spatial Perception", "Attribute Perception",
|
| 34 |
+
"Action Recognition", "Object Recognition", "OCR Problems",
|
| 35 |
+
"Counting Problem", "Temporal Reasoning", "Spatial Reasoning",
|
| 36 |
+
"Action Reasoning", "Object Reasoning", "Information Synopsis",
|
| 37 |
+
]
|
| 38 |
+
|
| 39 |
+
MCQ_PROMPT = (
|
| 40 |
+
"Select the best answer to the following multiple-choice question "
|
| 41 |
+
"based on the video. Respond with only the letter (A, B, C, or D) "
|
| 42 |
+
"of the correct option.\n"
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def parse_args() -> argparse.Namespace:
|
| 47 |
+
p = argparse.ArgumentParser(description="Evaluate on Video-MME benchmark.")
|
| 48 |
+
p.add_argument("--base-model", type=str,
|
| 49 |
+
default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
|
| 50 |
+
p.add_argument("--adapter", type=str, default=None)
|
| 51 |
+
p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
|
| 52 |
+
p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
|
| 53 |
+
p.add_argument("--max-samples", type=int, default=-1)
|
| 54 |
+
p.add_argument("--max-new-tokens", type=int, default=32)
|
| 55 |
+
p.add_argument("--temperature", type=float, default=0.0)
|
| 56 |
+
p.add_argument("--label", type=str, default=None)
|
| 57 |
+
p.add_argument("--vllm", action="store_true", default=False)
|
| 58 |
+
p.add_argument("--tp", type=int, default=None)
|
| 59 |
+
p.add_argument("--batch-size", type=int, default=32)
|
| 60 |
+
p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
|
| 61 |
+
p.add_argument("--max-model-len", type=int, default=65536)
|
| 62 |
+
return p.parse_args()
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def load_model(base_model: str, adapter: Optional[str]):
|
| 66 |
+
from omni_model_loading import load_qwen_omni_model
|
| 67 |
+
|
| 68 |
+
model, processor, _ = load_qwen_omni_model(base_model, adapter)
|
| 69 |
+
return model, processor
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def run_inference(model, processor, video_path: str, prompt: str,
|
| 73 |
+
max_new_tokens: int, temperature: float) -> str:
|
| 74 |
+
from qwen_omni_utils import process_mm_info
|
| 75 |
+
|
| 76 |
+
tmp_dir = tempfile.mkdtemp(prefix="eval_vmme_")
|
| 77 |
+
masked_video = os.path.join(tmp_dir, "clip.mp4")
|
| 78 |
+
os.symlink(os.path.abspath(video_path), masked_video)
|
| 79 |
+
|
| 80 |
+
conversation = [
|
| 81 |
+
{
|
| 82 |
+
"role": "user",
|
| 83 |
+
"content": [
|
| 84 |
+
{"type": "video", "video": masked_video},
|
| 85 |
+
{"type": "text", "text": prompt},
|
| 86 |
+
],
|
| 87 |
+
}
|
| 88 |
+
]
|
| 89 |
+
|
| 90 |
+
text = processor.apply_chat_template(
|
| 91 |
+
conversation, add_generation_prompt=True, tokenize=False,
|
| 92 |
+
)
|
| 93 |
+
audios, images, videos = process_mm_info(conversation, use_audio_in_video=True)
|
| 94 |
+
inputs = processor(
|
| 95 |
+
text=text, audio=audios, images=images, videos=videos,
|
| 96 |
+
return_tensors="pt", padding=True, use_audio_in_video=True,
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
model_dtype = next(model.parameters()).dtype
|
| 100 |
+
converted = {}
|
| 101 |
+
for k, v in inputs.items():
|
| 102 |
+
if hasattr(v, "to"):
|
| 103 |
+
v = v.to(model.device)
|
| 104 |
+
if torch.is_floating_point(v):
|
| 105 |
+
v = v.to(model_dtype)
|
| 106 |
+
converted[k] = v
|
| 107 |
+
inputs = converted
|
| 108 |
+
|
| 109 |
+
from omni_model_loading import is_omni_thinker_model
|
| 110 |
+
|
| 111 |
+
is_thinker = is_omni_thinker_model(model)
|
| 112 |
+
if is_thinker:
|
| 113 |
+
gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
|
| 114 |
+
else:
|
| 115 |
+
gen_kwargs = {
|
| 116 |
+
"thinker_max_new_tokens": max_new_tokens,
|
| 117 |
+
"use_audio_in_video": True,
|
| 118 |
+
"return_audio": False,
|
| 119 |
+
"do_sample": temperature > 0,
|
| 120 |
+
}
|
| 121 |
+
if temperature > 0:
|
| 122 |
+
gen_kwargs["temperature"] = temperature
|
| 123 |
+
gen_kwargs["top_p"] = 0.9
|
| 124 |
+
|
| 125 |
+
with torch.inference_mode():
|
| 126 |
+
output_ids = model.generate(**inputs, **gen_kwargs)
|
| 127 |
+
|
| 128 |
+
if isinstance(output_ids, tuple):
|
| 129 |
+
output_ids = output_ids[0]
|
| 130 |
+
|
| 131 |
+
prompt_len = inputs["input_ids"].shape[1]
|
| 132 |
+
response = processor.batch_decode(
|
| 133 |
+
output_ids[:, prompt_len:], skip_special_tokens=True,
|
| 134 |
+
)[0].strip()
|
| 135 |
+
|
| 136 |
+
shutil.rmtree(tmp_dir, ignore_errors=True)
|
| 137 |
+
return response
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def preprocess_video_for_vllm(video_path: str):
|
| 141 |
+
from qwen_omni_utils import process_mm_info
|
| 142 |
+
import numpy as np
|
| 143 |
+
|
| 144 |
+
messages = [{
|
| 145 |
+
"role": "user",
|
| 146 |
+
"content": [
|
| 147 |
+
{"type": "video", "video": video_path, "nframes": 128},
|
| 148 |
+
{"type": "text", "text": "placeholder"},
|
| 149 |
+
],
|
| 150 |
+
}]
|
| 151 |
+
audios, images, videos = process_mm_info(messages, use_audio_in_video=True)
|
| 152 |
+
video_tensor = videos[0]
|
| 153 |
+
video_np = (video_tensor * 255).byte().numpy()
|
| 154 |
+
audio_tuple = None
|
| 155 |
+
if audios:
|
| 156 |
+
aud = audios[0]
|
| 157 |
+
if isinstance(aud, tuple):
|
| 158 |
+
audio_tuple = (aud[0].numpy() if hasattr(aud[0], "numpy") else np.asarray(aud[0]),
|
| 159 |
+
aud[1])
|
| 160 |
+
elif hasattr(aud, "numpy"):
|
| 161 |
+
audio_tuple = (aud.numpy(), 16000)
|
| 162 |
+
else:
|
| 163 |
+
audio_tuple = (np.asarray(aud), 16000)
|
| 164 |
+
return video_np, audio_tuple
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
SYSTEM_PROMPT = (
|
| 168 |
+
"You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
|
| 169 |
+
"Group, capable of perceiving auditory and visual inputs, as well as "
|
| 170 |
+
"generating text and speech."
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def build_vllm_prompt(question: str, base_model: str) -> str:
|
| 175 |
+
from omni_model_loading import vllm_user_mm_prefix
|
| 176 |
+
|
| 177 |
+
mm = vllm_user_mm_prefix(base_model, include_audio=True)
|
| 178 |
+
return (
|
| 179 |
+
f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
|
| 180 |
+
f"<|im_start|>user\n"
|
| 181 |
+
f"{mm}"
|
| 182 |
+
f"{question}<|im_end|>\n"
|
| 183 |
+
f"<|im_start|>assistant\n"
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def extract_answer(text: str) -> str:
|
| 188 |
+
text = text.strip()
|
| 189 |
+
prefixes = [
|
| 190 |
+
"The best answer is", "The correct answer is",
|
| 191 |
+
"The answer is", "The answer", "Best answer:", "Best option:",
|
| 192 |
+
]
|
| 193 |
+
for prefix in prefixes:
|
| 194 |
+
text = text.replace(prefix, "")
|
| 195 |
+
|
| 196 |
+
if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
|
| 197 |
+
return ""
|
| 198 |
+
m = re.search(r"[ABCD]", text)
|
| 199 |
+
return m[0] if m else ""
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def load_videomme(video_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
|
| 203 |
+
from datasets import load_dataset
|
| 204 |
+
ds = load_dataset("lmms-lab/Video-MME", split="test")
|
| 205 |
+
data = []
|
| 206 |
+
skipped = 0
|
| 207 |
+
for row in ds:
|
| 208 |
+
vid = row["videoID"]
|
| 209 |
+
video_path = video_dir / f"{vid}.mp4"
|
| 210 |
+
if not video_path.exists():
|
| 211 |
+
for ext in [".MP4", ".mkv"]:
|
| 212 |
+
alt = video_dir / f"{vid}{ext}"
|
| 213 |
+
if alt.exists():
|
| 214 |
+
video_path = alt
|
| 215 |
+
break
|
| 216 |
+
if not video_path.exists():
|
| 217 |
+
skipped += 1
|
| 218 |
+
continue
|
| 219 |
+
|
| 220 |
+
options_text = "\n".join(row["options"])
|
| 221 |
+
prompt = MCQ_PROMPT + row["question"] + "\n" + options_text + "\nThe best answer is:"
|
| 222 |
+
|
| 223 |
+
data.append({
|
| 224 |
+
"question_id": row["question_id"],
|
| 225 |
+
"video_id": vid,
|
| 226 |
+
"video_path": str(video_path),
|
| 227 |
+
"duration": row["duration"],
|
| 228 |
+
"domain": row["domain"],
|
| 229 |
+
"sub_category": row["sub_category"],
|
| 230 |
+
"task_type": row["task_type"],
|
| 231 |
+
"question": row["question"],
|
| 232 |
+
"options": row["options"],
|
| 233 |
+
"gt_answer": row["answer"],
|
| 234 |
+
"prompt": prompt,
|
| 235 |
+
})
|
| 236 |
+
if skipped:
|
| 237 |
+
print(f"[data] Skipped {skipped} questions (video not found)")
|
| 238 |
+
if max_samples > 0:
|
| 239 |
+
data = data[:max_samples]
|
| 240 |
+
return data
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 244 |
+
total = len(results)
|
| 245 |
+
if total == 0:
|
| 246 |
+
return {}
|
| 247 |
+
|
| 248 |
+
correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
|
| 249 |
+
overall_acc = correct / total
|
| 250 |
+
|
| 251 |
+
def acc_for(items):
|
| 252 |
+
if not items:
|
| 253 |
+
return None
|
| 254 |
+
c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
|
| 255 |
+
return round(c / len(items), 4)
|
| 256 |
+
|
| 257 |
+
per_duration = {}
|
| 258 |
+
for vt in VIDEO_TYPES:
|
| 259 |
+
subset = [r for r in results if r["duration"] == vt]
|
| 260 |
+
if subset:
|
| 261 |
+
per_duration[vt] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 262 |
+
|
| 263 |
+
per_domain = {}
|
| 264 |
+
for cat in CATEGORIES:
|
| 265 |
+
subset = [r for r in results if r["domain"] == cat]
|
| 266 |
+
if subset:
|
| 267 |
+
per_domain[cat] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 268 |
+
|
| 269 |
+
per_task = {}
|
| 270 |
+
for task in TASK_CATEGORIES:
|
| 271 |
+
subset = [r for r in results if r["task_type"] == task]
|
| 272 |
+
if subset:
|
| 273 |
+
per_task[task] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 274 |
+
|
| 275 |
+
return {
|
| 276 |
+
"total_samples": total,
|
| 277 |
+
"overall_accuracy": round(overall_acc, 4),
|
| 278 |
+
"per_duration": per_duration,
|
| 279 |
+
"per_domain": per_domain,
|
| 280 |
+
"per_task_type": per_task,
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def print_summary(metrics: Dict[str, Any], label: str) -> None:
|
| 285 |
+
print()
|
| 286 |
+
print(f"{'=' * 65}")
|
| 287 |
+
print(f" Video-MME Summary: {label}")
|
| 288 |
+
print(f"{'=' * 65}")
|
| 289 |
+
print(f" Total samples: {metrics['total_samples']}")
|
| 290 |
+
print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")
|
| 291 |
+
|
| 292 |
+
print(f" ─── Per Duration ───")
|
| 293 |
+
for vt in VIDEO_TYPES:
|
| 294 |
+
if vt in metrics["per_duration"]:
|
| 295 |
+
d = metrics["per_duration"][vt]
|
| 296 |
+
print(f" {vt:8s}: {d['accuracy']:.1%} ({d['count']} questions)")
|
| 297 |
+
|
| 298 |
+
print(f" ─── Per Domain ───")
|
| 299 |
+
for cat in CATEGORIES:
|
| 300 |
+
if cat in metrics["per_domain"]:
|
| 301 |
+
d = metrics["per_domain"][cat]
|
| 302 |
+
print(f" {cat:25s}: {d['accuracy']:.1%} ({d['count']})")
|
| 303 |
+
|
| 304 |
+
print(f" ─── Per Task Type ───")
|
| 305 |
+
for task in TASK_CATEGORIES:
|
| 306 |
+
if task in metrics["per_task_type"]:
|
| 307 |
+
d = metrics["per_task_type"][task]
|
| 308 |
+
print(f" {task:25s}: {d['accuracy']:.1%} ({d['count']})")
|
| 309 |
+
|
| 310 |
+
print(f"{'=' * 65}")
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def main() -> None:
|
| 314 |
+
args = parse_args()
|
| 315 |
+
label = args.label or (
|
| 316 |
+
Path(args.adapter).name if args.adapter
|
| 317 |
+
else Path(args.base_model).name
|
| 318 |
+
)
|
| 319 |
+
|
| 320 |
+
out_dir = args.output_dir / label
|
| 321 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 322 |
+
results_jsonl = out_dir / "eval_results.jsonl"
|
| 323 |
+
metrics_json = out_dir / "metrics.json"
|
| 324 |
+
summary_txt = out_dir / "summary.txt"
|
| 325 |
+
|
| 326 |
+
print("[data] Loading Video-MME dataset...")
|
| 327 |
+
test_data = load_videomme(args.video_dir, args.max_samples)
|
| 328 |
+
print(f"[data] {len(test_data)} questions ready for evaluation")
|
| 329 |
+
|
| 330 |
+
processed = set()
|
| 331 |
+
if results_jsonl.exists():
|
| 332 |
+
with open(results_jsonl) as f:
|
| 333 |
+
for line in f:
|
| 334 |
+
obj = json.loads(line)
|
| 335 |
+
processed.add(obj["question_id"])
|
| 336 |
+
print(f"[resume] {len(processed)} already processed, skipping")
|
| 337 |
+
|
| 338 |
+
use_vllm = args.vllm
|
| 339 |
+
model = processor = llm = None
|
| 340 |
+
vllm_preprocess_stats: Dict[str, int] | None = None
|
| 341 |
+
|
| 342 |
+
if use_vllm:
|
| 343 |
+
from vllm import LLM, SamplingParams
|
| 344 |
+
tp = args.tp or torch.cuda.device_count()
|
| 345 |
+
model_path = args.base_model
|
| 346 |
+
|
| 347 |
+
print("[vllm] Preprocessing videos (before model load) ...")
|
| 348 |
+
todo = [item for item in test_data if item["question_id"] not in processed]
|
| 349 |
+
unique_videos = list(dict.fromkeys(item["video_path"] for item in todo))
|
| 350 |
+
from omni_model_loading import parallel_preprocess_videos
|
| 351 |
+
preprocessed, preprocessed_audio, preprocess_failed_paths = parallel_preprocess_videos(
|
| 352 |
+
unique_videos, preprocess_video_for_vllm,
|
| 353 |
+
)
|
| 354 |
+
|
| 355 |
+
n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
|
| 356 |
+
if preprocess_failed_paths:
|
| 357 |
+
print(
|
| 358 |
+
f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
|
| 359 |
+
f"{n_pp_skip} question(s) will not use vLLM (run continues)."
|
| 360 |
+
)
|
| 361 |
+
vllm_preprocess_stats = {
|
| 362 |
+
"preprocess_failed_videos": len(preprocess_failed_paths),
|
| 363 |
+
"preprocess_skipped_questions": n_pp_skip,
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
from omni_model_loading import cap_vllm_max_model_len
|
| 367 |
+
|
| 368 |
+
vllm_max_len = cap_vllm_max_model_len(model_path, args.max_model_len)
|
| 369 |
+
print(f"[vllm] Loading {model_path} with tp={tp} (max_model_len={vllm_max_len}) ...")
|
| 370 |
+
llm = LLM(
|
| 371 |
+
model=model_path,
|
| 372 |
+
tensor_parallel_size=tp,
|
| 373 |
+
max_model_len=vllm_max_len,
|
| 374 |
+
max_num_seqs=4,
|
| 375 |
+
limit_mm_per_prompt={"video": 1, "audio": 1},
|
| 376 |
+
gpu_memory_utilization=args.gpu_memory_utilization,
|
| 377 |
+
dtype="bfloat16",
|
| 378 |
+
trust_remote_code=True,
|
| 379 |
+
)
|
| 380 |
+
sampling_params = SamplingParams(
|
| 381 |
+
temperature=args.temperature if args.temperature > 0 else 0.0,
|
| 382 |
+
top_p=0.9 if args.temperature > 0 else 1.0,
|
| 383 |
+
max_tokens=args.max_new_tokens,
|
| 384 |
+
)
|
| 385 |
+
|
| 386 |
+
vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
|
| 387 |
+
fallback_items = []
|
| 388 |
+
print(f"[vllm] {len(vllm_todo)} questions ready, running inference ...")
|
| 389 |
+
|
| 390 |
+
for i, item in enumerate(vllm_todo):
|
| 391 |
+
if item["question_id"] in processed:
|
| 392 |
+
continue
|
| 393 |
+
inp = {
|
| 394 |
+
"prompt": build_vllm_prompt(item["prompt"], args.base_model),
|
| 395 |
+
"multi_modal_data": {"video": preprocessed[item["video_path"]]},
|
| 396 |
+
}
|
| 397 |
+
if item["video_path"] in preprocessed_audio:
|
| 398 |
+
inp["multi_modal_data"]["audio"] = preprocessed_audio[item["video_path"]]
|
| 399 |
+
try:
|
| 400 |
+
outputs = llm.generate([inp], sampling_params=sampling_params)
|
| 401 |
+
raw_output = outputs[0].outputs[0].text.strip()
|
| 402 |
+
pred = extract_answer(raw_output)
|
| 403 |
+
result = {
|
| 404 |
+
"question_id": item["question_id"],
|
| 405 |
+
"video_id": item["video_id"],
|
| 406 |
+
"duration": item["duration"],
|
| 407 |
+
"domain": item["domain"],
|
| 408 |
+
"sub_category": item["sub_category"],
|
| 409 |
+
"task_type": item["task_type"],
|
| 410 |
+
"gt_answer": item["gt_answer"],
|
| 411 |
+
"pred_answer": pred,
|
| 412 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 413 |
+
"raw_output": raw_output,
|
| 414 |
+
}
|
| 415 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 416 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 417 |
+
processed.add(item["question_id"])
|
| 418 |
+
except (ValueError, RuntimeError) as exc:
|
| 419 |
+
if "longer than the maximum model length" in str(exc):
|
| 420 |
+
print(f" [too long] {item['question_id']} -> fallback")
|
| 421 |
+
fallback_items.append(item)
|
| 422 |
+
else:
|
| 423 |
+
raise
|
| 424 |
+
|
| 425 |
+
if (i + 1) % 100 == 0:
|
| 426 |
+
print(f" [vllm] [{i+1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")
|
| 427 |
+
|
| 428 |
+
preprocessed.clear()
|
| 429 |
+
preprocessed_audio.clear()
|
| 430 |
+
|
| 431 |
+
vllm_results = []
|
| 432 |
+
if results_jsonl.exists():
|
| 433 |
+
with open(results_jsonl) as f:
|
| 434 |
+
for line in f:
|
| 435 |
+
vllm_results.append(json.loads(line))
|
| 436 |
+
if vllm_results:
|
| 437 |
+
vllm_metrics = compute_metrics(vllm_results)
|
| 438 |
+
vllm_metrics_path = out_dir / "metrics_vllm.json"
|
| 439 |
+
with open(vllm_metrics_path, "w", encoding="utf-8") as f:
|
| 440 |
+
json.dump(vllm_metrics, f, indent=2, ensure_ascii=False)
|
| 441 |
+
print(f"[vllm] Intermediate metrics saved to {vllm_metrics_path}")
|
| 442 |
+
print_summary(vllm_metrics, label + " (vllm only)")
|
| 443 |
+
|
| 444 |
+
if fallback_items:
|
| 445 |
+
print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
|
| 446 |
+
del llm
|
| 447 |
+
gc.collect()
|
| 448 |
+
torch.cuda.empty_cache()
|
| 449 |
+
|
| 450 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 451 |
+
for item in tqdm(fallback_items, desc="Fallback", unit="q"):
|
| 452 |
+
if item["question_id"] in processed:
|
| 453 |
+
continue
|
| 454 |
+
try:
|
| 455 |
+
raw_output = run_inference(
|
| 456 |
+
model, processor, item["video_path"], item["prompt"],
|
| 457 |
+
args.max_new_tokens, args.temperature,
|
| 458 |
+
)
|
| 459 |
+
except Exception as exc:
|
| 460 |
+
import traceback
|
| 461 |
+
print(f" [error] {item['question_id']}: {exc}")
|
| 462 |
+
traceback.print_exc()
|
| 463 |
+
raw_output = ""
|
| 464 |
+
|
| 465 |
+
pred = extract_answer(raw_output)
|
| 466 |
+
result = {
|
| 467 |
+
"question_id": item["question_id"],
|
| 468 |
+
"video_id": item["video_id"],
|
| 469 |
+
"duration": item["duration"],
|
| 470 |
+
"domain": item["domain"],
|
| 471 |
+
"sub_category": item["sub_category"],
|
| 472 |
+
"task_type": item["task_type"],
|
| 473 |
+
"gt_answer": item["gt_answer"],
|
| 474 |
+
"pred_answer": pred,
|
| 475 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 476 |
+
"raw_output": raw_output,
|
| 477 |
+
}
|
| 478 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 479 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 480 |
+
processed.add(item["question_id"])
|
| 481 |
+
gc.collect()
|
| 482 |
+
torch.cuda.empty_cache()
|
| 483 |
+
|
| 484 |
+
else:
|
| 485 |
+
print("[model] Loading model...")
|
| 486 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 487 |
+
|
| 488 |
+
for item in tqdm(test_data, desc="Video-MME", unit="q"):
|
| 489 |
+
if item["question_id"] in processed:
|
| 490 |
+
continue
|
| 491 |
+
|
| 492 |
+
try:
|
| 493 |
+
raw_output = run_inference(
|
| 494 |
+
model, processor, item["video_path"], item["prompt"],
|
| 495 |
+
args.max_new_tokens, args.temperature,
|
| 496 |
+
)
|
| 497 |
+
except Exception as exc:
|
| 498 |
+
import traceback
|
| 499 |
+
print(f" [error] {item['question_id']}: {exc}")
|
| 500 |
+
traceback.print_exc()
|
| 501 |
+
raw_output = ""
|
| 502 |
+
|
| 503 |
+
pred = extract_answer(raw_output)
|
| 504 |
+
|
| 505 |
+
result = {
|
| 506 |
+
"question_id": item["question_id"],
|
| 507 |
+
"video_id": item["video_id"],
|
| 508 |
+
"duration": item["duration"],
|
| 509 |
+
"domain": item["domain"],
|
| 510 |
+
"sub_category": item["sub_category"],
|
| 511 |
+
"task_type": item["task_type"],
|
| 512 |
+
"gt_answer": item["gt_answer"],
|
| 513 |
+
"pred_answer": pred,
|
| 514 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 515 |
+
"raw_output": raw_output,
|
| 516 |
+
}
|
| 517 |
+
|
| 518 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 519 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 520 |
+
|
| 521 |
+
processed.add(item["question_id"])
|
| 522 |
+
gc.collect()
|
| 523 |
+
torch.cuda.empty_cache()
|
| 524 |
+
|
| 525 |
+
all_results = []
|
| 526 |
+
if results_jsonl.exists():
|
| 527 |
+
with open(results_jsonl) as f:
|
| 528 |
+
for line in f:
|
| 529 |
+
all_results.append(json.loads(line))
|
| 530 |
+
|
| 531 |
+
if not all_results:
|
| 532 |
+
print("[warn] No results to compute metrics from.")
|
| 533 |
+
return
|
| 534 |
+
|
| 535 |
+
metrics = compute_metrics(all_results)
|
| 536 |
+
metrics["eval_config"] = {
|
| 537 |
+
"base_model": args.base_model,
|
| 538 |
+
"adapter": args.adapter,
|
| 539 |
+
"video_dir": str(args.video_dir),
|
| 540 |
+
"max_new_tokens": args.max_new_tokens,
|
| 541 |
+
"temperature": args.temperature,
|
| 542 |
+
}
|
| 543 |
+
if vllm_preprocess_stats is not None:
|
| 544 |
+
metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats
|
| 545 |
+
|
| 546 |
+
with open(metrics_json, "w", encoding="utf-8") as f:
|
| 547 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 548 |
+
|
| 549 |
+
print_summary(metrics, label)
|
| 550 |
+
|
| 551 |
+
with open(summary_txt, "w", encoding="utf-8") as f:
|
| 552 |
+
import io, contextlib
|
| 553 |
+
buf = io.StringIO()
|
| 554 |
+
with contextlib.redirect_stdout(buf):
|
| 555 |
+
print_summary(metrics, label)
|
| 556 |
+
f.write(buf.getvalue())
|
| 557 |
+
|
| 558 |
+
print(f"\n[output] Results: {results_jsonl}")
|
| 559 |
+
print(f"[output] Metrics: {metrics_json}")
|
| 560 |
+
print(f"[output] Summary: {summary_txt}")
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
if __name__ == "__main__":
|
| 564 |
+
main()
|
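compute_metrics() and print_summary() above consume only the saved JSONL rows, so the Video-MME breakdown can be recomputed offline without re-running inference. A minimal sketch, assuming supp/ is on sys.path so the file imports as eval_videomme; the results path and label are illustrative:

# Minimal offline recompute; module name, path and label are assumptions, not fixed by the repo.
import json
from pathlib import Path

from eval_videomme import compute_metrics, print_summary

results_path = Path("./eval_results/videomme/my_run/eval_results.jsonl")  # illustrative
results = [json.loads(line) for line in results_path.read_text().splitlines() if line.strip()]

metrics = compute_metrics(results)  # overall, per-duration, per-domain, per-task-type accuracy
if metrics:
    print_summary(metrics, "my_run (offline)")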
supp/eval_worldsense.py
ADDED
|
@@ -0,0 +1,591 @@
|
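load_worldsense() below resolves each row's relative video_path against --data-dir and drops rows whose video is missing (it only reports a count), so it can be worth checking coverage before a long run. A minimal sketch; the paths mirror the loader's defaults and nothing here is part of the script itself:

# Minimal coverage check; paths are illustrative and mirror load_worldsense() below.
from pathlib import Path
import pandas as pd

data_dir = Path("./data/worldsense")  # same default as --data-dir
df = pd.read_parquet(data_dir / "data" / "test-00000-of-00001.parquet")

missing = [row["video"] for _, row in df.iterrows()
           if not (data_dir / row["video_path"].lstrip("./")).exists()]
print(f"{len(df) - len(missing)} / {len(df)} videos present, {len(missing)} missing")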
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import argparse
|
| 5 |
+
import ctypes
|
| 6 |
+
import gc
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import site
|
| 11 |
+
import tempfile
|
| 12 |
+
import shutil
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from typing import Any, Dict, List, Optional
|
| 15 |
+
|
| 16 |
+
_npp_lib = Path(site.getsitepackages()[0]) / "nvidia" / "npp" / "lib"
|
| 17 |
+
_npp_so = _npp_lib / "libnppicc.so.12"
|
| 18 |
+
if _npp_so.is_file():
|
| 19 |
+
ctypes.CDLL(str(_npp_so), mode=ctypes.RTLD_GLOBAL)
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
|
| 24 |
+
DEFAULT_DATA_DIR = Path("./data/worldsense")
|
| 25 |
+
DEFAULT_OUTPUT_DIR = Path("./eval_results/worldsense")
|
| 26 |
+
|
| 27 |
+
DURATIONS = ["<1min", "1-2min", "2-4min", "4-6min", "6-8min", ">8min"]
|
| 28 |
+
DOMAINS = [
|
| 29 |
+
"Music", "Culture & Politics", "Tech & Science", "Daily Life",
|
| 30 |
+
"Film & TV", "Sports", "Performance", "Games",
|
| 31 |
+
]
|
| 32 |
+
TASK_DOMAINS = ["Understanding", "Reasoning", "Recognition"]
|
| 33 |
+
TASK_TYPES = [
|
| 34 |
+
"Action Counting", "Anomaly Recognition", "Attribute Reasoning",
|
| 35 |
+
"Attribute Recognition", "Audio Change", "Audio Counting",
|
| 36 |
+
"Audio Recognition", "Audio Source Localization", "Causal Reasoning",
|
| 37 |
+
"Emotion Change", "Event Recognition", "Event Sorting",
|
| 38 |
+
"Hallucination", "Human Emotions", "Human Interaction",
|
| 39 |
+
"Human-object Interaction", "Object Counting",
|
| 40 |
+
"Object Existence Recognition", "Object State Change",
|
| 41 |
+
"Relation Reasoning", "Scene Recognition", "Spatial Relation",
|
| 42 |
+
"Temporal Localization", "Temporal Prediction",
|
| 43 |
+
"Text and Diagram Understanding", "Video Emotions",
|
| 44 |
+
]
|
| 45 |
+
|
| 46 |
+
MCQ_PROMPT = (
|
| 47 |
+
"Select the best answer to the following multiple-choice question "
|
| 48 |
+
"based on the video. Respond with only the letter (A, B, C, or D) "
|
| 49 |
+
"of the correct option.\n"
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def parse_args() -> argparse.Namespace:
|
| 54 |
+
p = argparse.ArgumentParser(description="Evaluate on WorldSense benchmark.")
|
| 55 |
+
p.add_argument("--base-model", type=str,
|
| 56 |
+
default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
|
| 57 |
+
p.add_argument("--adapter", type=str, default=None)
|
| 58 |
+
p.add_argument("--data-dir", type=Path, default=DEFAULT_DATA_DIR)
|
| 59 |
+
p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
|
| 60 |
+
p.add_argument("--max-samples", type=int, default=-1)
|
| 61 |
+
p.add_argument("--max-new-tokens", type=int, default=32)
|
| 62 |
+
p.add_argument("--temperature", type=float, default=0.0)
|
| 63 |
+
p.add_argument("--label", type=str, default=None)
|
| 64 |
+
p.add_argument("--vllm", action="store_true", default=False)
|
| 65 |
+
p.add_argument("--tp", type=int, default=None)
|
| 66 |
+
p.add_argument("--batch-size", type=int, default=32)
|
| 67 |
+
p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
|
| 68 |
+
p.add_argument("--max-model-len", type=int, default=65536)
|
| 69 |
+
return p.parse_args()
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def load_model(base_model: str, adapter: Optional[str]):
|
| 73 |
+
from omni_model_loading import load_qwen_omni_model
|
| 74 |
+
|
| 75 |
+
model, processor, _ = load_qwen_omni_model(base_model, adapter)
|
| 76 |
+
return model, processor
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def run_inference(model, processor, video_path: str, prompt: str,
|
| 80 |
+
max_new_tokens: int, temperature: float) -> str:
|
| 81 |
+
from qwen_omni_utils import process_mm_info
|
| 82 |
+
|
| 83 |
+
tmp_dir = tempfile.mkdtemp(prefix="eval_ws_")
|
| 84 |
+
masked_video = os.path.join(tmp_dir, "clip.mp4")
|
| 85 |
+
os.symlink(os.path.abspath(video_path), masked_video)
|
| 86 |
+
|
| 87 |
+
conversation = [
|
| 88 |
+
{
|
| 89 |
+
"role": "user",
|
| 90 |
+
"content": [
|
| 91 |
+
{"type": "video", "video": masked_video},
|
| 92 |
+
{"type": "text", "text": prompt},
|
| 93 |
+
],
|
| 94 |
+
}
|
| 95 |
+
]
|
| 96 |
+
|
| 97 |
+
text = processor.apply_chat_template(
|
| 98 |
+
conversation, add_generation_prompt=True, tokenize=False,
|
| 99 |
+
)
|
| 100 |
+
audios, images, videos = process_mm_info(conversation, use_audio_in_video=True)
|
| 101 |
+
inputs = processor(
|
| 102 |
+
text=text, audio=audios, images=images, videos=videos,
|
| 103 |
+
return_tensors="pt", padding=True, use_audio_in_video=True,
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
model_dtype = next(model.parameters()).dtype
|
| 107 |
+
converted = {}
|
| 108 |
+
for k, v in inputs.items():
|
| 109 |
+
if hasattr(v, "to"):
|
| 110 |
+
v = v.to(model.device)
|
| 111 |
+
if torch.is_floating_point(v):
|
| 112 |
+
v = v.to(model_dtype)
|
| 113 |
+
converted[k] = v
|
| 114 |
+
inputs = converted
|
| 115 |
+
|
| 116 |
+
from omni_model_loading import is_omni_thinker_model
|
| 117 |
+
|
| 118 |
+
is_thinker = is_omni_thinker_model(model)
|
| 119 |
+
if is_thinker:
|
| 120 |
+
gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
|
| 121 |
+
else:
|
| 122 |
+
gen_kwargs = {
|
| 123 |
+
"thinker_max_new_tokens": max_new_tokens,
|
| 124 |
+
"use_audio_in_video": True,
|
| 125 |
+
"return_audio": False,
|
| 126 |
+
"do_sample": temperature > 0,
|
| 127 |
+
}
|
| 128 |
+
if temperature > 0:
|
| 129 |
+
gen_kwargs["temperature"] = temperature
|
| 130 |
+
gen_kwargs["top_p"] = 0.9
|
| 131 |
+
|
| 132 |
+
with torch.inference_mode():
|
| 133 |
+
output_ids = model.generate(**inputs, **gen_kwargs)
|
| 134 |
+
|
| 135 |
+
if isinstance(output_ids, tuple):
|
| 136 |
+
output_ids = output_ids[0]
|
| 137 |
+
|
| 138 |
+
prompt_len = inputs["input_ids"].shape[1]
|
| 139 |
+
response = processor.batch_decode(
|
| 140 |
+
output_ids[:, prompt_len:], skip_special_tokens=True,
|
| 141 |
+
)[0].strip()
|
| 142 |
+
|
| 143 |
+
shutil.rmtree(tmp_dir, ignore_errors=True)
|
| 144 |
+
return response
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def preprocess_video_for_vllm(video_path: str):
|
| 148 |
+
from qwen_omni_utils import process_mm_info
|
| 149 |
+
import numpy as np
|
| 150 |
+
|
| 151 |
+
messages = [{
|
| 152 |
+
"role": "user",
|
| 153 |
+
"content": [
|
| 154 |
+
{"type": "video", "video": video_path, "fps": 2.0, "max_frames": 128},
|
| 155 |
+
{"type": "text", "text": "placeholder"},
|
| 156 |
+
],
|
| 157 |
+
}]
|
| 158 |
+
audios, images, videos = process_mm_info(messages, use_audio_in_video=True)
|
| 159 |
+
video_tensor = videos[0]
|
| 160 |
+
video_np = (video_tensor * 255).byte().numpy()
|
| 161 |
+
audio_tuple = None
|
| 162 |
+
if audios:
|
| 163 |
+
aud = audios[0]
|
| 164 |
+
if isinstance(aud, tuple):
|
| 165 |
+
audio_tuple = (aud[0].numpy() if hasattr(aud[0], "numpy") else np.asarray(aud[0]),
|
| 166 |
+
aud[1])
|
| 167 |
+
elif hasattr(aud, "numpy"):
|
| 168 |
+
audio_tuple = (aud.numpy(), 16000)
|
| 169 |
+
else:
|
| 170 |
+
audio_tuple = (np.asarray(aud), 16000)
|
| 171 |
+
return video_np, audio_tuple
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
SYSTEM_PROMPT = (
|
| 175 |
+
"You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
|
| 176 |
+
"Group, capable of perceiving auditory and visual inputs, as well as "
|
| 177 |
+
"generating text and speech."
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def build_vllm_prompt(question: str, base_model: str) -> str:
|
| 182 |
+
from omni_model_loading import vllm_user_mm_prefix
|
| 183 |
+
|
| 184 |
+
mm = vllm_user_mm_prefix(base_model, include_audio=True)
|
| 185 |
+
return (
|
| 186 |
+
f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
|
| 187 |
+
f"<|im_start|>user\n"
|
| 188 |
+
f"{mm}"
|
| 189 |
+
f"{question}<|im_end|>\n"
|
| 190 |
+
f"<|im_start|>assistant\n"
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def extract_answer(text: str) -> str:
    text = text.strip()
    prefixes = [
        "The best answer is", "The correct answer is",
        "The answer is", "The answer", "Best answer:", "Best option:",
    ]
    for prefix in prefixes:
        text = text.replace(prefix, "")

    if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
        return ""
    m = re.search(r"[ABCD]", text)
    return m[0] if m else ""

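# Illustrative behaviour of extract_answer (hedged examples, not from the original
# script): answer prefixes are stripped first, the first A-D letter then wins, and a
# long free-form answer containing no option letter is treated as unanswered.
#   "The best answer is (B) because the speech matches."                  -> "B"
#   "A. The man is playing the guitar."                                   -> "A"
#   "The video mostly shows people walking around a busy market square."  -> ""
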
def load_worldsense(data_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
    import pandas as pd

    parquet_path = data_dir / "data" / "test-00000-of-00001.parquet"
    if not parquet_path.exists():
        from datasets import load_dataset
        ds = load_dataset("lmms-lab/WorldSense", split="test")
        df = ds.to_pandas()
    else:
        df = pd.read_parquet(parquet_path)

    data = []
    skipped = 0
    for idx, row in df.iterrows():
        video_rel = row["video_path"]
        video_path = data_dir / video_rel.lstrip("./")
        if not video_path.exists():
            skipped += 1
            continue

        candidates = list(row["candidates"])
        options_text = "\n".join(candidates)
        prompt = MCQ_PROMPT + row["question"] + "\n" + options_text + "\nThe best answer is:"

        data.append({
            "question_id": f"ws_{row['index']}",
            "video_id": row["video"],
            "video_path": str(video_path),
            "duration": row["duration"],
            "domain": row["domain"],
            "sub_category": row["sub_category"],
            "task_domain": row["task_domain"],
            "task_type": row["task_type"],
            "question": row["question"],
            "candidates": candidates,
            "gt_answer": row["answer"],
            "prompt": prompt,
        })
    if skipped:
        print(f"[data] Skipped {skipped} questions (video not found)")
    if max_samples > 0:
        data = data[:max_samples]
    return data

def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    total = len(results)
    if total == 0:
        return {}

    correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
    overall_acc = correct / total

    def acc_for(items):
        if not items:
            return None
        c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
        return round(c / len(items), 4)

    per_duration = {}
    for dur in DURATIONS:
        subset = [r for r in results if r["duration"] == dur]
        if subset:
            per_duration[dur] = {"accuracy": acc_for(subset), "count": len(subset)}

    per_domain = {}
    for dom in DOMAINS:
        subset = [r for r in results if r["domain"] == dom]
        if subset:
            per_domain[dom] = {"accuracy": acc_for(subset), "count": len(subset)}

    per_task_domain = {}
    for td in TASK_DOMAINS:
        subset = [r for r in results if r.get("task_domain") == td]
        if subset:
            per_task_domain[td] = {"accuracy": acc_for(subset), "count": len(subset)}

    per_task_type = {}
    for tt in TASK_TYPES:
        subset = [r for r in results if r["task_type"] == tt]
        if subset:
            per_task_type[tt] = {"accuracy": acc_for(subset), "count": len(subset)}

    return {
        "total_samples": total,
        "overall_accuracy": round(overall_acc, 4),
        "per_duration": per_duration,
        "per_domain": per_domain,
        "per_task_domain": per_task_domain,
        "per_task_type": per_task_type,
    }

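# Sketch of the dict compute_metrics returns (hypothetical numbers, placeholder keys):
# {
#   "total_samples": 200,
#   "overall_accuracy": 0.455,
#   "per_duration": {"<duration bucket>": {"accuracy": 0.52, "count": 60}, ...},
#   "per_domain": {...}, "per_task_domain": {...}, "per_task_type": {...},
# }
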
def print_summary(metrics: Dict[str, Any], label: str) -> None:
    print()
    print(f"{'=' * 65}")
    print(f" WorldSense Summary: {label}")
    print(f"{'=' * 65}")
    print(f" Total samples: {metrics['total_samples']}")
    print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")

    print(f" ─── Per Duration ───")
    for dur in DURATIONS:
        if dur in metrics["per_duration"]:
            d = metrics["per_duration"][dur]
            print(f" {dur:8s}: {d['accuracy']:.1%} ({d['count']} questions)")

    print(f" ─── Per Domain ───")
    for dom in DOMAINS:
        if dom in metrics["per_domain"]:
            d = metrics["per_domain"][dom]
            print(f" {dom:25s}: {d['accuracy']:.1%} ({d['count']})")

    print(f" ─── Per Task Domain ───")
    for td in TASK_DOMAINS:
        if td in metrics["per_task_domain"]:
            d = metrics["per_task_domain"][td]
            print(f" {td:25s}: {d['accuracy']:.1%} ({d['count']})")

    print(f" ─── Per Task Type ───")
    for tt in TASK_TYPES:
        if tt in metrics["per_task_type"]:
            d = metrics["per_task_type"][tt]
            print(f" {tt:30s}: {d['accuracy']:.1%} ({d['count']})")

    print(f"{'=' * 65}")

def main() -> None:
    args = parse_args()
    label = args.label or (
        Path(args.adapter).name if args.adapter
        else Path(args.base_model).name
    )

    out_dir = args.output_dir / label
    out_dir.mkdir(parents=True, exist_ok=True)
    results_jsonl = out_dir / "eval_results.jsonl"
    metrics_json = out_dir / "metrics.json"
    summary_txt = out_dir / "summary.txt"

    print("[data] Loading WorldSense dataset...")
    test_data = load_worldsense(args.data_dir, args.max_samples)
    print(f"[data] {len(test_data)} questions ready for evaluation")

    processed = set()
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                obj = json.loads(line)
                processed.add(obj["question_id"])
        print(f"[resume] {len(processed)} already processed, skipping")

    use_vllm = args.vllm
    model = processor = llm = None
    vllm_preprocess_stats: Dict[str, int] | None = None

    if use_vllm:
        from vllm import LLM, SamplingParams
        tp = args.tp or torch.cuda.device_count()
        model_path = args.base_model

        print("[vllm] Preprocessing videos (before model load) ...")
        todo = [item for item in test_data if item["question_id"] not in processed]
        unique_videos = list(dict.fromkeys(item["video_path"] for item in todo))
        from omni_model_loading import parallel_preprocess_videos
        preprocessed, preprocessed_audio, preprocess_failed_paths = parallel_preprocess_videos(
            unique_videos, preprocess_video_for_vllm,
        )

        n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
        if preprocess_failed_paths:
            print(
                f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
                f"{n_pp_skip} question(s) will not use vLLM."
            )
            vllm_preprocess_stats = {
                "preprocess_failed_videos": len(preprocess_failed_paths),
                "preprocess_skipped_questions": n_pp_skip,
            }

        from omni_model_loading import cap_vllm_max_model_len

        vllm_max_len = cap_vllm_max_model_len(model_path, args.max_model_len)
        print(f"[vllm] Loading {model_path} with tp={tp} (max_model_len={vllm_max_len}) ...")
        llm = LLM(
            model=model_path,
            tensor_parallel_size=tp,
            max_model_len=vllm_max_len,
            max_num_seqs=4,
            limit_mm_per_prompt={"video": 1, "audio": 1},
            gpu_memory_utilization=args.gpu_memory_utilization,
            dtype="bfloat16",
            trust_remote_code=True,
        )
        sampling_params = SamplingParams(
            temperature=args.temperature if args.temperature > 0 else 0.0,
            top_p=0.9 if args.temperature > 0 else 1.0,
            max_tokens=args.max_new_tokens,
        )

        vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
        fallback_items = []
        print(f"[vllm] {len(vllm_todo)} questions ready, running inference ...")

        for i, item in enumerate(vllm_todo):
            if item["question_id"] in processed:
                continue
            inp = {
                "prompt": build_vllm_prompt(item["prompt"], args.base_model),
                "multi_modal_data": {"video": preprocessed[item["video_path"]]},
            }
            if item["video_path"] in preprocessed_audio:
                inp["multi_modal_data"]["audio"] = preprocessed_audio[item["video_path"]]
            try:
                outputs = llm.generate([inp], sampling_params=sampling_params)
                raw_output = outputs[0].outputs[0].text.strip()
                pred = extract_answer(raw_output)
                result = {
                    "question_id": item["question_id"],
                    "video_id": item["video_id"],
                    "duration": item["duration"],
                    "domain": item["domain"],
                    "sub_category": item["sub_category"],
                    "task_domain": item["task_domain"],
                    "task_type": item["task_type"],
                    "gt_answer": item["gt_answer"],
                    "pred_answer": pred,
                    "correct": pred.upper() == item["gt_answer"].upper(),
                    "raw_output": raw_output,
                }
                with open(results_jsonl, "a", encoding="utf-8") as f:
                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
                processed.add(item["question_id"])
            except (ValueError, RuntimeError) as exc:
                if "longer than the maximum model length" in str(exc):
                    print(f" [too long] {item['question_id']} -> fallback")
                    fallback_items.append(item)
                else:
                    raise

            if (i + 1) % 100 == 0:
                print(f" [vllm] [{i+1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")

        preprocessed.clear()
        preprocessed_audio.clear()

        vllm_results = []
        if results_jsonl.exists():
            with open(results_jsonl) as f:
                for line in f:
                    vllm_results.append(json.loads(line))
        if vllm_results:
            vllm_metrics = compute_metrics(vllm_results)
            vllm_metrics_path = out_dir / "metrics_vllm.json"
            with open(vllm_metrics_path, "w", encoding="utf-8") as f:
                json.dump(vllm_metrics, f, indent=2, ensure_ascii=False)
            print(f"[vllm] Intermediate metrics saved to {vllm_metrics_path}")
            print_summary(vllm_metrics, label + " (vllm only)")

        if fallback_items:
            print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
            del llm
            gc.collect()
            torch.cuda.empty_cache()

            model, processor = load_model(args.base_model, args.adapter)
            for item in tqdm(fallback_items, desc="Fallback", unit="q"):
                if item["question_id"] in processed:
                    continue
                try:
                    raw_output = run_inference(
                        model, processor, item["video_path"], item["prompt"],
                        args.max_new_tokens, args.temperature,
                    )
                except Exception as exc:
                    import traceback
                    print(f" [error] {item['question_id']}: {exc}")
                    traceback.print_exc()
                    raw_output = ""

                pred = extract_answer(raw_output)
                result = {
                    "question_id": item["question_id"],
                    "video_id": item["video_id"],
                    "duration": item["duration"],
                    "domain": item["domain"],
                    "sub_category": item["sub_category"],
                    "task_domain": item["task_domain"],
                    "task_type": item["task_type"],
                    "gt_answer": item["gt_answer"],
                    "pred_answer": pred,
                    "correct": pred.upper() == item["gt_answer"].upper(),
                    "raw_output": raw_output,
                }
                with open(results_jsonl, "a", encoding="utf-8") as f:
                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
                processed.add(item["question_id"])
                gc.collect()
                torch.cuda.empty_cache()

    else:
        print("[model] Loading model...")
        model, processor = load_model(args.base_model, args.adapter)

        for item in tqdm(test_data, desc="WorldSense", unit="q"):
            if item["question_id"] in processed:
                continue

            try:
                raw_output = run_inference(
                    model, processor, item["video_path"], item["prompt"],
                    args.max_new_tokens, args.temperature,
                )
            except Exception as exc:
                import traceback
                print(f" [error] {item['question_id']}: {exc}")
                traceback.print_exc()
                raw_output = ""

            pred = extract_answer(raw_output)

            result = {
                "question_id": item["question_id"],
                "video_id": item["video_id"],
                "duration": item["duration"],
                "domain": item["domain"],
                "sub_category": item["sub_category"],
                "task_domain": item["task_domain"],
                "task_type": item["task_type"],
                "gt_answer": item["gt_answer"],
                "pred_answer": pred,
                "correct": pred.upper() == item["gt_answer"].upper(),
                "raw_output": raw_output,
            }

            with open(results_jsonl, "a", encoding="utf-8") as f:
                f.write(json.dumps(result, ensure_ascii=False) + "\n")

            processed.add(item["question_id"])
            gc.collect()
            torch.cuda.empty_cache()

    all_results = []
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                all_results.append(json.loads(line))

    if not all_results:
        print("[warn] No results to compute metrics from.")
        return

    metrics = compute_metrics(all_results)
    metrics["eval_config"] = {
        "base_model": args.base_model,
        "adapter": args.adapter,
        "data_dir": str(args.data_dir),
        "max_new_tokens": args.max_new_tokens,
        "temperature": args.temperature,
    }
    if vllm_preprocess_stats is not None:
        metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats

    with open(metrics_json, "w", encoding="utf-8") as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)

    print_summary(metrics, label)

    with open(summary_txt, "w", encoding="utf-8") as f:
        import io, contextlib
        buf = io.StringIO()
        with contextlib.redirect_stdout(buf):
            print_summary(metrics, label)
        f.write(buf.getvalue())

    print(f"\n[output] Results: {results_jsonl}")
    print(f"[output] Metrics: {metrics_json}")
    print(f"[output] Summary: {summary_txt}")


if __name__ == "__main__":
    main()
sync_split_acc.py
ADDED
@@ -0,0 +1,176 @@
"""Print sync accuracy split into:
|
| 2 |
+
- original (gt_synced=True) : did the model correctly say 'synced'?
|
| 3 |
+
- shifted (gt_direction=delay/early): did the model correctly call it desync?
|
| 4 |
+
(and, separately, get the direction right?)
|
| 5 |
+
|
| 6 |
+
Usage:
|
| 7 |
+
python3 /home/ubuntu/sync_split_acc.py <eval_results.jsonl OR its parent dir> [more...]
|
| 8 |
+
|
| 9 |
+
Examples:
|
| 10 |
+
python3 /home/ubuntu/sync_split_acc.py ~/eval_results/sync/sync_qwen3omni_vanilla
|
| 11 |
+
python3 /home/ubuntu/sync_split_acc.py ~/eval_results/sync/sync_qwen3omni_vanilla/eval_results.jsonl
|
| 12 |
+
python3 /home/ubuntu/sync_split_acc.py ~/eval_results/sync/sync_* # multiple at once
|
| 13 |
+
"""
|
| 14 |
+
import json
|
| 15 |
+
import sys
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
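# Fields read from each eval_results.jsonl row (as used in report() further below):
#   gt_synced (bool), gt_direction ("delay" / "early"), pred_synced (bool),
#   pred_direction, pred_offset_sec, gt_offset_sec.
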
def resolve(arg: str) -> Path:
    """Return either an eval_results.jsonl (preferred) or a metrics.json
    (fallback when per-sample data isn't available)."""
    p = Path(arg).expanduser()
    if p.is_dir():
        jsonl = p / "eval_results.jsonl"
        metrics = p / "metrics.json"
        return jsonl if jsonl.exists() else metrics
    return p

def report_from_metrics(metrics_path: Path) -> None:
    """Fallback: derive original-vs-shifted breakdown from a pre-computed
    metrics.json that has total_samples, sync_desync_accuracy,
    three_class_accuracy, per_category{synced/delay/early _accuracy/_count}."""
    m = json.load(open(metrics_path))
    total = m.get("total_samples")
    pc = m.get("per_category", {})
    if not total or not pc:
        print(f"[skip] {metrics_path} missing fields needed for split")
        return

    n_orig = pc.get("synced_count", 0)
    n_delay = pc.get("delay_count", 0)
    n_early = pc.get("early_count", 0)
    n_shift = n_delay + n_early

    syn_acc = pc.get("synced_accuracy") or 0
    delay_acc = pc.get("delay_accuracy") or 0  # strict: detected AND direction right
    early_acc = pc.get("early_accuracy") or 0  # strict: detected AND direction right

    sync_desync_acc = m.get("sync_desync_accuracy") or 0
    three_class_acc = m.get("three_class_accuracy") or 0

    orig_correct = n_orig * syn_acc  # said 'synced' on originals
    delay_dir_correct = n_delay * delay_acc  # detected + dir right
    early_dir_correct = n_early * early_acc
    shifted_dir_correct = delay_dir_correct + early_dir_correct

    # detected desync (looser, ignores direction): from sync_desync_accuracy.
    # sync_desync_acc = (orig_correct + shifted_detected) / total
    shifted_detected = sync_desync_acc * total - orig_correct

    print("=" * 64)
    print(f" {metrics_path.parent.name} [from metrics.json — no per-sample jsonl]")
    print("=" * 64)
    print(f" total samples : {int(total)}")
    print(f" --- original (gt = synced) ---")
    if n_orig:
        print(f" n : {n_orig}")
        print(f" correctly said 'synced' : {int(round(orig_correct))} / {n_orig} = {syn_acc:.4%}")
    print(f" --- shifted (gt = delay/early) ---")
    print(f" n : {n_shift} (delay={n_delay}, early={n_early})")
    if n_shift:
        print(f" detected desync : {int(round(shifted_detected))} / {n_shift} = "
              f"{shifted_detected / n_shift:.4%}")
        print(f" + got direction right : {int(round(shifted_dir_correct))} / {n_shift} = "
              f"{shifted_dir_correct / n_shift:.4%}")
    if n_delay:
        print(f" delay direction right : {int(round(delay_dir_correct))} / {n_delay} = "
              f"{delay_acc:.4%}")
    if n_early:
        print(f" early direction right : {int(round(early_dir_correct))} / {n_early} = "
              f"{early_acc:.4%}")
    if m.get("offset_mae_sec") is not None:
        print(f" --- offset estimate ---")
        print(f" MAE : {m['offset_mae_sec']:.4f}s "
              f"(n={m.get('offset_evaluated_count', '?')})")
        if m.get("offset_median_sec") is not None:
            print(f" median : {m['offset_median_sec']:.4f}s")
    print("=" * 64)
    print()

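# Worked example of the back-derivation above (hypothetical numbers): with
# total=300, synced_count=100, synced_accuracy=0.90 and sync_desync_accuracy=0.70,
# orig_correct = 100 * 0.90 = 90 and shifted_detected = 0.70 * 300 - 90 = 120,
# i.e. 120 / 200 = 60% of the shifted clips were flagged as desynced.
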
def report(jsonl: Path) -> None:
    if not jsonl.exists():
        print(f"[skip] {jsonl} does not exist")
        return
    if jsonl.name == "metrics.json":
        report_from_metrics(jsonl)
        return
    rows = [json.loads(l) for l in open(jsonl) if l.strip()]
    n = len(rows)
    if n == 0:
        print(f"[skip] {jsonl} is empty")
        return

    orig = [r for r in rows if r["gt_synced"]]
    delay = [r for r in rows if r.get("gt_direction") == "delay"]
    early = [r for r in rows if r.get("gt_direction") == "early"]
    shifted = delay + early

    # On originals: correct iff pred_synced is True.
    orig_correct = sum(1 for r in orig if r["pred_synced"])
    # On shifted: correct iff pred_synced is False (i.e. detected desync).
    shifted_detected = sum(1 for r in shifted if not r["pred_synced"])
    # Stricter: also got the direction right.
    shifted_dir_correct = sum(
        1 for r in shifted
        if not r["pred_synced"] and r.get("pred_direction") == r["gt_direction"]
    )

    print("=" * 64)
    print(f" {jsonl.parent.name}")
    print("=" * 64)
    print(f" total samples : {n}")
    print(f" --- original (gt = synced) ---")
    print(f" n : {len(orig)}")
    if orig:
        print(f" correctly said 'synced' : {orig_correct} / {len(orig)} = "
              f"{(orig_correct / len(orig)):.4%}")
    print(f" --- shifted (gt = delay/early) ---")
    print(f" n : {len(shifted)} (delay={len(delay)}, early={len(early)})")
    if shifted:
        print(f" detected desync : {shifted_detected} / {len(shifted)} = "
              f"{(shifted_detected / len(shifted)):.4%}")
        print(f" + got direction right : {shifted_dir_correct} / {len(shifted)} = "
              f"{(shifted_dir_correct / len(shifted)):.4%}")
    if delay:
        d_det = sum(1 for r in delay if not r["pred_synced"])
        d_dir = sum(1 for r in delay if not r["pred_synced"] and r["pred_direction"] == "delay")
        print(f" delay only detected : {d_det} / {len(delay)} = {d_det / len(delay):.4%}"
              f" (direction right: {d_dir} = {d_dir / len(delay):.4%})")
    if early:
        e_det = sum(1 for r in early if not r["pred_synced"])
        e_dir = sum(1 for r in early if not r["pred_synced"] and r["pred_direction"] == "early")
        print(f" early only detected : {e_det} / {len(early)} = {e_det / len(early):.4%}"
              f" (direction right: {e_dir} = {e_dir / len(early):.4%})")

    # offset MAE on shifted videos that were predicted as desync with a non-zero offset
    errs = [abs(r["pred_offset_sec"] - r["gt_offset_sec"])
            for r in shifted
            if not r["pred_synced"] and r.get("pred_offset_sec", 0) > 0]
    if errs:
        errs.sort()
        med = errs[len(errs) // 2]
        print(f" --- offset estimate on detected shifted ---")
        print(f" MAE : {sum(errs) / len(errs):.4f}s (n={len(errs)})")
        print(f" median : {med:.4f}s")
        print(f" within 0.5s : {sum(1 for e in errs if e <= 0.5)} / {len(errs)} = "
              f"{sum(1 for e in errs if e <= 0.5) / len(errs):.4%}")
        print(f" within 1.0s : {sum(1 for e in errs if e <= 1.0)} / {len(errs)} = "
              f"{sum(1 for e in errs if e <= 1.0) / len(errs):.4%}")
    print("=" * 64)
    print()

def main():
    args = sys.argv[1:]
    if not args:
        print(__doc__)
        sys.exit(1)
    for a in args:
        report(resolve(a))


if __name__ == "__main__":
    main()