| """Re-test the 12 case-study candidates' sync variants on Gemini. |
| |
| Pipeline: |
1. Read /home/ubuntu/case_study_pool.jsonl (12 base videos).
| 2. Filter the master test jsonl to the 36 rows that belong to those 12 bases |
| (each base + its delay + early variant). |
| 3. Invoke scripts/eval_gemini_mute_sync_swap.py --task sync --openai-parse-sync. |
| 4. Read the Gemini sync results and report which of the 36 are wrong, using |
| the same correctness rule as find_case_study.py: |
| wrong = (gt_synced != pred_synced) OR |
| (not gt_synced AND pred_direction != gt_direction) |
| |
| Usage: |
| GEMINI_API_KEY=... OPENAI_API_KEY=... python3 /home/ubuntu/case_study_gemini_sync.py |
| or pass --gemini-key / --openai-key. |
| """ |
|
|
| import argparse |
| import json |
| import os |
| import re |
| import subprocess |
| import sys |
| from pathlib import Path |
|
|
# Repo checkout and fixed inputs.
REPO = Path("/home/ubuntu/CleverHans-Evaluation")
TEST_JSONL = REPO / "data" / "kto_training_data_v2_test.jsonl"  # master test jsonl
EVAL_SCRIPT = REPO / "scripts" / "eval_gemini_mute_sync_swap.py"  # invoked via subprocess
DATA_ROOT = Path("/opt/dlami/nvme/video_source")  # passed as --data-root to the eval script
CANDIDATES = Path("/home/ubuntu/case_study_pool.jsonl")  # candidate base videos (12 expected)

# Outputs. GEMINI_RESULT is where the eval script is expected to leave its
# results, given --output-dir OUT_ROOT and --label LABEL.
OUT_ROOT = Path("/home/ubuntu/eval_results/case_study_pool")
FILTERED_JSON = OUT_ROOT / "sync_test.jsonl"  # filtered subset of TEST_JSONL
LABEL = "case_study_pool_gemini_sync"
GEMINI_RESULT = OUT_ROOT / "sync" / LABEL / "eval_results.jsonl"
WRONG_OUT = OUT_ROOT / "gemini_wrong.jsonl"  # rows Gemini got wrong
|
|
# Matches the "_delay_<n>s" / "_early_<n>s" marker that sync variants carry
# immediately before the ".mp4" extension (lookahead keeps ".mp4" intact).
SYNC_SUFFIX_RE = re.compile(r"_(delay|early)_\d+(?:\.\d+)?s(?=\.mp4$)")


def base_of(name: str) -> str:
    """Return *name* with any trailing delay/early sync marker removed."""
    without_marker = SYNC_SUFFIX_RE.sub("", name)
    return without_marker
|
|
|
|
def load_jsonl(path):
    """Yield one parsed JSON object per non-blank line of *path*.

    Opens the file as UTF-8 explicitly: the writers in this script use
    ``ensure_ascii=False``, so the data may contain non-ASCII characters
    that would fail to decode under a non-UTF-8 default locale.
    """
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:  # skip blank / whitespace-only lines
                yield json.loads(line)
|
|
|
|
def step_filter():
    """Keep only test rows whose base video is a case-study candidate.

    Reads the candidate bases from CANDIDATES, filters TEST_JSONL down to
    the rows whose suffix-stripped video name matches one of those bases,
    and writes the subset to FILTERED_JSON. Warns if the count is not
    exactly 3 rows per base (base + delay + early variants).
    """
    bases = {row["video"] for row in load_jsonl(CANDIDATES)}
    print(f"[filter] {len(bases)} base videos from {CANDIDATES}")

    OUT_ROOT.mkdir(parents=True, exist_ok=True)
    kept = [row for row in load_jsonl(TEST_JSONL)
            if base_of(row["video"]) in bases]

    # utf-8 explicitly: ensure_ascii=False can emit non-ASCII characters,
    # which would crash the write under a non-UTF-8 default locale.
    with open(FILTERED_JSON, "w", encoding="utf-8") as f:
        for row in kept:
            f.write(json.dumps(row, ensure_ascii=False) + "\n")
    print(f"[filter] kept {len(kept)} rows -> {FILTERED_JSON}")
    if len(kept) != 3 * len(bases):
        print(f"[warn] expected {3*len(bases)} rows (3 per base), got {len(kept)}")
|
|
|
|
def step_run_gemini(gemini_key: str, openai_key: str, model: str, workers: int):
    """Shell out to the repo's eval script for the sync task on FILTERED_JSON.

    The API keys travel via the child environment rather than argv, so they
    never appear in the printed command or the process list.
    """
    env = dict(os.environ, GEMINI_API_KEY=gemini_key, OPENAI_API_KEY=openai_key)

    option_pairs = [
        ("--task", "sync"),
        ("--model", model),
        ("--data-root", str(DATA_ROOT)),
        ("--test-jsonl", str(FILTERED_JSON)),
        ("--output-dir", str(OUT_ROOT)),
        ("--label", LABEL),
    ]
    cmd = [sys.executable, str(EVAL_SCRIPT)]
    for flag, value in option_pairs:
        cmd.extend((flag, value))
    cmd.append("--openai-parse-sync")
    cmd.extend(("--workers", str(workers)))

    print(f"[run] {' '.join(cmd)}")
    subprocess.run(cmd, env=env, check=True)
|
|
|
|
def step_report():
    """Score the Gemini sync results and report failures per base video.

    Reads GEMINI_RESULT, flags each row as wrong using the same rule as
    find_case_study.py (synced-flag mismatch, or wrong direction on an
    unsynced clip), prints a per-base breakdown plus summary counts, and
    saves the wrong rows to WRONG_OUT as jsonl.
    """
    if not GEMINI_RESULT.exists():
        sys.exit(f"[error] expected results at {GEMINI_RESULT}; eval did not produce one")

    rows = list(load_jsonl(GEMINI_RESULT))
    wrong = []
    for r in rows:
        gt_synced = bool(r["gt_synced"])
        pred_synced = bool(r["pred_synced"])
        # wrong = synced-flag mismatch, OR (clip not synced) direction mismatch
        is_wrong = (gt_synced != pred_synced) or (
            (not gt_synced) and r.get("pred_direction") != r.get("gt_direction")
        )
        if is_wrong:
            wrong.append({
                "video": r["video"],
                "base": base_of(r["video"]),
                "gt": (r["gt_direction"], r["gt_offset_sec"]),
                "pred_synced": pred_synced,
                "pred": (r.get("pred_direction"), r.get("pred_offset_sec")),
                "raw": r.get("raw_output", "")[:200],  # keep only a short excerpt
            })

    by_base = {}
    for w in wrong:
        by_base.setdefault(w["base"], []).append(w)

    print(f"\n[report] {len(rows)} sync samples evaluated, {len(wrong)} wrong")
    print("[report] wrong distribution by base video:")
    for base in sorted(by_base):
        hits = by_base[base]
        print(f"\n {base} ({len(hits)} variant(s) wrong)")
        for h in hits:
            print(f" - {h['video']}")
            print(f" gt={h['gt']} pred_synced={h['pred_synced']} pred={h['pred']}")

    # utf-8 explicitly: ensure_ascii=False can emit non-ASCII characters,
    # which would crash the write under a non-UTF-8 default locale.
    with open(WRONG_OUT, "w", encoding="utf-8") as f:
        for w in wrong:
            f.write(json.dumps(w, ensure_ascii=False) + "\n")
    print(f"\n[saved] wrong rows -> {WRONG_OUT}")

    bases_total = {base_of(r["video"]) for r in rows}
    bases_any_wrong = set(by_base)
    bases_all_three_wrong = {b for b, hs in by_base.items()
                             if len(hs) == 3}
    print("\n[summary]")
    print(f" candidates with >=1 wrong sync : {len(bases_any_wrong)} / {len(bases_total)}")
    print(f" candidates with all 3 wrong : {len(bases_all_three_wrong)} / {len(bases_total)}")
    if bases_all_three_wrong:
        print(" perfect-fail candidates:")
        for b in sorted(bases_all_three_wrong):
            print(f" - {b}")
|
|
|
|
def main():
    """CLI entry point: filter the test set, optionally run Gemini, report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--gemini-key", default=os.environ.get("GEMINI_API_KEY"))
    parser.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
    parser.add_argument("--model", default="gemini-3.1-pro-preview")
    parser.add_argument("--workers", type=int, default=4)
    parser.add_argument(
        "--skip-run",
        action="store_true",
        help="Skip the Gemini call (use existing results to re-report).",
    )
    args = parser.parse_args()

    step_filter()
    if not args.skip_run:
        # Keys are only required when we actually call the eval script.
        if not args.gemini_key:
            sys.exit("[error] need --gemini-key or GEMINI_API_KEY env")
        if not args.openai_key:
            sys.exit("[error] need --openai-key or OPENAI_API_KEY env")
        step_run_gemini(args.gemini_key, args.openai_key, args.model, args.workers)
    step_report()
|
|
|
|
# Script entry point.
if __name__ == "__main__":
    main()
|
|