"""Re-run Gemini mute on the 6 case-study candidates with NEUTRAL prompt.

Direct prompt failures for Gemini all turned out to be empty responses (API
artifacts), not real audio hallucinations. Neutral prompt ("Describe the audio
you hear") forces an open-ended answer; OpenAI judge classifies whether the
response hallucinates audio content vs correctly reports silence.

Pipeline:
  1. Read the candidate jsonl configured as CANDIDATES below (base videos).
  2. Build a filtered test jsonl (the mute loader dedupes to base originals only).
  3. Run scripts/eval_gemini_mute_sync_swap.py --tasks mute
       --mute-prompt-mode neutral --openai-judge.
  4. Print per-video raw_output + judge classification.

Usage:
  GEMINI_API_KEY=... OPENAI_API_KEY=... python3 /home/ubuntu/case_study_gemini_mute_neutral.py
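
Report-only (skip the Gemini run and re-print existing eval results):
  python3 /home/ubuntu/case_study_gemini_mute_neutral.py --skip-run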
"""

import argparse
import json
import os
import subprocess
import sys
from pathlib import Path

REPO        = Path("/home/ubuntu/CleverHans-Evaluation")
TEST_JSONL  = REPO / "data" / "kto_training_data_v2_test.jsonl"
EVAL_SCRIPT = REPO / "scripts" / "eval_gemini_mute_sync_swap.py"
DATA_ROOT   = Path("/opt/dlami/nvme/video_source")
CANDIDATES  = Path("/home/ubuntu/case_study_pool.jsonl")  # 45-video pool
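# Candidate rows are assumed to carry at least a "video" key whose value matches
# the "video" field in TEST_JSONL; no other fields from the pool file are used.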

OUT_ROOT      = Path("/home/ubuntu/eval_results/case_study_pool")
FILTERED_JSON = OUT_ROOT / "mute_test.jsonl"
LABEL         = "case_study_pool_gemini_mute_neutral"
GEMINI_RESULT = OUT_ROOT / "mute" / LABEL / "eval_results.jsonl"


def load_jsonl(path):
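    """Yield one parsed JSON object per non-empty line of a JSONL file."""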
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)


def step_filter():
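    """Filter TEST_JSONL down to the candidate base videos and write FILTERED_JSON."""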
    bases = {row["video"] for row in load_jsonl(CANDIDATES)}
    print(f"[filter] {len(bases)} base videos from {CANDIDATES}")

    OUT_ROOT.mkdir(parents=True, exist_ok=True)
    kept = []
    for row in load_jsonl(TEST_JSONL):
        v = row["video"]
        if "_delay_" in v or "_early_" in v:
            continue
        if v in bases:
            kept.append(row)

    with open(FILTERED_JSON, "w") as f:
        for row in kept:
            f.write(json.dumps(row, ensure_ascii=False) + "\n")
    print(f"[filter] kept {len(kept)} rows -> {FILTERED_JSON}")
    if len(kept) != len(bases):
        print(f"[warn] expected {len(bases)} rows, got {len(kept)}")


def step_run(gemini_key, openai_key, model, workers):
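    """Run the Gemini mute eval (neutral prompt, OpenAI judge) on FILTERED_JSON."""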
    env = os.environ.copy()
    env["GEMINI_API_KEY"] = gemini_key
    env["OPENAI_API_KEY"] = openai_key

    cmd = [
        sys.executable, str(EVAL_SCRIPT),
        "--tasks", "mute",
        "--model", model,
        "--data-root", str(DATA_ROOT),
        "--test-jsonl", str(FILTERED_JSON),
        "--output-dir", str(OUT_ROOT),
        "--label", LABEL,
        "--mute-prompt-mode", "neutral",
        "--openai-judge",
        "--workers", str(workers),
    ]
    print(f"[run] {' '.join(cmd)}")
    subprocess.run(cmd, env=env, check=True)


def step_report():
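    """Print each video's raw model output and judge classification."""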
    if not GEMINI_RESULT.exists():
        sys.exit(f"[error] expected results at {GEMINI_RESULT}")
    rows = list(load_jsonl(GEMINI_RESULT))
    print(f"\n[report] {len(rows)} mute samples (neutral, OpenAI judged):")
    for r in rows:
        v = r["video"]
        pred = r.get("pred_label")
        correct = r.get("correct")
        raw = (r.get("raw_output") or "").strip().replace("\n", " ")[:240]
        je = (r.get("judge_explanation") or "").strip()[:120]
        print(f"\n  VIDEO: {v}")
        print(f"    pred={pred}  correct={correct}")
        print(f"    raw : {raw!r}")
        if je:
            print(f"    judge: {je}")


def main():
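    """Build the filtered jsonl, optionally run the eval, then print the report."""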
    p = argparse.ArgumentParser()
    p.add_argument("--gemini-key", default=os.environ.get("GEMINI_API_KEY"))
    p.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
    p.add_argument("--model", default="gemini-3.1-pro-preview")
    p.add_argument("--workers", type=int, default=4)
    p.add_argument("--skip-run", action="store_true")
    args = p.parse_args()

    step_filter()
    if not args.skip_run:
        if not args.gemini_key:
            sys.exit("[error] need --gemini-key or GEMINI_API_KEY env")
        if not args.openai_key:
            sys.exit("[error] need --openai-key or OPENAI_API_KEY env")
        step_run(args.gemini_key, args.openai_key, args.model, args.workers)
    step_report()


if __name__ == "__main__":
    main()