# miscellaneous / case_study_gemini_mute_neutral.py
# Rakancorle11's picture
# Upload supp/ and root scripts
# 203a7fb verified
"""Re-run Gemini mute on the 6 case-study candidates with NEUTRAL prompt.
Direct prompt failures for Gemini all turned out to be empty responses (API
artifacts), not real audio hallucinations. Neutral prompt ("Describe the audio
you hear") forces an open-ended answer; OpenAI judge classifies whether the
response hallucinates audio content vs correctly reports silence.
Pipeline:
1. Read the candidate pool jsonl (CANDIDATES below, currently the 45-video
/home/ubuntu/case_study_pool.jsonl) to get the base videos.
2. Build a filtered test jsonl (mute loader dedupes to base originals only).
3. Run scripts/eval_gemini_mute_sync_swap.py --tasks mute
--mute-prompt-mode neutral --openai-judge.
4. Print per-video raw_output + judge classification.
Usage:
GEMINI_API_KEY=... OPENAI_API_KEY=... python3 /home/ubuntu/case_study_gemini_mute_neutral.py
"""
import argparse
import json
import os
import subprocess
import sys
from pathlib import Path
# Repository checkout containing the eval script and test data.
REPO = Path("/home/ubuntu/CleverHans-Evaluation")
# Full test split; step_filter() narrows it to the candidate pool.
TEST_JSONL = REPO / "data" / "kto_training_data_v2_test.jsonl"
# Evaluation driver invoked by step_run() via subprocess.
EVAL_SCRIPT = REPO / "scripts" / "eval_gemini_mute_sync_swap.py"
# Root directory holding the source video files (passed as --data-root).
DATA_ROOT = Path("/opt/dlami/nvme/video_source")
CANDIDATES = Path("/home/ubuntu/case_study_pool.jsonl")  # 45-video pool
# All outputs (filtered jsonl + eval results) land under this directory.
OUT_ROOT = Path("/home/ubuntu/eval_results/case_study_pool")
# Filtered subset of TEST_JSONL produced by step_filter().
FILTERED_JSON = OUT_ROOT / "mute_test.jsonl"
# Run label; the eval script nests its output under this name.
LABEL = "case_study_pool_gemini_mute_neutral"
# Where the eval script is expected to write per-sample results.
GEMINI_RESULT = OUT_ROOT / "mute" / LABEL / "eval_results.jsonl"
def load_jsonl(path):
    """Yield one parsed JSON object per non-blank line of *path*.

    Blank lines are skipped so trailing newlines or accidental empty
    lines in hand-edited files do not raise.
    """
    # Explicit UTF-8: sibling code writes these files with
    # ensure_ascii=False (raw UTF-8 bytes), and the platform default
    # encoding is not guaranteed to be UTF-8.
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)
def step_filter():
    """Write FILTERED_JSON: TEST_JSONL rows restricted to the candidate pool.

    Keeps only rows whose ``video`` appears in CANDIDATES, dropping any
    sync-shifted variants (``_delay_`` / ``_early_`` in the name) so the
    mute loader sees base originals only. Warns if the kept count does
    not match the pool size.
    """
    bases = {row["video"] for row in load_jsonl(CANDIDATES)}
    print(f"[filter] {len(bases)} base videos from {CANDIDATES}")
    OUT_ROOT.mkdir(parents=True, exist_ok=True)
    kept = []
    for row in load_jsonl(TEST_JSONL):
        v = row["video"]
        # Sync-shifted variants are a different task; skip them outright.
        if "_delay_" in v or "_early_" in v:
            continue
        if v in bases:
            kept.append(row)
    # Explicit UTF-8: ensure_ascii=False emits raw non-ASCII characters,
    # which can fail (or mis-encode) under a non-UTF-8 default locale.
    with open(FILTERED_JSON, "w", encoding="utf-8") as f:
        for row in kept:
            f.write(json.dumps(row, ensure_ascii=False) + "\n")
    print(f"[filter] kept {len(kept)} rows -> {FILTERED_JSON}")
    if len(kept) != len(bases):
        print(f"[warn] expected {len(bases)} rows, got {len(kept)}")
def step_run(gemini_key, openai_key, model, workers):
    """Run the mute eval (neutral prompt, OpenAI judge) as a subprocess.

    API keys are injected into the child environment; raises
    CalledProcessError if the eval script exits non-zero.
    """
    child_env = dict(
        os.environ,
        GEMINI_API_KEY=gemini_key,
        OPENAI_API_KEY=openai_key,
    )
    argv = [
        sys.executable, str(EVAL_SCRIPT),
        "--tasks", "mute",
        "--model", model,
        "--data-root", str(DATA_ROOT),
        "--test-jsonl", str(FILTERED_JSON),
        "--output-dir", str(OUT_ROOT),
        "--label", LABEL,
        "--mute-prompt-mode", "neutral",
        "--openai-judge",
        "--workers", str(workers),
    ]
    print(f"[run] {' '.join(argv)}")
    subprocess.run(argv, env=child_env, check=True)
def step_report():
    """Print raw model output and judge verdict for each evaluated sample.

    Exits with an error message if the eval results file is missing.
    """
    if not GEMINI_RESULT.exists():
        sys.exit(f"[error] expected results at {GEMINI_RESULT}")
    rows = list(load_jsonl(GEMINI_RESULT))
    print(f"\n[report] {len(rows)} mute samples (neutral, OpenAI judged):")
    for row in rows:
        v = row["video"]
        pred = row.get("pred_label")
        correct = row.get("correct")
        # Flatten newlines and truncate so each sample stays scannable.
        raw = (row.get("raw_output") or "").strip().replace("\n", " ")[:240]
        je = (row.get("judge_explanation") or "").strip()[:120]
        print(f"\n VIDEO: {v}")
        print(f" pred={pred} correct={correct}")
        print(f" raw : {raw!r}")
        if je:
            print(f" judge: {je}")
def main():
    """CLI entry point: filter candidates, optionally run eval, report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--gemini-key", default=os.environ.get("GEMINI_API_KEY"))
    parser.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
    parser.add_argument("--model", default="gemini-3.1-pro-preview")
    parser.add_argument("--workers", type=int, default=4)
    parser.add_argument("--skip-run", action="store_true")
    opts = parser.parse_args()

    step_filter()
    if not opts.skip_run:
        # Fail fast on missing credentials before spawning the eval run.
        if not opts.gemini_key:
            sys.exit("[error] need --gemini-key or GEMINI_API_KEY env")
        if not opts.openai_key:
            sys.exit("[error] need --openai-key or OPENAI_API_KEY env")
        step_run(opts.gemini_key, opts.openai_key, opts.model, opts.workers)
    step_report()


if __name__ == "__main__":
    main()