| """Evaluate GPT-5.5 on the FULL test set: sync (426), mute neutral (142), swap direct (142). |
| |
| GPT-5.5 chat.completions does NOT accept input_audio (audio is gated to |
| gpt-4o-audio-preview / realtime). We therefore send VIDEO FRAMES ONLY (8 frames |
| sampled evenly per clip) + the appropriate text prompt. This is a documented |
| handicap — the resulting numbers reflect what GPT-5.5 can do without hearing |
| the audio. |
| |
| Audio is intentionally NOT extracted; for sync the model has no temporal audio |
| cue, for mute the audio is silent anyway, for swap the model can't hear the |
| donor track. |
| |
| Outputs (label includes _visualOnly to flag the handicap): |
| ~/eval_results/sync/sync_gpt-5.5_visualOnly/ |
| ~/eval_results/mute/mute_gpt-5.5_visualOnly_promptNeutral/ |
| ~/eval_results/swap/swap_gpt-5.5_visualOnly_promptDirect/ |
| |
| Swap donor pairing reuses the assignments from the Gemini swap eval |
| (same seed=42, so identical to Qwen swap pairing too). |
| |
| Usage: |
| OPENAI_API_KEY=sk-... python3 /home/ubuntu/case_study_gpt55.py |
| """ |
|
|
| import argparse |
| import base64 |
| import json |
| import os |
| import re |
| import subprocess |
| import sys |
| import tempfile |
| from concurrent.futures import ThreadPoolExecutor, as_completed |
| from pathlib import Path |
| from typing import Any, Dict, List, Optional |
|
|
| |
| |
| |
DATA_ROOT = Path("/opt/dlami/nvme/video_source")
ORIG_DIR = DATA_ROOT / "original" / "uag_oops"
DELAY_DIR = DATA_ROOT / "random_shift_video" / "delay"
EARLY_DIR = DATA_ROOT / "random_shift_video" / "early"

TEST_JSONL = Path("/home/ubuntu/CleverHans-Evaluation/data/kto_training_data_v2_test.jsonl")
GEMINI_SWAP = Path("/home/ubuntu/eval_results/gemini_mute_sync_swap/swap/"
                   "swap_gemini_gemini-3.1-pro-preview__promptDirect/eval_results.jsonl")

EVAL_ROOT = Path("/home/ubuntu/eval_results")
FFMPEG = "/home/ubuntu/anaconda3/envs/video/bin/ffmpeg"
FFPROBE = "/home/ubuntu/anaconda3/envs/video/bin/ffprobe"

DIR_OFFSET_RE = re.compile(r"_(delay|early)_(\d+(?:\.\d+)?)s\.mp4$")
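# Filename convention the regex encodes (names below are illustrative, not
# actual dataset files): "clip_delay_0.4s.mp4" -> ("delay", 0.4),
# "clip_early_1.2s.mp4" -> ("early", 1.2); unsuffixed originals don't match.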

SYNC_PROMPT = (
    "Watch this video and listen to its audio carefully. "
    "Determine whether the audio and video tracks are synchronized. "
    "If they are not synchronized, identify the direction of the offset "
    "(audio delayed or audio early relative to video) and estimate the offset in seconds. "
    "Explain your reasoning."
)
MUTE_PROMPT_NEUTRAL = "Describe the audio you hear in this video."
SWAP_PROMPT_DIRECT = (
    "Does the audio in this video match the visual content? "
    "Answer with only 'yes' or 'no'."
)

SYNC_PARSE_SYSTEM = (
    "You parse a free-form judgement about whether the audio in a short video is "
    "synchronized with the visuals. Return STRICT JSON only with keys: "
    "synced (bool), direction (one of 'delay'|'early'|'none'), "
    "offset_sec (number, 0 if synced), t_v (number|null, visual cue time), "
    "t_a (number|null, audio cue time), explanation (string). "
    "If the answer says they are aligned, set synced=true, direction='none', offset_sec=0."
)
MUTE_JUDGE_SYSTEM = (
    "You are evaluating a model's response about a video whose audio has been secretly "
    "replaced with silence. Classify into ONE of: "
    '{"prediction": "muted"|"synced"|"delay"|"early", "explanation": "<one sentence>"}. '
    "Rules: "
    "- 'muted' iff the model says no audio / silence / no audible sound. "
    "- 'synced' iff the model describes audio that matches the visuals (HALLUCINATION). "
    "- 'delay'/'early' iff the model claims a temporal offset. "
    "Return STRICT JSON only."
)
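# Illustrative judge output for MUTE_JUDGE_SYSTEM (shape only, not a real
# API response):
#   {"prediction": "muted", "explanation": "The model reports hearing no sound."}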


def load_jsonl(path):
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)


def parse_gt_from_name(video_name: str) -> Dict[str, Any]:
    m = DIR_OFFSET_RE.search(video_name)
    if m:
        return {"synced": False, "direction": m.group(1), "offset_sec": float(m.group(2))}
    return {"synced": True, "direction": "none", "offset_sec": 0.0}


def video_path_for(video_name: str) -> Optional[Path]:
    """Resolve a sync test name to its on-disk mp4 (synced original / delay / early)."""
    m = DIR_OFFSET_RE.search(video_name)
    if m is None:
        p = ORIG_DIR / video_name
    elif m.group(1) == "delay":
        p = DELAY_DIR / video_name
    else:
        p = EARLY_DIR / video_name
    return p if p.exists() else None


def video_duration(path: Path) -> float:
    out = subprocess.run(
        [FFPROBE, "-v", "error", "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1", str(path)],
        capture_output=True, text=True, check=True,
    )
    try:
        return float(out.stdout.strip())
    except ValueError:
        # ffprobe can print "N/A" for some containers; fall back to a nominal
        # clip length so frame sampling still produces usable timestamps.
        return 5.0


def extract_frames_b64(video_path: Path, n_frames: int = 8) -> List[str]:
    dur = video_duration(video_path)
    if dur <= 0.05:
        dur = 0.5
    # Sample at the midpoints of n equal bins; this avoids the very first and
    # last instants of the clip, which are often black or fading.
    timestamps = [dur * (i + 0.5) / n_frames for i in range(n_frames)]
    out = []
    with tempfile.TemporaryDirectory() as td:
        for i, t in enumerate(timestamps):
            png = Path(td) / f"f_{i:02d}.png"
            # -ss before -i seeks on the input (fast); scale=512:-2 gives a
            # 512-px width and an even height, as most encoders require.
            subprocess.run(
                [FFMPEG, "-y", "-ss", f"{t:.3f}", "-i", str(video_path),
                 "-frames:v", "1", "-vf", "scale=512:-2",
                 "-loglevel", "error", str(png)],
                check=True,
            )
            out.append(base64.b64encode(png.read_bytes()).decode())
    return out
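# Example (hypothetical path): extract_frames_b64(Path("clip.mp4"), 8) returns
# 8 base64-encoded PNG strings sampled at t = dur * (i + 0.5) / 8.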


def _client(api_key: str):
    from openai import OpenAI
    return OpenAI(api_key=api_key)
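# The lazy import above keeps this module importable (e.g., to reuse the
# metrics functions) on machines without the openai package installed.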


def call_gpt_frames(client, model, prompt, frames_b64,
                    max_tokens=4000, temperature=0.0,
                    reasoning_effort="minimal", _diag=True) -> str:
    """Send text + frames to a (possibly reasoning) GPT model.

    The GPT-5 family is a reasoning model: max_completion_tokens covers BOTH
    the hidden reasoning trace AND the visible content. If the token budget is
    too low, content can come back empty. We therefore:
      - bump max_completion_tokens to 4000
      - pass reasoning_effort='minimal' to keep most of the budget for content
      - retry with progressively fewer non-standard kwargs on rejection
      - if content is still empty, fall through with a diagnostic-friendly log
    """
    content: List[Dict[str, Any]] = [{"type": "text", "text": prompt}]
    for fb in frames_b64:
        content.append({"type": "image_url",
                        "image_url": {"url": f"data:image/png;base64,{fb}"}})

    base = dict(
        model=model,
        messages=[{"role": "user", "content": content}],
        max_completion_tokens=max_tokens,
    )

    def _try(extra):
        return client.chat.completions.create(**base, **extra)

    # Fallback ladder: some snapshots reject temperature for reasoning models,
    # others predate reasoning_effort; the bare call is the last resort.
    resp = None
    last_err: Optional[Exception] = None
    for kwargs in (
        {"temperature": temperature, "reasoning_effort": reasoning_effort},
        {"reasoning_effort": reasoning_effort},
        {"temperature": temperature},
        {},
    ):
        try:
            resp = _try(kwargs)
            break
        except Exception as exc:
            last_err = exc
            continue
    if resp is None:
        raise last_err  # every fallback failed; surface the last error

    msg = resp.choices[0].message
    text = (msg.content or "").strip()
    if not text:
        # Empty content usually means the reasoning trace ate the token budget
        # or the model refused; log enough detail to tell the cases apart.
        fin = resp.choices[0].finish_reason
        refusal = getattr(msg, "refusal", None)
        usage = getattr(resp, "usage", None)
        if _diag:
            print(f" [gpt empty] finish={fin} refusal={refusal} usage={usage}",
                  flush=True)
    return text


def call_judge(client, judge_model, system_prompt, user_text) -> Optional[Dict[str, Any]]:
    try:
        resp = client.chat.completions.create(
            model=judge_model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            response_format={"type": "json_object"},
            max_completion_tokens=300,
        )
        return json.loads(resp.choices[0].message.content or "")
    except Exception as exc:
        print(f" [judge] error: {exc}", flush=True)
        return None
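# Note: response_format={"type": "json_object"} requires the word "JSON" to
# appear somewhere in the messages; both judge system prompts above satisfy
# this. An empty or non-JSON reply raises inside json.loads and returns None.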


def run_one_sync(client, model, judge_model, item, n_frames):
    raw = call_gpt_frames(
        client, model, SYNC_PROMPT,
        extract_frames_b64(item["video_path"], n_frames),
    )
    parsed = call_judge(client, judge_model, SYNC_PARSE_SYSTEM, raw) or {}
    direction = str(parsed.get("direction", "none")).lower()
    if direction not in ("delay", "early", "none"):
        direction = "none"
    synced = parsed.get("synced", False)
    if isinstance(synced, str):
        synced = synced.lower() in ("true", "yes", "1")
    return {
        "video": item["video"],
        "video_path": str(item["video_path"]),
        "gt_synced": item["gt_synced"],
        "gt_direction": item["gt_direction"],
        "gt_offset_sec": item["gt_offset_sec"],
        "pred_synced": bool(synced),
        "pred_direction": direction,
        "pred_offset_sec": float(parsed.get("offset_sec", 0.0) or 0.0),
        "pred_explanation": str(parsed.get("explanation", "")),
        "parse_method": "openai_parse" if parsed else "openai_parse_fail",
        "raw_output": raw,
    }


def run_one_mute(client, model, judge_model, item, n_frames):
    raw = call_gpt_frames(
        client, model, MUTE_PROMPT_NEUTRAL,
        extract_frames_b64(item["video_path"], n_frames),
    )
    parsed = call_judge(client, judge_model, MUTE_JUDGE_SYSTEM, raw) or {}
    pred = str(parsed.get("prediction", "synced")).lower()
    if pred not in ("muted", "synced", "delay", "early"):
        pred = "synced"
    return {
        "video": item["video"],
        "video_path": str(item["video_path"]),
        "gt_label": "muted",
        "pred_label": pred,
        "correct": (pred == "muted"),
        "judge_explanation": str(parsed.get("explanation", "")),
        "parse_method": "openai_judge" if parsed else "judge_fail",
        "prompt_mode": "neutral",
        "raw_output": raw,
    }


def run_one_swap(client, model, judge_model, item, n_frames):
    raw = call_gpt_frames(
        client, model, SWAP_PROMPT_DIRECT,
        extract_frames_b64(item["video_path"], n_frames),
    )
    # Token-level yes/no parse. Checking whole tokens (not prefixes) avoids
    # misreading answers like "not sure" as "no"; anything unparseable
    # defaults to "synced", which counts against the model.
    tokens = [w.strip(".,!?'\"") for w in raw.strip().lower().split()[:3]]
    if "no" in tokens:
        pred = "mismatched"
    elif "yes" in tokens:
        pred = "synced"
    else:
        pred = "synced"
    return {
        "video": item["video"],
        "video_path": str(item["video_path"]),
        "swapped_from": item["swapped_from"],
        "gt_label": "mismatched",
        "pred_label": pred,
        "correct": (pred == "mismatched"),
        "parse_method": "direct",
        "prompt_mode": "direct",
        "raw_output": raw,
    }


def build_sync_work() -> List[Dict[str, Any]]:
    """All 426 sync samples (142 base × 3 variants)."""
    work = []
    for r in load_jsonl(TEST_JSONL):
        v = r["video"]
        p = video_path_for(v)
        if p is None:
            print(f"[skip sync] missing file: {v}", flush=True)
            continue
        gt = parse_gt_from_name(v)
        work.append({
            "video": v,
            "video_path": p,
            "gt_synced": gt["synced"],
            "gt_direction": gt["direction"],
            "gt_offset_sec": gt["offset_sec"],
        })
    return work


def build_mute_work() -> List[Dict[str, Any]]:
    """142 base videos (skip delay/early variants); audio is implicit silence."""
    work = []
    seen = set()
    for r in load_jsonl(TEST_JSONL):
        v = r["video"]
        if "_delay_" in v or "_early_" in v or v in seen:
            continue
        seen.add(v)
        p = ORIG_DIR / v
        if not p.exists():
            print(f"[skip mute] missing: {v}", flush=True)
            continue
        work.append({"video": v, "video_path": p})
    return work


def build_swap_work() -> List[Dict[str, Any]]:
    """142 base videos with the same donor pairing used in the Gemini swap eval."""
    if not GEMINI_SWAP.exists():
        sys.exit(f"[error] need {GEMINI_SWAP} for swap donor pairings")
    work = []
    for r in load_jsonl(GEMINI_SWAP):
        v = r["video"]
        donor = r.get("swapped_from")
        if not donor:
            continue
        p = ORIG_DIR / v
        if not p.exists():
            print(f"[skip swap] missing: {v}", flush=True)
            continue
        work.append({"video": v, "video_path": p, "swapped_from": donor})
    return work


def _safe_div(a, b):
    return round(a / b, 4) if b else 0.0
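# _safe_div returns 0.0 for a zero denominator (e.g., an empty category)
# rather than raising ZeroDivisionError.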


def metrics_mute(rows, model, judge_model):
    breakdown = {"muted": 0, "synced": 0, "delay": 0, "early": 0}
    parse_stats: Dict[str, int] = {}
    for r in rows:
        breakdown[r["pred_label"]] = breakdown.get(r["pred_label"], 0) + 1
        m = r.get("parse_method", "")
        parse_stats[m] = parse_stats.get(m, 0) + 1
    n = len(rows)
    return {
        "total_samples": n,
        "mute_detection_rate": _safe_div(breakdown["muted"], n),
        "hallucination_rate": _safe_div(n - breakdown["muted"], n),
        "prediction_breakdown": breakdown,
        "parse_stats": parse_stats,
        "eval_config": {
            "base_model": model,
            "prompt_mode": "neutral",
            "openai_judge": True,
            "judge_model": judge_model,
            "input_modality": "frames_only",
        },
    }


def metrics_swap(rows, model, judge_model):
    breakdown = {"mismatched": 0, "synced": 0, "delay": 0, "early": 0}
    parse_stats: Dict[str, int] = {}
    for r in rows:
        breakdown[r["pred_label"]] = breakdown.get(r["pred_label"], 0) + 1
        m = r.get("parse_method", "")
        parse_stats[m] = parse_stats.get(m, 0) + 1
    n = len(rows)
    return {
        "total_samples": n,
        "mismatch_detection_rate": _safe_div(breakdown["mismatched"], n),
        "hallucination_rate": _safe_div(n - breakdown["mismatched"], n),
        "prediction_breakdown": breakdown,
        "parse_stats": parse_stats,
        "eval_config": {
            "base_model": model,
            "prompt_mode": "direct",
            "judge_model": judge_model,
            "input_modality": "frames_only",
        },
    }


def metrics_sync(rows, model, judge_model):
    n = len(rows)
    by_cat = {"synced": [], "delay": [], "early": []}
    for r in rows:
        if r["gt_synced"]:
            by_cat["synced"].append(r)
        else:
            by_cat[r["gt_direction"]].append(r)

    def _is_correct(r):
        # Three-class correctness: synced clips need pred_synced; desynced
        # clips need both "not synced" and the right direction.
        if r["gt_synced"]:
            return r["pred_synced"]
        return (not r["pred_synced"]) and r["pred_direction"] == r["gt_direction"]

    # Binary accuracy only asks synced-vs-desynced; three-class also scores
    # the direction on desynced clips.
    sync_desync = sum(1 for r in rows if bool(r["pred_synced"]) == bool(r["gt_synced"]))
    three_class = sum(1 for r in rows if _is_correct(r))
    desync_rows = [r for r in rows if not r["gt_synced"]]
    dir_correct = sum(1 for r in desync_rows
                      if (not r["pred_synced"]) and r["pred_direction"] == r["gt_direction"])

    # Offset MAE over desynced clips where both predicted and GT offsets exist.
    offsets = []
    for r in desync_rows:
        if r.get("pred_offset_sec") is not None and r.get("gt_offset_sec") is not None:
            offsets.append(abs(float(r["pred_offset_sec"]) - float(r["gt_offset_sec"])))
    offset_mae = round(sum(offsets) / len(offsets), 4) if offsets else None

    parse_stats: Dict[str, int] = {}
    for r in rows:
        m = r.get("parse_method", "")
        parse_stats[m] = parse_stats.get(m, 0) + 1

    return {
        "total_samples": n,
        "sync_desync_accuracy": _safe_div(sync_desync, n),
        "three_class_accuracy": _safe_div(three_class, n),
        "direction_accuracy_on_desync": _safe_div(dir_correct, len(desync_rows)),
        "per_category": {
            "synced_accuracy": _safe_div(sum(1 for r in by_cat["synced"] if _is_correct(r)),
                                         len(by_cat["synced"])),
            "delay_accuracy": _safe_div(sum(1 for r in by_cat["delay"] if _is_correct(r)),
                                        len(by_cat["delay"])),
            "early_accuracy": _safe_div(sum(1 for r in by_cat["early"] if _is_correct(r)),
                                        len(by_cat["early"])),
            "synced_count": len(by_cat["synced"]),
            "delay_count": len(by_cat["delay"]),
            "early_count": len(by_cat["early"]),
        },
        "offset_mae_sec": offset_mae,
        "offset_evaluated_count": len(offsets),
        "parse_stats": parse_stats,
        "eval_config": {
            "base_model": model,
            "openai_parse_sync": True,
            "judge_model": judge_model,
            "input_modality": "frames_only",
        },
    }
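# Illustrative metrics.json shape for the sync task (values are placeholders,
# not measured results):
#   {"total_samples": 426, "sync_desync_accuracy": 0.0,
#    "three_class_accuracy": 0.0, "offset_mae_sec": null, ...}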


METRICS_FN = {
    "sync": metrics_sync,
    "mute": metrics_mute,
    "swap": metrics_swap,
}


def run_task(out_dir: Path, work, runner, client, model, judge_model, n_frames, workers,
             task_kind: str):
    out_dir.mkdir(parents=True, exist_ok=True)
    results_path = out_dir / "eval_results.jsonl"

    # Resume support: results are appended per clip, so a re-run only touches
    # videos not already present in eval_results.jsonl.
    processed = set()
    if results_path.exists():
        with open(results_path) as f:
            for line in f:
                line = line.strip()
                if line:
                    processed.add(json.loads(line)["video"])
        print(f"[{out_dir.name}] resume: {len(processed)} already done")

    todo = [w for w in work if w["video"] not in processed]
    print(f"[{out_dir.name}] {len(todo)} new / {len(work)} total")

    def _go(item):
        try:
            return runner(client, model, judge_model, item, n_frames)
        except Exception as exc:
            print(f"[{out_dir.name}] error on {item['video']}: {exc}", flush=True)
            return None

    n_done = 0
    with ThreadPoolExecutor(max_workers=workers) as ex, open(results_path, "a") as out:
        futures = {ex.submit(_go, item): item for item in todo}
        for fut in as_completed(futures):
            res = fut.result()
            if res is None:
                continue
            out.write(json.dumps(res, ensure_ascii=False) + "\n")
            out.flush()
            n_done += 1
            if n_done % 10 == 0 or n_done == len(todo):
                print(f"[{out_dir.name}] {n_done}/{len(todo)} done", flush=True)
    print(f"[{out_dir.name}] saved -> {results_path}")

    # Metrics are recomputed over the full results file (old + new rows).
    with open(results_path) as f:
        rows = [json.loads(line) for line in f if line.strip()]
    metrics = METRICS_FN[task_kind](rows, model, judge_model)
    metrics_path = out_dir / "metrics.json"
    with open(metrics_path, "w") as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)
    print(f"[{out_dir.name}] metrics -> {metrics_path}")
    return results_path
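# Each task directory ends up holding eval_results.jsonl (one JSON row per
# clip, append-only) and metrics.json (recomputed from the full file), e.g.
# ~/eval_results/sync/sync_gpt-5.5_visualOnly/.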


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("--openai-key", default=os.environ.get("OPENAI_API_KEY"))
    ap.add_argument("--model", default="gpt-5.5",
                    help="Model under test (frames-only multimodal).")
    ap.add_argument("--judge-model", default="gpt-5.4")
    ap.add_argument("--tasks", default="sync,mute,swap")
    ap.add_argument("--workers", type=int, default=6)
    ap.add_argument("--n-frames", type=int, default=8)
    ap.add_argument("--label-suffix", default="_visualOnly",
                    help="Tagged into the output dir name to flag the no-audio handicap.")
    args = ap.parse_args()

    if not args.openai_key:
        sys.exit("[error] need --openai-key or OPENAI_API_KEY env")

    client = _client(args.openai_key)
    model_tag = args.model.replace("/", "_") + args.label_suffix

    tasks = [t.strip() for t in args.tasks.split(",") if t.strip()]
    if "sync" in tasks:
        run_task(EVAL_ROOT / "sync" / f"sync_{model_tag}",
                 build_sync_work(), run_one_sync,
                 client, args.model, args.judge_model, args.n_frames, args.workers,
                 task_kind="sync")
    if "mute" in tasks:
        run_task(EVAL_ROOT / "mute" / f"mute_{model_tag}_promptNeutral",
                 build_mute_work(), run_one_mute,
                 client, args.model, args.judge_model, args.n_frames, args.workers,
                 task_kind="mute")
    if "swap" in tasks:
        run_task(EVAL_ROOT / "swap" / f"swap_{model_tag}_promptDirect",
                 build_swap_work(), run_one_swap,
                 client, args.model, args.judge_model, args.n_frames, args.workers,
                 task_kind="swap")


if __name__ == "__main__":
    main()