#!/usr/bin/env python3
"""
Automated evaluation script for audio-video sync DPO models.
Workflow:
1. Load test set (kto_training_data_v2_test.jsonl)
2. Parse ground truth from video filenames (synced / delay Xs / early Xs)
3. Run model inference with a structured JSON prompt
4. Extract predictions: JSON parse -> GPT-5.4 judge (optional) -> regex fallback
5. Compute metrics: Sync/Desync Accuracy, Direction Accuracy, Offset MAE
6. Output a summary table + per-sample JSONL for analysis
Usage:
python eval_dpo_sync.py \
--base-model Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo \
--adapter /opt/dlami/nvme/LlamaFactory/saves/Qwen3-Omni-Instruct/dpo/qwen3omni_dpo_lora_with_audio_v4_data_8632
# With GPT-5.4 judge fallback:
python eval_dpo_sync.py \
--base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
--adapter /opt/dlami/nvme/.../adapter \
--gpt-judge --openai-api-key sk-xxx
# Evaluate base model without adapter (SFT baseline):
python eval_dpo_sync.py \
--base-model Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo
# Evaluate vanilla Qwen3-Omni (no SFT, no DPO):
python eval_dpo_sync.py \
--base-model Qwen/Qwen3-Omni-30B-A3B-Instruct
"""
from __future__ import annotations
import argparse
import gc
import json
import os
import re
import time
from pathlib import Path
from statistics import mean, median
from typing import Any, Dict, List, Optional, Tuple
import torch
from tqdm import tqdm
# ---------------------------------------------------------------------------
# GPT-5.4 judge helper
# ---------------------------------------------------------------------------
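# Lazily-initialized module-level OpenAI client, reused across all judge calls.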
_openai_client = None
GPT_JUDGE_SYSTEM = """\
You are a structured-output extractor. The user will give you a model's free-text \
response about audio-video synchronization. Extract the following fields and return \
ONLY valid JSON (no markdown, no explanation):
{"synced": <bool>, "direction": "none"|"delay"|"early", "offset_sec": <float>, "t_v": <float or null>, "t_a": <float or null>, "explanation": "<one sentence>"}
Rules:
- synced: true if the model says audio and video are synchronized, false otherwise.
- direction: "delay" means audio comes AFTER the visual event; "early" means audio \
comes BEFORE the visual event; "none" if synced is true.
- offset_sec: estimated time gap in seconds. 0.0 if synced.
- t_v: the timestamp (in seconds) the model attributes to the VISUAL event. null if not mentioned.
- t_a: the timestamp (in seconds) the model attributes to the AUDIO event. null if not mentioned.
- If you cannot determine a field, use the default (true / "none" / 0.0 / null / "").
"""
def _get_openai_client(api_key: Optional[str] = None):
global _openai_client
if _openai_client is not None:
return _openai_client
key = api_key or os.environ.get("OPENAI_API_KEY")
if not key:
return None
from openai import OpenAI
_openai_client = OpenAI(api_key=key)
return _openai_client
def gpt_extract_prediction(
raw_output: str,
api_key: Optional[str] = None,
model: str = "gpt-5.4",
) -> Optional[Dict[str, Any]]:
"""Call GPT to parse raw model output into structured prediction."""
client = _get_openai_client(api_key)
if client is None:
return None
try:
resp = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": GPT_JUDGE_SYSTEM},
{"role": "user", "content": raw_output},
],
temperature=0.0,
max_completion_tokens=200,
)
text = resp.choices[0].message.content.strip()
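        # The judge is prompted to return bare JSON, but tolerate fenced or prose-wrapped replies.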
for pat in [
re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
re.compile(r"(\{.*?\})", re.DOTALL),
]:
m = pat.search(text)
if m:
obj = json.loads(m.group(1))
synced = obj.get("synced")
if isinstance(synced, str):
synced = synced.lower() in ("true", "yes", "1")
direction = str(obj.get("direction", "none")).lower().strip()
if direction not in ("delay", "early", "none"):
direction = "none"
t_v_raw = obj.get("t_v")
t_a_raw = obj.get("t_a")
pred_t_v = float(t_v_raw) if t_v_raw is not None else None
pred_t_a = float(t_a_raw) if t_a_raw is not None else None
return {
"pred_synced": bool(synced),
"pred_direction": direction,
"pred_offset_sec": float(obj.get("offset_sec", 0.0)),
"pred_t_v": pred_t_v,
"pred_t_a": pred_t_a,
"pred_explanation": str(obj.get("explanation", "")),
"parse_method": "gpt_judge",
}
except Exception as exc:
print(f" [gpt-judge] API error: {exc}", flush=True)
return None
DATA_ROOT = Path("/opt/dlami/nvme/video_source")
ORIGINAL_ROOT = DATA_ROOT / "original"
AUDIO_ROOT = DATA_ROOT / "extracted_audio" / "original"
def set_data_root(root: Path) -> None:
"""Point path resolution at a new server layout (original/, random_shift_video/, extracted_audio/)."""
global DATA_ROOT, ORIGINAL_ROOT, AUDIO_ROOT
DATA_ROOT = root.resolve()
ORIGINAL_ROOT = DATA_ROOT / "original"
AUDIO_ROOT = DATA_ROOT / "extracted_audio" / "original"
EVAL_PROMPT = """\
Watch this video and listen to its audio carefully. \
Determine whether the audio and video tracks are synchronized. \
If they are not synchronized, identify the direction of the offset \
(audio delayed or audio early relative to video) and estimate the offset in seconds. \
Explain your reasoning."""
def parse_args() -> argparse.Namespace:
p = argparse.ArgumentParser(description="Evaluate DPO sync model on test set.")
p.add_argument("--base-model", type=str,
default="Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo")
p.add_argument("--adapter", type=str, default=None,
help="Path to LoRA adapter. If omitted, evaluates base model only.")
p.add_argument(
"--data-root",
type=Path,
default=Path("/opt/dlami/nvme/video_source"),
help="Directory containing original/, random_shift_video/, extracted_audio/ (video+audio for sync eval).",
)
p.add_argument(
"--test-jsonl",
type=Path,
default=None,
help="Test JSONL (default: <data-root>/kto_training_data_v2_test.jsonl).",
)
p.add_argument(
"--output-dir",
type=Path,
default=None,
help="Eval output root (default: /home/ubuntu/eval_results/sync).",
)
p.add_argument("--max-samples", type=int, default=-1,
help="Limit number of test samples (-1 = all).")
p.add_argument("--max-new-tokens", type=int, default=256)
p.add_argument("--temperature", type=float, default=0.0)
p.add_argument("--batch-size", type=int, default=1)
p.add_argument("--label", type=str, default=None,
help="Label for this eval run (used in output filenames).")
p.add_argument("--gpt-judge", action="store_true", default=False,
help="Use GPT-5.4 as fallback parser when JSON extraction fails.")
p.add_argument("--openai-api-key", type=str, default=None,
help="OpenAI API key. Falls back to OPENAI_API_KEY env var.")
p.add_argument("--gpt-model", type=str, default="gpt-5.4",
help="OpenAI model to use for GPT judge (default: gpt-5.4).")
return p.parse_args()
def parse_ground_truth(video_field: str) -> Dict[str, Any]:
"""Extract ground truth sync label from video filename."""
m_delay = re.search(r"_delay_([\d.]+)s\.mp4", video_field)
m_early = re.search(r"_early_([\d.]+)s\.mp4", video_field)
if m_delay:
return {"synced": False, "direction": "delay", "offset_sec": float(m_delay.group(1))}
elif m_early:
return {"synced": False, "direction": "early", "offset_sec": float(m_early.group(1))}
else:
return {"synced": True, "direction": "none", "offset_sec": 0.0}
def resolve_video_path(video_field: str) -> str:
if os.path.isabs(video_field) and os.path.exists(video_field):
return video_field
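    # Relative filenames: probe the known data-root layout in priority order.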
candidate_dirs = [
ORIGINAL_ROOT / "uag_oops",
DATA_ROOT / "random_shift_video" / "delay",
DATA_ROOT / "random_shift_video" / "early",
ORIGINAL_ROOT,
]
for d in candidate_dirs:
c = d / video_field
if c.exists():
return str(c)
return str(ORIGINAL_ROOT / "uag_oops" / video_field)
def resolve_audio_path(video_path: str) -> str:
video_p = Path(video_path)
try:
rel = video_p.relative_to(DATA_ROOT)
except ValueError:
rel = Path(video_p.name)
audio_path = DATA_ROOT / "extracted_audio" / rel.with_suffix(".wav")
if audio_path.exists():
return str(audio_path)
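    # Shifted clips reuse the original clip's audio: strip the _delay_Xs/_early_Xs suffix
    # and fall back to the matching wav under extracted_audio/original/uag_oops/.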
base_stem = re.sub(r"_(delay|early)_[\d.]+s$", "", video_p.stem)
fallback = DATA_ROOT / "extracted_audio" / "original" / "uag_oops" / (base_stem + ".wav")
if fallback.exists():
return str(fallback)
return str(audio_path)
def extract_timestamps(text: str) -> Tuple[Optional[float], Optional[float]]:
"""Extract (t_visual, t_audio) from text using 'at/around/about Xs' pattern.
If >= 2 timestamps found, first is t_v, second is t_a.
If exactly 1 timestamp found (common for synced samples), treat as both t_v and t_a.
"""
text_lower = text.lower()
all_times = [(m.start(), float(m.group(1)))
for m in re.finditer(r"(?:at|around|about)\s+([\d]+\.?\d*)\s*s", text_lower)]
if len(all_times) >= 2:
return (all_times[0][1], all_times[1][1])
if len(all_times) == 1:
return (all_times[0][1], all_times[0][1])
return (None, None)
def load_test_data(path: Path, max_samples: int) -> List[Dict[str, Any]]:
data = []
with open(path) as f:
for line in f:
line = line.strip()
if not line:
continue
obj = json.loads(line)
video_path = resolve_video_path(obj["video"])
audio_path = resolve_audio_path(video_path)
gt = parse_ground_truth(obj["video"])
gt_t_v, gt_t_a = extract_timestamps(obj.get("chosen", ""))
data.append({
"video": obj["video"],
"video_path": video_path,
"audio_path": audio_path,
"prompt": obj["prompt"],
"chosen": obj["chosen"],
"rejected": obj["rejected"],
"gt_synced": gt["synced"],
"gt_direction": gt["direction"],
"gt_offset_sec": gt["offset_sec"],
"gt_t_v": gt_t_v,
"gt_t_a": gt_t_a,
})
if max_samples > 0:
data = data[:max_samples]
return data
def extract_prediction(text: str) -> Dict[str, Any]:
"""Extract structured prediction from model output text."""
text = text.strip()
# Try JSON extraction
for pattern in [
re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
re.compile(r"(\{[^{}]*\"synced\"[^{}]*\})", re.DOTALL),
re.compile(r"(\{.*?\})", re.DOTALL),
]:
m = pattern.search(text)
if m:
try:
obj = json.loads(m.group(1))
synced = obj.get("synced")
if isinstance(synced, str):
synced = synced.lower() in ("true", "yes", "1")
direction = str(obj.get("direction", "none")).lower().strip()
if direction not in ("delay", "early", "none"):
direction = "none"
offset = float(obj.get("offset_sec", 0.0))
explanation = str(obj.get("explanation", ""))
t_v_raw = obj.get("t_v")
t_a_raw = obj.get("t_a")
return {
"pred_synced": bool(synced),
"pred_direction": direction,
"pred_offset_sec": offset,
"pred_t_v": float(t_v_raw) if t_v_raw is not None else None,
"pred_t_a": float(t_a_raw) if t_a_raw is not None else None,
"pred_explanation": explanation,
"parse_method": "json",
}
except (json.JSONDecodeError, ValueError, TypeError):
continue
# Fallback: structured extraction from free text
text_lower = text.lower()
synced = None
direction = "none"
offset = 0.0
pred_t_v, pred_t_a = extract_timestamps(text)
# Step 1: Determine sync vs desync
desync_kws = [
"not synchronized", "not aligned", "desync", "mismatch", "misalign",
"not in sync", "out of sync", "clearly not", "not well aligned",
"are not aligned", "audio and visual event are clearly not",
]
sync_kws = [
"synchronized", "well aligned", "well-aligned", "in sync",
"appear synchronized", "appears synchronized", "closely aligned",
"audio and video are aligned", "matches closely",
]
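    # Check desync keywords first: phrases like "not synchronized" also contain the sync keyword "synchronized".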
if any(kw in text_lower for kw in desync_kws):
synced = False
elif any(kw in text_lower for kw in sync_kws):
synced = True
if synced is False:
# Step 2a: Try explicit direction keywords
delay_kws = ["audio delayed", "audio lags", "audio comes after", "sound comes after",
"sound is heard later", "audio is delayed", "sound follows"]
early_kws = ["audio early", "audio leads", "audio comes before", "sound comes before",
"audio precedes", "sound is heard before", "sound precedes", "audio is early"]
if any(kw in text_lower for kw in delay_kws):
direction = "delay"
elif any(kw in text_lower for kw in early_kws):
direction = "early"
# Step 2b: If no explicit keyword, use extracted timestamps to infer direction
if direction == "none" and pred_t_v is not None and pred_t_a is not None and pred_t_v != pred_t_a:
if pred_t_a > pred_t_v:
direction = "delay"
else:
direction = "early"
offset = abs(pred_t_a - pred_t_v)
# Step 2c: Extract offset from gap/separation mentions if not yet found
if offset == 0.0:
offset_match = re.search(
r"(?:gap|separation|offset|mismatch|differ\w*)\s*(?:of\s+)?(?:about\s+|roughly\s+|approximately\s+)?"
r"([\d]+\.?\d*)\s*s",
text_lower,
)
if not offset_match:
offset_match = re.search(
r"(?:about\s+|roughly\s+|approximately\s+)?([\d]+\.?\d*)\s*s\s*"
r"(?:gap|separation|offset|mismatch|differ)",
text_lower,
)
if offset_match:
offset = float(offset_match.group(1))
if synced is None:
synced = True
return {
"pred_synced": synced,
"pred_direction": direction,
"pred_offset_sec": offset,
"pred_t_v": pred_t_v,
"pred_t_a": pred_t_a,
"pred_explanation": "",
"parse_method": "regex_fallback",
}
def load_model(base_model: str, adapter: Optional[str]):
import json as _json
from transformers import (
AutoConfig,
AutoProcessor,
Qwen3OmniMoeForConditionalGeneration,
Qwen3OmniMoeThinkerConfig,
Qwen3OmniMoeThinkerForConditionalGeneration,
)
print(f"[load] Base model: {base_model}")
processor = AutoProcessor.from_pretrained(base_model, trust_remote_code=True)
model_path = Path(base_model)
cfg_path = model_path / "config.json" if model_path.exists() else None
model_type = None
if cfg_path and cfg_path.exists():
with open(cfg_path, "r") as f:
model_type = _json.load(f).get("model_type")
if not model_type:
try:
from huggingface_hub import hf_hub_download
cached = hf_hub_download(base_model, "config.json")
with open(cached, "r") as f:
model_type = _json.load(f).get("model_type")
except Exception:
pass
print(f"[load] Detected model_type: {model_type}")
if model_type == "qwen3_omni_moe_thinker":
config = Qwen3OmniMoeThinkerConfig.from_pretrained(base_model)
model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
)
else:
config = AutoConfig.from_pretrained(base_model, trust_remote_code=True)
model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
)
if adapter:
from peft import PeftModel
from safetensors.torch import load_file, save_file
import tempfile, shutil
print(f"[load] Loading LoRA adapter: {adapter}")
adapter_cfg_path = Path(adapter) / "adapter_config.json"
with open(adapter_cfg_path) as f:
            adapter_cfg = json.load(f)
target_modules = adapter_cfg.get("target_modules", [])
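        # Adapters trained on the thinker-only model target "model.layers.*"; on the full Omni
        # model those modules live under "thinker.model.layers.*", so both the config's
        # target_modules and the saved tensor keys must be remapped before PEFT can attach them.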
needs_remap = any(t.startswith("model.layers.") for t in target_modules) and model_type != "qwen3_omni_moe_thinker"
if needs_remap:
print("[load] Adapter was trained on thinker-only model; remapping keys to full model...")
tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
for fn in Path(adapter).iterdir():
if fn.is_dir():
continue
if fn.name == "adapter_config.json":
new_targets = []
for t in target_modules:
if t.startswith("model.layers."):
new_targets.append("thinker." + t)
elif t[0].isdigit():
new_targets.append("thinker.model.layers." + t)
else:
new_targets.append(t)
adapter_cfg["target_modules"] = new_targets
with open(tmp_dir / "adapter_config.json", "w") as f:
json.dump(adapter_cfg, f, indent=2)
elif fn.suffix == ".safetensors" and "adapter" in fn.name:
tensors = load_file(str(fn))
remapped = {}
for k, v in tensors.items():
if ".model.layers." in k and ".thinker." not in k:
new_k = k.replace("base_model.model.model.layers.", "base_model.model.thinker.model.layers.")
remapped[new_k] = v
else:
remapped[k] = v
save_file(remapped, str(tmp_dir / fn.name))
else:
shutil.copy2(str(fn), str(tmp_dir / fn.name))
adapter = str(tmp_dir)
model = PeftModel.from_pretrained(model, adapter)
model = model.merge_and_unload()
if hasattr(model, "disable_talker"):
model.disable_talker()
model.eval()
print(f"[load] Model ready ({type(model).__name__}). Device: {model.device}")
return model, processor
def run_inference(model, processor, video_path: str, audio_path: str,
max_new_tokens: int, temperature: float) -> str:
from qwen_omni_utils import process_mm_info
import tempfile
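    # Symlink the media under neutral names so the ground-truth hints encoded in the
    # filename (_delay_Xs / _early_Xs) never leak into the prompt.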
tmp_dir = tempfile.mkdtemp(prefix="eval_masked_")
masked_video = os.path.join(tmp_dir, "clip.mp4")
masked_audio = os.path.join(tmp_dir, "clip.wav")
os.symlink(os.path.abspath(video_path), masked_video)
os.symlink(os.path.abspath(audio_path), masked_audio)
conversation = [
{
"role": "user",
"content": [
{"type": "video", "video": masked_video},
{"type": "audio", "audio": masked_audio},
{"type": "text", "text": EVAL_PROMPT},
],
}
]
text = processor.apply_chat_template(
conversation, add_generation_prompt=True, tokenize=False,
)
audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
inputs = processor(
text=text, audio=audios, images=images, videos=videos,
return_tensors="pt", padding=True, use_audio_in_video=False,
)
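    # Move tensors to the model's device and cast floating-point features to its dtype (bf16).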
model_dtype = next(model.parameters()).dtype
converted = {}
for k, v in inputs.items():
if hasattr(v, "to"):
v = v.to(model.device)
if torch.is_floating_point(v):
v = v.to(model_dtype)
converted[k] = v
inputs = converted
from transformers import Qwen3OmniMoeThinkerForConditionalGeneration
is_thinker = isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration)
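    # The thinker-only model takes plain generate() kwargs; the full Omni model routes text
    # generation through thinker_* kwargs and must be told not to synthesize audio.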
if is_thinker:
gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
else:
gen_kwargs = {
"thinker_max_new_tokens": max_new_tokens,
"use_audio_in_video": False,
"return_audio": False,
"do_sample": temperature > 0,
}
if temperature > 0:
gen_kwargs["temperature"] = temperature
gen_kwargs["top_p"] = 0.9
with torch.inference_mode():
output_ids = model.generate(**inputs, **gen_kwargs)
prompt_len = inputs["input_ids"].shape[1]
response = processor.batch_decode(
output_ids[:, prompt_len:], skip_special_tokens=True,
)[0].strip()
import shutil
shutil.rmtree(tmp_dir, ignore_errors=True)
return response
def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Compute all evaluation metrics."""
total = len(results)
if total == 0:
return {}
# 1. Sync/Desync binary accuracy
sync_correct = sum(1 for r in results if r["pred_synced"] == r["gt_synced"])
sync_acc = sync_correct / total
# 2. Direction accuracy (on desync samples only)
desync_samples = [r for r in results if not r["gt_synced"]]
if desync_samples:
dir_correct = sum(1 for r in desync_samples if r["pred_direction"] == r["gt_direction"])
dir_acc = dir_correct / len(desync_samples)
else:
dir_acc = None
# 3. Full 3-class accuracy (synced / delay / early)
def label(r, prefix):
if r[f"{prefix}synced"]:
return "synced"
return r[f"{prefix}direction"]
three_class_correct = sum(1 for r in results if label(r, "pred_") == label(r, "gt_"))
three_class_acc = three_class_correct / total
# 4. Offset MAE (on desync samples where model also predicts desync)
offset_errors = []
for r in desync_samples:
if not r["pred_synced"] and r["pred_offset_sec"] > 0:
offset_errors.append(abs(r["pred_offset_sec"] - r["gt_offset_sec"]))
offset_mae = mean(offset_errors) if offset_errors else None
offset_median = median(offset_errors) if offset_errors else None
# 5. Per-category breakdown
synced_samples = [r for r in results if r["gt_synced"]]
delay_samples = [r for r in results if r["gt_direction"] == "delay"]
early_samples = [r for r in results if r["gt_direction"] == "early"]
synced_acc = (sum(1 for r in synced_samples if r["pred_synced"]) / len(synced_samples)) if synced_samples else None
delay_acc = (sum(1 for r in delay_samples if not r["pred_synced"] and r["pred_direction"] == "delay") / len(delay_samples)) if delay_samples else None
early_acc = (sum(1 for r in early_samples if not r["pred_synced"] and r["pred_direction"] == "early") / len(early_samples)) if early_samples else None
# 6. Tolerance-based offset accuracy
within_05 = sum(1 for e in offset_errors if e <= 0.5) if offset_errors else 0
within_10 = sum(1 for e in offset_errors if e <= 1.0) if offset_errors else 0
# 7. Parse method stats
json_parsed = sum(1 for r in results if r.get("parse_method") == "json")
regex_parsed = sum(1 for r in results if r.get("parse_method") == "regex_fallback")
gpt_parsed = sum(1 for r in results if r.get("parse_method") == "gpt_judge")
# 8. Timestamp MAE (t_v and t_a separately)
tv_errors = []
ta_errors = []
for r in results:
gt_tv = r.get("gt_t_v")
gt_ta = r.get("gt_t_a")
pred_tv = r.get("pred_t_v")
pred_ta = r.get("pred_t_a")
if gt_tv is not None and pred_tv is not None:
tv_errors.append(abs(pred_tv - gt_tv))
if gt_ta is not None and pred_ta is not None:
ta_errors.append(abs(pred_ta - gt_ta))
tv_mae = round(mean(tv_errors), 4) if tv_errors else None
ta_mae = round(mean(ta_errors), 4) if ta_errors else None
tv_median = round(median(tv_errors), 4) if tv_errors else None
ta_median = round(median(ta_errors), 4) if ta_errors else None
return {
"total_samples": total,
"sync_desync_accuracy": round(sync_acc, 4),
"three_class_accuracy": round(three_class_acc, 4),
"direction_accuracy_on_desync": round(dir_acc, 4) if dir_acc is not None else None,
"per_category": {
"synced_accuracy": round(synced_acc, 4) if synced_acc is not None else None,
"delay_accuracy": round(delay_acc, 4) if delay_acc is not None else None,
"early_accuracy": round(early_acc, 4) if early_acc is not None else None,
"synced_count": len(synced_samples),
"delay_count": len(delay_samples),
"early_count": len(early_samples),
},
"offset_mae_sec": round(offset_mae, 4) if offset_mae is not None else None,
"offset_median_sec": round(offset_median, 4) if offset_median is not None else None,
"offset_within_0.5s": within_05,
"offset_within_1.0s": within_10,
"offset_evaluated_count": len(offset_errors),
"timestamp_tv_mae_sec": tv_mae,
"timestamp_ta_mae_sec": ta_mae,
"timestamp_tv_median_sec": tv_median,
"timestamp_ta_median_sec": ta_median,
"timestamp_evaluated_tv": len(tv_errors),
"timestamp_evaluated_ta": len(ta_errors),
"parse_stats": {"json": json_parsed, "regex_fallback": regex_parsed, "gpt_judge": gpt_parsed},
}
def print_summary(metrics: Dict[str, Any], label: str) -> None:
print()
print(f"{'=' * 60}")
print(f" Eval Summary: {label}")
print(f"{'=' * 60}")
print(f" Total samples: {metrics['total_samples']}")
print(f" Sync/Desync Accuracy: {metrics['sync_desync_accuracy']:.1%}")
print(f" 3-Class Accuracy: {metrics['three_class_accuracy']:.1%}")
if metrics["direction_accuracy_on_desync"] is not None:
print(f" Direction Acc (desync): {metrics['direction_accuracy_on_desync']:.1%}")
print(f" ─── Per Category ───")
pc = metrics["per_category"]
if pc["synced_accuracy"] is not None:
print(f" Synced correct: {pc['synced_accuracy']:.1%} ({pc['synced_count']} samples)")
if pc["delay_accuracy"] is not None:
print(f" Delay correct: {pc['delay_accuracy']:.1%} ({pc['delay_count']} samples)")
if pc["early_accuracy"] is not None:
print(f" Early correct: {pc['early_accuracy']:.1%} ({pc['early_count']} samples)")
print(f" ─── Offset Estimation ───")
if metrics["offset_mae_sec"] is not None:
print(f" MAE: {metrics['offset_mae_sec']:.3f}s")
print(f" Median Error: {metrics['offset_median_sec']:.3f}s")
print(f" Within 0.5s: {metrics['offset_within_0.5s']} / {metrics['offset_evaluated_count']}")
print(f" Within 1.0s: {metrics['offset_within_1.0s']} / {metrics['offset_evaluated_count']}")
else:
print(f" (no valid offset predictions)")
print(f" ─── Timestamp Estimation ───")
if metrics.get("timestamp_tv_mae_sec") is not None:
print(f" t_v MAE: {metrics['timestamp_tv_mae_sec']:.3f}s ({metrics['timestamp_evaluated_tv']} samples)")
print(f" t_v Median Error: {metrics['timestamp_tv_median_sec']:.3f}s")
else:
print(f" t_v: (no valid pairs)")
if metrics.get("timestamp_ta_mae_sec") is not None:
print(f" t_a MAE: {metrics['timestamp_ta_mae_sec']:.3f}s ({metrics['timestamp_evaluated_ta']} samples)")
print(f" t_a Median Error: {metrics['timestamp_ta_median_sec']:.3f}s")
else:
print(f" t_a: (no valid pairs)")
print(f" ─── Parse Stats ───")
ps = metrics["parse_stats"]
print(f" JSON parsed: {ps['json']}")
print(f" GPT judge: {ps.get('gpt_judge', 0)}")
print(f" Regex fallback: {ps['regex_fallback']}")
print(f"{'=' * 60}")
def main() -> None:
args = parse_args()
set_data_root(args.data_root)
test_jsonl = args.test_jsonl or (DATA_ROOT / "kto_training_data_v2_test.jsonl")
output_dir = args.output_dir or Path("/home/ubuntu/eval_results/sync")
if args.gpt_judge:
client = _get_openai_client(args.openai_api_key)
if client is None:
print("[ERROR] --gpt-judge requires OPENAI_API_KEY env var or --openai-api-key argument.")
raise SystemExit(1)
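        # Cheap smoke test: fail fast on a bad key or model name before spending GPU time.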
try:
test_resp = client.chat.completions.create(
model=args.gpt_model,
messages=[{"role": "user", "content": "Say OK"}],
max_completion_tokens=5,
)
print(f"[gpt-judge] API verified. Model: {args.gpt_model}")
except Exception as exc:
print(f"[ERROR] GPT API check failed: {exc}")
raise SystemExit(1)
label = args.label or (Path(args.adapter).name if args.adapter else Path(args.base_model).name)
out_dir = output_dir / label
out_dir.mkdir(parents=True, exist_ok=True)
results_jsonl = out_dir / "eval_results.jsonl"
metrics_json = out_dir / "metrics.json"
summary_txt = out_dir / "summary.txt"
# Load data
test_data = load_test_data(test_jsonl, args.max_samples)
print(f"[data] Loaded {len(test_data)} test samples")
# Resume support
processed = set()
if results_jsonl.exists():
with open(results_jsonl) as f:
for line in f:
obj = json.loads(line)
processed.add(obj["video"])
print(f"[resume] {len(processed)} already processed, skipping")
# Load model
model, processor = load_model(args.base_model, args.adapter)
# Inference loop
for item in tqdm(test_data, desc="Evaluating", unit="sample"):
if item["video"] in processed:
continue
if not os.path.exists(item["video_path"]):
print(f" [skip] Video not found: {item['video_path']}")
continue
try:
raw_output = run_inference(
model, processor, item["video_path"], item["audio_path"],
args.max_new_tokens, args.temperature,
)
except Exception as exc:
import traceback
print(f" [error] {item['video']}: {exc}")
traceback.print_exc()
raw_output = ""
        pred = extract_prediction(raw_output)
        if args.gpt_judge and raw_output and pred["parse_method"] != "json":
            # JSON extraction failed; consult the GPT judge before settling for the regex fallback.
            gpt_pred = gpt_extract_prediction(
                raw_output, api_key=args.openai_api_key, model=args.gpt_model,
            )
            if gpt_pred is not None:
                pred = gpt_pred
result = {
"video": item["video"],
"video_path": item["video_path"],
"gt_synced": item["gt_synced"],
"gt_direction": item["gt_direction"],
"gt_offset_sec": item["gt_offset_sec"],
"gt_t_v": item["gt_t_v"],
"gt_t_a": item["gt_t_a"],
"pred_synced": pred["pred_synced"],
"pred_direction": pred["pred_direction"],
"pred_offset_sec": pred["pred_offset_sec"],
"pred_t_v": pred.get("pred_t_v"),
"pred_t_a": pred.get("pred_t_a"),
"pred_explanation": pred["pred_explanation"],
"parse_method": pred["parse_method"],
"raw_output": raw_output,
}
with open(results_jsonl, "a", encoding="utf-8") as f:
f.write(json.dumps(result, ensure_ascii=False) + "\n")
processed.add(item["video"])
gc.collect()
torch.cuda.empty_cache()
# Load all results and compute metrics
all_results = []
with open(results_jsonl) as f:
for line in f:
all_results.append(json.loads(line))
metrics = compute_metrics(all_results)
metrics["eval_config"] = {
"base_model": args.base_model,
"adapter": args.adapter,
"data_root": str(args.data_root),
"test_jsonl": str(test_jsonl),
"total_test_samples": len(test_data),
"max_new_tokens": args.max_new_tokens,
"temperature": args.temperature,
"gpt_judge": args.gpt_judge,
"gpt_model": args.gpt_model if args.gpt_judge else None,
}
with open(metrics_json, "w", encoding="utf-8") as f:
json.dump(metrics, f, indent=2, ensure_ascii=False)
print_summary(metrics, label)
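    # Capture the same summary into summary.txt by re-running print_summary with stdout redirected.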
with open(summary_txt, "w", encoding="utf-8") as f:
import io, contextlib
buf = io.StringIO()
with contextlib.redirect_stdout(buf):
print_summary(metrics, label)
f.write(buf.getvalue())
print(f"\n[output] Results JSONL: {results_jsonl}")
print(f"[output] Metrics JSON: {metrics_json}")
print(f"[output] Summary: {summary_txt}")
if __name__ == "__main__":
main()