Snapshot 20260424-2142
This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- data_gen/av_mcqa_pipeline/01_select_finevideo.py +223 -0
- data_gen/av_mcqa_pipeline/01b_select_local_finevideo.py +70 -0
- data_gen/av_mcqa_pipeline/02_gemini_generate_av_mcqa.py +323 -0
- data_gen/av_mcqa_pipeline/02b_openrouter_generate_local.py +170 -0
- data_gen/av_mcqa_pipeline/03_gpt5_video_only_filter.py +170 -0
- data_gen/av_mcqa_pipeline/03b_gpt5_filter_local.py +150 -0
- data_gen/av_mcqa_pipeline/04_assemble_dpo.py +92 -0
- data_gen/av_mcqa_pipeline/05_rewrite_chosen_longform.py +144 -0
- data_gen/av_mcqa_pipeline/README.md +109 -0
- data_gen/build_dpo_dataset.py +431 -0
- data_gen/convert_mcqa_to_dpo.py +118 -0
- data_gen/export_disagreement_to_csv.py +341 -0
- data_gen/export_frame_selection_to_csv.py +160 -0
- data_gen/gemini_annotator_multikey.py +778 -0
- data_gen/generate_activitynet_chosen.py +176 -0
- data_gen/generate_activitynet_rejected.py +350 -0
- data_gen/generate_onpolicy_dpo.py +478 -0
- data_gen/multi_model_frame_selection_compare.py +335 -0
- data_gen/multi_model_visual_grounding.py +385 -0
- eval/compute_videomme_metrics_from_jsonl.py +177 -0
- eval/download_lvbench.py +62 -0
- eval/download_videomme.py +76 -0
- eval/eval_dpo_sync.py +841 -0
- eval/eval_lvbench.py +647 -0
- eval/eval_videomme.py +679 -0
- finevideo/download_finevideo_subset.py +254 -0
- finevideo/extract_audio_finevideo.py +94 -0
- finevideo/filter_finevideo.py +431 -0
- finevideo/generate_finevideo_dpo.py +307 -0
- finevideo/generate_finevideo_sft.py +606 -0
- hf_upload/COMMANDS.md +168 -0
- hf_upload/README.md +80 -0
- hf_upload/data/kto_training_data_v2_test.jsonl +0 -0
- hf_upload/scripts/compute_videomme_metrics_from_jsonl.py +177 -0
- hf_upload/scripts/download_lvbench.py +62 -0
- hf_upload/scripts/download_videomme.py +76 -0
- hf_upload/scripts/eval_dpo_sync.py +841 -0
- hf_upload/scripts/eval_lvbench.py +647 -0
- hf_upload/scripts/eval_videomme.py +679 -0
- hf_upload/scripts/merge_adapter.py +139 -0
- hf_upload/setup_data.sh +179 -0
- hf_upload/setup_env.sh +126 -0
- merge_adapter.py +139 -0
- run_qwen3omni_sync_diag_3prompts.sh +94 -0
- sft_model_convert_and_onpolicy_dpo/convert_thinker_to_full.py +189 -0
- sft_model_convert_and_onpolicy_dpo/generate_onpolicy_dpo.py +422 -0
- sft_model_convert_and_onpolicy_dpo/stage1_vllm.py +269 -0
- sync_research/audio_llm_benchmark.py +1463 -0
- sync_research/audio_llm_benchmark_shifted.py +621 -0
- sync_research/batch_shift_videos.py +200 -0
data_gen/av_mcqa_pipeline/01_select_finevideo.py
ADDED
@@ -0,0 +1,223 @@
#!/usr/bin/env python3
"""
Step 1: Select FineVideo videos for AV-MCQA generation, then download
the missing ones from Hugging Face.

Selection rules:
- source pool: filtered_candidates_all_passing.jsonl (4616 entries)
- EXCLUDE videos already used in existing local cache (1000 in
  filtered_candidates.jsonl), so new generation gets *new* videos
- mean audioVisualCorrelation per scene >= MIN_MEAN_AVC
- duration in [MIN_DURATION, MAX_DURATION]
- balanced across content_parent_category (max MAX_PER_CATEGORY each)
- target ~TOTAL_TARGET videos

Download missing videos via streaming the FineVideo HF dataset, saving:
- /opt/dlami/nvme/finevideo_av_mcqa/videos/finevideo_{idx:06d}.mp4
- /opt/dlami/nvme/finevideo_av_mcqa/audio/finevideo_{idx:06d}.wav (extracted via ffmpeg)

Output: /opt/dlami/nvme/av_mcqa/01_selected_videos.jsonl (one selected video per line)
"""

import json
import os
import subprocess
from collections import defaultdict
from pathlib import Path

CANDIDATES_ALL = "/opt/dlami/nvme/finevideo/filtered_candidates_all_passing.jsonl"
CANDIDATES_LOCAL = "/opt/dlami/nvme/finevideo/filtered_candidates.jsonl"  # exclude these (already used)

NEW_VIDEO_DIR = "/opt/dlami/nvme/finevideo_av_mcqa/videos"
NEW_AUDIO_DIR = "/opt/dlami/nvme/finevideo_av_mcqa/audio"
OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/01_selected_videos.jsonl"

MIN_MEAN_AVC = 0.85
MIN_DURATION = 30
MAX_DURATION = 180
MAX_PER_CATEGORY = 100
TOTAL_TARGET = 800


def fname(idx: int) -> str:
    return f"finevideo_{idx:06d}"


def select_targets() -> list[dict]:
    """Pick target videos by filtering metadata only (no downloads yet)."""
    all_pool = []
    with open(CANDIDATES_ALL) as f:
        for line in f:
            all_pool.append(json.loads(line))

    already_local_idx = set()
    with open(CANDIDATES_LOCAL) as f:
        for line in f:
            already_local_idx.add(json.loads(line)["finevideo_idx"])

    print(f"Pool: {len(all_pool)} (all_passing), excluding {len(already_local_idx)} already-used local")

    fresh = [v for v in all_pool if v["finevideo_idx"] not in already_local_idx]
    print(f"Fresh candidates (not yet locally used): {len(fresh)}")

    selected = []
    for v in fresh:
        d = v["duration_seconds"]
        if d < MIN_DURATION or d > MAX_DURATION:
            continue
        avcs = [s["audioVisualCorrelation"] for s in v["scenes"]
                if "audioVisualCorrelation" in s
                and 0.0 <= s["audioVisualCorrelation"] <= 1.0]
        if not avcs:
            continue
        m = sum(avcs) / len(avcs)
        if m < MIN_MEAN_AVC:
            continue
        v["_mean_avc"] = m
        selected.append(v)

    print(f"After AVC/duration filter: {len(selected)}")

    # Balance per category
    by_cat = defaultdict(list)
    for v in selected:
        by_cat[v.get("content_parent_category", "Unknown")].append(v)
    for cat in by_cat:
        by_cat[cat].sort(key=lambda x: -x["_mean_avc"])

    print("\nPer-category counts (after AVC filter):")
    for cat in sorted(by_cat):
        print(f"  {cat:30s} {len(by_cat[cat])}")

    balanced = []
    for cat, vs in by_cat.items():
        balanced.extend(vs[:MAX_PER_CATEGORY])
    balanced.sort(key=lambda x: -x["_mean_avc"])
    balanced = balanced[:TOTAL_TARGET]

    print(f"\nFinal selection: {len(balanced)} videos")
    final_cat = defaultdict(int)
    for v in balanced:
        final_cat[v["content_parent_category"]] += 1
    for cat, n in sorted(final_cat.items(), key=lambda x: -x[1]):
        print(f"  {cat:30s} {n}")

    return balanced


def extract_audio(mp4_path: str, wav_path: str) -> bool:
    """ffmpeg: extract audio to mono 16k wav."""
    try:
        subprocess.run(
            ["ffmpeg", "-y", "-loglevel", "error",
             "-i", mp4_path, "-vn",
             "-ac", "1", "-ar", "16000", wav_path],
            check=True,
        )
        return True
    except subprocess.CalledProcessError:
        return False


def download_videos(targets: list[dict]) -> list[dict]:
    """Stream the FineVideo HF dataset and save matching mp4 + extract wav."""
    os.makedirs(NEW_VIDEO_DIR, exist_ok=True)
    os.makedirs(NEW_AUDIO_DIR, exist_ok=True)

    target_map = {v["finevideo_idx"]: v for v in targets}

    # Skip if already downloaded
    remaining = {}
    ready = []
    for idx, v in target_map.items():
        mp4_path = os.path.join(NEW_VIDEO_DIR, f"{fname(idx)}.mp4")
        wav_path = os.path.join(NEW_AUDIO_DIR, f"{fname(idx)}.wav")
        if os.path.exists(mp4_path) and os.path.exists(wav_path):
            v["_video_path"] = mp4_path
            v["_audio_path"] = wav_path
            v["_filename"] = fname(idx)
            ready.append(v)
        else:
            remaining[idx] = v

    print(f"\nAlready downloaded: {len(ready)}")
    print(f"To download: {len(remaining)}")

    if not remaining:
        return ready

    # Stream the HF dataset; pick out matching indices.
    # NOTE: streaming through a multi-TB dataset to find sparse targets
    # can be slow. If you run this often, consider sharded downloads.
    from datasets import load_dataset

    ds = load_dataset("HuggingFaceFV/finevideo", split="train", streaming=True)

    saved = 0
    target_idx_set = set(remaining)
    for i, sample in enumerate(ds):
        if i not in target_idx_set:
            continue

        try:
            mp4_bytes = sample["mp4"]
            mp4_path = os.path.join(NEW_VIDEO_DIR, f"{fname(i)}.mp4")
            with open(mp4_path, "wb") as f:
                f.write(mp4_bytes)

            wav_path = os.path.join(NEW_AUDIO_DIR, f"{fname(i)}.wav")
            if not extract_audio(mp4_path, wav_path):
                # video has no audio? skip
                os.remove(mp4_path)
                continue

            v = remaining[i]
            v["_video_path"] = mp4_path
            v["_audio_path"] = wav_path
            v["_filename"] = fname(i)
            ready.append(v)
            saved += 1
            target_idx_set.discard(i)

            if saved % 20 == 0:
                print(f"  downloaded {saved}/{len(remaining)} (last idx={i})")

            if not target_idx_set:
                break
        except Exception as e:
            print(f"  download failed idx={i}: {e}")

    print(f"\nDownloaded {saved} new videos")
    if target_idx_set:
        print(f"WARN: {len(target_idx_set)} targets not found in stream "
              f"(dataset may be sharded differently)")
    return ready


def main():
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
    targets = select_targets()

    ready = download_videos(targets)
    print(f"\nTotal ready (downloaded + with audio): {len(ready)}")

    with open(OUTPUT_FILE, "w") as f:
        for v in ready:
            slim = {
                "finevideo_idx": v["finevideo_idx"],
                "filename": v["_filename"],
                "video_path": v["_video_path"],
                "audio_path": v["_audio_path"],
                "duration_seconds": v["duration_seconds"],
                "mean_avc": round(v["_mean_avc"], 3),
                "content_parent_category": v["content_parent_category"],
                "content_fine_category": v["content_fine_category"],
                "youtube_title": v.get("youtube_title", ""),
                "scenes": v["scenes"],
            }
            f.write(json.dumps(slim, ensure_ascii=False) + "\n")
    print(f"\nWrote {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
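For reference, each line of 01_selected_videos.jsonl is the slim record written by main() above; the field names come from the script, while the values below are illustrative placeholders (not output from a real run):

{"finevideo_idx": 123456, "filename": "finevideo_123456", "video_path": "/opt/dlami/nvme/finevideo_av_mcqa/videos/finevideo_123456.mp4", "audio_path": "/opt/dlami/nvme/finevideo_av_mcqa/audio/finevideo_123456.wav", "duration_seconds": 95, "mean_avc": 0.912, "content_parent_category": "Education", "content_fine_category": "...", "youtube_title": "...", "scenes": [...]}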
data_gen/av_mcqa_pipeline/01b_select_local_finevideo.py
ADDED
@@ -0,0 +1,70 @@
#!/usr/bin/env python3
"""
Step 1b: Select the 1000 LOCAL FineVideo videos (already downloaded,
already used in finevideo_dpo) for additional AV-MCQA generation.

These videos already have long-description DPO pairs, but AV-MCQA is a
completely different format (short 4-choice questions requiring audio),
so reusing the same videos is fine.

Output: /opt/dlami/nvme/av_mcqa/01b_selected_local_videos.jsonl
"""

import json
import os

CANDIDATES_LOCAL = "/opt/dlami/nvme/finevideo/filtered_candidates.jsonl"
VIDEO_DIR = "/opt/dlami/nvme/finevideo/videos"
AUDIO_DIR = "/opt/dlami/nvme/finevideo/audio"
OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/01b_selected_local_videos.jsonl"


def fname(idx: int) -> str:
    return f"finevideo_{idx:06d}"


def main():
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    candidates = []
    with open(CANDIDATES_LOCAL) as f:
        for line in f:
            candidates.append(json.loads(line))
    print(f"Local candidates: {len(candidates)}")

    ready = []
    for v in candidates:
        fn = fname(v["finevideo_idx"])
        mp4 = os.path.join(VIDEO_DIR, f"{fn}.mp4")
        wav = os.path.join(AUDIO_DIR, f"{fn}.wav")
        if not os.path.exists(mp4) or not os.path.exists(wav):
            continue

        avcs = [s["audioVisualCorrelation"] for s in v["scenes"]
                if "audioVisualCorrelation" in s
                and 0.0 <= s["audioVisualCorrelation"] <= 1.0]
        mean_avc = sum(avcs) / len(avcs) if avcs else 0.0

        ready.append({
            "finevideo_idx": v["finevideo_idx"],
            "filename": fn,
            "video_path": mp4,
            "audio_path": wav,
            "duration_seconds": v["duration_seconds"],
            "mean_avc": round(mean_avc, 3),
            "content_parent_category": v.get("content_parent_category", "Unknown"),
            "content_fine_category": v.get("content_fine_category", "Unknown"),
            "youtube_title": v.get("youtube_title", ""),
            "scenes": v["scenes"],
        })

    print(f"Videos with mp4+wav: {len(ready)}")

    with open(OUTPUT_FILE, "w") as f:
        for v in ready:
            f.write(json.dumps(v, ensure_ascii=False) + "\n")
    print(f"Wrote {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
data_gen/av_mcqa_pipeline/02_gemini_generate_av_mcqa.py
ADDED
@@ -0,0 +1,323 @@
#!/usr/bin/env python3
"""
Step 2: For each selected FineVideo video, ask a video-capable model
(audio + visual) to generate Daily-Omni-style AV-MCQAs.

Backends, in priority order:
1. Gemini direct API (google-genai), with up to 3 keys rotating.
   - GOOGLE_API_KEY_1 / GOOGLE_API_KEY_2 / GOOGLE_API_KEY_3
   - or fallback to GOOGLE_API_KEY (single)
2. OpenRouter (openai-compatible), used when ALL Gemini keys hit
   quota / rate-limit / 5xx.
   - OPENROUTER_API_KEY
   - default model: google/gemini-3.1-pro-preview (overridable)

Output JSONL per video. Resumable.

Usage:
    export GOOGLE_API_KEY_1=...
    export GOOGLE_API_KEY_2=...
    export GOOGLE_API_KEY_3=...
    export OPENROUTER_API_KEY=...
    python 02_gemini_generate_av_mcqa.py
"""

import os
import json
import time
import base64
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path

INPUT_FILE = os.environ.get("AV_MCQA_INPUT", "/opt/dlami/nvme/av_mcqa/01_selected_videos.jsonl")
OUTPUT_FILE = os.environ.get("AV_MCQA_OUTPUT", "/opt/dlami/nvme/av_mcqa/02_gemini_generated.jsonl")

GEMINI_MODEL = os.environ.get("GEMINI_MODEL", "gemini-3.1-pro-preview")
OPENROUTER_MODEL = os.environ.get("OPENROUTER_MODEL", "google/gemini-3.1-pro-preview")

MAX_WORKERS = 6
QUESTIONS_PER_VIDEO = 3

GEN_PROMPT = """You are designing multiple-choice questions for a video benchmark that tests audio-visual reasoning.

Watch and listen to the attached video carefully. Then generate exactly {N} multiple-choice questions of the following types (vary across types if possible):

1. AV Event Alignment: "What visual element appeared when [audio event]?" or "Which audio event matches [visual event]?"
2. Event Sequence: chronological order of audio-visual events.
3. Context Understanding: what audio + visual elements coexist at a moment.
4. Inference: based on what you see AND hear, infer purpose, emotion, intent, or relationship.
5. Reasoning: causal reasoning combining audio and visual evidence.
6. Comparative: contrast two AV elements or moments.

STRICT REQUIREMENTS for each question:
- Must REQUIRE both audio AND video to answer correctly. A model with only video frames (no audio) MUST NOT be able to answer.
- 4 plausible options (A/B/C/D). Distractors must be semantically close, not silly.
- Provide the correct letter and a brief evidence note ("audio: ..., visual: ...").
- Avoid yes/no questions and trivia answerable from title alone.

Return STRICT JSON, no prose:
{{
  "qa_pairs": [
    {{
      "type": "AV Event Alignment",
      "question": "...",
      "options": {{"A": "...", "B": "...", "C": "...", "D": "..."}},
      "answer": "B",
      "audio_evidence": "around 0:12, the speaker says X",
      "visual_evidence": "at the same moment, Y appears on screen"
    }}
  ]
}}
"""


# =============================================================================
# Backend pool: round-robin over Gemini keys, fallback to OpenRouter
# =============================================================================

class GeminiBackend:
    """One Gemini key wrapper. Marks itself disabled on quota error."""

    def __init__(self, api_key: str, label: str):
        self.label = label
        self.api_key = api_key
        self.disabled = False
        self.disabled_reason = ""
        self._client = None
        self._lock = threading.Lock()

    def client(self):
        if self._client is None:
            from google import genai
            self._client = genai.Client(api_key=self.api_key)
        return self._client

    def call(self, video_path: str, prompt_text: str) -> dict:
        from google.genai import types
        c = self.client()
        uploaded = c.files.upload(file=video_path)
        for _ in range(60):
            f = c.files.get(name=uploaded.name)
            if f.state.name == "ACTIVE":
                uploaded = f
                break
            time.sleep(1)

        try:
            resp = c.models.generate_content(
                model=GEMINI_MODEL,
                contents=[uploaded, prompt_text],
                config=types.GenerateContentConfig(
                    temperature=0.4,
                    response_mime_type="application/json",
                ),
            )
            return json.loads(resp.text)
        finally:
            try:
                c.files.delete(name=uploaded.name)
            except Exception:
                pass


class OpenRouterBackend:
    """OpenRouter Gemini fallback. Sends video as inline file (URL or base64)."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self._client = None

    def client(self):
        if self._client is None:
            from openai import OpenAI
            self._client = OpenAI(
                base_url="https://openrouter.ai/api/v1",
                api_key=self.api_key,
                default_headers={
                    "HTTP-Referer": "https://localhost",
                    "X-Title": "av-mcqa-pipeline",
                },
            )
        return self._client

    def call(self, video_path: str, prompt_text: str) -> dict:
        c = self.client()
        with open(video_path, "rb") as f:
            data = f.read()
        b64 = base64.b64encode(data).decode("utf-8")

        # OpenRouter Gemini supports file_data via inputs
        # Format: input as "file" type with data URL
        resp = c.chat.completions.create(
            model=OPENROUTER_MODEL,
            messages=[{
                "role": "user",
                "content": [
                    {"type": "file",
                     "file": {"file_data": f"data:video/mp4;base64,{b64}",
                              "filename": os.path.basename(video_path)}},
                    {"type": "text", "text": prompt_text},
                ],
            }],
            response_format={"type": "json_object"},
            temperature=0.4,
        )
        text = resp.choices[0].message.content or "{}"
        return json.loads(text)


class BackendPool:
    """Try Gemini keys round-robin; fall back to OpenRouter when all dead."""

    QUOTA_KEYWORDS = ("quota", "rate", "exhaust", "429", "RESOURCE_EXHAUSTED",
                      "exceeded", "limit")

    def __init__(self):
        self.gemini_backends: list[GeminiBackend] = []
        for i in (1, 2, 3):
            k = os.environ.get(f"GOOGLE_API_KEY_{i}")
            if k:
                self.gemini_backends.append(GeminiBackend(k, f"gemini_{i}"))
        if not self.gemini_backends:
            k = os.environ.get("GOOGLE_API_KEY")
            if k:
                self.gemini_backends.append(GeminiBackend(k, "gemini_default"))

        or_key = os.environ.get("OPENROUTER_API_KEY")
        self.openrouter = OpenRouterBackend(or_key) if or_key else None

        if not self.gemini_backends and not self.openrouter:
            raise RuntimeError(
                "No API keys found. Set GOOGLE_API_KEY_1/2/3 or GOOGLE_API_KEY, "
                "and/or OPENROUTER_API_KEY."
            )

        self._next = 0
        self._lock = threading.Lock()
        print(f"Backends initialized: gemini={len(self.gemini_backends)}, "
              f"openrouter={'yes' if self.openrouter else 'no'}")

    def _is_quota_error(self, err: Exception) -> bool:
        s = str(err).lower()
        return any(k.lower() in s for k in self.QUOTA_KEYWORDS)

    def _next_alive_gemini(self) -> GeminiBackend | None:
        with self._lock:
            n = len(self.gemini_backends)
            for _ in range(n):
                b = self.gemini_backends[self._next % n]
                self._next += 1
                if not b.disabled:
                    return b
        return None

    def call(self, video_path: str, prompt_text: str) -> dict:
        # Try alive Gemini keys
        last_err = None
        for _ in range(len(self.gemini_backends) + 1):
            b = self._next_alive_gemini()
            if b is None:
                break
            try:
                return b.call(video_path, prompt_text)
            except Exception as e:
                last_err = e
                if self._is_quota_error(e):
                    b.disabled = True
                    b.disabled_reason = str(e)[:200]
                    print(f"  [{b.label}] disabled (quota): {b.disabled_reason}")
                    continue
                else:
                    # Non-quota error: try next backend, but don't disable this key
                    print(f"  [{b.label}] transient error: {e}")
                    continue

        # All Gemini exhausted -> OpenRouter
        if self.openrouter:
            for attempt in range(3):
                try:
                    return self.openrouter.call(video_path, prompt_text)
                except Exception as e:
                    last_err = e
                    if attempt < 2:
                        time.sleep(2 ** (attempt + 1))

        raise RuntimeError(f"All backends failed; last error: {last_err}")


# =============================================================================
# Driver
# =============================================================================

def load_done() -> set:
    done = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE) as f:
            for line in f:
                try:
                    done.add(json.loads(line)["finevideo_idx"])
                except Exception:
                    pass
    return done


def process_one(pool: BackendPool, video_meta: dict) -> dict:
    idx = video_meta["finevideo_idx"]
    prompt = GEN_PROMPT.format(N=QUESTIONS_PER_VIDEO)
    try:
        data = pool.call(video_meta["video_path"], prompt)
        qa_pairs = data.get("qa_pairs", [])
        return {
            "finevideo_idx": idx,
            "filename": video_meta["filename"],
            "video_path": video_meta["video_path"],
            "audio_path": video_meta["audio_path"],
            "duration_seconds": video_meta["duration_seconds"],
            "mean_avc": video_meta["mean_avc"],
            "content_parent_category": video_meta["content_parent_category"],
            "content_fine_category": video_meta["content_fine_category"],
            "qa_pairs": qa_pairs,
        }
    except Exception as e:
        return {"finevideo_idx": idx, "error": str(e)}


def main():
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
    pool = BackendPool()

    videos = []
    with open(INPUT_FILE) as f:
        for line in f:
            videos.append(json.loads(line))
    print(f"Input videos: {len(videos)}")

    done = load_done()
    todo = [v for v in videos if v["finevideo_idx"] not in done]
    print(f"Resuming: {len(done)} done, {len(todo)} todo")
    if not todo:
        return

    success = errors = 0
    with open(OUTPUT_FILE, "a") as out_f:
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as ex:
            futs = {ex.submit(process_one, pool, v): v for v in todo}
            for i, fut in enumerate(as_completed(futs), 1):
                rec = fut.result()
                if "error" in rec:
                    errors += 1
                    print(f"  [{i}/{len(todo)}] ERROR idx={rec['finevideo_idx']}: {rec['error'][:200]}")
                else:
                    success += 1
                    out_f.write(json.dumps(rec, ensure_ascii=False) + "\n")
                    out_f.flush()
                if i % 20 == 0:
                    print(f"  [{i}/{len(todo)}] success={success} errors={errors}")

    print(f"\nDone. success={success} errors={errors}")
    print(f"Output: {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
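Before running the video-only filter, it can be worth sanity-checking the generated records against the JSON schema the prompt demands (A-D options, answer letter among them). A minimal standalone sketch, not part of the pipeline; it only reads the step-2 output path above:

#!/usr/bin/env python3
# Hypothetical sanity check for the step-2 output (not a pipeline script).
import json
from collections import Counter

path = "/opt/dlami/nvme/av_mcqa/02_gemini_generated.jsonl"
n_videos = 0
qa_hist = Counter()
malformed = 0
with open(path) as f:
    for line in f:
        rec = json.loads(line)
        n_videos += 1
        qas = rec.get("qa_pairs", [])  # error records carry no qa_pairs
        qa_hist[len(qas)] += 1
        for qa in qas:
            # The prompt requires options A-D and an answer letter among them.
            opts = qa.get("options", {})
            if set(opts) != {"A", "B", "C", "D"} or qa.get("answer") not in opts:
                malformed += 1
print(f"videos={n_videos}, qa-count histogram={dict(qa_hist)}, malformed={malformed}")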
data_gen/av_mcqa_pipeline/02b_openrouter_generate_local.py
ADDED
@@ -0,0 +1,170 @@
#!/usr/bin/env python3
"""
Step 2b: Generate AV-MCQA for local 1000 FineVideo videos using
OpenRouter only (Gemini quota exhausted).

Reads:  /opt/dlami/nvme/av_mcqa/01b_selected_local_videos.jsonl
Writes: /opt/dlami/nvme/av_mcqa/02b_gemini_generated_local.jsonl

Usage:
    export OPENROUTER_API_KEY=...
    python 02b_openrouter_generate_local.py
"""

import os
import json
import time
import base64
from concurrent.futures import ThreadPoolExecutor, as_completed

OPENROUTER_MODEL = os.environ.get("OPENROUTER_MODEL", "google/gemini-3.1-pro-preview")

INPUT_FILE = "/opt/dlami/nvme/av_mcqa/01b_selected_local_videos.jsonl"
OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/02b_gemini_generated_local.jsonl"

MAX_WORKERS = 6
QUESTIONS_PER_VIDEO = 3

GEN_PROMPT = """You are designing multiple-choice questions for a video benchmark that tests audio-visual reasoning.

Watch and listen to the attached video carefully. Then generate exactly {N} multiple-choice questions of the following types (vary across types if possible):

1. AV Event Alignment: "What visual element appeared when [audio event]?" or "Which audio event matches [visual event]?"
2. Event Sequence: chronological order of audio-visual events.
3. Context Understanding: what audio + visual elements coexist at a moment.
4. Inference: based on what you see AND hear, infer purpose, emotion, intent, or relationship.
5. Reasoning: causal reasoning combining audio and visual evidence.
6. Comparative: contrast two AV elements or moments.

STRICT REQUIREMENTS for each question:
- Must REQUIRE both audio AND video to answer correctly. A model with only video frames (no audio) MUST NOT be able to answer.
- 4 plausible options (A/B/C/D). Distractors must be semantically close, not silly.
- Provide the correct letter and a brief evidence note ("audio: ..., visual: ...").
- Avoid yes/no questions and trivia answerable from title alone.

Return STRICT JSON, no prose:
{{
  "qa_pairs": [
    {{
      "type": "AV Event Alignment",
      "question": "...",
      "options": {{"A": "...", "B": "...", "C": "...", "D": "..."}},
      "answer": "B",
      "audio_evidence": "around 0:12, the speaker says X",
      "visual_evidence": "at the same moment, Y appears on screen"
    }}
  ]
}}
"""


def load_done() -> set:
    done = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE) as f:
            for line in f:
                try:
                    done.add(json.loads(line)["finevideo_idx"])
                except Exception:
                    pass
    return done


def get_client():
    from openai import OpenAI
    return OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=os.environ["OPENROUTER_API_KEY"],
        default_headers={
            "HTTP-Referer": "https://localhost",
            "X-Title": "av-mcqa-pipeline",
        },
    )


def call_openrouter(client, video_path: str, prompt_text: str) -> dict:
    with open(video_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode("utf-8")

    resp = client.chat.completions.create(
        model=OPENROUTER_MODEL,
        messages=[{
            "role": "user",
            "content": [
                {"type": "file",
                 "file": {"file_data": f"data:video/mp4;base64,{b64}",
                          "filename": os.path.basename(video_path)}},
                {"type": "text", "text": prompt_text},
            ],
        }],
        response_format={"type": "json_object"},
        temperature=0.4,
    )
    text = resp.choices[0].message.content or "{}"
    return json.loads(text)


def process_one(client, video_meta: dict) -> dict:
    idx = video_meta["finevideo_idx"]
    prompt = GEN_PROMPT.format(N=QUESTIONS_PER_VIDEO)
    for attempt in range(3):
        try:
            data = call_openrouter(client, video_meta["video_path"], prompt)
            qa_pairs = data.get("qa_pairs", [])
            return {
                "finevideo_idx": idx,
                "filename": video_meta["filename"],
                "video_path": video_meta["video_path"],
                "audio_path": video_meta["audio_path"],
                "duration_seconds": video_meta["duration_seconds"],
                "mean_avc": video_meta["mean_avc"],
                "content_parent_category": video_meta["content_parent_category"],
                "content_fine_category": video_meta["content_fine_category"],
                "qa_pairs": qa_pairs,
            }
        except Exception as e:
            if attempt < 2:
                time.sleep(2 ** (attempt + 1))
            else:
                return {"finevideo_idx": idx, "error": str(e)}


def main():
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
    client = get_client()

    videos = []
    with open(INPUT_FILE) as f:
        for line in f:
            videos.append(json.loads(line))
    print(f"Input videos: {len(videos)}")

    done = load_done()
    todo = [v for v in videos if v["finevideo_idx"] not in done]
    print(f"Resuming: {len(done)} done, {len(todo)} todo")
    if not todo:
        print("All done.")
        return

    success = errors = 0
    with open(OUTPUT_FILE, "a") as out_f:
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as ex:
            futs = {ex.submit(process_one, client, v): v for v in todo}
            for i, fut in enumerate(as_completed(futs), 1):
                rec = fut.result()
                if "error" in rec:
                    errors += 1
                    print(f"  [{i}/{len(todo)}] ERROR idx={rec['finevideo_idx']}: {rec['error'][:200]}")
                else:
                    success += 1
                    out_f.write(json.dumps(rec, ensure_ascii=False) + "\n")
                    out_f.flush()
                if i % 20 == 0:
                    print(f"  [{i}/{len(todo)}] success={success} errors={errors}")

    print(f"\nDone. success={success} errors={errors}")
    print(f"Output: {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
data_gen/av_mcqa_pipeline/03_gpt5_video_only_filter.py
ADDED
@@ -0,0 +1,170 @@
#!/usr/bin/env python3
"""
Step 3: For each Gemini-generated MCQA, give GPT-5.4 the same question
plus VIDEO FRAMES ONLY (no audio). If GPT picks the correct answer,
the question is answerable without audio -> drop it.

Surviving questions are guaranteed to require audio + visual jointly.

Usage:
    export OPENAI_API_KEY="sk-..."
    python 03_gpt5_video_only_filter.py
"""

import json
import os
import re
import time
import base64
import io
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path

import av
import numpy as np
from openai import OpenAI

INPUT_FILE = "/opt/dlami/nvme/av_mcqa/02_gemini_generated.jsonl"
OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/03_filtered_must_audio.jsonl"

MODEL = "gpt-5.4-2026-03-05"
MAX_WORKERS = 8
NUM_FRAMES = 12
ANSWER_RE = re.compile(r"\b([A-D])\b")


client = OpenAI()


def extract_frames(video_path: str, num_frames: int) -> list[bytes]:
    container = av.open(video_path)
    stream = next(s for s in container.streams if s.type == "video")
    total = stream.frames or 300
    indices = set(np.linspace(0, total - 1, num_frames).astype(int).tolist())
    frames = []
    for i, frame in enumerate(container.decode(stream)):
        if i in indices:
            buf = io.BytesIO()
            frame.to_image().save(buf, format="JPEG", quality=80)
            frames.append(buf.getvalue())
            if len(frames) >= num_frames:
                break
    container.close()
    return frames


def gpt_video_only_answer(video_path: str, question: str, options: dict) -> str:
    """Send frames + question, get a single-letter answer."""
    frames = extract_frames(video_path, NUM_FRAMES)
    image_msgs = []
    for f in frames:
        b64 = base64.b64encode(f).decode("utf-8")
        image_msgs.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{b64}", "detail": "low"},
        })

    options_text = "\n".join(f"{k}. {v}" for k, v in options.items())
    user_text = (
        f"You are watching a silent video (no audio). "
        f"Below are evenly-spaced frames. Answer the multiple-choice question by responding with only the letter A, B, C, or D.\n\n"
        f"Question: {question}\n\n"
        f"Options:\n{options_text}\n\n"
        f"Answer with a single letter:"
    )

    resp = client.chat.completions.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You answer multiple-choice questions by responding with only one letter."},
            {"role": "user", "content": [{"type": "text", "text": user_text}, *image_msgs]},
        ],
        max_completion_tokens=4,
        temperature=0.0,
    )
    raw = (resp.choices[0].message.content or "").strip().upper()
    m = ANSWER_RE.search(raw)
    return m.group(1) if m else ""


def process_video_record(rec: dict) -> dict:
    """Filter qa_pairs of one video; keep only those GPT (video-only) gets WRONG."""
    survived = []
    for qa in rec.get("qa_pairs", []):
        question = qa.get("question", "")
        options = qa.get("options", {})
        correct = qa.get("answer", "")
        if not (question and options and correct):
            continue

        try:
            for attempt in range(3):
                try:
                    pred = gpt_video_only_answer(rec["video_path"], question, options)
                    break
                except Exception as e:
                    if attempt < 2:
                        time.sleep(2 ** (attempt + 1))
                    else:
                        raise
        except Exception as e:
            qa["_filter_error"] = str(e)
            continue

        qa["_video_only_pred"] = pred
        # KEEP only if GPT (video-only) is wrong -> question genuinely needs audio
        if pred != correct:
            survived.append(qa)

    rec["qa_pairs"] = survived
    return rec


def main():
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    records = []
    with open(INPUT_FILE) as f:
        for line in f:
            records.append(json.loads(line))
    print(f"Input video records: {len(records)}")

    # Resume
    done_idx = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE) as f:
            for line in f:
                try:
                    done_idx.add(json.loads(line)["finevideo_idx"])
                except Exception:
                    pass
    todo = [r for r in records if r["finevideo_idx"] not in done_idx]
    print(f"Resuming: {len(done_idx)} done, {len(todo)} todo")
    if not todo:
        print("All done.")
        return

    total_out = 0
    with open(OUTPUT_FILE, "a") as out_f:
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as ex:
            futures = {ex.submit(process_video_record, r): r for r in todo}
            for i, fut in enumerate(as_completed(futures), 1):
                rec = fut.result()
                total_out += len(rec["qa_pairs"])
                out_f.write(json.dumps(rec, ensure_ascii=False) + "\n")
                out_f.flush()
                if i % 20 == 0:
                    print(f"  [{i}/{len(todo)}] surviving qa total so far: {total_out}")

    print(f"\nDone. Surviving qa pairs: {total_out}")
    print(f"Output: {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
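Because step 3 keeps only the questions the video-only model gets wrong, the survival rate is a rough measure of how audio-dependent the generated MCQAs are. A minimal sketch for computing it by joining the step-2 and step-3 outputs; this is a hypothetical helper, not a repo script:

#!/usr/bin/env python3
# Hypothetical: compare question counts before (step 2) and after (step 3)
# the video-only filter to estimate the audio-required survival rate.
import json

def counts(path):
    per_video = {}
    with open(path) as f:
        for line in f:
            rec = json.loads(line)
            per_video[rec["finevideo_idx"]] = len(rec.get("qa_pairs", []))
    return per_video

before = counts("/opt/dlami/nvme/av_mcqa/02_gemini_generated.jsonl")
after = counts("/opt/dlami/nvme/av_mcqa/03_filtered_must_audio.jsonl")
n_before = sum(before.get(i, 0) for i in after)  # only videos the filter finished
n_after = sum(after.values())
print(f"{n_after}/{n_before} questions survived the video-only filter "
      f"({100 * n_after / max(n_before, 1):.1f}%)")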
data_gen/av_mcqa_pipeline/03b_gpt5_filter_local.py
ADDED
@@ -0,0 +1,150 @@
#!/usr/bin/env python3
"""
Step 3b: GPT-5.4 video-only filter for the local 1000 FineVideo batch.
Same logic as 03_gpt5_video_only_filter.py but reads/writes different files.

Reads:  /opt/dlami/nvme/av_mcqa/02b_gemini_generated_local.jsonl
Writes: /opt/dlami/nvme/av_mcqa/03b_filtered_must_audio_local.jsonl
"""

import json
import os
import re
import time
import base64
import io
from concurrent.futures import ThreadPoolExecutor, as_completed

import av
import numpy as np
from openai import OpenAI

INPUT_FILE = "/opt/dlami/nvme/av_mcqa/02b_gemini_generated_local.jsonl"
OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/03b_filtered_must_audio_local.jsonl"

MODEL = "gpt-5.4-2026-03-05"
MAX_WORKERS = 8
NUM_FRAMES = 12
ANSWER_RE = re.compile(r"\b([A-D])\b")

client = OpenAI()


def extract_frames(video_path: str, num_frames: int) -> list[bytes]:
    container = av.open(video_path)
    stream = next(s for s in container.streams if s.type == "video")
    total = stream.frames or 300
    indices = set(np.linspace(0, total - 1, num_frames).astype(int).tolist())
    frames = []
    for i, frame in enumerate(container.decode(stream)):
        if i in indices:
            buf = io.BytesIO()
            frame.to_image().save(buf, format="JPEG", quality=80)
            frames.append(buf.getvalue())
            if len(frames) >= num_frames:
                break
    container.close()
    return frames


def gpt_video_only_answer(video_path: str, question: str, options: dict) -> str:
    frames = extract_frames(video_path, NUM_FRAMES)
    image_msgs = []
    for f in frames:
        b64 = base64.b64encode(f).decode("utf-8")
        image_msgs.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{b64}", "detail": "low"},
        })

    options_text = "\n".join(f"{k}. {v}" for k, v in options.items())
    user_text = (
        f"You are watching a silent video (no audio). "
        f"Below are evenly-spaced frames. Answer the multiple-choice question by responding with only the letter A, B, C, or D.\n\n"
        f"Question: {question}\n\n"
        f"Options:\n{options_text}\n\n"
        f"Answer with a single letter:"
    )

    resp = client.chat.completions.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You answer multiple-choice questions by responding with only one letter."},
            {"role": "user", "content": [{"type": "text", "text": user_text}, *image_msgs]},
        ],
        max_completion_tokens=4,
        temperature=0.0,
    )
    raw = (resp.choices[0].message.content or "").strip().upper()
    m = ANSWER_RE.search(raw)
    return m.group(1) if m else ""


def process_video_record(rec: dict) -> dict:
    survived = []
    for qa in rec.get("qa_pairs", []):
        question = qa.get("question", "")
        options = qa.get("options", {})
        correct = qa.get("answer", "")
        if not (question and options and correct):
            continue
        try:
            for attempt in range(3):
                try:
                    pred = gpt_video_only_answer(rec["video_path"], question, options)
                    break
                except Exception as e:
                    if attempt < 2:
                        time.sleep(2 ** (attempt + 1))
                    else:
                        raise
        except Exception as e:
            continue
        qa["_video_only_pred"] = pred
        if pred != correct:
            survived.append(qa)
    rec["qa_pairs"] = survived
    return rec


def main():
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    records = []
    with open(INPUT_FILE) as f:
        for line in f:
            records.append(json.loads(line))
    print(f"Input video records: {len(records)}")

    done_idx = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE) as f:
            for line in f:
                try:
                    done_idx.add(json.loads(line)["finevideo_idx"])
                except Exception:
                    pass
    todo = [r for r in records if r["finevideo_idx"] not in done_idx]
    print(f"Resuming: {len(done_idx)} done, {len(todo)} todo")
    if not todo:
        print("All done.")
        return

    total_out = 0
    with open(OUTPUT_FILE, "a") as out_f:
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as ex:
            futures = {ex.submit(process_video_record, r): r for r in todo}
            for i, fut in enumerate(as_completed(futures), 1):
                rec = fut.result()
                total_out += len(rec["qa_pairs"])
                out_f.write(json.dumps(rec, ensure_ascii=False) + "\n")
                out_f.flush()
                if i % 20 == 0:
                    print(f"  [{i}/{len(todo)}] surviving qa total so far: {total_out}")

    print(f"\nDone. Surviving qa pairs: {total_out}")
    print(f"Output: {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
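Steps 01 and 01b select disjoint video sets by construction (01 explicitly excludes the local 1000), so before step 4 merges the two filtered files, a quick disjointness assertion can catch path mix-ups. A hypothetical pre-merge check, not part of the pipeline:

#!/usr/bin/env python3
# Hypothetical pre-merge check: the two filtered files should never share
# a finevideo_idx, since step 01 excludes the videos step 01b selects.
import json

def idx_set(path):
    with open(path) as f:
        return {json.loads(line)["finevideo_idx"] for line in f}

a = idx_set("/opt/dlami/nvme/av_mcqa/03_filtered_must_audio.jsonl")
b = idx_set("/opt/dlami/nvme/av_mcqa/03b_filtered_must_audio_local.jsonl")
overlap = a & b
assert not overlap, f"unexpected overlap: {sorted(overlap)[:10]}"
print(f"OK: {len(a)} new-batch videos, {len(b)} local videos, no overlap")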
data_gen/av_mcqa_pipeline/04_assemble_dpo.py
ADDED
@@ -0,0 +1,92 @@
#!/usr/bin/env python3
"""
Step 4: Convert filtered AV-MCQAs into LlamaFactory DPO format.

For each surviving QA:
  chosen   = correct option (letter + full text)
  rejected = randomly picked wrong option (letter + full text)
"""

import json
import os
import random
from collections import Counter

random.seed(42)

INPUT_FILES = [
    "/opt/dlami/nvme/av_mcqa/03_filtered_must_audio.jsonl",         # batch 1: 767 new videos
    "/opt/dlami/nvme/av_mcqa/03b_filtered_must_audio_local.jsonl",  # batch 2: 1000 local videos
]
OUTPUT_FILE = "/home/ubuntu/LlamaFactory/data/dpo_finevideo_av_mcqa.json"


def make_text(letter: str, options: dict) -> str:
    return f"{letter}. {options.get(letter, '').strip()}"


def main():
    records = []
    for input_file in INPUT_FILES:
        if not os.path.exists(input_file):
            print(f"Skipping (not found): {input_file}")
            continue
        with open(input_file) as f:
            for line in f:
                records.append(json.loads(line))
        print(f"Loaded {input_file}")

    dpo = []
    type_counter = Counter()
    cat_counter = Counter()
    for rec in records:
        v_path = rec["video_path"]
        a_path = rec["audio_path"]
        cat = rec.get("content_parent_category", "Unknown")
        for qa in rec.get("qa_pairs", []):
            q = qa.get("question", "").strip()
            opts = qa.get("options", {})
            correct = qa.get("answer", "").strip()
            if not (q and len(opts) >= 4 and correct in opts):
                continue
            wrong = [k for k in opts if k != correct]
            if not wrong:
                continue
            rej = random.choice(wrong)

            options_text = "\n".join(f"{k}. {opts[k]}" for k in sorted(opts))
            user_content = (
                f"<video><audio>{q}\n{options_text}\n"
                f"Please provide your answer by stating the letter followed by the full option."
            )

            dpo.append({
                "messages": [{"role": "user", "content": user_content}],
                "chosen": {"role": "assistant", "content": make_text(correct, opts)},
                "rejected": {"role": "assistant", "content": make_text(rej, opts)},
                "videos": [v_path],
                "audios": [a_path],
            })
            type_counter[qa.get("type", "Unknown")] += 1
            cat_counter[cat] += 1

    random.shuffle(dpo)
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
    with open(OUTPUT_FILE, "w") as f:
        json.dump(dpo, f, ensure_ascii=False, indent=1)

    print(f"Wrote {len(dpo)} DPO pairs -> {OUTPUT_FILE}")
    print("\nQuestion type distribution:")
    for t, n in type_counter.most_common():
        print(f"  {t:30s} {n}")
    print("\nVideo category distribution:")
    for c, n in cat_counter.most_common():
        print(f"  {c:30s} {n}")

    # Reminder to register
    print("\nNext: register in dataset_info.json with key 'dpo_finevideo_av_mcqa'.")
    print("It uses both videos and audios fields.")


if __name__ == "__main__":
    main()
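For reference, a single record emitted by the script above has the LlamaFactory sharegpt-DPO shape sketched below. The question text and media paths are invented placeholders, not real pipeline output:

```python
example_record = {
    "messages": [{
        "role": "user",
        "content": (
            "<video><audio>What sound accompanies the door closing?\n"
            "A. A bell\nB. A slam\nC. Applause\nD. Silence\n"
            "Please provide your answer by stating the letter followed by the full option."
        ),
    }],
    "chosen": {"role": "assistant", "content": "B. A slam"},
    "rejected": {"role": "assistant", "content": "D. Silence"},
    "videos": ["/opt/dlami/nvme/finevideo_av_mcqa/videos/example.mp4"],   # hypothetical path
    "audios": ["/opt/dlami/nvme/finevideo_av_mcqa/audio/example.wav"],    # hypothetical path
}
```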
data_gen/av_mcqa_pipeline/05_rewrite_chosen_longform.py
ADDED
@@ -0,0 +1,144 @@
#!/usr/bin/env python3
"""
Rewrite AV-MCQA DPO chosen from short "B. xxx" format to long descriptive
format matching SFT/finevideo style.

Uses Gemini's audio_evidence + visual_evidence fields to construct a
chosen answer that:
  1. Contains the correct MCQA letter + option text
  2. Explains why using timestamp + audio + visual reasoning
  3. Matches the long-form descriptive style the SFT model prefers

Output: dpo_finevideo_av_mcqa_longform.json
"""

import json
import os
import random
from collections import Counter

random.seed(42)

INPUT_FILES = [
    "/opt/dlami/nvme/av_mcqa/03_filtered_must_audio.jsonl",
    "/opt/dlami/nvme/av_mcqa/03b_filtered_must_audio_local.jsonl",
]
OUTPUT_FILE = "/home/ubuntu/LlamaFactory/data/dpo_finevideo_av_mcqa_longform.json"


def build_chosen_longform(qa: dict) -> str:
    """Construct long-form chosen from MCQA + evidence fields."""
    letter = qa["answer"]
    opt_text = qa["options"].get(letter, "").strip()
    audio_ev = qa.get("audio_evidence", "").strip()
    visual_ev = qa.get("visual_evidence", "").strip()

    parts = []
    # Audio-visual reasoning in natural prose
    if audio_ev and visual_ev:
        parts.append(f"In the video, {audio_ev.rstrip('.')}; at the same moment in the visual, {visual_ev.rstrip('.')}.")
    elif audio_ev:
        parts.append(f"In the audio, {audio_ev.rstrip('.')}.")
    elif visual_ev:
        parts.append(f"Visually, {visual_ev.rstrip('.')}.")
    parts.append(f"Therefore the correct answer is {letter}. {opt_text}.")

    return " ".join(parts)


def build_rejected_longform(qa: dict, rej_letter: str) -> str:
    """Construct a plausible wrong long-form answer using the SAME audio/visual
    evidence but leading to the wrong conclusion. This is the cleanest DPO
    signal: identical reasoning prefix, different final answer. No fabricated
    evidence (prevents teaching hallucination), same length as chosen."""
    rej_text = qa["options"].get(rej_letter, "").strip()
    audio_ev = qa.get("audio_evidence", "").strip()
    visual_ev = qa.get("visual_evidence", "").strip()

    parts = []
    if audio_ev and visual_ev:
        parts.append(f"In the video, {audio_ev.rstrip('.')}; at the same moment in the visual, {visual_ev.rstrip('.')}.")
    elif audio_ev:
        parts.append(f"In the audio, {audio_ev.rstrip('.')}.")
    elif visual_ev:
        parts.append(f"Visually, {visual_ev.rstrip('.')}.")
    parts.append(f"Based on this, the answer is {rej_letter}. {rej_text}.")

    return " ".join(parts)


def main():
    records = []
    for fp in INPUT_FILES:
        if not os.path.exists(fp):
            print(f"skip missing: {fp}")
            continue
        with open(fp) as f:
            for line in f:
                records.append(json.loads(line))
        print(f"loaded {fp}")

    dpo = []
    type_counter = Counter()
    dropped_missing_evidence = 0
    for rec in records:
        v_path = rec["video_path"]
        a_path = rec["audio_path"]
        for qa in rec.get("qa_pairs", []):
            q = qa.get("question", "").strip()
            opts = qa.get("options", {})
            correct = qa.get("answer", "").strip()
            if not (q and len(opts) >= 4 and correct in opts):
                continue

            # Require at least some evidence to justify long-form rewrite
            if not (qa.get("audio_evidence") or qa.get("visual_evidence")):
                dropped_missing_evidence += 1
                continue

            wrong = [k for k in opts if k != correct]
            if not wrong:
                continue
            rej = random.choice(wrong)

            chosen_text = build_chosen_longform(qa)
            rejected_text = build_rejected_longform(qa, rej)

            options_text = "\n".join(f"{k}. {opts[k]}" for k in sorted(opts))
            user_content = (
                f"<video><audio>{q}\n{options_text}\n"
                f"Please provide your answer by stating the letter followed by the full option, "
                f"with a brief explanation grounded in the audio and visual cues."
            )

            dpo.append({
                "messages": [{"role": "user", "content": user_content}],
                "chosen": {"role": "assistant", "content": chosen_text},
                "rejected": {"role": "assistant", "content": rejected_text},
                "videos": [v_path],
                "audios": [a_path],
            })
            type_counter[qa.get("type", "Unknown")] += 1

    random.shuffle(dpo)
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
    with open(OUTPUT_FILE, "w") as f:
        json.dump(dpo, f, ensure_ascii=False, indent=1)

    print(f"\nWrote {len(dpo)} long-form DPO pairs -> {OUTPUT_FILE}")
    print(f"Dropped (no evidence): {dropped_missing_evidence}")
    print("\nQuestion type distribution:")
    for t, n in type_counter.most_common():
        print(f"  {t:30s} {n}")

    # Show a sample
    if dpo:
        s = dpo[0]
        print("\n=== Sample ===")
        print(f"USER: {s['messages'][0]['content'][:200]}")
        print(f"CHOSEN: {s['chosen']['content']}")
        print(f"REJECTED: {s['rejected']['content']}")


if __name__ == "__main__":
    main()
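To make the chosen/rejected asymmetry concrete, here is a minimal sketch of the two builders above run on a made-up QA dict (the evidence strings and options are invented):

```python
qa = {
    "answer": "B",
    "options": {"A": "A bell", "B": "A slam", "C": "Applause", "D": "Silence"},
    "audio_evidence": "a sharp slam is heard at 00:12",
    "visual_evidence": "the door swings shut",
}
print(build_chosen_longform(qa))
# In the video, a sharp slam is heard at 00:12; at the same moment in the
# visual, the door swings shut. Therefore the correct answer is B. A slam.
print(build_rejected_longform(qa, "D"))
# Same evidence prefix, but the conclusion becomes:
# Based on this, the answer is D. Silence.
```

Both strings share an identical reasoning prefix, so the only preference signal is the final letter + option.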
data_gen/av_mcqa_pipeline/README.md
ADDED
@@ -0,0 +1,109 @@
# AV-MCQA DPO data pipeline (FineVideo + Gemini + GPT-5.4)

Goal: generate audio-visual MCQA DPO pairs targeted at WorldSense / Daily-Omni
style benchmarks. Each surviving question is guaranteed to require BOTH audio
and video to answer correctly.

## Pipeline (4 stages)

1. **`01_select_finevideo.py`**
   Pool: `filtered_candidates_all_passing.jsonl` (4616 entries with full
   metadata including `audioVisualCorrelation` per scene). EXCLUDE the 1000
   already-used local videos (in `filtered_candidates.jsonl`) so we get *new*
   videos. Filter: `mean AVC >= 0.85`, duration `[30, 180]s`, balance per
   `content_parent_category` (cap 100/cat, target ~800).
   Then download missing videos via HF streaming
   (`HuggingFaceFV/finevideo`), saving mp4 + extracted wav to
   `/opt/dlami/nvme/finevideo_av_mcqa/{videos,audio}/`.
   Output: `/opt/dlami/nvme/av_mcqa/01_selected_videos.jsonl`

2. **`02_gemini_generate_av_mcqa.py`**
   Upload each mp4 (audio+video) to Gemini. Ask for 3 MCQAs per video,
   spanning Daily-Omni's 6 question types (AV Event Alignment, Event Sequence,
   Context Understanding, Inference, Reasoning, Comparative). Each MCQA has
   4 options and a marked correct letter.

   **Backend pool**:
   - up to 3 Gemini direct keys: `GOOGLE_API_KEY_1`, `GOOGLE_API_KEY_2`,
     `GOOGLE_API_KEY_3` (round-robin; a key is auto-disabled when it hits
     quota / rate-limit / RESOURCE_EXHAUSTED).
   - fallback to OpenRouter when ALL Gemini keys are exhausted:
     `OPENROUTER_API_KEY`, default model `google/gemini-2.5-pro`
     (override with `OPENROUTER_MODEL=...`).
   - default Gemini model `gemini-3-flash` (override with `GEMINI_MODEL=...`,
     e.g. `gemini-2.5-flash` or `gemini-2.5-pro`).

   Output: `/opt/dlami/nvme/av_mcqa/02_gemini_generated.jsonl`

3. **`03_gpt5_video_only_filter.py`**
   For each MCQA, send VIDEO FRAMES ONLY (no audio) to GPT-5.4. If GPT picks
   the correct answer, the question is answerable without audio -> drop.
   Surviving questions truly need audio (the keep/drop rule is sketched
   after this README).
   Output: `/opt/dlami/nvme/av_mcqa/03_filtered_must_audio.jsonl`
   Env: `OPENAI_API_KEY`

4. **`04_assemble_dpo.py`**
   Convert surviving MCQAs to LlamaFactory DPO format. `chosen` = correct
   option (letter + full text); `rejected` = randomly chosen wrong option.
   Each entry includes both `videos` and `audios` paths.
   Output: `/home/ubuntu/LlamaFactory/data/dpo_finevideo_av_mcqa.json`

## Run

```bash
conda activate video

export GOOGLE_API_KEY_1=...
export GOOGLE_API_KEY_2=...
export GOOGLE_API_KEY_3=...
export OPENROUTER_API_KEY=...   # fallback
export OPENAI_API_KEY=sk-...    # for step 3 (GPT-5.4 video-only filter)

cd /home/ubuntu/code-202604_v2/data_gen/av_mcqa_pipeline
python 01_select_finevideo.py         # selects + downloads new videos via HF streaming
python 02_gemini_generate_av_mcqa.py  # multi-key + OpenRouter fallback
python 03_gpt5_video_only_filter.py
python 04_assemble_dpo.py
```

All steps write JSONL line-by-line and resume on rerun.

## Register dataset (after step 4)

Append to `LlamaFactory/data/dataset_info.json`:

```json
"dpo_finevideo_av_mcqa": {
  "file_name": "dpo_finevideo_av_mcqa.json",
  "formatting": "sharegpt",
  "ranking": true,
  "columns": {
    "messages": "messages",
    "chosen": "chosen",
    "rejected": "rejected",
    "videos": "videos",
    "audios": "audios"
  },
  "tags": {
    "role_tag": "role",
    "content_tag": "content",
    "user_tag": "user",
    "assistant_tag": "assistant"
  }
}
```

## Rough cost estimate

- Step 2 (Gemini 3 Flash, ~800 videos × 1 call): ~$25-60
- Step 3 (GPT-5.4 with 12 frames × ~2400 questions): ~$30-80
- Step 4: free

Total: roughly **$60-150**, finishing in a few hours.

## Tunables

- `MIN_MEAN_AVC` in step 1 (default 0.85): higher = stricter AV-correlated videos
- `QUESTIONS_PER_VIDEO` in step 2 (default 3): more = more candidates, more $$$
- Survival rate in step 3 will likely be 30-60%; if too low, lower `MIN_MEAN_AVC`
  in step 1 or refine the Gemini prompt to emphasize "must need audio".
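The step-3 keep/drop rule described above reduces to a one-line predicate. A minimal sketch, assuming each generated QA carries the video-only model's pick in a field named `gpt5_video_only_answer` (a hypothetical name; the actual script's field may differ):

```python
def needs_audio(qa: dict) -> bool:
    # Keep the question only if the video-only model got it WRONG:
    # a correct video-only answer means audio was not required.
    return qa["gpt5_video_only_answer"] != qa["answer"]
```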
data_gen/build_dpo_dataset.py
ADDED
@@ -0,0 +1,431 @@
#!/usr/bin/env python3
from __future__ import annotations

import argparse
import asyncio
import json
import os
import random
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple

from openai import AsyncOpenAI


PROMPT_CHOICES = [
    "Are the audio and video tracks synchronized in this video?",
    "Identify the exact timing of the visual impact and its corresponding sound. Do they align?",
    "Is there any noticeable audio delay or temporal manipulation in this clip?",
]

RESPONSE_KEYS = [
    "resp_sample1_correct",
    "resp_sample1_hallucinated",
    "resp_sample2_correct",
    "resp_sample2_hallucinated",
    "resp_sample3_correct",
    "resp_sample3_hallucinated",
]


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(
        description="Build KTO preference dataset from original + shifted videos with GPT-5.4 text generation."
    )
    p.add_argument(
        "--labels-jsonl",
        type=Path,
        default=Path("/home/ubuntu/video_source/gemini_label_result/timestamps_results_normalized.jsonl"),
    )
    p.add_argument(
        "--shift-meta-jsonl",
        type=Path,
        default=Path("/home/ubuntu/video_source/random_shift_video/shift_metadata.jsonl"),
    )
    p.add_argument(
        "--output-jsonl",
        type=Path,
        default=Path("/home/ubuntu/video_source/kto_training_data.jsonl"),
    )
    p.add_argument("--model", type=str, default="gpt-5.4")
    p.add_argument("--max-concurrency", type=int, default=8)
    p.add_argument("--max-retries", type=int, default=5)
    p.add_argument("--base-backoff-sec", type=float, default=2.0)
    p.add_argument("--seed", type=int, default=42)
    p.add_argument("--ffprobe-timeout-sec", type=int, default=30)
    return p.parse_args()


def read_jsonl(path: Path) -> List[Dict[str, Any]]:
    out: List[Dict[str, Any]] = []
    with path.open("r", encoding="utf-8") as f:
        for line in f:
            s = line.strip()
            if not s:
                continue
            out.append(json.loads(s))
    return out


def append_jsonl(path: Path, row: Dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(row, ensure_ascii=False) + "\n")


def original_rel_from_label_row(row: Dict[str, Any]) -> str:
    # Convert absolute original path to relative path under /home/ubuntu/video_source/original.
    video_path = Path(str(row["video_path"]))
    parts = list(video_path.parts)
    if "original" in parts:
        idx = parts.index("original")
        return str(Path(*parts[idx + 1 :]))
    return str(video_path.name)


def build_annotations_map(rows: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
    out: Dict[str, Dict[str, Any]] = {}
    for r in rows:
        rel = original_rel_from_label_row(r)
        out[rel] = r
    return out


def build_shift_map(rows: List[Dict[str, Any]]) -> Dict[str, Dict[str, Dict[str, Any]]]:
    out: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(dict)
    for r in rows:
        orig = str(r.get("original_video", "")).strip()
        direction = str(r.get("shift_direction", "")).strip()
        if not orig or direction not in {"delayed", "early"}:
            continue
        # Keep first occurrence to avoid duplicates in metadata.
        if direction not in out[orig]:
            out[orig][direction] = r
    return out


async def ffprobe_duration_seconds(video_path: str, timeout_sec: int) -> float:
    proc = await asyncio.create_subprocess_exec(
        "ffprobe",
        "-v",
        "error",
        "-show_entries",
        "format=duration",
        "-of",
        "default=noprint_wrappers=1:nokey=1",
        video_path,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    try:
        stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=float(timeout_sec))
    except asyncio.TimeoutError:
        proc.kill()
        await proc.wait()
        raise RuntimeError(f"ffprobe timeout: {video_path}")
    if proc.returncode != 0:
        raise RuntimeError(f"ffprobe failed for {video_path}: {(stderr or b'').decode(errors='ignore')[-200:]}")
    txt = (stdout or b"").decode().strip()
    return float(txt)


def make_system_prompt() -> str:
    return (
        "You generate ONLY JSON. Return EXACTLY six string fields and no extras: "
        + ", ".join(RESPONSE_KEYS)
        + ". "
        "Do not perform arithmetic beyond using provided facts. Python already computed all numbers. "
        "Important style rule: the three clips are anonymous. "
        "Never infer or mention dataset labels/split names/source names or paths. "
        "Do not use words such as original, delayed, early, manipulated, shifted."
    )


def make_user_payload(facts: Dict[str, Any]) -> str:
    return (
        "Facts (ground truth computed by Python):\n"
        + json.dumps(facts, ensure_ascii=False, indent=2)
        + "\n\nInstruction:\n"
        "Return a JSON object with exactly these 6 string values:\n"
        "1) resp_sample1_correct: confirms synchronization for sample_1. Integrate event_v and event_a. "
        "If sample1_gap <= 0.4s, explicitly mention negligible natural tolerance of sample1_gap.\n"
        "2) resp_sample1_hallucinated: incorrectly claims a noticeable mismatch for sample_1.\n"
        "3) resp_sample2_correct: expert explanation for sample_2, integrate event_v/event_a, "
        "must cite t_visual and t_audio_sample2, and use natural estimation language with sample2_gap.\n"
        "4) resp_sample2_hallucinated: incorrectly claims sample_2 is perfectly synchronized at t_visual.\n"
        "5) resp_sample3_correct: expert explanation for sample_3, integrate event_v/event_a, "
        "must cite t_visual and t_audio_sample3, and use natural estimation language with sample3_gap.\n"
        "6) resp_sample3_hallucinated: incorrectly claims sample_3 is perfectly synchronized.\n"
        "Hard constraint: clip labels are anonymous. Do not use words such as original/delayed/early/manipulated.\n"
        "All six values must be concise, natural, and label-neutral."
    )


async def call_llm_with_backoff(
    client: AsyncOpenAI,
    model: str,
    facts: Dict[str, Any],
    max_retries: int,
    base_backoff_sec: float,
) -> Dict[str, str]:
    system_prompt = make_system_prompt()
    user_prompt = make_user_payload(facts)

    last_exc: Optional[Exception] = None
    for attempt in range(1, max_retries + 1):
        try:
            resp = await client.chat.completions.create(
                model=model,
                response_format={"type": "json_object"},
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
            )
            txt = (resp.choices[0].message.content or "").strip()
            obj = json.loads(txt)
            missing = [k for k in RESPONSE_KEYS if k not in obj or not isinstance(obj[k], str)]
            if missing:
                raise ValueError(f"Missing/invalid response keys: {missing}")
            return {k: obj[k] for k in RESPONSE_KEYS}
        except Exception as exc:
            last_exc = exc
            if attempt == max_retries:
                break
            wait_s = base_backoff_sec * (2 ** (attempt - 1))
            await asyncio.sleep(wait_s)
    raise RuntimeError(f"LLM generation failed after retries: {last_exc}")


def build_existing_completion_state(
    output_jsonl: Path,
    shifted_to_original: Dict[str, str],
) -> Dict[str, Set[str]]:
    """
    Returns mapping: original_rel -> set of row types already written {"orig","delayed","early"}.
    """
    state: Dict[str, Set[str]] = defaultdict(set)
    if not output_jsonl.exists():
        return state
    with output_jsonl.open("r", encoding="utf-8") as f:
        for line in f:
            s = line.strip()
            if not s:
                continue
            try:
                row = json.loads(s)
            except json.JSONDecodeError:
                continue
            video = str(row.get("video", "")).strip()
            if not video:
                continue
            # Infer row type and original key.
            if video in shifted_to_original:
                orig = shifted_to_original[video]
                if "_delay_" in Path(video).name:
                    state[orig].add("delayed")
                elif "_early_" in Path(video).name:
                    state[orig].add("early")
            else:
                # Treat as original row if it looks like an original relative path.
                state[video].add("orig")
    return state


async def process_one_original(
    original_rel: str,
    ann_row: Dict[str, Any],
    shift_row_delayed: Dict[str, Any],
    shift_row_early: Dict[str, Any],
    args: argparse.Namespace,
    client: AsyncOpenAI,
    sem: asyncio.Semaphore,
    write_lock: asyncio.Lock,
    completion_state: Dict[str, Set[str]],
    prompt_rng: random.Random,
    counters: Dict[str, int],
) -> None:
    existing_types = completion_state.get(original_rel, set())
    if existing_types == {"orig", "delayed", "early"}:
        counters["skipped_completed"] += 1
        return

    async with sem:
        original_video_path = str(ann_row["video_path"])
        video_duration = await ffprobe_duration_seconds(original_video_path, args.ffprobe_timeout_sec)

        t_visual = float(ann_row["t_visual"])
        t_audio = float(ann_row["t_audio"])
        event_v = str(ann_row.get("event_description", "")).strip()
        event_a = str(ann_row.get("sound_description", "")).strip()
        delayed_shift_seconds = float(shift_row_delayed["exact_shift_seconds"])
        early_shift_seconds = float(shift_row_early["exact_shift_seconds"])

        original_delta = abs(t_visual - t_audio)
        t_audio_delayed = min(t_audio + delayed_shift_seconds, video_duration)
        t_audio_early = max(t_audio - early_shift_seconds, 0.0)
        true_delayed_gap = round(abs(t_audio_delayed - t_visual), 2)
        true_early_gap = round(abs(t_audio_early - t_visual), 2)

        facts = {
            "video_duration": round(video_duration, 3),
            "t_visual": round(t_visual, 3),
            "t_audio_sample1": round(t_audio, 3),
            "event_v": event_v,
            "event_a": event_a,
            "shift_seconds_sample2": round(delayed_shift_seconds, 6),
            "shift_seconds_sample3": round(early_shift_seconds, 6),
            "t_audio_sample2": round(t_audio_delayed, 3),
            "t_audio_sample3": round(t_audio_early, 3),
            "sample1_gap": round(original_delta, 3),
            "sample2_gap": true_delayed_gap,
            "sample3_gap": true_early_gap,
        }

        texts = await call_llm_with_backoff(
            client=client,
            model=args.model,
            facts=facts,
            max_retries=args.max_retries,
            base_backoff_sec=args.base_backoff_sec,
        )

        selected_prompt = prompt_rng.choice(PROMPT_CHOICES)
        rows = [
            {
                "video": original_rel,
                "prompt": selected_prompt,
                "chosen": texts["resp_sample1_correct"],
                "rejected": texts["resp_sample1_hallucinated"],
            },
            {
                "video": str(shift_row_delayed["shifted_video_path"]),
                "prompt": selected_prompt,
                "chosen": texts["resp_sample2_correct"],
                "rejected": texts["resp_sample2_hallucinated"],
            },
            {
                "video": str(shift_row_early["shifted_video_path"]),
                "prompt": selected_prompt,
                "chosen": texts["resp_sample3_correct"],
                "rejected": texts["resp_sample3_hallucinated"],
            },
        ]

        missing_rows: List[Dict[str, Any]] = []
        if "orig" not in existing_types:
            missing_rows.append(rows[0])
        if "delayed" not in existing_types:
            missing_rows.append(rows[1])
        if "early" not in existing_types:
            missing_rows.append(rows[2])

        async with write_lock:
            for r in missing_rows:
                append_jsonl(args.output_jsonl, r)
            completion_state[original_rel] = {"orig", "delayed", "early"}

        counters["processed_originals"] += 1
        counters["written_rows"] += len(missing_rows)


async def main_async() -> None:
    args = parse_args()
    random.seed(args.seed)

    if not os.getenv("OPENAI_API_KEY"):
        raise RuntimeError("OPENAI_API_KEY is required.")
    client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))

    labels = read_jsonl(args.labels_jsonl)
    shift_meta = read_jsonl(args.shift_meta_jsonl)
    ann_map = build_annotations_map(labels)
    shift_map = build_shift_map(shift_meta)

    # Build reverse map for resume detection from existing output rows.
    shifted_to_original: Dict[str, str] = {}
    for orig, both in shift_map.items():
        for direction in ("delayed", "early"):
            item = both.get(direction)
            if item:
                shifted_to_original[str(item["shifted_video_path"])] = orig

    completion_state = build_existing_completion_state(args.output_jsonl, shifted_to_original)

    # Process only originals present in labels (1420) and that are included in shift set (1589 superset).
    candidates: List[Tuple[str, Dict[str, Any], Dict[str, Any], Dict[str, Any]]] = []
    missing_shift = 0
    for original_rel, ann_row in ann_map.items():
        sides = shift_map.get(original_rel, {})
        delayed = sides.get("delayed")
        early = sides.get("early")
        if delayed is None or early is None:
            missing_shift += 1
            continue
        candidates.append((original_rel, ann_row, delayed, early))
    candidates.sort(key=lambda x: x[0])

    sem = asyncio.Semaphore(max(1, args.max_concurrency))
    write_lock = asyncio.Lock()
    prompt_rng = random.Random(args.seed)
    counters = {
        "processed_originals": 0,
        "written_rows": 0,
        "skipped_completed": 0,
        "failed_originals": 0,
    }

    async def runner(item: Tuple[str, Dict[str, Any], Dict[str, Any], Dict[str, Any]]) -> None:
        orig, ann, drow, erow = item
        try:
            await process_one_original(
                original_rel=orig,
                ann_row=ann,
                shift_row_delayed=drow,
                shift_row_early=erow,
                args=args,
                client=client,
                sem=sem,
                write_lock=write_lock,
                completion_state=completion_state,
                prompt_rng=prompt_rng,
                counters=counters,
            )
        except Exception as exc:
            counters["failed_originals"] += 1
            print(f"[error] {orig}: {exc}")

    tasks = [asyncio.create_task(runner(it)) for it in candidates]
    for i, t in enumerate(asyncio.as_completed(tasks), start=1):
        await t
        if i % 50 == 0:
            print(
                f"[progress] done={i}/{len(candidates)} "
                f"processed={counters['processed_originals']} "
                f"written_rows={counters['written_rows']} "
                f"skipped={counters['skipped_completed']} "
                f"failed={counters['failed_originals']}"
            )

    print("========== KTO Build Summary ==========")
    print(f"Labels rows                     : {len(labels)}")
    print(f"Unique labeled originals        : {len(ann_map)}")
    print(f"Unique shifted originals        : {len(shift_map)}")
    print(f"Labeled but missing shift pairs : {missing_shift}")
    print(f"Candidate originals             : {len(candidates)}")
    print(f"Processed originals             : {counters['processed_originals']}")
    print(f"Skipped completed originals     : {counters['skipped_completed']}")
    print(f"Failed originals                : {counters['failed_originals']}")
    print(f"Rows appended to output         : {counters['written_rows']}")
    print(f"Output JSONL                    : {args.output_jsonl}")
    print("=======================================")


def main() -> None:
    asyncio.run(main_async())


if __name__ == "__main__":
    main()
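A quick worked example of the gap arithmetic in `process_one_original` (all numbers invented): with `t_visual=10.0`, `t_audio=10.05`, a 1.5 s delayed shift and a 0.9 s early shift in a 60 s clip:

```python
t_visual, t_audio, duration = 10.0, 10.05, 60.0
t_audio_delayed = min(t_audio + 1.5, duration)   # 11.55
t_audio_early   = max(t_audio - 0.9, 0.0)        # 9.15
sample1_gap = round(abs(t_visual - t_audio), 3)          # 0.05 -> treated as in sync
sample2_gap = round(abs(t_audio_delayed - t_visual), 2)  # 1.55
sample3_gap = round(abs(t_audio_early - t_visual), 2)    # 0.85
```

These precomputed gaps are exactly what the prompt hands to the LLM, so the model only verbalizes numbers and never does its own arithmetic.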
data_gen/convert_mcqa_to_dpo.py
ADDED
@@ -0,0 +1,118 @@
#!/usr/bin/env python3
"""Convert LLaVA-Video-178K MCQA data to LlamaFactory DPO format.

For each QA pair:
  chosen   = correct answer (letter + full option text)
  rejected = randomly picked wrong option (letter + full option text)

Output: single JSON file ready for LlamaFactory DPO training.
"""

import json
import os
import random
import re

random.seed(42)

BASE = "/opt/dlami/nvme/datasets/LLaVA-Video-178K-subset"

MC_FILES = [
    ("0_30_s_academic_v0_1", "0_30_s_academic_mc_v0_1_qa_processed.json"),
    ("30_60_s_academic_v0_1", "30_60_s_academic_mc_v0_1_qa_processed.json"),
    ("1_2_m_academic_v0_1", "1_2_m_academic_mc_v0_1_qa_processed.json"),
    ("2_3_m_academic_v0_1", "2_3_m_academic_mc_v0_1_qa_processed.json"),
    ("0_30_s_perceptiontest", "0_30_s_perceptiontest_mc_qa_processed.json"),
    ("30_60_s_perceptiontest", "30_60_s_perceptiontest_mc_qa_processed.json"),
]

OPTION_RE = re.compile(r"^([A-F])\.\s*(.+)", re.MULTILINE)


def parse_options(question_text: str) -> dict[str, str]:
    """Extract {letter: full_text} from the question body."""
    opts = {}
    for m in OPTION_RE.finditer(question_text):
        opts[m.group(1)] = m.group(2).strip()
    return opts


def parse_answer_letter(answer_text: str) -> str:
    """Extract the answer letter from a gpt value like 'B.' or 'D. Light pink.'"""
    answer_text = answer_text.strip()
    m = re.match(r"^([A-F])", answer_text)
    return m.group(1) if m else ""


def make_full_answer(letter: str, options: dict[str, str]) -> str:
    """Return 'X. full text' format."""
    text = options.get(letter, "")
    if text:
        return f"{letter}. {text}"
    return f"{letter}."


def convert():
    dpo_records = []
    stats = {"total_qa": 0, "skipped_no_options": 0, "skipped_no_answer": 0}

    for subdir, fname in MC_FILES:
        fpath = os.path.join(BASE, subdir, fname)
        with open(fpath) as f:
            data = json.load(f)

        for entry in data:
            video_rel = entry["video"]  # e.g. perception_test/videos/video_2779.mp4
            video_abs = os.path.join(BASE, subdir, video_rel)

            if not os.path.exists(video_abs):
                continue

            convs = entry["conversations"]
            for i in range(0, len(convs) - 1, 2):
                q_text = convs[i]["value"]      # human question with <image> tag and options
                a_text = convs[i + 1]["value"]  # gpt answer

                stats["total_qa"] += 1

                options = parse_options(q_text)
                if len(options) < 2:
                    stats["skipped_no_options"] += 1
                    continue

                correct_letter = parse_answer_letter(a_text)
                if not correct_letter or correct_letter not in options:
                    stats["skipped_no_answer"] += 1
                    continue

                wrong_letters = [k for k in options if k != correct_letter]
                rejected_letter = random.choice(wrong_letters)

                chosen_text = make_full_answer(correct_letter, options)
                rejected_text = make_full_answer(rejected_letter, options)

                # Replace <image> with <video>; ensure exactly one <video> token.
                # (LLaVA only adds <image> on the first turn of each entry; later turns
                # inherit the video implicitly. Since we split each turn into its own
                # DPO record, every record needs its own <video> token.)
                user_content = q_text.replace("<image>", "<video>")
                if "<video>" not in user_content:
                    user_content = "<video>" + user_content

                record = {
                    "messages": [
                        {"role": "user", "content": user_content}
                    ],
                    "chosen": {"role": "assistant", "content": chosen_text},
                    "rejected": {"role": "assistant", "content": rejected_text},
                    "videos": [video_abs],
                }
                dpo_records.append(record)

    out_path = "/home/ubuntu/LlamaFactory/data/dpo_llava_academic_perceptiontest_mcqa.json"
    with open(out_path, "w") as f:
        json.dump(dpo_records, f, ensure_ascii=False, indent=1)

    print(f"Written {len(dpo_records)} DPO pairs to {out_path}")
    print(f"Stats: {stats}")


if __name__ == "__main__":
    convert()
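To see what `OPTION_RE` and `parse_answer_letter` extract, a small sketch on an invented question string:

```python
q = "<image>What color is the car?\nA. Red\nB. Blue\nC. Light pink\nD. Black"
print(parse_options(q))
# {'A': 'Red', 'B': 'Blue', 'C': 'Light pink', 'D': 'Black'}
print(parse_answer_letter("C. Light pink."))
# 'C'
```

The MULTILINE anchor means only lines that begin with `A.`-`F.` are treated as options, so the question sentence itself never matches.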
data_gen/export_disagreement_to_csv.py
ADDED
@@ -0,0 +1,341 @@
#!/usr/bin/env python3
import argparse
import csv
import json
from pathlib import Path
from typing import Any, Dict, List, Optional


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Export model timestamp disagreements from JSONL to CSV."
    )
    parser.add_argument(
        "--input",
        type=Path,
        default=Path("/home/ubuntu/video_source/result/visual_timestamps_compare.jsonl"),
        help="Path to visual_timestamps_compare.jsonl",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=Path("/home/ubuntu/video_source/result/visual_timestamps_compare_review.csv"),
        help="Output CSV path",
    )
    parser.add_argument(
        "--min-spread",
        type=float,
        default=0.8,
        help="Export rows where max timestamp spread >= this value",
    )
    parser.add_argument(
        "--top-k",
        type=int,
        default=0,
        help="If >0, export only top-K most disagreed rows after filtering",
    )
    parser.add_argument(
        "--filter-mode",
        choices=[
            "spread",
            "gemini_claude",
            "gemini_gpt5_1",
            "outlier_consensus",
            "gemini_validated",
            "gemini_need_review",
        ],
        default="spread",
        help="Filtering rule to apply",
    )
    parser.add_argument(
        "--min-diff",
        type=float,
        default=0.8,
        help="Minimum absolute difference for gemini_* filter modes",
    )
    parser.add_argument(
        "--outlier-threshold",
        type=float,
        default=3.0,
        help="Outlier threshold for outlier_consensus mode",
    )
    parser.add_argument(
        "--pair-agree-threshold",
        type=float,
        default=0.6,
        help="Agreement threshold for kept model pair in outlier_consensus mode",
    )
    parser.add_argument(
        "--gemini-agree-threshold",
        type=float,
        default=1.0,
        help="For gemini_* modes: accept if OpenAI or Claude differs from Gemini by less than this threshold",
    )
    return parser.parse_args()


def load_jsonl(path: Path) -> List[Dict[str, Any]]:
    data: List[Dict[str, Any]] = []
    with path.open("r", encoding="utf-8") as f:
        for line_no, line in enumerate(f, start=1):
            s = line.strip()
            if not s:
                continue
            try:
                obj = json.loads(s)
            except json.JSONDecodeError as exc:
                raise ValueError(f"Invalid JSONL at line {line_no}: {exc}") from exc
            if not isinstance(obj, dict):
                raise ValueError(f"Line {line_no} is not a JSON object.")
            data.append(obj)
    return data


def _to_float_or_none(value: Any) -> Optional[float]:
    if value is None:
        return None
    try:
        return float(value)
    except (TypeError, ValueError):
        return None


def _abs_diff(a: Optional[float], b: Optional[float]) -> Optional[float]:
    if a is None or b is None:
        return None
    return abs(a - b)


def _round_or_none(v: Optional[float], digits: int = 3) -> Optional[float]:
    if v is None:
        return None
    return round(float(v), digits)


def _compute_consensus(values: List[float]) -> float:
    s = sorted(values)
    n = len(s)
    if n % 2 == 1:
        return s[n // 2]
    return (s[n // 2 - 1] + s[n // 2]) / 2.0


def build_row(item: Dict[str, Any]) -> Dict[str, Any]:
    t_gemini = _to_float_or_none(item.get("t_gemini"))
    t_gpt5_1 = _to_float_or_none(item.get("t_gpt5_1"))
    t_claude = _to_float_or_none(item.get("t_claude"))

    available = [x for x in [t_gemini, t_gpt5_1, t_claude] if x is not None]
    if available:
        t_min = min(available)
        t_max = max(available)
        spread = t_max - t_min
        consensus = _compute_consensus(available)
    else:
        t_min = None
        t_max = None
        spread = None
        consensus = None

    missing_count = int(t_gemini is None) + int(t_gpt5_1 is None) + int(t_claude is None)
    pair_count = int(t_gemini is not None and t_gpt5_1 is not None) + int(
        t_gemini is not None and t_claude is not None
    ) + int(t_gpt5_1 is not None and t_claude is not None)

    diff_gemini_gpt5_1 = _abs_diff(t_gemini, t_gpt5_1)
    diff_gemini_claude = _abs_diff(t_gemini, t_claude)
    diff_gpt5_1_claude = _abs_diff(t_gpt5_1, t_claude)

    return {
        "video_id": item.get("video_id", ""),
        "t_gemini": _round_or_none(t_gemini),
        "t_gpt5_1": _round_or_none(t_gpt5_1),
        "t_claude": _round_or_none(t_claude),
        "diff_gemini_gpt5_1": _round_or_none(diff_gemini_gpt5_1),
        "diff_gemini_claude": _round_or_none(diff_gemini_claude),
        "diff_gpt5_1_claude": _round_or_none(diff_gpt5_1_claude),
        "spread_max_min": _round_or_none(spread),
        "t_min": _round_or_none(t_min),
        "t_max": _round_or_none(t_max),
        "consensus_median": _round_or_none(consensus),
        "models_available": len(available),
        "pairwise_available": pair_count,
        "missing_models": missing_count,
        "event_description": item.get("event_description", ""),
        "manual_note": "",
        "manual_label": "",
    }


def classify_outlier_consensus(
    row: Dict[str, Any],
    outlier_threshold: float,
    pair_agree_threshold: float,
) -> Optional[Dict[str, Any]]:
    tg = row.get("t_gemini")
    to = row.get("t_gpt5_1")
    tc = row.get("t_claude")
    if tg is None or to is None or tc is None:
        return None

    d_go = abs(float(tg) - float(to))
    d_gc = abs(float(tg) - float(tc))
    d_oc = abs(float(to) - float(tc))

    outlier_model = ""
    kept_models = ""
    kept_diff: Optional[float] = None
    final_t_visual: Optional[float] = None

    if d_go > outlier_threshold and d_gc > outlier_threshold:
        outlier_model = "gemini"
        kept_models = "gpt5_1+claude"
        kept_diff = d_oc
        final_t_visual = (float(to) + float(tc)) / 2.0
    elif d_go > outlier_threshold and d_oc > outlier_threshold:
        outlier_model = "gpt5_1"
        kept_models = "gemini+claude"
        kept_diff = d_gc
        final_t_visual = (float(tg) + float(tc)) / 2.0
    elif d_gc > outlier_threshold and d_oc > outlier_threshold:
        outlier_model = "claude"
        kept_models = "gemini+gpt5_1"
        kept_diff = d_go
        final_t_visual = (float(tg) + float(to)) / 2.0
    else:
        return None

    if kept_diff is None or kept_diff >= pair_agree_threshold:
        return None

    out = dict(row)
    out["outlier_model"] = outlier_model
    out["kept_models"] = kept_models
    out["kept_pair_diff"] = _round_or_none(kept_diff)
    out["final_t_visual"] = _round_or_none(final_t_visual)
    out["rule"] = (
        f"outlier>{outlier_threshold}s and kept_pair<{pair_agree_threshold}s"
    )
    return out


def main() -> None:
    args = parse_args()
    if not args.input.exists():
        raise FileNotFoundError(f"Input file not found: {args.input}")

    items = load_jsonl(args.input)
    rows = [build_row(item) for item in items]

    filtered: List[Dict[str, Any]] = []
    for row in rows:
        if args.filter_mode == "spread":
            spread = row.get("spread_max_min")
            if spread is not None and float(spread) >= args.min_spread:
                filtered.append(row)
        elif args.filter_mode == "gemini_claude":
            diff = row.get("diff_gemini_claude")
            if diff is not None and float(diff) > args.min_diff:
                filtered.append(row)
        elif args.filter_mode == "gemini_gpt5_1":
            diff = row.get("diff_gemini_gpt5_1")
            if diff is not None and float(diff) > args.min_diff:
                filtered.append(row)
        elif args.filter_mode == "outlier_consensus":
            accepted = classify_outlier_consensus(
                row=row,
                outlier_threshold=args.outlier_threshold,
                pair_agree_threshold=args.pair_agree_threshold,
            )
            if accepted is not None:
                filtered.append(accepted)
        elif args.filter_mode in {"gemini_validated", "gemini_need_review"}:
            d_go = row.get("diff_gemini_gpt5_1")
            d_gc = row.get("diff_gemini_claude")
            validated = (
                (d_go is not None and float(d_go) < args.gemini_agree_threshold)
                or (d_gc is not None and float(d_gc) < args.gemini_agree_threshold)
            )
            if args.filter_mode == "gemini_validated" and validated:
                filtered.append(row)
            if args.filter_mode == "gemini_need_review" and not validated:
                filtered.append(row)

    if args.filter_mode == "spread":
        sort_key = "spread_max_min"
    elif args.filter_mode == "gemini_claude":
        sort_key = "diff_gemini_claude"
    elif args.filter_mode == "gemini_gpt5_1":
        sort_key = "diff_gemini_gpt5_1"
    elif args.filter_mode == "gemini_validated":
        sort_key = "spread_max_min"
    elif args.filter_mode == "gemini_need_review":
        sort_key = "spread_max_min"
    else:
        sort_key = "kept_pair_diff"
    filtered.sort(key=lambda r: (r.get(sort_key) or -1, r["video_id"]), reverse=True)
    if args.top_k > 0:
        filtered = filtered[: args.top_k]

    args.output.parent.mkdir(parents=True, exist_ok=True)
    with args.output.open("w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(
            f,
            fieldnames=[
                "video_id",
                "t_gemini",
                "t_gpt5_1",
                "t_claude",
                "diff_gemini_gpt5_1",
                "diff_gemini_claude",
                "diff_gpt5_1_claude",
                "spread_max_min",
                "t_min",
                "t_max",
                "consensus_median",
                "models_available",
                "pairwise_available",
                "missing_models",
                "event_description",
                "manual_note",
                "manual_label",
                "outlier_model",
                "kept_models",
                "kept_pair_diff",
                "final_t_visual",
                "rule",
            ],
        )
        writer.writeheader()
        writer.writerows(filtered)

    print(f"Input rows: {len(items)}")
    if args.filter_mode == "spread":
        print(f"Rows with spread >= {args.min_spread}: {len(filtered)}")
    elif args.filter_mode == "gemini_claude":
        print(f"Rows with |t_gemini - t_claude| > {args.min_diff}: {len(filtered)}")
    elif args.filter_mode == "gemini_gpt5_1":
        print(f"Rows with |t_gemini - t_gpt5_1| > {args.min_diff}: {len(filtered)}")
    elif args.filter_mode == "gemini_validated":
        print(
            "Accepted rows with Gemini validation rule: "
            f"{len(filtered)} (|Gemini-OpenAI| < {args.gemini_agree_threshold} OR "
            f"|Gemini-Claude| < {args.gemini_agree_threshold})"
        )
    elif args.filter_mode == "gemini_need_review":
        print(
            "Need-review rows with Gemini validation rule: "
            f"{len(filtered)} (neither OpenAI nor Claude within {args.gemini_agree_threshold}s of Gemini)"
        )
    else:
        print(
            "Accepted rows with outlier-consensus rule: "
            f"{len(filtered)} (outlier>{args.outlier_threshold}, kept_pair<{args.pair_agree_threshold})"
        )
    if args.top_k > 0:
        print(f"Top-K limit applied: {args.top_k}")
    print(f"CSV written to: {args.output}")


if __name__ == "__main__":
    main()
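To illustrate the outlier-consensus rule above, a toy row where Claude disagrees by more than 3 s while Gemini and GPT agree within 0.6 s (all timestamps invented):

```python
row = {"video_id": "vid_001", "t_gemini": 12.3, "t_gpt5_1": 12.5, "t_claude": 17.0}
out = classify_outlier_consensus(row, outlier_threshold=3.0, pair_agree_threshold=0.6)
print(out["outlier_model"], out["kept_models"], out["final_t_visual"])
# claude gemini+gpt5_1 12.4
```

Claude's pairwise gaps (4.7 s and 4.5 s) both exceed the outlier threshold, the surviving pair differs by only 0.2 s, so the row is accepted with the kept pair's mean as `final_t_visual`.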
data_gen/export_frame_selection_to_csv.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env python3
import argparse
import csv
import json
from pathlib import Path
from typing import Any, Dict, List, Optional


DEFAULT_INPUT = Path("/home/ubuntu/video_source/result/frame_selection_compare.jsonl")
DEFAULT_OUTPUT = Path("/home/ubuntu/video_source/result/frame_selection_compare.csv")


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Convert frame_selection_compare.jsonl to readable CSV.")
    p.add_argument("--input", type=Path, default=DEFAULT_INPUT, help="Input JSONL path")
    p.add_argument("--output", type=Path, default=DEFAULT_OUTPUT, help="Output CSV path")
    p.add_argument(
        "--sort-by",
        choices=["spread", "video_id"],
        default="spread",
        help="Sort output by disagreement spread or by video_id",
    )
    p.add_argument(
        "--min-spread",
        type=float,
        default=0.0,
        help="If >0, keep only rows where max time spread >= min-spread seconds",
    )
    return p.parse_args()


def load_jsonl(path: Path) -> List[Dict[str, Any]]:
    if not path.exists():
        raise FileNotFoundError(f"Input JSONL not found: {path}")
    rows: List[Dict[str, Any]] = []
    with path.open("r", encoding="utf-8") as f:
        for line_no, line in enumerate(f, start=1):
            s = line.strip()
            if not s:
                continue
            try:
                obj = json.loads(s)
            except json.JSONDecodeError as exc:
                raise ValueError(f"Invalid JSON at line {line_no}: {exc}") from exc
            if not isinstance(obj, dict):
                raise ValueError(f"Line {line_no} is not a JSON object.")
            rows.append(obj)
    return rows


def to_float_or_none(v: Any) -> Optional[float]:
    if v is None:
        return None
    try:
        return float(v)
    except (TypeError, ValueError):
        return None


def to_int_or_none(v: Any) -> Optional[int]:
    if v is None:
        return None
    try:
        return int(v)
    except (TypeError, ValueError):
        return None


def abs_diff(a: Optional[float], b: Optional[float]) -> Optional[float]:
    if a is None or b is None:
        return None
    return abs(a - b)


def r3(v: Optional[float]) -> Optional[float]:
    if v is None:
        return None
    return round(float(v), 3)


def build_row(item: Dict[str, Any]) -> Dict[str, Any]:
    t_g = to_float_or_none(item.get("t_gemini"))
    t_o = to_float_or_none(item.get("t_openai"))
    t_c = to_float_or_none(item.get("t_claude"))

    fi_g = to_int_or_none(item.get("gemini_frame_index"))
    fi_o = to_int_or_none(item.get("openai_frame_index"))
    fi_c = to_int_or_none(item.get("claude_frame_index"))

    available = [t for t in [t_g, t_o, t_c] if t is not None]
    spread = (max(available) - min(available)) if available else None

    return {
        "video_id": item.get("video_id", ""),
        "sampled_fps": to_float_or_none(item.get("sampled_fps")),
        "num_sampled_frames": to_int_or_none(item.get("num_sampled_frames")),
        "gemini_frame_index": fi_g,
        "openai_frame_index": fi_o,
        "claude_frame_index": fi_c,
        "t_gemini": r3(t_g),
        "t_openai": r3(t_o),
        "t_claude": r3(t_c),
        "diff_gemini_openai": r3(abs_diff(t_g, t_o)),
        "diff_gemini_claude": r3(abs_diff(t_g, t_c)),
        "diff_openai_claude": r3(abs_diff(t_o, t_c)),
        "spread_max_min": r3(spread),
        "models_available": len(available),
        "missing_models": int(t_g is None) + int(t_o is None) + int(t_c is None),
        "event_desc_gemini": item.get("event_desc_gemini"),
        "event_desc_openai": item.get("event_desc_openai"),
        "event_desc_claude": item.get("event_desc_claude"),
    }


def main() -> None:
    args = parse_args()
    raw = load_jsonl(args.input)
    rows = [build_row(x) for x in raw]

    if args.min_spread > 0:
        rows = [r for r in rows if r.get("spread_max_min") is not None and float(r["spread_max_min"]) >= args.min_spread]

    if args.sort_by == "spread":
        rows.sort(key=lambda r: (r.get("spread_max_min") or -1.0, r.get("video_id", "")), reverse=True)
    else:
        rows.sort(key=lambda r: str(r.get("video_id", "")))

    args.output.parent.mkdir(parents=True, exist_ok=True)
    fieldnames = [
        "video_id",
        "sampled_fps",
        "num_sampled_frames",
        "gemini_frame_index",
        "openai_frame_index",
        "claude_frame_index",
        "t_gemini",
        "t_openai",
        "t_claude",
        "diff_gemini_openai",
        "diff_gemini_claude",
        "diff_openai_claude",
        "spread_max_min",
        "models_available",
        "missing_models",
        "event_desc_gemini",
        "event_desc_openai",
        "event_desc_claude",
    ]
    with args.output.open("w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)

    print(f"Input rows: {len(raw)}")
    print(f"Exported rows: {len(rows)}")
    print(f"CSV written to: {args.output}")


if __name__ == "__main__":
    main()
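The spread_max_min column that drives sorting and --min-spread filtering is just max minus min over whichever model timestamps are present. A small worked example with made-up timestamps, where Claude returned nothing:

t_gemini, t_openai, t_claude = 12.3, 14.0, None

available = [t for t in (t_gemini, t_openai, t_claude) if t is not None]
spread = (max(available) - min(available)) if available else None

print(round(spread, 3))  # 1.7 -> kept by --min-spread 1.5, dropped by --min-spread 2.0
print(len(available))    # 2   -> models_available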
data_gen/gemini_annotator_multikey.py
ADDED
@@ -0,0 +1,778 @@
#!/usr/bin/env python3
"""
Production-grade multi-key Gemini annotator with OpenRouter fallback.

Core features:
1) Dual-pass annotation per video:
   - Pass 1: visual impact timestamp -> t_visual
   - Pass 2: audio impact timestamp -> t_audio
2) Append-only checkpointing via JSONL (resume-safe).
3) Thread-safe native Gemini API key rotation on quota/rate errors.
4) Automatic OpenRouter fallback when all native keys are exhausted.
5) Concurrency with bounded workers and robust failure isolation.
"""

from __future__ import annotations

import argparse
import base64
import json
import logging
import os
import re
import threading
import time
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple

import numpy as np
from google import genai
from google.genai import types as genai_types
from openai import OpenAI


VIDEO_EXTS = {".mp4", ".mov", ".mkv", ".avi", ".webm", ".m4v"}

VISUAL_PROMPT = """You are an expert in physics and video forensics.
Analyze this video and find the exact timestamp (seconds) of the single highest-energy physical impact event.

Rules:
- Return strictly JSON only.
- "t_visual" must be a number in seconds.
- If uncertain, still return your best estimate.

Output format:
{
  "t_visual": 2.5,
  "event_description": "brief description of the visual impact"
}
"""

AUDIO_PROMPT = """You are an expert in acoustic forensics.
Analyze this video and find the exact timestamp (seconds) of the loudest physical impact sound.

Rules:
- Return strictly JSON only.
- "t_audio" must be a number in seconds.
- Ignore speech/background music unless it is the physical impact itself.
- If uncertain, still return your best estimate.

Output format:
{
  "t_audio": 2.6,
  "sound_description": "brief description of the impact sound"
}
"""


@dataclass
class AnnotationResult:
    video_relpath: str
    video_name: str
    video_path: str
    t_visual: float
    t_audio: float
    event_description: str
    sound_description: str
    provider: str
    model: str
    native_key_label: Optional[str]
    elapsed_sec: float
    created_at: float

    def to_dict(self) -> Dict[str, Any]:
        return {
            "video_relpath": self.video_relpath,
            "video_name": self.video_name,
            "video_path": self.video_path,
            "t_visual": round(float(self.t_visual), 6),
            "t_audio": round(float(self.t_audio), 6),
            "event_description": self.event_description,
            "sound_description": self.sound_description,
            "provider": self.provider,
            "model": self.model,
            "native_key_label": self.native_key_label,
            "elapsed_sec": round(float(self.elapsed_sec), 3),
            "created_at": self.created_at,
        }


class NativeKeyManager:
    """Thread-safe native Gemini key rotation manager."""

    def __init__(self, keys: Sequence[Tuple[str, str]]):
        filtered = [(label, key.strip()) for label, key in keys if key and key.strip()]
        if not filtered:
            raise ValueError("At least one native Gemini API key is required.")
        self._keys: List[Tuple[str, str]] = list(filtered)
        self._lock = threading.Lock()
        self._active_idx = 0
        self._quota_exhausted_labels: Set[str] = set()

    def get_active(self) -> Optional[Tuple[str, str]]:
        with self._lock:
            for _ in range(len(self._keys)):
                label, key = self._keys[self._active_idx]
                if label not in self._quota_exhausted_labels:
                    return label, key
                self._active_idx = (self._active_idx + 1) % len(self._keys)
            return None

    def mark_quota_and_rotate(self, label: str) -> bool:
        """
        Mark a key as quota exhausted and rotate to next available key.
        Returns True if all native keys are exhausted after this update.
        """
        with self._lock:
            self._quota_exhausted_labels.add(label)
            if self._keys:
                self._active_idx = (self._active_idx + 1) % len(self._keys)
            return len(self._quota_exhausted_labels) >= len(self._keys)

    def all_exhausted(self) -> bool:
        with self._lock:
            return len(self._quota_exhausted_labels) >= len(self._keys)

    def status_snapshot(self) -> Dict[str, Any]:
        with self._lock:
            active = self._keys[self._active_idx][0] if self._keys else None
            return {
                "active_key_label": active,
                "quota_exhausted_labels": sorted(self._quota_exhausted_labels),
                "total_native_keys": len(self._keys),
            }


def setup_logging(verbose: bool) -> None:
    level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s | %(levelname)s | %(threadName)s | %(message)s",
    )


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(
        description="Dual-pass Gemini annotator with multi-key rotation and OpenRouter fallback."
    )
    p.add_argument(
        "--video-dir",
        type=Path,
        default=Path("/home/ubuntu/video_source/original/uag_oops"),
        help="Root directory containing input videos.",
    )
    p.add_argument(
        "--results-jsonl",
        type=Path,
        default=Path("/home/ubuntu/video_source/result/timestamps_results.jsonl"),
        help="Append-only output JSONL for successful results.",
    )
    p.add_argument(
        "--failed-log",
        type=Path,
        default=Path("/home/ubuntu/video_source/result/failed_videos.log"),
        help="Append-only failure log file.",
    )
    p.add_argument(
        "--native-model",
        type=str,
        default="gemini-3.1-pro-preview",
        help="Native Gemini model id for google-genai SDK.",
    )
    p.add_argument(
        "--openrouter-model",
        type=str,
        default="google/gemini-2.5-pro",
        help="OpenRouter model id (OpenAI-compatible endpoint).",
    )
    p.add_argument(
        "--openrouter-base-url",
        type=str,
        default="https://openrouter.ai/api/v1",
        help="OpenRouter OpenAI-compatible base URL.",
    )
    p.add_argument(
        "--max-workers",
        type=int,
        default=4,
        help="Number of concurrent workers (recommended 3-5).",
    )
    p.add_argument(
        "--max-retries-per-video",
        type=int,
        default=3,
        help="Retries for non-quota transient failures per provider.",
    )
    p.add_argument(
        "--file-ready-timeout-sec",
        type=int,
        default=300,
        help="Max seconds waiting for uploaded Gemini file to become ACTIVE.",
    )
    p.add_argument(
        "--retry-base-sec",
        type=float,
        default=2.0,
        help="Base seconds for exponential retry backoff.",
    )
    p.add_argument(
        "--native-request-timeout-sec",
        type=int,
        default=120,
        help="Per-request timeout (seconds) for native Gemini generate calls.",
    )
    p.add_argument(
        "--openrouter-request-timeout-sec",
        type=int,
        default=120,
        help="Per-request timeout (seconds) for OpenRouter API calls.",
    )
    p.add_argument(
        "--verbose",
        action="store_true",
        help="Enable debug logging.",
    )
    p.add_argument(
        "--progress-interval-sec",
        type=float,
        default=10.0,
        help="Seconds between heartbeat progress logs.",
    )
    return p.parse_args()


def discover_videos(root: Path) -> List[Path]:
    if not root.exists():
        raise FileNotFoundError(f"Video directory not found: {root}")
    vids: List[Path] = []
    for p in root.rglob("*"):
        if p.is_file() and p.suffix.lower() in VIDEO_EXTS:
            vids.append(p)
    vids.sort()
    if not vids:
        raise FileNotFoundError(f"No videos found under: {root}")
    return vids


def load_processed_set(results_jsonl: Path) -> Set[str]:
    processed: Set[str] = set()
    if not results_jsonl.exists():
        return processed
    with results_jsonl.open("r", encoding="utf-8") as f:
        for line in f:
            s = line.strip()
            if not s:
                continue
            try:
                obj = json.loads(s)
            except json.JSONDecodeError:
                continue
            rel = str(obj.get("video_relpath", "")).strip()
            name = str(obj.get("video_name", "")).strip()
            if rel:
                processed.add(rel)
            elif name:
                processed.add(name)
    return processed


def append_jsonl_threadsafe(path: Path, row: Dict[str, Any], lock: threading.Lock) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    encoded = json.dumps(row, ensure_ascii=False)
    with lock:
        with path.open("a", encoding="utf-8") as f:
            f.write(encoded + "\n")


def append_failure_threadsafe(path: Path, text: str, lock: threading.Lock) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with lock:
        with path.open("a", encoding="utf-8") as f:
            f.write(text + "\n")


def extract_json_object(text: str) -> Dict[str, Any]:
    text = (text or "").strip()
    fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL)
    if fence_match:
        return json.loads(fence_match.group(1))
    try:
        obj = json.loads(text)
        if isinstance(obj, dict):
            return obj
    except json.JSONDecodeError:
        pass
    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end != -1 and end > start:
        obj = json.loads(text[start : end + 1])
        if isinstance(obj, dict):
            return obj
    raise ValueError("No valid JSON object found in model output.")


def coerce_visual(data: Dict[str, Any]) -> Tuple[float, str]:
    if "t_visual" not in data:
        raise ValueError(f"Missing t_visual in visual output: {data}")
    ts = float(data["t_visual"])
    if not np.isfinite(ts):
        raise ValueError(f"t_visual is not finite: {ts}")
    desc = str(data.get("event_description", "")).strip()
    if not desc:
        desc = "unspecified visual impact"
    return ts, desc


def coerce_audio(data: Dict[str, Any]) -> Tuple[float, str]:
    if "t_audio" not in data:
        raise ValueError(f"Missing t_audio in audio output: {data}")
    ts = float(data["t_audio"])
    if not np.isfinite(ts):
        raise ValueError(f"t_audio is not finite: {ts}")
    desc = str(data.get("sound_description", "")).strip()
    if not desc:
        desc = "unspecified impact sound"
    return ts, desc


def is_quota_error(exc: Exception) -> bool:
    msg = str(exc).upper()
    keys = [
        "429",
        "TOO MANY REQUESTS",
        "RESOURCE_EXHAUSTED",
        "RATE LIMIT",
        "QUOTA",
        "EXCEEDED",
    ]
    return any(k in msg for k in keys)


def is_retryable_error(exc: Exception) -> bool:
    msg = str(exc).upper()
    transient = [
        "500",
        "502",
        "503",
        "504",
        "UNAVAILABLE",
        "TIMEOUT",
        "CONNECTION",
        "DEADLINE",
    ]
    return any(k in msg for k in transient)


def wait_until_native_file_active(
    client: genai.Client, file_name: str, timeout_sec: int, poll_sec: float = 2.0
) -> None:
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        meta = client.files.get(name=file_name)
        state = getattr(getattr(meta, "state", None), "name", None) or str(getattr(meta, "state", ""))
        state = str(state).upper()
        if state == "ACTIVE":
            return
        if state in {"FAILED", "CANCELLED"}:
            raise RuntimeError(f"Native Gemini file processing failed. state={state}, file={file_name}")
        time.sleep(poll_sec)
    raise TimeoutError(f"Timed out waiting for file ACTIVE: {file_name}")


def _mime_from_suffix(path: Path) -> str:
    ext = path.suffix.lower()
    mapping = {
        ".mp4": "video/mp4",
        ".mov": "video/quicktime",
        ".mkv": "video/x-matroska",
        ".avi": "video/x-msvideo",
        ".webm": "video/webm",
        ".m4v": "video/x-m4v",
    }
    return mapping.get(ext, "video/mp4")


def run_native_dual_pass(
    video_path: Path,
    api_key: str,
    model: str,
    file_ready_timeout_sec: int,
    request_timeout_sec: int,
) -> Tuple[float, str, float, str]:
    # google-genai HttpOptions.timeout expects milliseconds.
    timeout_ms = max(int(request_timeout_sec * 1000), 1000)
    client = genai.Client(
        api_key=api_key,
        http_options=genai_types.HttpOptions(timeout=timeout_ms),
    )
    uploaded = client.files.upload(file=str(video_path))
    uploaded_name = getattr(uploaded, "name", None)
    uploaded_uri = getattr(uploaded, "uri", None)
    mime_type = _mime_from_suffix(video_path)

    if not uploaded_name:
        raise RuntimeError("Native upload returned no file name.")
    if not uploaded_uri:
        raise RuntimeError("Native upload returned no file URI.")

    try:
        wait_until_native_file_active(client, uploaded_name, timeout_sec=file_ready_timeout_sec)

        video_part = {"file_data": {"file_uri": uploaded_uri, "mime_type": mime_type}}

        logging.info("PASS1_VISUAL start | %s", video_path.name)
        resp_visual = client.models.generate_content(
            model=model,
            contents=[VISUAL_PROMPT, video_part],
        )
        logging.info("PASS1_VISUAL done | %s", video_path.name)
        raw_visual = (getattr(resp_visual, "text", "") or "").strip()
        visual_obj = extract_json_object(raw_visual)
        t_visual, event_desc = coerce_visual(visual_obj)

        logging.info("PASS2_AUDIO start | %s", video_path.name)
        resp_audio = client.models.generate_content(
            model=model,
            contents=[AUDIO_PROMPT, video_part],
        )
        logging.info("PASS2_AUDIO done | %s", video_path.name)
        raw_audio = (getattr(resp_audio, "text", "") or "").strip()
        audio_obj = extract_json_object(raw_audio)
        t_audio, sound_desc = coerce_audio(audio_obj)
        return t_visual, event_desc, t_audio, sound_desc
    finally:
        try:
            client.files.delete(name=uploaded_name)
        except Exception as exc:
            logging.warning("Failed to delete uploaded native file %s: %s", uploaded_name, exc)


def encode_video_data_url(video_path: Path) -> str:
    mime = _mime_from_suffix(video_path)
    raw = video_path.read_bytes()
    b64 = base64.b64encode(raw).decode("utf-8")
    return f"data:{mime};base64,{b64}"


def run_openrouter_dual_pass(
    video_path: Path,
    api_key: str,
    model: str,
    base_url: str,
    request_timeout_sec: int,
) -> Tuple[float, str, float, str]:
    client = OpenAI(
        api_key=api_key,
        base_url=base_url,
        timeout=float(request_timeout_sec),
    )
    video_data_url = encode_video_data_url(video_path)

    visual_content: List[Dict[str, Any]] = [
        {"type": "input_text", "text": VISUAL_PROMPT},
        {"type": "input_video", "video_url": video_data_url},
    ]
    logging.info("PASS1_VISUAL start | %s (openrouter)", video_path.name)
    visual_resp = client.responses.create(
        model=model,
        input=[{"role": "user", "content": visual_content}],
    )
    logging.info("PASS1_VISUAL done | %s (openrouter)", video_path.name)
    visual_text = (getattr(visual_resp, "output_text", "") or "").strip()
    visual_obj = extract_json_object(visual_text)
    t_visual, event_desc = coerce_visual(visual_obj)

    audio_content: List[Dict[str, Any]] = [
        {"type": "input_text", "text": AUDIO_PROMPT},
        {"type": "input_video", "video_url": video_data_url},
    ]
    logging.info("PASS2_AUDIO start | %s (openrouter)", video_path.name)
    audio_resp = client.responses.create(
        model=model,
        input=[{"role": "user", "content": audio_content}],
    )
    logging.info("PASS2_AUDIO done | %s (openrouter)", video_path.name)
    audio_text = (getattr(audio_resp, "output_text", "") or "").strip()
    audio_obj = extract_json_object(audio_text)
    t_audio, sound_desc = coerce_audio(audio_obj)
    return t_visual, event_desc, t_audio, sound_desc


def annotate_one_video(
    video_path: Path,
    video_relpath: str,
    key_manager: NativeKeyManager,
    args: argparse.Namespace,
    openrouter_key: Optional[str],
) -> AnnotationResult:
    started = time.time()

    # Native path with rotation.
    native_attempt = 0
    while native_attempt < args.max_retries_per_video:
        native_active = key_manager.get_active()
        if native_active is None:
            break

        label, key = native_active
        try:
            t_visual, event_desc, t_audio, sound_desc = run_native_dual_pass(
                video_path=video_path,
                api_key=key,
                model=args.native_model,
                file_ready_timeout_sec=args.file_ready_timeout_sec,
                request_timeout_sec=args.native_request_timeout_sec,
            )
            return AnnotationResult(
                video_relpath=video_relpath,
                video_name=video_path.name,
                video_path=str(video_path),
                t_visual=t_visual,
                t_audio=t_audio,
                event_description=event_desc,
                sound_description=sound_desc,
                provider="native_gemini",
                model=args.native_model,
                native_key_label=label,
                elapsed_sec=time.time() - started,
                created_at=time.time(),
            )
        except Exception as exc:
            if is_quota_error(exc):
                all_exhausted = key_manager.mark_quota_and_rotate(label)
                logging.warning(
                    "Quota/rate hit on native key=%s for %s. exhausted_all=%s err=%s",
                    label,
                    video_relpath,
                    all_exhausted,
                    exc,
                )
                if all_exhausted:
                    break
                continue

            native_attempt += 1
            if native_attempt >= args.max_retries_per_video:
                logging.error(
                    "Native non-quota error exhausted retries for %s: %s",
                    video_relpath,
                    exc,
                )
                break
            sleep_s = args.retry_base_sec * (2 ** (native_attempt - 1))
            logging.warning(
                "Native transient error for %s (attempt %d/%d). sleeping %.1fs. err=%s",
                video_relpath,
                native_attempt,
                args.max_retries_per_video,
                sleep_s,
                exc,
            )
            time.sleep(sleep_s)

    # Fallback path.
    if not openrouter_key:
        raise RuntimeError("Native keys unavailable/exhausted and OPENROUTER_API_KEY is missing.")

    fallback_attempt = 0
    while fallback_attempt < args.max_retries_per_video:
        try:
            t_visual, event_desc, t_audio, sound_desc = run_openrouter_dual_pass(
                video_path=video_path,
                api_key=openrouter_key,
                model=args.openrouter_model,
                base_url=args.openrouter_base_url,
                request_timeout_sec=args.openrouter_request_timeout_sec,
            )
            return AnnotationResult(
                video_relpath=video_relpath,
                video_name=video_path.name,
                video_path=str(video_path),
                t_visual=t_visual,
                t_audio=t_audio,
                event_description=event_desc,
                sound_description=sound_desc,
                provider="openrouter",
                model=args.openrouter_model,
                native_key_label=None,
                elapsed_sec=time.time() - started,
                created_at=time.time(),
            )
        except Exception as exc:
            fallback_attempt += 1
            if fallback_attempt >= args.max_retries_per_video:
                raise RuntimeError(f"OpenRouter fallback failed after retries: {exc}") from exc
            sleep_s = args.retry_base_sec * (2 ** (fallback_attempt - 1))
            logging.warning(
                "OpenRouter error for %s (attempt %d/%d). sleeping %.1fs. err=%s",
                video_relpath,
                fallback_attempt,
                args.max_retries_per_video,
                sleep_s,
                exc,
            )
            time.sleep(sleep_s)

    raise RuntimeError("Unexpected annotation loop exit.")


def main() -> None:
    args = parse_args()
    setup_logging(args.verbose)

    # Read keys from environment.
    native_keys = [
        ("MINE", os.getenv("GEMINI_API_KEY_MINE", "").strip()),
        ("A", os.getenv("GEMINI_API_KEY_A", "").strip()),
        ("B", os.getenv("GEMINI_API_KEY_B", "").strip()),
    ]
    openrouter_key = os.getenv("OPENROUTER_API_KEY", "").strip() or None

    key_manager = NativeKeyManager(native_keys)
    snapshot = key_manager.status_snapshot()
    logging.info("Native key pool initialized: %s", snapshot)
    logging.info("OpenRouter fallback configured: %s", bool(openrouter_key))

    all_videos = discover_videos(args.video_dir)
    processed_set = load_processed_set(args.results_jsonl)

    pending: List[Tuple[Path, str]] = []
    for video in all_videos:
        rel = str(video.relative_to(args.video_dir))
        if rel in processed_set or video.name in processed_set:
            continue
        pending.append((video, rel))

    logging.info("Total videos discovered: %d", len(all_videos))
    logging.info("Already processed (checkpoint): %d", len(all_videos) - len(pending))
    logging.info("Pending videos: %d", len(pending))
    if not pending:
        logging.info("Nothing to process. Exiting.")
        return

    jsonl_lock = threading.Lock()
    fail_lock = threading.Lock()
    counters_lock = threading.Lock()
    counters = {"ok": 0, "failed": 0, "started": 0}
    total_pending = len(pending)
    progress_stop = threading.Event()
    last_completion_ts = [time.time()]
    last_progress_log_ts = [0.0]
    last_snapshot = [(-1, -1, -1, -1)]

    def progress_heartbeat() -> None:
        while not progress_stop.wait(max(1.0, args.progress_interval_sec)):
            with counters_lock:
                ok = counters["ok"]
                failed = counters["failed"]
                started_n = counters["started"]
            done = ok + failed
            in_flight = max(0, started_n - done)
            elapsed = max(time.time() - started_at, 1e-6)
            rate = done / elapsed
            remaining = max(0, total_pending - done)
            eta_sec = (remaining / rate) if rate > 0 else None
            eta_text = f"{eta_sec/60.0:.1f}m" if eta_sec is not None else "N/A"
            now = time.time()
            stagnant_sec = now - last_completion_ts[0]
            snapshot = (done, ok, failed, in_flight)

            # Avoid noisy repeated logs when nothing changes:
            # - always log when progress state changes
            # - otherwise log once per minute as heartbeat
            should_log = False
            if snapshot != last_snapshot[0]:
                should_log = True
            elif now - last_progress_log_ts[0] >= 60.0:
                should_log = True

            if should_log:
                logging.info(
                    "PROGRESS done=%d/%d ok=%d failed=%d in_flight=%d rate=%.3f vids/s eta=%s stagnant=%.0fs",
                    done,
                    total_pending,
                    ok,
                    failed,
                    in_flight,
                    rate,
                    eta_text,
                    stagnant_sec,
                )
                last_snapshot[0] = snapshot
                last_progress_log_ts[0] = now

    def worker(video_item: Tuple[int, Path, str]) -> Tuple[str, Optional[AnnotationResult], Optional[str]]:
        idx, vpath, rel = video_item
        with counters_lock:
            counters["started"] += 1
        logging.info("START [%d/%d] %s", idx, total_pending, rel)
        try:
            res = annotate_one_video(
                video_path=vpath,
                video_relpath=rel,
                key_manager=key_manager,
                args=args,
                openrouter_key=openrouter_key,
            )
            return rel, res, None
        except Exception as exc:
            return rel, None, str(exc)

    futures: List[Future] = []
    started_at = time.time()
    progress_thread = threading.Thread(
        target=progress_heartbeat,
        name="progress-heartbeat",
        daemon=True,
    )
    progress_thread.start()
    with ThreadPoolExecutor(max_workers=max(1, args.max_workers)) as ex:
        for idx, item in enumerate(pending, start=1):
            vpath, rel = item
            payload = (idx, vpath, rel)
            futures.append(ex.submit(worker, payload))

        for fut in as_completed(futures):
            rel, res, err = fut.result()
            if res is not None:
                append_jsonl_threadsafe(args.results_jsonl, res.to_dict(), jsonl_lock)
                with counters_lock:
                    counters["ok"] += 1
                    done = counters["ok"] + counters["failed"]
                last_completion_ts[0] = time.time()
                logging.info(
                    "DONE [%d/%d] %s | provider=%s | t_visual=%.3f | t_audio=%.3f",
                    done,
                    len(pending),
                    rel,
                    res.provider,
                    res.t_visual,
                    res.t_audio,
                )
            else:
                fail_line = f"{time.time():.0f}\t{rel}\t{err}"
                append_failure_threadsafe(args.failed_log, fail_line, fail_lock)
                with counters_lock:
                    counters["failed"] += 1
                    done = counters["ok"] + counters["failed"]
                last_completion_ts[0] = time.time()
                logging.error("FAIL [%d/%d] %s | %s", done, len(pending), rel, err)

    progress_stop.set()
    progress_thread.join(timeout=1.0)

    elapsed = time.time() - started_at
    logging.info("=========== RUN SUMMARY ===========")
    logging.info("Processed success: %d", counters["ok"])
    logging.info("Processed failed : %d", counters["failed"])
    logging.info("Elapsed seconds  : %.1f", elapsed)
    logging.info("Results JSONL    : %s", args.results_jsonl.resolve())
    logging.info("Failed log       : %s", args.failed_log.resolve())
    logging.info("===================================")


if __name__ == "__main__":
    main()
data_gen/generate_activitynet_chosen.py
ADDED
@@ -0,0 +1,176 @@
#!/usr/bin/env python3
"""
Generate chosen responses for ActivityNet DPO data using GPT-5.4.
Usage:
    export OPENAI_API_KEY="sk-..."
    python generate_activitynet_chosen.py
"""

import json
import os
import sys
import base64
import time
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed

import av
import numpy as np
from openai import OpenAI

# === Config ===
VIDEO_DIR = "/opt/dlami/nvme/sharegpt_activitynet_raw/activitynet/videos"
DPO_SOURCE = "/opt/dlami/nvme/sharegpt_instructions/video_instruction/train/dpo/sft_dpo_17k.jsonl"
OUTPUT_FILE = "/opt/dlami/nvme/activitynet_gpt_chosen.jsonl"
NUM_FRAMES = 10
MAX_WORKERS = 8  # parallel API calls
MODEL = "gpt-5.4-2026-03-05"

client = OpenAI()


def extract_frames(video_path: str, num_frames: int = 10) -> list[bytes]:
    """Extract evenly-spaced frames from video, return as JPEG bytes."""
    container = av.open(video_path)
    stream = next(s for s in container.streams if s.type == "video")
    total_frames = stream.frames
    if total_frames == 0:
        total_frames = 300  # fallback

    indices = np.linspace(0, total_frames - 1, num_frames).astype(int)
    frames = []
    for i, frame in enumerate(container.decode(stream)):
        if i in indices:
            img = frame.to_image()
            import io
            buf = io.BytesIO()
            img.save(buf, format="JPEG", quality=85)
            frames.append(buf.getvalue())
        if len(frames) >= num_frames:
            break

    container.close()
    return frames


def generate_chosen(video_id: str, prompt: str) -> dict:
    """Call GPT-5.4 with video frames + question, return chosen response."""
    video_path = os.path.join(VIDEO_DIR, f"{video_id}.mp4")
    if not os.path.exists(video_path):
        return {"video": video_id, "error": f"video not found: {video_path}"}

    try:
        frames = extract_frames(video_path, NUM_FRAMES)
    except Exception as e:
        return {"video": video_id, "error": f"frame extraction failed: {e}"}

    image_messages = []
    for i, frame_bytes in enumerate(frames):
        b64 = base64.b64encode(frame_bytes).decode("utf-8")
        image_messages.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{b64}", "detail": "low"}
        })

    system_prompt = (
        "You are watching a video. The following images are evenly-spaced frames extracted from the video. "
        "Answer the question based on what you observe in these frames. "
        "Be specific, accurate, and detailed. Describe visual details you actually see. "
        "Do not hallucinate or guess about things not visible in the frames."
    )

    user_content = [
        {"type": "text", "text": f"These are {len(frames)} frames from a video:"},
        *image_messages,
        {"type": "text", "text": f"\nQuestion: {prompt}\n\nPlease provide a detailed and accurate answer."},
    ]

    for attempt in range(3):
        try:
            response = client.chat.completions.create(
                model=MODEL,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_content},
                ],
                max_completion_tokens=512,
                temperature=0.3,
            )
            answer = response.choices[0].message.content.strip()
            return {
                "video": video_id,
                "prompt": prompt,
                "chosen_gpt": answer,
                "tokens_used": response.usage.total_tokens if response.usage else 0,
            }
        except Exception as e:
            if attempt < 2:
                time.sleep(2 ** (attempt + 1))
            else:
                return {"video": video_id, "error": f"API failed after 3 retries: {e}"}


def main():
    # Load ActivityNet DPO entries
    activitynet = []
    with open(DPO_SOURCE) as f:
        for line in f:
            d = json.loads(line)
            if d.get("video", "").startswith("v_"):
                activitynet.append(d)

    print(f"ActivityNet DPO entries: {len(activitynet)}")

    # Resume from existing output
    done_ids = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE) as f:
            for line in f:
                d = json.loads(line)
                done_ids.add(f"{d['video']}_{d.get('prompt', '')[:50]}")
        print(f"Resuming: {len(done_ids)} already done")

    todo = []
    for d in activitynet:
        key = f"{d['video']}_{d['prompt'][:50]}"
        if key not in done_ids:
            todo.append(d)

    print(f"To process: {len(todo)}")
    if not todo:
        print("All done!")
        return

    # Process with thread pool
    total_tokens = 0
    success = 0
    errors = 0

    with open(OUTPUT_FILE, "a") as out_f:
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            futures = {
                executor.submit(generate_chosen, d["video"], d["prompt"]): d
                for d in todo
            }

            for i, future in enumerate(as_completed(futures), 1):
                result = future.result()

                if "error" in result:
                    errors += 1
                    print(f"  [{i}/{len(todo)}] ERROR {result['video']}: {result['error']}")
                else:
                    success += 1
                    total_tokens += result.get("tokens_used", 0)
                    out_f.write(json.dumps(result, ensure_ascii=False) + "\n")
                    out_f.flush()

                if i % 100 == 0:
                    print(f"  [{i}/{len(todo)}] success={success}, errors={errors}, tokens={total_tokens:,}")

    print(f"\nDone! success={success}, errors={errors}, total_tokens={total_tokens:,}")
    print(f"Output: {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
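Resume state in the script above is keyed on the video id plus the first 50 characters of the prompt, so an interrupted run skips already-answered pairs without needing a separate index file. A toy sketch of the same dedup scheme (the records here are made up):

done_keys = {"v_abc123_Describe the first scene."}

records = [
    {"video": "v_abc123", "prompt": "Describe the first scene."},
    {"video": "v_def456", "prompt": "Describe the final scene."},
]
todo = [d for d in records if f"{d['video']}_{d['prompt'][:50]}" not in done_keys]
print([d["video"] for d in todo])  # ['v_def456'] -- the first pair is already done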
data_gen/generate_activitynet_rejected.py
ADDED
@@ -0,0 +1,350 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Generate rejected responses for ActivityNet DPO using the SFT base model.
|
| 4 |
+
8-GPU parallel inference for maximum speed.
|
| 5 |
+
|
| 6 |
+
Usage:
|
| 7 |
+
# Stage 1: Generate rejected (8 GPU parallel)
|
| 8 |
+
python generate_activitynet_rejected.py --stage 1
|
| 9 |
+
|
| 10 |
+
# Stage 2: Assemble final DPO json
|
| 11 |
+
python generate_activitynet_rejected.py --stage 2
|
| 12 |
+
|
| 13 |
+
# Both
|
| 14 |
+
python generate_activitynet_rejected.py --stage all
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import argparse
|
| 18 |
+
import json
|
| 19 |
+
import os
|
| 20 |
+
import logging
|
| 21 |
+
import multiprocessing as mp
|
| 22 |
+
from pathlib import Path
|
| 23 |
+
|
| 24 |
+
import torch
|
| 25 |
+
from tqdm import tqdm
|
| 26 |
+
|
| 27 |
+
LOG = logging.getLogger("activitynet_rejected")
|
| 28 |
+
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(process)d] %(message)s")
|
| 29 |
+
|
| 30 |
+
VIDEO_DIR = "/opt/dlami/nvme/sharegpt_activitynet_raw/activitynet/videos"
|
| 31 |
+
AUDIO_DIR = "/opt/dlami/nvme/sharegpt_activitynet_audio"
|
| 32 |
+
GPT_CHOSEN_FILE = "/opt/dlami/nvme/activitynet_gpt_chosen.jsonl"
|
| 33 |
+
RAW_REJECTED_DIR = "/opt/dlami/nvme/activitynet_rejected_shards"
|
| 34 |
+
FINAL_OUTPUT = "/home/ubuntu/LlamaFactory/data/dpo_activitynet_gpt_chosen.json"
|
| 35 |
+
|
| 36 |
+
SFT_MODEL_PATH = "Rakancorle11/qwen3omni_full_sft_revised_thinker_key"
|
| 37 |
+
VANILLA_MODEL_PATH = "Qwen/Qwen3-Omni-30B-A3B-Instruct"
|
| 38 |
+
NUM_GPUS = len(os.environ.get("CUDA_VISIBLE_DEVICES", "0,1,2,3,4,5,6,7").split(","))
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def worker_fn(gpu_id: int, shard: list[dict], model_path: str, output_dir: str, model_type: str):
|
| 42 |
+
"""Each worker loads model on one GPU and processes its shard."""
|
| 43 |
+
visible_gpus = os.environ.get("CUDA_VISIBLE_DEVICES", "0,1,2,3,4,5,6,7").split(",")
|
| 44 |
+
os.environ["CUDA_VISIBLE_DEVICES"] = visible_gpus[gpu_id]
|
| 45 |
+
|
| 46 |
+
from transformers import AutoConfig, AutoProcessor, Qwen3OmniMoeForConditionalGeneration
|
| 47 |
+
from qwen_omni_utils import process_mm_info
|
| 48 |
+
|
| 49 |
+
LOG.info("GPU %d: loading model %s (type=%s), shard size=%d", gpu_id, model_path, model_type, len(shard))
|
| 50 |
+
processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
|
| 51 |
+
|
| 52 |
+
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
|
| 53 |
+
detected_type = getattr(config, "model_type", "")
|
| 54 |
+
|
| 55 |
+
if detected_type == "qwen3_omni_moe_thinker":
|
| 56 |
+
from transformers import Qwen3OmniMoeThinkerConfig, Qwen3OmniMoeThinkerForConditionalGeneration
|
| 57 |
+
thinker_config = Qwen3OmniMoeThinkerConfig.from_pretrained(model_path)
|
| 58 |
+
model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
|
| 59 |
+
model_path, config=thinker_config, torch_dtype=torch.bfloat16,
|
| 60 |
+
device_map="auto", attn_implementation="sdpa",
|
| 61 |
+
)
|
| 62 |
+
is_thinker = True
|
| 63 |
+
else:
|
| 64 |
+
model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
|
| 65 |
+
model_path, config=config, torch_dtype=torch.bfloat16,
|
| 66 |
+
device_map="auto", attn_implementation="sdpa",
|
| 67 |
+
)
|
| 68 |
+
if hasattr(model, "disable_talker"):
|
| 69 |
+
model.disable_talker()
|
| 70 |
+
is_thinker = False
|
| 71 |
+
model.eval()
|
| 72 |
+
|
| 73 |
+
output_file = os.path.join(output_dir, f"shard_{gpu_id}.jsonl")
|
| 74 |
+
|
| 75 |
+
# Resume support
|
| 76 |
+
done_keys = set()
|
| 77 |
+
if os.path.exists(output_file):
|
| 78 |
+
with open(output_file) as f:
|
| 79 |
+
for line in f:
|
| 80 |
+
d = json.loads(line)
|
| 81 |
+
done_keys.add(f"{d['video']}_{d['prompt'][:50]}")
|
| 82 |
+
LOG.info("GPU %d: resuming, %d already done", gpu_id, len(done_keys))
|
| 83 |
+
|
| 84 |
+
todo = [d for d in shard if f"{d['video']}_{d['prompt'][:50]}" not in done_keys]
|
| 85 |
+
LOG.info("GPU %d: %d to process", gpu_id, len(todo))
|
| 86 |
+
|
| 87 |
+
with open(output_file, "a") as out_f:
|
| 88 |
+
for i, d in enumerate(todo):
|
| 89 |
+
video_path = os.path.join(VIDEO_DIR, f"{d['video']}.mp4")
|
| 90 |
+
audio_path = os.path.join(AUDIO_DIR, f"{d['video']}.wav")
|
| 91 |
+
|
| 92 |
+
if not os.path.exists(video_path):
|
| 93 |
+
continue
|
| 94 |
+
|
| 95 |
+
try:
|
| 96 |
+
messages = [
|
| 97 |
+
{
|
| 98 |
+
"role": "user",
|
| 99 |
+
"content": [
|
| 100 |
+
{"type": "video", "video": video_path},
|
| 101 |
+
*(
|
| 102 |
+
[{"type": "audio", "audio": audio_path}]
|
| 103 |
+
if os.path.exists(audio_path)
|
| 104 |
+
else []
|
| 105 |
+
),
|
| 106 |
+
{"type": "text", "text": d["prompt"]},
|
| 107 |
+
],
|
| 108 |
+
},
|
| 109 |
+
]
|
| 110 |
+
|
| 111 |
+
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 112 |
+
audios, images, videos = process_mm_info(messages, use_audio_in_video=False)
|
| 113 |
+
inputs = processor(
|
| 114 |
+
text=[text], audio=audios, images=images, videos=videos,
|
| 115 |
+
return_tensors="pt", padding=True,
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
                model_dtype = next(model.parameters()).dtype
                for k, v in list(inputs.items()):
                    if hasattr(v, "to"):
                        v = v.to(model.device)
                        if torch.is_floating_point(v):
                            v = v.to(model_dtype)
                    inputs[k] = v

                with torch.no_grad():
                    if is_thinker:
                        gen_kwargs = {
                            "max_new_tokens": 512,
                            "do_sample": True,
                            "temperature": 0.7,
                            "top_p": 0.9,
                        }
                    else:
                        gen_kwargs = {
                            "thinker_max_new_tokens": 512,
                            "return_audio": False,
                            "use_audio_in_video": False,
                            "do_sample": True,
                            "temperature": 0.7,
                            "top_p": 0.9,
                        }
                    output_ids = model.generate(**inputs, **gen_kwargs)

                if isinstance(output_ids, tuple):
                    output_ids = output_ids[0]

                input_len = inputs["input_ids"].shape[-1]
                response = processor.decode(output_ids[0][input_len:], skip_special_tokens=True).strip()

                result = {
                    "video": d["video"],
                    "prompt": d["prompt"],
                    "rejected_base": response,
                }
                out_f.write(json.dumps(result, ensure_ascii=False) + "\n")
                out_f.flush()

            except Exception as e:
                LOG.error("GPU %d: failed %s: %s", gpu_id, d["video"], e)
                continue

            if (i + 1) % 50 == 0:
                LOG.info("GPU %d: %d/%d done", gpu_id, i + 1, len(todo))

    LOG.info("GPU %d: finished", gpu_id)


def stage1_generate_rejected(model_type: str):
    """Split data across 8 GPUs and run in parallel."""
    if model_type == "sft":
        model_path = SFT_MODEL_PATH
        output_dir = RAW_REJECTED_DIR + "_sft"
    else:
        model_path = VANILLA_MODEL_PATH
        output_dir = RAW_REJECTED_DIR + "_vanilla"

    os.makedirs(output_dir, exist_ok=True)

    gpt_data = []
    with open(GPT_CHOSEN_FILE) as f:
        for line in f:
            d = json.loads(line)
            if "error" not in d:
                gpt_data.append(d)

    LOG.info("[%s] Total entries: %d, splitting across %d GPUs", model_type, len(gpt_data), NUM_GPUS)

    # Split into shards
    shards = [[] for _ in range(NUM_GPUS)]
    for i, d in enumerate(gpt_data):
        shards[i % NUM_GPUS].append(d)

    for i, s in enumerate(shards):
        LOG.info(" GPU %d: %d entries", i, len(s))

    # Launch workers
    processes = []
    mp.set_start_method("spawn", force=True)
    for gpu_id in range(NUM_GPUS):
        p = mp.Process(target=worker_fn, args=(gpu_id, shards[gpu_id], model_path, output_dir, model_type))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()

    LOG.info("All workers finished")


def stage2_assemble(model_type: str):
    """Merge shard outputs + GPT chosen into final DPO data."""
    if model_type == "sft":
        rejected_dir = RAW_REJECTED_DIR + "_sft"
        output_file = FINAL_OUTPUT.replace(".json", "_sft.json")
        dataset_name = "dpo_activitynet_gpt_chosen_sft"
    else:
        rejected_dir = RAW_REJECTED_DIR + "_vanilla"
        output_file = FINAL_OUTPUT.replace(".json", "_vanilla.json")
        dataset_name = "dpo_activitynet_gpt_chosen_vanilla"

    # Load GPT chosen
    gpt_map = {}
    with open(GPT_CHOSEN_FILE) as f:
        for line in f:
            d = json.loads(line)
            if "error" not in d:
                key = f"{d['video']}_{d['prompt']}"
                gpt_map[key] = d["chosen_gpt"]

    # Load all rejected shards
    rej_map = {}
    for shard_file in sorted(Path(rejected_dir).glob("shard_*.jsonl")):
        with open(shard_file) as f:
            for line in f:
                d = json.loads(line)
                key = f"{d['video']}_{d['prompt']}"
                rej_map[key] = d["rejected_base"]

    overlap_keys = set(gpt_map) & set(rej_map)
    LOG.info("GPT chosen: %d, Base rejected: %d, Overlap: %d", len(gpt_map), len(rej_map), len(overlap_keys))

    # Iterate the GPT chosen file (rather than the overlap keys) so each pair
    # keeps its clean video_id and prompt.
    results = []
    skipped_similar = 0
    skipped_no_rejected = 0

    with open(GPT_CHOSEN_FILE) as f:
        for line in f:
            d = json.loads(line)
            if "error" in d:
                continue

            key = f"{d['video']}_{d['prompt']}"
            if key not in rej_map:
                skipped_no_rejected += 1
                continue

            chosen = d["chosen_gpt"]
            rejected = rej_map[key]

            c_words = set(chosen.lower().split())
            r_words = set(rejected.lower().split())
            word_overlap = len(c_words & r_words) / len(c_words | r_words) if c_words | r_words else 0
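            # Jaccard word overlap: e.g. {"a","b","c"} vs {"b","c","d"} gives
            # 2/4 = 0.5. Pairs above 0.7 are near-duplicates and carry little
            # preference signal, so they are dropped.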
            if word_overlap > 0.7:
                skipped_similar += 1
                continue

            video_path = os.path.join(VIDEO_DIR, f"{d['video']}.mp4")
            audio_path = os.path.join(AUDIO_DIR, f"{d['video']}.wav")

            results.append({
                "messages": [{"role": "user", "content": f"<video><audio>{d['prompt']}"}],
                "chosen": {"role": "assistant", "content": chosen},
                "rejected": {"role": "assistant", "content": rejected},
                "videos": [video_path],
                "audios": [audio_path] if os.path.exists(audio_path) else [],
            })

    with open(output_file, "w") as f:
        json.dump(results, f, indent=2, ensure_ascii=False)

    LOG.info("Final DPO data: %d entries -> %s", len(results), output_file)
    LOG.info("Skipped (too similar): %d", skipped_similar)
    LOG.info("Skipped (no rejected): %d", skipped_no_rejected)

    # Register in dataset_info.json
    info_path = "/home/ubuntu/LlamaFactory/data/dataset_info.json"
    with open(info_path) as f:
        info = json.load(f)
    info[dataset_name] = {
        "file_name": os.path.basename(output_file),
        "formatting": "sharegpt",
        "ranking": True,
        "columns": {
            "messages": "messages",
            "chosen": "chosen",
            "rejected": "rejected",
            "videos": "videos",
            "audios": "audios"
        },
        "tags": {
            "role_tag": "role",
            "content_tag": "content",
            "user_tag": "user",
            "assistant_tag": "assistant"
        }
    }
    with open(info_path, "w") as f:
        json.dump(info, f, indent=2, ensure_ascii=False)
    LOG.info("Registered %s in dataset_info.json", dataset_name)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--stage", choices=["1", "2", "all"], default="all")
    parser.add_argument("--model", choices=["sft", "vanilla", "both"], default="both",
                        help="Which model to use for rejected generation")
    args = parser.parse_args()

    models = ["sft", "vanilla"] if args.model == "both" else [args.model]

    for model_type in models:
        LOG.info("===== Processing with %s model =====", model_type)
        if args.stage in ("1", "all"):
            stage1_generate_rejected(model_type)
        if args.stage in ("2", "all"):
            stage2_assemble(model_type)


if __name__ == "__main__":
    main()
data_gen/generate_onpolicy_dpo.py
ADDED
@@ -0,0 +1,478 @@
#!/usr/bin/env python3
"""
On-policy DPO data augmentation pipeline.

Three stages:
  Stage 1: Load SFT-trained Qwen3-Omni, run inference on training videos
           to collect model responses. Output: raw_responses.jsonl
  Stage 2: Use GPT-5.4 as judge to evaluate each model response against
           the ground truth label (synced / delay Xs / early Xs).
           Output: judged_responses.jsonl
  Stage 3: Filter wrong responses as rejected, pair with ground truth chosen,
           merge with existing DPO data. Output: dpo_video_train_onpolicy.json

Usage:
  python generate_onpolicy_dpo.py --stage 1    # run inference (GPU needed)
  python generate_onpolicy_dpo.py --stage 2    # run LLM judge (OPENAI_API_KEY needed)
  python generate_onpolicy_dpo.py --stage 3    # filter & merge
  python generate_onpolicy_dpo.py --stage all  # run all stages
"""

from __future__ import annotations

import argparse
import gc
import json
import logging
import os
import re
import time
from pathlib import Path

import torch
from tqdm import tqdm

LOG = logging.getLogger("onpolicy_dpo")

# ── Paths ──
DATA_ROOT = Path("/home/ubuntu/video_source")
ORIGINAL_ROOT = DATA_ROOT / "original"
INPUT_JSONL = DATA_ROOT / "kto_training_data_v2_train.jsonl"
EXISTING_DPO = Path("/home/ubuntu/LlamaFactory/data/dpo_video_train_with_audio.json")
RAW_RESPONSES = DATA_ROOT / "onpolicy_raw_responses.jsonl"
JUDGED_RESPONSES = DATA_ROOT / "onpolicy_judged_responses.jsonl"
OUTPUT_DPO = Path("/home/ubuntu/LlamaFactory/data/dpo_video_train_onpolicy.json")
EXTRACTED_AUDIO_ROOT = DATA_ROOT / "extracted_audio"

# Use the checkpoint that is known to include the correct audio-capable model.
SFT_MODEL_PATH = "/opt/dlami/nvme/LlamaFactory/saves/Qwen3-Omni-Instruct/full/qwen3omni_full_sft_with_audio"


def resolve_video_path(video_field: str) -> str:
    """Resolve relative video paths to absolute paths."""
    if os.path.isabs(video_field):
        return video_field
    return str(ORIGINAL_ROOT / video_field)


def get_audio_path(video_path: str) -> str:
    """Derive extracted .wav path from a video path."""
    rel = os.path.relpath(video_path, str(DATA_ROOT))
    audio_rel = os.path.splitext(rel)[0] + ".wav"
    return str(EXTRACTED_AUDIO_ROOT / audio_rel)
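# Example (illustrative): "/home/ubuntu/video_source/original/foo/bar.mp4"
# maps to "/home/ubuntu/video_source/extracted_audio/original/foo/bar.wav".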


def parse_ground_truth_label(video_path: str) -> dict:
    """Extract sync label from video file path."""
    if "/original/" in video_path:
        return {"is_synced": True, "direction": None, "offset_sec": 0.0}
    m = re.search(r"_(delay|early)_([\d.]+)s\.", video_path)
    if m:
        return {"is_synced": False, "direction": m.group(1), "offset_sec": float(m.group(2))}
    return {"is_synced": True, "direction": None, "offset_sec": 0.0}
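# Example (illustrative): a shifted clip named ".../clip_delay_1.5s.mp4" parses
# to {"is_synced": False, "direction": "delay", "offset_sec": 1.5}; any path
# under /original/ is treated as synced.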


def read_input_data() -> list[dict]:
    """Read kto_training_data_v2_train.jsonl, resolve paths."""
    data = []
    with open(INPUT_JSONL) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            obj = json.loads(line)
            obj["video"] = resolve_video_path(obj["video"])
            data.append(obj)
    return data


# ═══════════════════════════════════════════════════════════════
# Stage 1: Inference
# ═══════════════════════════════════════════════════════════════

def load_sft_model():
    from transformers import (
        AutoConfig,
        AutoProcessor,
        Qwen3OmniMoeForConditionalGeneration,
        Qwen3OmniMoeThinkerConfig,
        Qwen3OmniMoeThinkerForConditionalGeneration,
    )

    LOG.info("Loading processor from %s", SFT_MODEL_PATH)
    processor = AutoProcessor.from_pretrained(SFT_MODEL_PATH)

    LOG.info("Loading model from %s", SFT_MODEL_PATH)
    cfg_path = Path(SFT_MODEL_PATH) / "config.json"
    with open(cfg_path, "r") as f:
        cfg_dict = json.load(f)
    model_type = cfg_dict.get("model_type")

    # `qwen3omni_full_sft_with_audio` uses `model_type=qwen3_omni_moe_thinker`,
    # which AutoConfig can't always map in older Transformers.
    if model_type == "qwen3_omni_moe_thinker":
        config = Qwen3OmniMoeThinkerConfig.from_pretrained(SFT_MODEL_PATH)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            SFT_MODEL_PATH,
            config=config,
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )
    else:
        config = AutoConfig.from_pretrained(SFT_MODEL_PATH)
        # Do not override `enable_audio_output` here.
        # Forcing it changes internal module prefixes and may cause weights
        # to not load correctly (leading to garbage text).
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            SFT_MODEL_PATH,
            config=config,
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )
    model.eval()
    return model, processor


def run_inference_single(model, processor, video_path: str, audio_path: str,
                         question: str, temperature: float, max_new_tokens: int) -> str:
    from qwen_omni_utils import process_mm_info

    from llamafactory.data.mm_plugin import get_mm_plugin

    plugin = get_mm_plugin(
        "qwen2_omni",
        image_token="<|image_pad|>",
        video_token="<|video_pad|>",
        audio_token="<|audio_pad|>",
    )
    messages_mm = [
        {
            "role": "user",
            "content": [
                {"type": "video", "video": video_path},
                {"type": "audio", "audio": audio_path},
                {"type": "text", "text": question},
            ],
        },
    ]
    messages_for_template = plugin.process_messages(
        [{"role": "user", "content": f"<video><audio>\n{question}"}],
        images=[],
        videos=[video_path],
        audios=[audio_path],
        processor=processor,
    )
    text = processor.apply_chat_template(
        messages_for_template, tokenize=False, add_generation_prompt=True
    )
    audios, images, videos = process_mm_info(messages_mm, use_audio_in_video=False)
    inputs = processor(
        text=[text], audio=audios, images=images, videos=videos,
        return_tensors="pt", padding=True,
    )
    # Model is loaded in bfloat16; processor typically returns float32 for
    # image/video/audio features. Cast floating inputs to match model dtype.
    model_dtype = next(model.parameters()).dtype
    converted = {}
    for k, v in inputs.items():
        if hasattr(v, "to"):
            v = v.to(model.device)
            if torch.is_floating_point(v):
                v = v.to(model_dtype)
        converted[k] = v
    inputs = converted

    from transformers import Qwen3OmniMoeThinkerForConditionalGeneration

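    # The bare thinker model takes standard generate kwargs; the full Omni
    # wrapper instead routes them through thinker_* kwargs and must be told
    # explicitly not to produce audio.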
    if isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration):
        gen_kwargs = {
            "max_new_tokens": max_new_tokens,
            "do_sample": temperature > 0,
        }
        if temperature > 0:
            gen_kwargs["temperature"] = temperature
            gen_kwargs["top_p"] = 0.9
    else:
        gen_kwargs = {
            "thinker_max_new_tokens": max_new_tokens,
            "use_audio_in_video": False,
            "return_audio": False,
            "do_sample": temperature > 0,
        }
        if temperature > 0:
            gen_kwargs["temperature"] = temperature
            gen_kwargs["top_p"] = 0.9

    with torch.inference_mode():
        output_ids = model.generate(**inputs, **gen_kwargs)

    prompt_len = inputs["input_ids"].shape[1]
    response = processor.batch_decode(output_ids[:, prompt_len:], skip_special_tokens=True)[0].strip()
    return response


def stage1_inference(args):
    LOG.info("=== Stage 1: Inference ===")

    data = read_input_data()
    LOG.info("Loaded %d samples from %s", len(data), INPUT_JSONL)

    processed = set()
    processed_count = 0
    if RAW_RESPONSES.exists():
        with open(RAW_RESPONSES) as f:
            for line in f:
                obj = json.loads(line)
                # Only treat a video as processed when we actually generated at least one response.
                if obj.get("responses"):
                    ok = any((isinstance(r, dict) and r.get("response")) for r in obj.get("responses", []))
                    if ok:
                        processed.add(obj["video"])
        LOG.info("Resuming: %d already processed", len(processed))

    model, processor = load_sft_model()

    for item in tqdm(data, desc="Inference"):
        video_path = item["video"]
        if video_path in processed:
            continue

        audio_path = get_audio_path(video_path)
        if not os.path.exists(audio_path):
            LOG.warning("Audio not found: %s, skipping", audio_path)
            continue

        question = item["prompt"]
        ground_truth = item["chosen"]
        label = parse_ground_truth_label(video_path)

        responses = []
        for temp in args.temperatures:
            try:
                resp = run_inference_single(
                    model, processor, video_path, audio_path,
                    question, temperature=temp, max_new_tokens=args.max_new_tokens,
                )
                responses.append({"temperature": temp, "response": resp})
            except Exception as e:
                LOG.warning("Failed on %s (temp=%.1f): %s", video_path, temp, e)

        gc.collect()
        torch.cuda.empty_cache()

        out = {
            "video": video_path,
            "audio": audio_path,
            "question": question,
            "ground_truth": ground_truth,
            "label": label,
            "responses": responses,
        }
        with open(RAW_RESPONSES, "a") as f:
            f.write(json.dumps(out, ensure_ascii=False) + "\n")
        if responses:
            processed.add(video_path)
            processed_count += 1
            if args.max_samples > 0 and processed_count >= args.max_samples:
                break

    LOG.info("Stage 1 done. Wrote %s", RAW_RESPONSES)


# ═══════════════════════════════════════════════════════════════
# Stage 2: LLM Judge (GPT-5.4)
# ═══════════════════════════════════════════════════════════════

OFFSET_TOL_SEC = 1.0

JUDGE_PROMPT = """\
You are evaluating a model's response about audio-video synchronization.

## Ground truth about this video:
- Synchronized: {is_synced}
- If not synchronized — direction: {direction}, offset: {offset_sec}s

## Model's response:
\"\"\"{response}\"\"\"

## Task:
Determine if the model's response is CORRECT or INCORRECT.

A response is CORRECT if:
1. For synced videos: model says it's synchronized/aligned (minor tolerance claims are OK)
2. For not-synced videos: model says it's NOT synchronized AND gets the direction right (delay vs early).
   Additionally, if the model mentions/estimates the audio-video gap (numeric or clearly implied), it should be within ±{offset_tol_sec}s of the true offset.

A response is INCORRECT if:
1. For synced videos: model says it's not synchronized or claims a significant gap
2. For not-synced videos: model says it's synchronized, or gets the direction wrong,
   OR (if the model provides a gap estimate) the absolute gap error is > {offset_tol_sec}s.

Output ONLY a JSON object, no markdown fences:
{{"correct": true or false, "reason": "brief explanation"}}"""


def judge_with_openai(response: str, label: dict, client) -> dict:
    prompt = JUDGE_PROMPT.format(
        is_synced=label["is_synced"],
        direction=label.get("direction") or "N/A",
        offset_sec=label.get("offset_sec", 0),
        offset_tol_sec=OFFSET_TOL_SEC,
        response=response,
    )

    result = client.chat.completions.create(
        model="gpt-5.4",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,
        max_tokens=200,
    )

    text = result.choices[0].message.content.strip()
    text = re.sub(r"^```json\s*", "", text)
    text = re.sub(r"\s*```$", "", text)

    try:
        return json.loads(text)
    except json.JSONDecodeError:
        LOG.warning("Failed to parse judge output: %s", text)
        return {"correct": None, "reason": text}


def stage2_judge(args):
    LOG.info("=== Stage 2: LLM Judge (GPT-5.4) ===")

    from openai import OpenAI
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("Set OPENAI_API_KEY environment variable")
    client = OpenAI(api_key=api_key)

    raw_data = []
    with open(RAW_RESPONSES) as f:
        for line in f:
            raw_data.append(json.loads(line))
    LOG.info("Loaded %d raw response records", len(raw_data))

    already_judged = set()
    if JUDGED_RESPONSES.exists():
        with open(JUDGED_RESPONSES) as f:
            for line in f:
                obj = json.loads(line)
                already_judged.add((obj["video"], obj["response"][:80]))
        LOG.info("Resuming: %d already judged", len(already_judged))

    for record in tqdm(raw_data, desc="Judging"):
        label = record["label"]
        for resp_obj in record["responses"]:
            resp_text = resp_obj["response"]
            key = (record["video"], resp_text[:80])
            if key in already_judged:
                continue

            try:
                verdict = judge_with_openai(resp_text, label, client)
            except Exception as e:
                LOG.warning("Judge failed for %s: %s", record["video"], e)
                time.sleep(2)
                continue

            out = {
                "video": record["video"],
                "audio": record["audio"],
                "question": record["question"],
                "ground_truth": record["ground_truth"],
                "label": label,
                "response": resp_text,
                "temperature": resp_obj["temperature"],
                "judge": verdict,
            }
            with open(JUDGED_RESPONSES, "a") as f:
                f.write(json.dumps(out, ensure_ascii=False) + "\n")
            already_judged.add(key)

            time.sleep(0.05)

    LOG.info("Stage 2 done. Wrote %s", JUDGED_RESPONSES)


# ═══════════════════════════════════════════════════════════════
# Stage 3: Filter & Merge
# ═══════════════════════════════════════════════════════════════

def stage3_merge(args):
    LOG.info("=== Stage 3: Filter & Merge ===")

    judged = []
    with open(JUDGED_RESPONSES) as f:
        for line in f:
            judged.append(json.loads(line))
    LOG.info("Loaded %d judged records", len(judged))

    new_pairs = []
    seen = set()
    for record in judged:
        verdict = record.get("judge", {})
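        # Keep only responses the judge explicitly marked wrong; correct=None
        # (unparseable judge output) is skipped rather than treated as wrong.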
        if verdict.get("correct") is not False:
            continue

        dedup_key = (record["video"], record["response"][:80])
        if dedup_key in seen:
            continue
        seen.add(dedup_key)

        pair = {
            "messages": [
                {"role": "user", "content": "<video><audio>" + record["question"]}
            ],
            "chosen": {"role": "assistant", "content": record["ground_truth"]},
            "rejected": {"role": "assistant", "content": record["response"]},
            "videos": [record["video"]],
            "audios": [record["audio"]],
        }
        new_pairs.append(pair)

    LOG.info("New on-policy DPO pairs: %d (from %d judged)", len(new_pairs), len(judged))

    with open(EXISTING_DPO) as f:
        existing = json.load(f)
    LOG.info("Existing DPO pairs: %d", len(existing))

    merged = existing + new_pairs
    LOG.info("Merged total: %d pairs", len(merged))

    with open(OUTPUT_DPO, "w") as f:
        json.dump(merged, f, indent=2, ensure_ascii=False)
    LOG.info("Wrote %s", OUTPUT_DPO)


# ═══════════════════════════════════════════════════════════════

def main():
    logging.basicConfig(level=logging.INFO, format="%(asctime)s | %(levelname)s | %(message)s")

    parser = argparse.ArgumentParser(description="On-policy DPO data augmentation")
    parser.add_argument("--stage", type=str, default="all", choices=["1", "2", "3", "all"])
    parser.add_argument("--temperatures", type=float, nargs="+", default=[0.0, 0.7, 1.0],
                        help="Sampling temperatures for stage 1. Multiple = more responses per video.")
    parser.add_argument("--max-new-tokens", type=int, default=256)
    parser.add_argument(
        "--max-samples",
        type=int,
        default=-1,
        help="Stage 1 only: max number of videos to process (-1 means all).",
    )
    args = parser.parse_args()

    if args.stage in ("1", "all"):
        stage1_inference(args)
    if args.stage in ("2", "all"):
        stage2_judge(args)
    if args.stage in ("3", "all"):
        stage3_merge(args)


if __name__ == "__main__":
    main()
data_gen/multi_model_frame_selection_compare.py
ADDED
@@ -0,0 +1,335 @@
#!/usr/bin/env python3
"""
Fair multi-model visual grounding by single-stage frame selection.

Core idea:
- Extract the SAME sampled frames from each muted video.
- Ask Gemini / OpenAI / Claude to choose the best impact frame index.
- Convert frame index to timestamp in code (timestamp = frame_index / sampled_fps).
"""

import argparse
import base64
import json
import logging
import os
import random
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional

import cv2
from tqdm import tqdm

try:
    import google.generativeai as genai
except Exception:  # pragma: no cover
    genai = None

try:
    from openai import OpenAI
except Exception:  # pragma: no cover
    OpenAI = None

try:
    import anthropic
except Exception:  # pragma: no cover
    anthropic = None


MUTED_DIR = Path("/home/ubuntu/video_source/muted_original")
OUTPUT_JSONL = Path("/home/ubuntu/video_source/result/frame_selection_compare.jsonl")

GEMINI_MODEL = "gemini-3.1-pro-preview"
OPENAI_MODEL = "gpt-5.1"
CLAUDE_MODEL = "claude-sonnet-4-6"

SAMPLED_FPS = 4.0
MAX_VIDEOS = 100
RANDOM_SEED = 40
MAX_FRAMES_PER_VIDEO = 240


def build_prompt(num_frames: int) -> str:
    return f"""You are an expert in physics and video forensics.
You will receive an ordered sequence of frames from one muted video.
Your task: choose the SINGLE frame that shows the MAXIMUM PHYSICAL IMPACT
(hard collision, hardest fall contact, strongest crash moment).

Important:
1) Output must be strictly JSON.
2) The frame index must be an integer in [0, {num_frames - 1}].
3) Pick only one best frame.

Return exactly:
{{
  "best_frame_index": 12,
  "event_description": "brief description of the impact"
}}"""


@dataclass
class FrameSelectionResult:
    best_frame_index: int
    event_description: str


@dataclass
class SampledFrame:
    sampled_index: int
    jpeg_bytes: bytes
    jpeg_b64: str


def setup_logging() -> None:
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s | %(levelname)s | %(message)s",
    )


def _extract_json_object(text: str) -> Dict[str, Any]:
    text = text.strip()
    fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL)
    if fence_match:
        return json.loads(fence_match.group(1))
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        pass
    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end != -1 and end > start:
        return json.loads(text[start : end + 1])
    raise ValueError("No valid JSON object found in model output.")
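# Parsing order: a fenced json code block first, then the raw string, then the
# outermost {...} span as a last resort before giving up.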


def _coerce_selection(data: Dict[str, Any], num_frames: int) -> FrameSelectionResult:
    if "best_frame_index" not in data or "event_description" not in data:
        raise ValueError(f"Missing required keys: {data}")
    idx = int(data["best_frame_index"])
    if idx < 0 or idx >= num_frames:
        raise ValueError(f"best_frame_index out of range: {idx} / {num_frames}")
    desc = str(data["event_description"]).strip()
    if not desc:
        raise ValueError("event_description is empty.")
    return FrameSelectionResult(best_frame_index=idx, event_description=desc)


def append_jsonl(path: Path, payload: Dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(payload, ensure_ascii=False) + "\n")


def sample_video_frames(video_path: Path, sampled_fps: float, max_frames: int) -> List[SampledFrame]:
    cap = cv2.VideoCapture(str(video_path))
    if not cap.isOpened():
        raise RuntimeError(f"Could not open video: {video_path}")

    source_fps = cap.get(cv2.CAP_PROP_FPS)
    if not source_fps or source_fps <= 0:
        source_fps = 30.0

    frame_interval = max(int(round(source_fps / sampled_fps)), 1)
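    # e.g. a 30 fps source with sampled_fps=4.0 keeps every round(30/4)=8th
    # frame, an effective rate of 3.75 fps, slightly below the target.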
    frames: List[SampledFrame] = []
    frame_idx = 0
    sampled_idx = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if frame_idx % frame_interval == 0:
            ok_enc, buffer = cv2.imencode(".jpg", frame)
            if ok_enc:
                jpeg_bytes = buffer.tobytes()
                frames.append(
                    SampledFrame(
                        sampled_index=sampled_idx,
                        jpeg_bytes=jpeg_bytes,
                        jpeg_b64=base64.b64encode(jpeg_bytes).decode("utf-8"),
                    )
                )
                sampled_idx += 1
                if len(frames) >= max_frames:
                    break
        frame_idx += 1
    cap.release()

    if not frames:
        raise RuntimeError(f"No frames sampled from video: {video_path}")
    return frames


def query_gemini(frames: List[SampledFrame], api_key: str) -> FrameSelectionResult:
    if genai is None:
        raise RuntimeError("google.generativeai package is not installed.")
    if not api_key:
        raise RuntimeError("GEMINI_API_KEY is missing.")

    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(GEMINI_MODEL)

    content: List[Any] = [build_prompt(len(frames))]
    for frame in frames:
        content.append(f"Frame index: {frame.sampled_index}")
        content.append({"mime_type": "image/jpeg", "data": frame.jpeg_bytes})

    resp = model.generate_content(content)
    raw_text = getattr(resp, "text", "") or ""
    parsed = _extract_json_object(raw_text)
    return _coerce_selection(parsed, len(frames))


def query_openai(frames: List[SampledFrame], api_key: str) -> FrameSelectionResult:
    if OpenAI is None:
        raise RuntimeError("openai package is not installed.")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY is missing.")

    client = OpenAI(api_key=api_key)
    content: List[Dict[str, Any]] = [{"type": "input_text", "text": build_prompt(len(frames))}]
    for frame in frames:
        content.append({"type": "input_text", "text": f"Frame index: {frame.sampled_index}"})
        content.append(
            {
                "type": "input_image",
                "image_url": f"data:image/jpeg;base64,{frame.jpeg_b64}",
            }
        )

    response = client.responses.create(
        model=OPENAI_MODEL,
        input=[{"role": "user", "content": content}],
    )
    raw_text = getattr(response, "output_text", "") or ""
    parsed = _extract_json_object(raw_text)
    return _coerce_selection(parsed, len(frames))


def query_claude(frames: List[SampledFrame], api_key: str) -> FrameSelectionResult:
    if anthropic is None:
        raise RuntimeError("anthropic package is not installed.")
    if not api_key:
        raise RuntimeError("ANTHROPIC_API_KEY is missing.")

    client = anthropic.Anthropic(api_key=api_key)
    content: List[Dict[str, Any]] = [{"type": "text", "text": build_prompt(len(frames))}]
    for frame in frames:
        content.append({"type": "text", "text": f"Frame index: {frame.sampled_index}"})
        content.append(
            {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": "image/jpeg",
                    "data": frame.jpeg_b64,
                },
            }
        )

    response = client.messages.create(
        model=CLAUDE_MODEL,
        max_tokens=300,
        messages=[{"role": "user", "content": content}],
    )

    chunks = []
    for block in getattr(response, "content", []):
        if getattr(block, "type", None) == "text":
            chunks.append(getattr(block, "text", ""))
    raw_text = "\n".join(chunks).strip()
    parsed = _extract_json_object(raw_text)
    return _coerce_selection(parsed, len(frames))


def safe_query(query_fn, frames: List[SampledFrame], api_key: str, model_name: str) -> Optional[FrameSelectionResult]:
    try:
        return query_fn(frames, api_key)
    except Exception as exc:
        logging.error("%s failed: %s", model_name, exc)
        return None


def idx_to_timestamp(best_frame_index: Optional[int], sampled_fps: float) -> Optional[float]:
    if best_frame_index is None:
        return None
    return round(float(best_frame_index) / sampled_fps, 3)
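# Example: with SAMPLED_FPS = 4.0, best_frame_index 12 maps to 12 / 4.0 = 3.0 s
# into the video.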


def process_one_video(video_path: Path, gemini_key: str, openai_key: str, anthropic_key: str) -> Dict[str, Any]:
    frames = sample_video_frames(video_path, sampled_fps=SAMPLED_FPS, max_frames=MAX_FRAMES_PER_VIDEO)

    gemini_res = safe_query(query_gemini, frames, gemini_key, "Gemini")
    openai_res = safe_query(query_openai, frames, openai_key, "OpenAI")
    claude_res = safe_query(query_claude, frames, anthropic_key, "Claude")

    row = {
        "video_id": video_path.name,
        "sampled_fps": SAMPLED_FPS,
        "num_sampled_frames": len(frames),
        "gemini_frame_index": gemini_res.best_frame_index if gemini_res else None,
        "openai_frame_index": openai_res.best_frame_index if openai_res else None,
        "claude_frame_index": claude_res.best_frame_index if claude_res else None,
        "t_gemini": idx_to_timestamp(gemini_res.best_frame_index if gemini_res else None, SAMPLED_FPS),
        "t_openai": idx_to_timestamp(openai_res.best_frame_index if openai_res else None, SAMPLED_FPS),
        "t_claude": idx_to_timestamp(claude_res.best_frame_index if claude_res else None, SAMPLED_FPS),
        "event_desc_gemini": gemini_res.event_description if gemini_res else None,
        "event_desc_openai": openai_res.event_description if openai_res else None,
        "event_desc_claude": claude_res.event_description if claude_res else None,
    }
    return row


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Fair 3-model frame-selection comparison.")
    parser.add_argument("--muted-dir", type=Path, default=MUTED_DIR, help="Muted videos folder")
    parser.add_argument("--output-jsonl", type=Path, default=OUTPUT_JSONL, help="Output JSONL file")
    parser.add_argument("--sampled-fps", type=float, default=SAMPLED_FPS, help="Frame sampling FPS")
    parser.add_argument("--max-videos", type=int, default=MAX_VIDEOS, help="How many videos to run")
    parser.add_argument("--max-frames", type=int, default=MAX_FRAMES_PER_VIDEO, help="Max sampled frames per video")
    parser.add_argument("--seed", type=int, default=RANDOM_SEED, help="Random seed for video selection")
    parser.add_argument("--overwrite-output", action="store_true", help="Delete previous output file before running")
    return parser.parse_args()


def main() -> None:
    global SAMPLED_FPS, MAX_FRAMES_PER_VIDEO
    setup_logging()
    args = parse_args()
    SAMPLED_FPS = args.sampled_fps
    MAX_FRAMES_PER_VIDEO = args.max_frames

    gemini_key = os.getenv("GEMINI_API_KEY", "").strip()
    openai_key = os.getenv("OPENAI_API_KEY", "").strip()
    anthropic_key = os.getenv("ANTHROPIC_API_KEY", "").strip()

    if not args.muted_dir.exists():
        raise FileNotFoundError(f"Muted folder not found: {args.muted_dir}")

    videos = sorted(args.muted_dir.rglob("*.mp4"))
    if not videos:
        raise FileNotFoundError(f"No mp4 found in: {args.muted_dir}")

    rng = random.Random(args.seed)
    if len(videos) > args.max_videos:
        videos = rng.sample(videos, args.max_videos)
        videos.sort()

    if args.overwrite_output and args.output_jsonl.exists():
        args.output_jsonl.unlink()

    logging.info("Videos selected: %d", len(videos))
    logging.info("sampled_fps=%.2f | max_frames=%d", SAMPLED_FPS, MAX_FRAMES_PER_VIDEO)
    logging.info("Output: %s", args.output_jsonl)

    for video_path in tqdm(videos, desc="Frame Selection Compare", unit="video"):
        row = process_one_video(video_path, gemini_key, openai_key, anthropic_key)
        append_jsonl(args.output_jsonl, row)

    logging.info("Done.")


if __name__ == "__main__":
    main()
data_gen/multi_model_visual_grounding.py
ADDED
@@ -0,0 +1,385 @@
#!/usr/bin/env python3
import base64
import json
import logging
import os
import random
import re
import subprocess
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional

import cv2
from tqdm import tqdm

try:
    import google.generativeai as genai
except Exception:  # pragma: no cover - optional dependency
    genai = None

try:
    from openai import OpenAI
except Exception:  # pragma: no cover - optional dependency
    OpenAI = None

try:
    import anthropic
except Exception:  # pragma: no cover - optional dependency
    anthropic = None


ORIGINAL_DIR = Path("/home/ubuntu/video_source/original/uag_oops")
MUTED_DIR = Path("/home/ubuntu/video_source/muted_original")
OUTPUT_JSONL = Path("/home/ubuntu/video_source/muted_original/visual_timestamps_compare.jsonl")

VISUAL_PROMPT = """You are an expert in physics and video forensics. Carefully analyze this muted video to find the exact moment a high-energy physical event occurs (e.g., a hard fall, a collision, something breaking).
You must output strictly in JSON format with the following keys:
{
  "visual_timestamp": 2.5,
  "event_description": "Brief description of the impact"
}"""

GEMINI_MODEL = "gemini-3.1-pro-preview"
OPENAI_MODEL = "gpt-5.1"
CLAUDE_MODEL = "claude-sonnet-4-6"
SAMPLE_SIZE = 100
RANDOM_SEED = 42

MAX_RETRIES = 2
RETRY_BASE_SECONDS = 2.0
GEMINI_FILE_READY_TIMEOUT_SECONDS = 120
GEMINI_FILE_POLL_SECONDS = 2
OPENAI_TARGET_FPS = 1.0
CLAUDE_TARGET_FPS = 1.0


@dataclass
class VisualResult:
    visual_timestamp: float
    event_description: str


def setup_logging() -> None:
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s | %(levelname)s | %(message)s",
    )


def ensure_ffmpeg() -> None:
    subprocess.run(
        ["ffmpeg", "-version"],
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )


def with_retries(fn, *args, **kwargs):
    last_exc: Optional[Exception] = None
    for attempt in range(1, MAX_RETRIES + 1):
        try:
            return fn(*args, **kwargs)
        except Exception as exc:  # pragma: no cover - network and API variability
            last_exc = exc
            if attempt < MAX_RETRIES:
                wait_s = RETRY_BASE_SECONDS * (2 ** (attempt - 1))
                logging.warning(
                    "Attempt %s/%s failed: %s. Retrying in %.1fs...",
                    attempt,
                    MAX_RETRIES,
                    exc,
                    wait_s,
                )
                time.sleep(wait_s)
    raise RuntimeError(f"All retry attempts failed. Last error: {last_exc}")
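# With MAX_RETRIES = 2 and RETRY_BASE_SECONDS = 2.0 this makes one retry after
# a 2.0 s wait; the backoff doubles per attempt (2.0 * 2 ** (attempt - 1)).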


def _extract_json_object(text: str) -> Dict[str, Any]:
    text = text.strip()

    fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL)
    if fence_match:
        return json.loads(fence_match.group(1))

    try:
        return json.loads(text)
    except json.JSONDecodeError:
        pass

    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end != -1 and end > start:
        return json.loads(text[start : end + 1])

    raise ValueError("No valid JSON object found in model output.")


def _coerce_visual_result(data: Dict[str, Any]) -> VisualResult:
    ts = float(data["visual_timestamp"])
    desc = str(data["event_description"]).strip()
    if not desc:
        raise ValueError("event_description is empty.")
    return VisualResult(visual_timestamp=ts, event_description=desc)


def append_jsonl(path: Path, payload: Dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(payload, ensure_ascii=False) + "\n")


def extract_muted_video(input_video: Path, output_video: Path) -> None:
    output_video.parent.mkdir(parents=True, exist_ok=True)
    cmd = [
        "ffmpeg",
        "-y",
        "-i",
        str(input_video),
        "-an",
        "-c:v",
        "copy",
        str(output_video),
    ]
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
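# `-an` drops the audio stream and `-c:v copy` passes the video stream through
# untouched, so muting is a fast remux with no re-encode.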


def prepare_muted_videos(original_dir: Path, muted_dir: Path) -> List[Path]:
    if not original_dir.exists():
        raise FileNotFoundError(f"Original folder not found: {original_dir}")

    originals = sorted(original_dir.rglob("*.mp4"))
    if not originals:
        raise FileNotFoundError(f"No .mp4 files found under: {original_dir}")

    muted_dir.mkdir(parents=True, exist_ok=True)
    muted_paths: List[Path] = []

    for src in originals:
        rel = src.relative_to(original_dir)
        dst = muted_dir / rel
        if not dst.exists():
            logging.info("Muting: %s -> %s", src, dst)
            extract_muted_video(src, dst)
        else:
            logging.info("Muted exists, skip: %s", dst)
        muted_paths.append(dst)
    return muted_paths


def video_frames_to_base64_jpegs(video_path: Path, target_fps: float) -> List[str]:
    cap = cv2.VideoCapture(str(video_path))
    if not cap.isOpened():
        raise RuntimeError(f"Could not open video: {video_path}")

    source_fps = cap.get(cv2.CAP_PROP_FPS)
    if not source_fps or source_fps <= 0:
        source_fps = 30.0
    frame_interval = max(int(round(source_fps / target_fps)), 1)

    frames_b64: List[str] = []
    frame_idx = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if frame_idx % frame_interval == 0:
            ok_enc, buffer = cv2.imencode(".jpg", frame)
            if ok_enc:
                frames_b64.append(base64.b64encode(buffer.tobytes()).decode("utf-8"))
        frame_idx += 1

    cap.release()
    if not frames_b64:
        raise RuntimeError(f"No frames extracted from: {video_path}")
    return frames_b64


def wait_for_gemini_file_active(file_name: str) -> None:
    deadline = time.time() + GEMINI_FILE_READY_TIMEOUT_SECONDS
    while time.time() < deadline:
        file_obj = genai.get_file(file_name)
        state_obj = getattr(file_obj, "state", None)
        state_name = str(getattr(state_obj, "name", state_obj))
        if state_name == "ACTIVE":
            return
        if state_name in {"FAILED", "CANCELLED"}:
            raise RuntimeError(f"Gemini file processing failed: state={state_name}")
        time.sleep(GEMINI_FILE_POLL_SECONDS)
    raise TimeoutError("Gemini file not ACTIVE before timeout.")


def query_gemini_visual(muted_video: Path, api_key: str) -> VisualResult:
    """
    Placeholder section for the Gemini API call.
    Replace internals as needed for your account/project config.
    """
    if genai is None:
        raise RuntimeError("google.generativeai is not installed.")
    if not api_key:
        raise RuntimeError("GEMINI_API_KEY is missing.")

    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(GEMINI_MODEL)
    uploaded = genai.upload_file(path=str(muted_video))
    try:
        wait_for_gemini_file_active(uploaded.name)
        resp = model.generate_content([VISUAL_PROMPT, uploaded])
        raw_text = getattr(resp, "text", "") or ""
        data = _extract_json_object(raw_text)
        return _coerce_visual_result(data)
    finally:
        try:
            genai.delete_file(uploaded.name)
        except Exception:
            logging.warning("Failed to delete Gemini upload: %s", uploaded.name)


def query_openai_visual(muted_video: Path, api_key: str) -> VisualResult:
    """
    Placeholder section for the OpenAI API call (GPT-5.1).
    Replace internals if your OpenAI account expects another multimodal format.
    """
    if OpenAI is None:
        raise RuntimeError("openai package is not installed.")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY is missing.")

    client = OpenAI(api_key=api_key)
    image_b64_list = video_frames_to_base64_jpegs(muted_video, target_fps=OPENAI_TARGET_FPS)
    content: List[Dict[str, Any]] = [{"type": "input_text", "text": VISUAL_PROMPT}]
    for b64_img in image_b64_list:
        content.append(
            {
                "type": "input_image",
                "image_url": f"data:image/jpeg;base64,{b64_img}",
            }
        )

    response = client.responses.create(
        model=OPENAI_MODEL,
        input=[{"role": "user", "content": content}],
    )
    raw_text = getattr(response, "output_text", "") or ""
    data = _extract_json_object(raw_text)
    return _coerce_visual_result(data)


def query_claude_visual(muted_video: Path, api_key: str) -> VisualResult:
    """
    Placeholder section for the Anthropic API call (Claude Sonnet 4.6).
    The current implementation samples video frames and sends them as image blocks.
    """
    if anthropic is None:
        raise RuntimeError("anthropic package is not installed.")
    if not api_key:
        raise RuntimeError("ANTHROPIC_API_KEY is missing.")

    client = anthropic.Anthropic(api_key=api_key)
    image_b64_list = video_frames_to_base64_jpegs(muted_video, target_fps=CLAUDE_TARGET_FPS)

    content: List[Dict[str, Any]] = [{"type": "text", "text": VISUAL_PROMPT}]
    for b64_img in image_b64_list:
        content.append(
            {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": "image/jpeg",
                    "data": b64_img,
                },
            }
+
)
|
| 296 |
+
|
| 297 |
+
response = client.messages.create(
|
| 298 |
+
model=CLAUDE_MODEL,
|
| 299 |
+
max_tokens=300,
|
| 300 |
+
messages=[{"role": "user", "content": content}],
|
| 301 |
+
)
|
| 302 |
+
|
| 303 |
+
chunks = []
|
| 304 |
+
for block in getattr(response, "content", []):
|
| 305 |
+
if getattr(block, "type", None) == "text":
|
| 306 |
+
chunks.append(getattr(block, "text", ""))
|
| 307 |
+
raw_text = "\n".join(chunks).strip()
|
| 308 |
+
data = _extract_json_object(raw_text)
|
| 309 |
+
return _coerce_visual_result(data)
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def safe_query(query_fn, muted_video: Path, api_key: str, model_name: str) -> Optional[VisualResult]:
|
| 313 |
+
try:
|
| 314 |
+
return with_retries(query_fn, muted_video, api_key)
|
| 315 |
+
except Exception as exc:
|
| 316 |
+
logging.error("%s failed for %s: %s", model_name, muted_video.name, exc)
|
| 317 |
+
return None
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def choose_event_description(
|
| 321 |
+
gemini_result: Optional[VisualResult],
|
| 322 |
+
openai_result: Optional[VisualResult],
|
| 323 |
+
claude_result: Optional[VisualResult],
|
| 324 |
+
) -> Optional[str]:
|
| 325 |
+
if gemini_result:
|
| 326 |
+
return gemini_result.event_description
|
| 327 |
+
if openai_result:
|
| 328 |
+
return openai_result.event_description
|
| 329 |
+
if claude_result:
|
| 330 |
+
return claude_result.event_description
|
| 331 |
+
return None
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def process_video(muted_video: Path, gemini_key: str, openai_key: str, anthropic_key: str) -> Dict[str, Any]:
|
| 335 |
+
logging.info("Processing muted video: %s", muted_video.name)
|
| 336 |
+
|
| 337 |
+
gemini_result = safe_query(query_gemini_visual, muted_video, gemini_key, "Gemini")
|
| 338 |
+
openai_result = safe_query(query_openai_visual, muted_video, openai_key, "OpenAI")
|
| 339 |
+
claude_result = safe_query(query_claude_visual, muted_video, anthropic_key, "Claude")
|
| 340 |
+
|
| 341 |
+
row = {
|
| 342 |
+
"video_id": muted_video.name,
|
| 343 |
+
"t_gemini": round(gemini_result.visual_timestamp, 3) if gemini_result else None,
|
| 344 |
+
"t_gpt5_1": round(openai_result.visual_timestamp, 3) if openai_result else None,
|
| 345 |
+
"t_claude": round(claude_result.visual_timestamp, 3) if claude_result else None,
|
| 346 |
+
"event_description": choose_event_description(gemini_result, openai_result, claude_result),
|
| 347 |
+
}
|
| 348 |
+
return row
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def main() -> None:
|
| 352 |
+
setup_logging()
|
| 353 |
+
ensure_ffmpeg()
|
| 354 |
+
|
| 355 |
+
gemini_key = os.getenv("GEMINI_API_KEY", "").strip()
|
| 356 |
+
openai_key = os.getenv("OPENAI_API_KEY", "").strip()
|
| 357 |
+
anthropic_key = os.getenv("ANTHROPIC_API_KEY", "").strip()
|
| 358 |
+
|
| 359 |
+
muted_paths = prepare_muted_videos(ORIGINAL_DIR, MUTED_DIR)
|
| 360 |
+
logging.info("Total muted videos ready: %d", len(muted_paths))
|
| 361 |
+
|
| 362 |
+
all_videos = sorted(muted_paths)
|
| 363 |
+
rng = random.Random(RANDOM_SEED)
|
| 364 |
+
if len(all_videos) <= SAMPLE_SIZE:
|
| 365 |
+
selected_videos = all_videos
|
| 366 |
+
else:
|
| 367 |
+
selected_videos = rng.sample(all_videos, SAMPLE_SIZE)
|
| 368 |
+
selected_videos.sort()
|
| 369 |
+
logging.info(
|
| 370 |
+
"Testing %d videos (random sample size=%d, seed=%d).",
|
| 371 |
+
len(selected_videos),
|
| 372 |
+
SAMPLE_SIZE,
|
| 373 |
+
RANDOM_SEED,
|
| 374 |
+
)
|
| 375 |
+
|
| 376 |
+
for video in tqdm(selected_videos, desc="Visual Grounding", unit="video"):
|
| 377 |
+
row = process_video(video, gemini_key, openai_key, anthropic_key)
|
| 378 |
+
append_jsonl(OUTPUT_JSONL, row)
|
| 379 |
+
logging.info("Saved row: %s", row)
|
| 380 |
+
|
| 381 |
+
logging.info("Done. Output JSONL: %s", OUTPUT_JSONL.resolve())
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
if __name__ == "__main__":
|
| 385 |
+
main()
|
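Each appended row is a flat JSON object, so downstream disagreement analysis stays simple. A minimal consumer sketch, assuming the row schema written by process_video above (the input filename here is hypothetical):

import json

# Hypothetical downstream check: per-video spread between the three models' timestamps.
with open("visual_grounding_rows.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        ts = [row[k] for k in ("t_gemini", "t_gpt5_1", "t_claude") if row[k] is not None]
        if len(ts) >= 2:
            print(row["video_id"], round(max(ts) - min(ts), 3))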
eval/compute_videomme_metrics_from_jsonl.py
ADDED
@@ -0,0 +1,177 @@
#!/usr/bin/env python3
"""
Recompute Video-MME metrics from eval_results.jsonl (same schema as eval_videomme.py).

Example:
    python compute_videomme_metrics_from_jsonl.py \\
        --jsonl /home/ubuntu/video_source/eval_results/videomme/vmme_vanilla/eval_results.jsonl \\
        --out /home/ubuntu/video_source/eval_results/videomme/vmme_vanilla/metrics.json
"""

from __future__ import annotations

import argparse
import json
from pathlib import Path
from typing import Any, Dict, List

VIDEO_TYPES = ["short", "medium", "long"]
CATEGORIES = [
    "Knowledge",
    "Film & Television",
    "Sports Competition",
    "Artistic Performance",
    "Life Record",
    "Multilingual",
]
TASK_CATEGORIES = [
    "Temporal Perception",
    "Spatial Perception",
    "Attribute Perception",
    "Action Recognition",
    "Object Recognition",
    "OCR Problems",
    "Counting Problem",
    "Temporal Reasoning",
    "Spatial Reasoning",
    "Action Reasoning",
    "Object Reasoning",
    "Information Synopsis",
]


def load_results(path: Path) -> List[Dict[str, Any]]:
    rows = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            rows.append(json.loads(line))
    return rows


def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    total = len(results)
    if total == 0:
        return {}

    correct = sum(
        1 for r in results if str(r.get("pred_answer", "")).upper() == str(r["gt_answer"]).upper()
    )
    overall_acc = correct / total

    def acc_for(items: List[Dict[str, Any]]) -> float | None:
        if not items:
            return None
        c = sum(
            1
            for r in items
            if str(r.get("pred_answer", "")).upper() == str(r["gt_answer"]).upper()
        )
        return round(c / len(items), 4)

    per_duration: Dict[str, Any] = {}
    for vt in VIDEO_TYPES:
        subset = [r for r in results if r.get("duration") == vt]
        if subset:
            per_duration[vt] = {"accuracy": acc_for(subset), "count": len(subset)}

    per_domain: Dict[str, Any] = {}
    for cat in CATEGORIES:
        subset = [r for r in results if r.get("domain") == cat]
        if subset:
            per_domain[cat] = {"accuracy": acc_for(subset), "count": len(subset)}

    per_task: Dict[str, Any] = {}
    for task in TASK_CATEGORIES:
        subset = [r for r in results if r.get("task_type") == task]
        if subset:
            per_task[task] = {"accuracy": acc_for(subset), "count": len(subset)}

    return {
        "total_samples": total,
        "overall_accuracy": round(overall_acc, 4),
        "per_duration": per_duration,
        "per_domain": per_domain,
        "per_task_type": per_task,
    }


def print_summary(metrics: Dict[str, Any], label: str) -> None:
    print()
    print("=" * 65)
    print(f" Video-MME Summary: {label}")
    print("=" * 65)
    print(f" Total samples: {metrics['total_samples']}")
    print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")
    print(" --- Per Duration ---")
    for vt in VIDEO_TYPES:
        if vt in metrics["per_duration"]:
            d = metrics["per_duration"][vt]
            print(f" {vt:8s}: {d['accuracy']:.1%} ({d['count']} questions)")
    print(" --- Per Domain ---")
    for cat in CATEGORIES:
        if cat in metrics["per_domain"]:
            d = metrics["per_domain"][cat]
            print(f" {cat:25s}: {d['accuracy']:.1%} ({d['count']})")
    print(" --- Per Task Type ---")
    for task in TASK_CATEGORIES:
        if task in metrics["per_task_type"]:
            d = metrics["per_task_type"][task]
            print(f" {task:25s}: {d['accuracy']:.1%} ({d['count']})")
    print("=" * 65)


def main() -> None:
    p = argparse.ArgumentParser()
    p.add_argument(
        "--jsonl",
        type=Path,
        default=Path("/home/ubuntu/eval_results/videomme/vmme_vanilla/eval_results.jsonl"),
    )
    p.add_argument(
        "--out",
        type=Path,
        default=None,
        help="Write metrics JSON here (default: <jsonl_dir>/metrics.json)",
    )
    p.add_argument("--label", type=str, default="from_jsonl")
    p.add_argument("--base-model", type=str, default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
    p.add_argument("--adapter", type=str, default=None)
    p.add_argument(
        "--video-dir",
        type=str,
        default="/opt/dlami/nvme/videomme/data/data",
    )
    p.add_argument("--max-new-tokens", type=int, default=32)
    p.add_argument("--temperature", type=float, default=0.0)
    args = p.parse_args()

    out_path = args.out or (args.jsonl.parent / "metrics.json")

    results = load_results(args.jsonl)
    metrics = compute_metrics(results)
    if not metrics:
        print("[warn] No rows in jsonl.")
        return

    metrics["eval_config"] = {
        "base_model": args.base_model,
        "adapter": args.adapter,
        "video_dir": args.video_dir,
        "max_new_tokens": args.max_new_tokens,
        "temperature": args.temperature,
        "source_jsonl": str(args.jsonl.resolve()),
    }

    out_path.parent.mkdir(parents=True, exist_ok=True)
    with open(out_path, "w", encoding="utf-8") as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)

    print_summary(metrics, args.label)
    print(f"\n[written] {out_path}")


if __name__ == "__main__":
    main()
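The script depends on only five keys per row (pred_answer and gt_answer, plus the optional duration/domain/task_type breakdown fields), so compute_metrics can be smoke-tested in isolation; a minimal sketch with made-up rows:

rows = [
    {"pred_answer": "B", "gt_answer": "b", "duration": "short",
     "domain": "Knowledge", "task_type": "OCR Problems"},
    {"pred_answer": "C", "gt_answer": "A", "duration": "long",
     "domain": "Multilingual", "task_type": "Counting Problem"},
]
print(compute_metrics(rows)["overall_accuracy"])  # 0.5 (answers compare case-insensitively)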
eval/download_lvbench.py
ADDED
@@ -0,0 +1,62 @@
#!/usr/bin/env python3
"""
Download LVBench videos from Hugging Face (lmms-lab/LVBench).

Downloads chunked zip files and extracts them.

Usage:
    python download_lvbench.py
    python download_lvbench.py --output-dir /path/to/lvbench
"""

from __future__ import annotations

import argparse
import zipfile
from pathlib import Path

from huggingface_hub import hf_hub_download

REPO_ID = "lmms-lab/LVBench"
DEFAULT_OUTPUT = Path("/opt/dlami/nvme/lvbench")

VIDEO_ZIPS = [f"video_chunks/videos_chunk_{i:03d}.zip" for i in range(1, 15)]


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Download LVBench from Hugging Face.")
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT)
    return p.parse_args()


def download_and_extract(repo_file: str, extract_to: Path, label: str) -> None:
    extract_to.mkdir(parents=True, exist_ok=True)
    print(f" Downloading {label}: {repo_file} ...")
    local_zip = hf_hub_download(
        repo_id=REPO_ID, filename=repo_file, repo_type="dataset",
    )
    print(f" Extracting to {extract_to} ...")
    with zipfile.ZipFile(local_zip, "r") as zf:
        zf.extractall(extract_to)
    print(f" Done: {repo_file}")


def main() -> None:
    args = parse_args()
    out_dir = args.output_dir

    existing = sum(1 for f in out_dir.iterdir() if f.suffix == ".mp4") if out_dir.exists() else 0
    if existing >= 100:
        print(f"[skip] Already have {existing} videos in {out_dir}")
    else:
        print(f"Downloading {len(VIDEO_ZIPS)} video chunks...")
        for i, zf in enumerate(VIDEO_ZIPS, 1):
            download_and_extract(zf, out_dir, f"chunk {i}/{len(VIDEO_ZIPS)}")

    video_count = sum(1 for f in out_dir.iterdir() if f.suffix == ".mp4") if out_dir.exists() else 0
    print(f"\nDone. Total videos on disk: {video_count}")
    print(f" Videos: {out_dir}")


if __name__ == "__main__":
    main()
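Note that hf_hub_download stores each zip in the Hugging Face cache, not in --output-dir; only the extracted files land in the output directory. If the cache disk is small, the cache root can be redirected before running (the path below is only an example):

import os
os.environ["HF_HOME"] = "/opt/dlami/nvme/hf_cache"  # example location; must be set before any hub download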
eval/download_videomme.py
ADDED
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
"""
Download Video-MME videos from Hugging Face (lmms-lab/Video-MME).

Downloads chunked zip files and extracts them.

Usage:
    python download_videomme.py
    python download_videomme.py --output-dir /path/to/videomme
"""

from __future__ import annotations

import argparse
import zipfile
from pathlib import Path

from huggingface_hub import hf_hub_download

REPO_ID = "lmms-lab/Video-MME"
DEFAULT_OUTPUT = Path("/opt/dlami/nvme/videomme")

VIDEO_ZIPS = [f"videos_chunked_{i:02d}.zip" for i in range(1, 21)]
SUBTITLE_ZIP = "subtitle.zip"


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Download Video-MME from Hugging Face.")
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT)
    p.add_argument("--skip-subtitles", action="store_true")
    return p.parse_args()


def download_and_extract(repo_file: str, extract_to: Path, label: str) -> None:
    extract_to.mkdir(parents=True, exist_ok=True)
    print(f" Downloading {label}: {repo_file} ...")
    local_zip = hf_hub_download(
        repo_id=REPO_ID, filename=repo_file, repo_type="dataset",
    )
    print(f" Extracting to {extract_to} ...")
    with zipfile.ZipFile(local_zip, "r") as zf:
        zf.extractall(extract_to)
    print(f" Done: {repo_file}")


def main() -> None:
    args = parse_args()
    data_dir = args.output_dir / "data"
    sub_dir = args.output_dir / "subtitle"

    existing = sum(1 for f in data_dir.iterdir() if f.suffix in (".mp4", ".MP4", ".mkv")) if data_dir.exists() else 0
    if existing >= 900:
        print(f"[skip] Already have {existing} videos in {data_dir}")
    else:
        print(f"[1/2] Downloading {len(VIDEO_ZIPS)} video chunks...")
        for i, zf in enumerate(VIDEO_ZIPS, 1):
            download_and_extract(zf, data_dir, f"chunk {i}/{len(VIDEO_ZIPS)}")

    if args.skip_subtitles:
        print("[skip] Subtitle download skipped (--skip-subtitles)")
    else:
        existing_subs = sum(1 for f in sub_dir.iterdir() if f.suffix == ".srt") if sub_dir.exists() else 0
        if existing_subs >= 800:
            print(f"[skip] Already have {existing_subs} subtitles in {sub_dir}")
        else:
            print("[2/2] Downloading subtitles...")
            download_and_extract(SUBTITLE_ZIP, sub_dir, "subtitles")

    video_count = sum(1 for f in data_dir.iterdir() if f.suffix in (".mp4", ".MP4", ".mkv")) if data_dir.exists() else 0
    print(f"\nDone. Total videos on disk: {video_count}")
    print(f" Videos: {data_dir}")
    print(f" Subtitles: {sub_dir}")


if __name__ == "__main__":
    main()
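Elsewhere in this snapshot the Video-MME eval defaults to <output-dir>/data/data, which suggests the chunk zips carry their own top-level data/ folder; a quick post-extraction sanity check (the root path is the script's default):

from pathlib import Path
root = Path("/opt/dlami/nvme/videomme")
print(sum(1 for p in root.rglob("*") if p.suffix.lower() in (".mp4", ".mkv")))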
eval/eval_dpo_sync.py
ADDED
@@ -0,0 +1,841 @@
#!/usr/bin/env python3
"""
Automated evaluation script for audio-video sync DPO models.

Workflow:
  1. Load test set (kto_training_data_v2_test.jsonl)
  2. Parse ground truth from video filenames (synced / delay Xs / early Xs)
  3. Run model inference with a structured JSON prompt
  4. Extract predictions: JSON parse -> GPT-5.4 judge (optional) -> regex fallback
  5. Compute metrics: Sync/Desync Accuracy, Direction Accuracy, Offset MAE
  6. Output a summary table + per-sample JSONL for analysis

Usage:
    python eval_dpo_sync.py \
        --base-model Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo \
        --adapter /opt/dlami/nvme/LlamaFactory/saves/Qwen3-Omni-Instruct/dpo/qwen3omni_dpo_lora_with_audio_v4_data_8632

    # With GPT-5.4 judge fallback:
    python eval_dpo_sync.py \
        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
        --adapter /opt/dlami/nvme/.../adapter \
        --gpt-judge --openai-api-key sk-xxx

    # Evaluate base model without adapter (SFT baseline):
    python eval_dpo_sync.py \
        --base-model Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo

    # Evaluate vanilla Qwen3-Omni (no SFT, no DPO):
    python eval_dpo_sync.py \
        --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct
"""

from __future__ import annotations

import argparse
import gc
import json
import os
import re
import time
from pathlib import Path
from statistics import mean, median
from typing import Any, Dict, List, Optional, Tuple

import torch
from tqdm import tqdm

# ---------------------------------------------------------------------------
# GPT-5.4 judge helper
# ---------------------------------------------------------------------------
_openai_client = None

GPT_JUDGE_SYSTEM = """\
You are a structured-output extractor. The user will give you a model's free-text \
response about audio-video synchronization. Extract the following fields and return \
ONLY valid JSON (no markdown, no explanation):

{"synced": <bool>, "direction": "none"|"delay"|"early", "offset_sec": <float>, "t_v": <float or null>, "t_a": <float or null>, "explanation": "<one sentence>"}

Rules:
- synced: true if the model says audio and video are synchronized, false otherwise.
- direction: "delay" means audio comes AFTER the visual event; "early" means audio \
comes BEFORE the visual event; "none" if synced is true.
- offset_sec: estimated time gap in seconds. 0.0 if synced.
- t_v: the timestamp (in seconds) the model attributes to the VISUAL event. null if not mentioned.
- t_a: the timestamp (in seconds) the model attributes to the AUDIO event. null if not mentioned.
- If you cannot determine a field, use the default (true / "none" / 0.0 / null / "").
"""


def _get_openai_client(api_key: Optional[str] = None):
    global _openai_client
    if _openai_client is not None:
        return _openai_client
    key = api_key or os.environ.get("OPENAI_API_KEY")
    if not key:
        return None
    from openai import OpenAI
    _openai_client = OpenAI(api_key=key)
    return _openai_client


def gpt_extract_prediction(
    raw_output: str,
    api_key: Optional[str] = None,
    model: str = "gpt-5.4",
) -> Optional[Dict[str, Any]]:
    """Call GPT to parse raw model output into structured prediction."""
    client = _get_openai_client(api_key)
    if client is None:
        return None
    try:
        resp = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": GPT_JUDGE_SYSTEM},
                {"role": "user", "content": raw_output},
            ],
            temperature=0.0,
            max_completion_tokens=200,
        )
        text = resp.choices[0].message.content.strip()
        for pat in [
            re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
            re.compile(r"(\{.*?\})", re.DOTALL),
        ]:
            m = pat.search(text)
            if m:
                obj = json.loads(m.group(1))
                synced = obj.get("synced")
                if isinstance(synced, str):
                    synced = synced.lower() in ("true", "yes", "1")
                direction = str(obj.get("direction", "none")).lower().strip()
                if direction not in ("delay", "early", "none"):
                    direction = "none"
                t_v_raw = obj.get("t_v")
                t_a_raw = obj.get("t_a")
                pred_t_v = float(t_v_raw) if t_v_raw is not None else None
                pred_t_a = float(t_a_raw) if t_a_raw is not None else None
                return {
                    "pred_synced": bool(synced),
                    "pred_direction": direction,
                    "pred_offset_sec": float(obj.get("offset_sec", 0.0)),
                    "pred_t_v": pred_t_v,
                    "pred_t_a": pred_t_a,
                    "pred_explanation": str(obj.get("explanation", "")),
                    "parse_method": "gpt_judge",
                }
    except Exception as exc:
        print(f" [gpt-judge] API error: {exc}", flush=True)
    return None
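# Example of a judge reply the loop above accepts (values are illustrative):
#   {"synced": false, "direction": "delay", "offset_sec": 1.5, "t_v": 3.2, "t_a": 4.7,
#    "explanation": "The impact is visible before it is heard."}
# gpt_extract_prediction maps it to pred_synced=False, pred_direction="delay",
# pred_offset_sec=1.5, pred_t_v=3.2, pred_t_a=4.7, parse_method="gpt_judge".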

DATA_ROOT = Path("/opt/dlami/nvme/video_source")
ORIGINAL_ROOT = DATA_ROOT / "original"
AUDIO_ROOT = DATA_ROOT / "extracted_audio" / "original"


def set_data_root(root: Path) -> None:
    """Point path resolution at a new server layout (original/, random_shift_video/, extracted_audio/)."""
    global DATA_ROOT, ORIGINAL_ROOT, AUDIO_ROOT
    DATA_ROOT = root.resolve()
    ORIGINAL_ROOT = DATA_ROOT / "original"
    AUDIO_ROOT = DATA_ROOT / "extracted_audio" / "original"


EVAL_PROMPT = """\
Watch this video and listen to its audio carefully. \
Determine whether the audio and video tracks are synchronized. \
If they are not synchronized, identify the direction of the offset \
(audio delayed or audio early relative to video) and estimate the offset in seconds. \
Explain your reasoning."""


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Evaluate DPO sync model on test set.")
    p.add_argument("--base-model", type=str,
                   default="Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo")
    p.add_argument("--adapter", type=str, default=None,
                   help="Path to LoRA adapter. If omitted, evaluates base model only.")
    p.add_argument(
        "--data-root",
        type=Path,
        default=Path("/opt/dlami/nvme/video_source"),
        help="Directory containing original/, random_shift_video/, extracted_audio/ (video+audio for sync eval).",
    )
    p.add_argument(
        "--test-jsonl",
        type=Path,
        default=None,
        help="Test JSONL (default: <data-root>/kto_training_data_v2_test.jsonl).",
    )
    p.add_argument(
        "--output-dir",
        type=Path,
        default=None,
        help="Eval output root (default: /home/ubuntu/eval_results/sync).",
    )
    p.add_argument("--max-samples", type=int, default=-1,
                   help="Limit number of test samples (-1 = all).")
    p.add_argument("--max-new-tokens", type=int, default=256)
    p.add_argument("--temperature", type=float, default=0.0)
    p.add_argument("--batch-size", type=int, default=1)
    p.add_argument("--label", type=str, default=None,
                   help="Label for this eval run (used in output filenames).")
    p.add_argument("--gpt-judge", action="store_true", default=False,
                   help="Use GPT-5.4 as fallback parser when JSON extraction fails.")
    p.add_argument("--openai-api-key", type=str, default=None,
                   help="OpenAI API key. Falls back to OPENAI_API_KEY env var.")
    p.add_argument("--gpt-model", type=str, default="gpt-5.4",
                   help="OpenAI model to use for GPT judge (default: gpt-5.4).")
    return p.parse_args()


def parse_ground_truth(video_field: str) -> Dict[str, Any]:
    """Extract ground truth sync label from video filename."""
    m_delay = re.search(r"_delay_([\d.]+)s\.mp4", video_field)
    m_early = re.search(r"_early_([\d.]+)s\.mp4", video_field)
    if m_delay:
        return {"synced": False, "direction": "delay", "offset_sec": float(m_delay.group(1))}
    elif m_early:
        return {"synced": False, "direction": "early", "offset_sec": float(m_early.group(1))}
    else:
        return {"synced": True, "direction": "none", "offset_sec": 0.0}

def resolve_video_path(video_field: str) -> str:
    if os.path.isabs(video_field) and os.path.exists(video_field):
        return video_field
    candidate_dirs = [
        ORIGINAL_ROOT / "uag_oops",
        DATA_ROOT / "random_shift_video" / "delay",
        DATA_ROOT / "random_shift_video" / "early",
        ORIGINAL_ROOT,
    ]
    for d in candidate_dirs:
        c = d / video_field
        if c.exists():
            return str(c)
    return str(ORIGINAL_ROOT / "uag_oops" / video_field)


def resolve_audio_path(video_path: str) -> str:
    video_p = Path(video_path)
    try:
        rel = video_p.relative_to(DATA_ROOT)
    except ValueError:
        rel = Path(video_p.name)
    audio_path = DATA_ROOT / "extracted_audio" / rel.with_suffix(".wav")
    if audio_path.exists():
        return str(audio_path)
    base_stem = re.sub(r"_(delay|early)_[\d.]+s$", "", video_p.stem)
    fallback = DATA_ROOT / "extracted_audio" / "original" / "uag_oops" / (base_stem + ".wav")
    if fallback.exists():
        return str(fallback)
    return str(audio_path)


def extract_timestamps(text: str) -> Tuple[Optional[float], Optional[float]]:
    """Extract (t_visual, t_audio) from text using 'at/around/about Xs' pattern.

    If >= 2 timestamps found, first is t_v, second is t_a.
    If exactly 1 timestamp found (common for synced samples), treat as both t_v and t_a.
    """
    text_lower = text.lower()
    all_times = [(m.start(), float(m.group(1)))
                 for m in re.finditer(r"(?:at|around|about)\s+([\d]+\.?\d*)\s*s", text_lower)]
    if len(all_times) >= 2:
        return (all_times[0][1], all_times[1][1])
    if len(all_times) == 1:
        return (all_times[0][1], all_times[0][1])
    return (None, None)
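# Examples:
#   extract_timestamps("the ball lands at 3.2s and the thud arrives around 4.7s") -> (3.2, 4.7)
#   extract_timestamps("both happen at 5s") -> (5.0, 5.0)  # single mention used for both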

def load_test_data(path: Path, max_samples: int) -> List[Dict[str, Any]]:
    data = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            obj = json.loads(line)
            video_path = resolve_video_path(obj["video"])
            audio_path = resolve_audio_path(video_path)
            gt = parse_ground_truth(obj["video"])
            gt_t_v, gt_t_a = extract_timestamps(obj.get("chosen", ""))
            data.append({
                "video": obj["video"],
                "video_path": video_path,
                "audio_path": audio_path,
                "prompt": obj["prompt"],
                "chosen": obj["chosen"],
                "rejected": obj["rejected"],
                "gt_synced": gt["synced"],
                "gt_direction": gt["direction"],
                "gt_offset_sec": gt["offset_sec"],
                "gt_t_v": gt_t_v,
                "gt_t_a": gt_t_a,
            })
    if max_samples > 0:
        data = data[:max_samples]
    return data


def extract_prediction(text: str) -> Dict[str, Any]:
    """Extract structured prediction from model output text."""
    text = text.strip()

    # Try JSON extraction
    for pattern in [
        re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
        re.compile(r"(\{[^{}]*\"synced\"[^{}]*\})", re.DOTALL),
        re.compile(r"(\{.*?\})", re.DOTALL),
    ]:
        m = pattern.search(text)
        if m:
            try:
                obj = json.loads(m.group(1))
                synced = obj.get("synced")
                if isinstance(synced, str):
                    synced = synced.lower() in ("true", "yes", "1")
                direction = str(obj.get("direction", "none")).lower().strip()
                if direction not in ("delay", "early", "none"):
                    direction = "none"
                offset = float(obj.get("offset_sec", 0.0))
                explanation = str(obj.get("explanation", ""))
                t_v_raw = obj.get("t_v")
                t_a_raw = obj.get("t_a")
                return {
                    "pred_synced": bool(synced),
                    "pred_direction": direction,
                    "pred_offset_sec": offset,
                    "pred_t_v": float(t_v_raw) if t_v_raw is not None else None,
                    "pred_t_a": float(t_a_raw) if t_a_raw is not None else None,
                    "pred_explanation": explanation,
                    "parse_method": "json",
                }
            except (json.JSONDecodeError, ValueError, TypeError):
                continue

    # Fallback: structured extraction from free text
    text_lower = text.lower()
    synced = None
    direction = "none"
    offset = 0.0

    pred_t_v, pred_t_a = extract_timestamps(text)

    # Step 1: Determine sync vs desync
    desync_kws = [
        "not synchronized", "not aligned", "desync", "mismatch", "misalign",
        "not in sync", "out of sync", "clearly not", "not well aligned",
        "are not aligned", "audio and visual event are clearly not",
    ]
    sync_kws = [
        "synchronized", "well aligned", "well-aligned", "in sync",
        "appear synchronized", "appears synchronized", "closely aligned",
        "audio and video are aligned", "matches closely",
    ]
    if any(kw in text_lower for kw in desync_kws):
        synced = False
    elif any(kw in text_lower for kw in sync_kws):
        synced = True

    if synced is False:
        # Step 2a: Try explicit direction keywords
        delay_kws = ["audio delayed", "audio lags", "audio comes after", "sound comes after",
                     "sound is heard later", "audio is delayed", "sound follows"]
        early_kws = ["audio early", "audio leads", "audio comes before", "sound comes before",
                     "audio precedes", "sound is heard before", "sound precedes", "audio is early"]
        if any(kw in text_lower for kw in delay_kws):
            direction = "delay"
        elif any(kw in text_lower for kw in early_kws):
            direction = "early"

        # Step 2b: If no explicit keyword, use extracted timestamps to infer direction
        if direction == "none" and pred_t_v is not None and pred_t_a is not None and pred_t_v != pred_t_a:
            if pred_t_a > pred_t_v:
                direction = "delay"
            else:
                direction = "early"
            offset = abs(pred_t_a - pred_t_v)

        # Step 2c: Extract offset from gap/separation mentions if not yet found
        if offset == 0.0:
            offset_match = re.search(
                r"(?:gap|separation|offset|mismatch|differ\w*)\s*(?:of\s+)?(?:about\s+|roughly\s+|approximately\s+)?"
                r"([\d]+\.?\d*)\s*s",
                text_lower,
            )
            if not offset_match:
                offset_match = re.search(
                    r"(?:about\s+|roughly\s+|approximately\s+)?([\d]+\.?\d*)\s*s\s*"
                    r"(?:gap|separation|offset|mismatch|differ)",
                    text_lower,
                )
            if offset_match:
                offset = float(offset_match.group(1))

    if synced is None:
        synced = True

    return {
        "pred_synced": synced,
        "pred_direction": direction,
        "pred_offset_sec": offset,
        "pred_t_v": pred_t_v,
        "pred_t_a": pred_t_a,
        "pred_explanation": "",
        "parse_method": "regex_fallback",
    }
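# Regex-fallback example (no JSON anywhere in the model output):
#   "They are clearly not in sync: the splash is visible at 2.0s but heard around 3.5s."
#   -> synced=False ("not in sync"), t_v=2.0, t_a=3.5, direction="delay" (t_a > t_v),
#      offset_sec=1.5, parse_method="regex_fallback".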

def load_model(base_model: str, adapter: Optional[str]):
    import json as _json
    from transformers import (
        AutoConfig,
        AutoProcessor,
        Qwen3OmniMoeForConditionalGeneration,
        Qwen3OmniMoeThinkerConfig,
        Qwen3OmniMoeThinkerForConditionalGeneration,
    )

    print(f"[load] Base model: {base_model}")
    processor = AutoProcessor.from_pretrained(base_model, trust_remote_code=True)

    model_path = Path(base_model)
    cfg_path = model_path / "config.json" if model_path.exists() else None

    model_type = None
    if cfg_path and cfg_path.exists():
        with open(cfg_path, "r") as f:
            model_type = _json.load(f).get("model_type")

    if not model_type:
        try:
            from huggingface_hub import hf_hub_download
            cached = hf_hub_download(base_model, "config.json")
            with open(cached, "r") as f:
                model_type = _json.load(f).get("model_type")
        except Exception:
            pass

    print(f"[load] Detected model_type: {model_type}")

    if model_type == "qwen3_omni_moe_thinker":
        config = Qwen3OmniMoeThinkerConfig.from_pretrained(base_model)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )
    else:
        config = AutoConfig.from_pretrained(base_model, trust_remote_code=True)
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )

    if adapter:
        from peft import PeftModel
        from safetensors.torch import load_file, save_file
        import tempfile, shutil

        print(f"[load] Loading LoRA adapter: {adapter}")

        adapter_cfg_path = Path(adapter) / "adapter_config.json"
        with open(adapter_cfg_path) as f:
            adapter_cfg = json.loads(f.read())
        target_modules = adapter_cfg.get("target_modules", [])
        needs_remap = any(t.startswith("model.layers.") for t in target_modules) and model_type != "qwen3_omni_moe_thinker"

        if needs_remap:
            print("[load] Adapter was trained on thinker-only model; remapping keys to full model...")
            tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
            for fn in Path(adapter).iterdir():
                if fn.is_dir():
                    continue
                if fn.name == "adapter_config.json":
                    new_targets = []
                    for t in target_modules:
                        if t.startswith("model.layers."):
                            new_targets.append("thinker." + t)
                        elif t[0].isdigit():
                            new_targets.append("thinker.model.layers." + t)
                        else:
                            new_targets.append(t)
                    adapter_cfg["target_modules"] = new_targets
                    with open(tmp_dir / "adapter_config.json", "w") as f:
                        json.dump(adapter_cfg, f, indent=2)
                elif fn.suffix == ".safetensors" and "adapter" in fn.name:
                    tensors = load_file(str(fn))
                    remapped = {}
                    for k, v in tensors.items():
                        if ".model.layers." in k and ".thinker." not in k:
                            new_k = k.replace("base_model.model.model.layers.", "base_model.model.thinker.model.layers.")
                            remapped[new_k] = v
                        else:
                            remapped[k] = v
                    save_file(remapped, str(tmp_dir / fn.name))
                else:
                    shutil.copy2(str(fn), str(tmp_dir / fn.name))
            adapter = str(tmp_dir)

        model = PeftModel.from_pretrained(model, adapter)
        model = model.merge_and_unload()

    if hasattr(model, "disable_talker"):
        model.disable_talker()
    model.eval()
    print(f"[load] Model ready ({type(model).__name__}). Device: {model.device}")
    return model, processor
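# Remap example for one LoRA tensor key (key name illustrative of typical PEFT naming):
#   base_model.model.model.layers.0.self_attn.q_proj.lora_A.weight
#     -> base_model.model.thinker.model.layers.0.self_attn.q_proj.lora_A.weight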

def run_inference(model, processor, video_path: str, audio_path: str,
                  max_new_tokens: int, temperature: float) -> str:
    from qwen_omni_utils import process_mm_info
    import tempfile

    tmp_dir = tempfile.mkdtemp(prefix="eval_masked_")
    masked_video = os.path.join(tmp_dir, "clip.mp4")
    masked_audio = os.path.join(tmp_dir, "clip.wav")
    os.symlink(os.path.abspath(video_path), masked_video)
    os.symlink(os.path.abspath(audio_path), masked_audio)

    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "video", "video": masked_video},
                {"type": "audio", "audio": masked_audio},
                {"type": "text", "text": EVAL_PROMPT},
            ],
        }
    ]

    text = processor.apply_chat_template(
        conversation, add_generation_prompt=True, tokenize=False,
    )
    audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
    inputs = processor(
        text=text, audio=audios, images=images, videos=videos,
        return_tensors="pt", padding=True, use_audio_in_video=False,
    )

    model_dtype = next(model.parameters()).dtype
    converted = {}
    for k, v in inputs.items():
        if hasattr(v, "to"):
            v = v.to(model.device)
            if torch.is_floating_point(v):
                v = v.to(model_dtype)
        converted[k] = v
    inputs = converted

    from transformers import Qwen3OmniMoeThinkerForConditionalGeneration
    is_thinker = isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration)
    if is_thinker:
        gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
    else:
        gen_kwargs = {
            "thinker_max_new_tokens": max_new_tokens,
            "use_audio_in_video": False,
            "return_audio": False,
            "do_sample": temperature > 0,
        }
    if temperature > 0:
        gen_kwargs["temperature"] = temperature
        gen_kwargs["top_p"] = 0.9

    with torch.inference_mode():
        output_ids = model.generate(**inputs, **gen_kwargs)

    prompt_len = inputs["input_ids"].shape[1]
    response = processor.batch_decode(
        output_ids[:, prompt_len:], skip_special_tokens=True,
    )[0].strip()

    import shutil
    shutil.rmtree(tmp_dir, ignore_errors=True)

    return response


def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Compute all evaluation metrics."""
    total = len(results)
    if total == 0:
        return {}

    # 1. Sync/Desync binary accuracy
    sync_correct = sum(1 for r in results if r["pred_synced"] == r["gt_synced"])
    sync_acc = sync_correct / total

    # 2. Direction accuracy (on desync samples only)
    desync_samples = [r for r in results if not r["gt_synced"]]
    if desync_samples:
        dir_correct = sum(1 for r in desync_samples if r["pred_direction"] == r["gt_direction"])
        dir_acc = dir_correct / len(desync_samples)
    else:
        dir_acc = None

    # 3. Full 3-class accuracy (synced / delay / early)
    def label(r, prefix):
        if r[f"{prefix}synced"]:
            return "synced"
        return r[f"{prefix}direction"]
    three_class_correct = sum(1 for r in results if label(r, "pred_") == label(r, "gt_"))
    three_class_acc = three_class_correct / total

    # 4. Offset MAE (on desync samples where model also predicts desync)
    offset_errors = []
    for r in desync_samples:
        if not r["pred_synced"] and r["pred_offset_sec"] > 0:
            offset_errors.append(abs(r["pred_offset_sec"] - r["gt_offset_sec"]))
    offset_mae = mean(offset_errors) if offset_errors else None
    offset_median = median(offset_errors) if offset_errors else None

    # 5. Per-category breakdown
    synced_samples = [r for r in results if r["gt_synced"]]
    delay_samples = [r for r in results if r["gt_direction"] == "delay"]
    early_samples = [r for r in results if r["gt_direction"] == "early"]

    synced_acc = (sum(1 for r in synced_samples if r["pred_synced"]) / len(synced_samples)) if synced_samples else None
    delay_acc = (sum(1 for r in delay_samples if not r["pred_synced"] and r["pred_direction"] == "delay") / len(delay_samples)) if delay_samples else None
    early_acc = (sum(1 for r in early_samples if not r["pred_synced"] and r["pred_direction"] == "early") / len(early_samples)) if early_samples else None

    # 6. Tolerance-based offset accuracy
    within_05 = sum(1 for e in offset_errors if e <= 0.5) if offset_errors else 0
    within_10 = sum(1 for e in offset_errors if e <= 1.0) if offset_errors else 0

    # 7. Parse method stats
    json_parsed = sum(1 for r in results if r.get("parse_method") == "json")
    regex_parsed = sum(1 for r in results if r.get("parse_method") == "regex_fallback")
    gpt_parsed = sum(1 for r in results if r.get("parse_method") == "gpt_judge")

    # 8. Timestamp MAE (t_v and t_a separately)
    tv_errors = []
    ta_errors = []
    for r in results:
        gt_tv = r.get("gt_t_v")
        gt_ta = r.get("gt_t_a")
        pred_tv = r.get("pred_t_v")
        pred_ta = r.get("pred_t_a")
        if gt_tv is not None and pred_tv is not None:
            tv_errors.append(abs(pred_tv - gt_tv))
        if gt_ta is not None and pred_ta is not None:
            ta_errors.append(abs(pred_ta - gt_ta))
    tv_mae = round(mean(tv_errors), 4) if tv_errors else None
    ta_mae = round(mean(ta_errors), 4) if ta_errors else None
    tv_median = round(median(tv_errors), 4) if tv_errors else None
    ta_median = round(median(ta_errors), 4) if ta_errors else None

    return {
        "total_samples": total,
        "sync_desync_accuracy": round(sync_acc, 4),
        "three_class_accuracy": round(three_class_acc, 4),
        "direction_accuracy_on_desync": round(dir_acc, 4) if dir_acc is not None else None,
        "per_category": {
            "synced_accuracy": round(synced_acc, 4) if synced_acc is not None else None,
            "delay_accuracy": round(delay_acc, 4) if delay_acc is not None else None,
            "early_accuracy": round(early_acc, 4) if early_acc is not None else None,
            "synced_count": len(synced_samples),
            "delay_count": len(delay_samples),
            "early_count": len(early_samples),
        },
        "offset_mae_sec": round(offset_mae, 4) if offset_mae is not None else None,
        "offset_median_sec": round(offset_median, 4) if offset_median is not None else None,
        "offset_within_0.5s": within_05,
        "offset_within_1.0s": within_10,
        "offset_evaluated_count": len(offset_errors),
        "timestamp_tv_mae_sec": tv_mae,
        "timestamp_ta_mae_sec": ta_mae,
        "timestamp_tv_median_sec": tv_median,
        "timestamp_ta_median_sec": ta_median,
        "timestamp_evaluated_tv": len(tv_errors),
        "timestamp_evaluated_ta": len(ta_errors),
        "parse_stats": {"json": json_parsed, "regex_fallback": regex_parsed, "gpt_judge": gpt_parsed},
    }
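# Smoke test with two hypothetical rows (one synced hit, one missed delay):
#   rows = [
#       {"pred_synced": True, "gt_synced": True, "pred_direction": "none",
#        "gt_direction": "none", "pred_offset_sec": 0.0, "gt_offset_sec": 0.0},
#       {"pred_synced": True, "gt_synced": False, "pred_direction": "none",
#        "gt_direction": "delay", "pred_offset_sec": 0.0, "gt_offset_sec": 1.0},
#   ]
#   compute_metrics(rows)["sync_desync_accuracy"]  # -> 0.5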

def print_summary(metrics: Dict[str, Any], label: str) -> None:
    print()
    print(f"{'=' * 60}")
    print(f" Eval Summary: {label}")
    print(f"{'=' * 60}")
    print(f" Total samples: {metrics['total_samples']}")
    print(f" Sync/Desync Accuracy: {metrics['sync_desync_accuracy']:.1%}")
    print(f" 3-Class Accuracy: {metrics['three_class_accuracy']:.1%}")
    if metrics["direction_accuracy_on_desync"] is not None:
        print(f" Direction Acc (desync): {metrics['direction_accuracy_on_desync']:.1%}")
    print(f" ─── Per Category ───")
    pc = metrics["per_category"]
    if pc["synced_accuracy"] is not None:
        print(f" Synced correct: {pc['synced_accuracy']:.1%} ({pc['synced_count']} samples)")
    if pc["delay_accuracy"] is not None:
        print(f" Delay correct: {pc['delay_accuracy']:.1%} ({pc['delay_count']} samples)")
    if pc["early_accuracy"] is not None:
        print(f" Early correct: {pc['early_accuracy']:.1%} ({pc['early_count']} samples)")
    print(f" ─── Offset Estimation ───")
    if metrics["offset_mae_sec"] is not None:
        print(f" MAE: {metrics['offset_mae_sec']:.3f}s")
        print(f" Median Error: {metrics['offset_median_sec']:.3f}s")
        print(f" Within 0.5s: {metrics['offset_within_0.5s']} / {metrics['offset_evaluated_count']}")
        print(f" Within 1.0s: {metrics['offset_within_1.0s']} / {metrics['offset_evaluated_count']}")
    else:
        print(f" (no valid offset predictions)")
    print(f" ─── Timestamp Estimation ───")
    if metrics.get("timestamp_tv_mae_sec") is not None:
        print(f" t_v MAE: {metrics['timestamp_tv_mae_sec']:.3f}s ({metrics['timestamp_evaluated_tv']} samples)")
        print(f" t_v Median Error: {metrics['timestamp_tv_median_sec']:.3f}s")
    else:
        print(f" t_v: (no valid pairs)")
    if metrics.get("timestamp_ta_mae_sec") is not None:
        print(f" t_a MAE: {metrics['timestamp_ta_mae_sec']:.3f}s ({metrics['timestamp_evaluated_ta']} samples)")
        print(f" t_a Median Error: {metrics['timestamp_ta_median_sec']:.3f}s")
    else:
        print(f" t_a: (no valid pairs)")
    print(f" ─── Parse Stats ───")
    ps = metrics["parse_stats"]
    print(f" JSON parsed: {ps['json']}")
    print(f" GPT judge: {ps.get('gpt_judge', 0)}")
    print(f" Regex fallback: {ps['regex_fallback']}")
    print(f"{'=' * 60}")


def main() -> None:
    args = parse_args()
    set_data_root(args.data_root)
    test_jsonl = args.test_jsonl or (DATA_ROOT / "kto_training_data_v2_test.jsonl")
    output_dir = args.output_dir or Path("/home/ubuntu/eval_results/sync")

    if args.gpt_judge:
        client = _get_openai_client(args.openai_api_key)
        if client is None:
            print("[ERROR] --gpt-judge requires OPENAI_API_KEY env var or --openai-api-key argument.")
            raise SystemExit(1)
        try:
            test_resp = client.chat.completions.create(
                model=args.gpt_model,
                messages=[{"role": "user", "content": "Say OK"}],
                max_completion_tokens=5,
            )
            print(f"[gpt-judge] API verified. Model: {args.gpt_model}")
        except Exception as exc:
            print(f"[ERROR] GPT API check failed: {exc}")
            raise SystemExit(1)

    label = args.label or (Path(args.adapter).name if args.adapter else Path(args.base_model).name)

    out_dir = output_dir / label
    out_dir.mkdir(parents=True, exist_ok=True)
    results_jsonl = out_dir / "eval_results.jsonl"
    metrics_json = out_dir / "metrics.json"
    summary_txt = out_dir / "summary.txt"

    # Load data
    test_data = load_test_data(test_jsonl, args.max_samples)
test_data = load_test_data(test_jsonl, args.max_samples)
|
| 734 |
+
print(f"[data] Loaded {len(test_data)} test samples")
|
| 735 |
+
|
| 736 |
+
# Resume support
|
| 737 |
+
processed = set()
|
| 738 |
+
if results_jsonl.exists():
|
| 739 |
+
with open(results_jsonl) as f:
|
| 740 |
+
for line in f:
|
| 741 |
+
obj = json.loads(line)
|
| 742 |
+
processed.add(obj["video"])
|
| 743 |
+
print(f"[resume] {len(processed)} already processed, skipping")
|
| 744 |
+
|
| 745 |
+
# Load model
|
| 746 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 747 |
+
|
| 748 |
+
# Inference loop
|
| 749 |
+
for item in tqdm(test_data, desc="Evaluating", unit="sample"):
|
| 750 |
+
if item["video"] in processed:
|
| 751 |
+
continue
|
| 752 |
+
|
| 753 |
+
if not os.path.exists(item["video_path"]):
|
| 754 |
+
print(f" [skip] Video not found: {item['video_path']}")
|
| 755 |
+
continue
|
| 756 |
+
|
| 757 |
+
try:
|
| 758 |
+
raw_output = run_inference(
|
| 759 |
+
model, processor, item["video_path"], item["audio_path"],
|
| 760 |
+
args.max_new_tokens, args.temperature,
|
| 761 |
+
)
|
| 762 |
+
except Exception as exc:
|
| 763 |
+
import traceback
|
| 764 |
+
print(f" [error] {item['video']}: {exc}")
|
| 765 |
+
traceback.print_exc()
|
| 766 |
+
raw_output = ""
|
| 767 |
+
|
| 768 |
+
if args.gpt_judge and raw_output:
|
| 769 |
+
gpt_pred = gpt_extract_prediction(
|
| 770 |
+
raw_output, api_key=args.openai_api_key, model=args.gpt_model,
|
| 771 |
+
)
|
| 772 |
+
if gpt_pred is not None:
|
| 773 |
+
pred = gpt_pred
|
| 774 |
+
else:
|
| 775 |
+
pred = extract_prediction(raw_output)
|
| 776 |
+
else:
|
| 777 |
+
pred = extract_prediction(raw_output)
|
| 778 |
+
|
| 779 |
+
result = {
|
| 780 |
+
"video": item["video"],
|
| 781 |
+
"video_path": item["video_path"],
|
| 782 |
+
"gt_synced": item["gt_synced"],
|
| 783 |
+
"gt_direction": item["gt_direction"],
|
| 784 |
+
"gt_offset_sec": item["gt_offset_sec"],
|
| 785 |
+
"gt_t_v": item["gt_t_v"],
|
| 786 |
+
"gt_t_a": item["gt_t_a"],
|
| 787 |
+
"pred_synced": pred["pred_synced"],
|
| 788 |
+
"pred_direction": pred["pred_direction"],
|
| 789 |
+
"pred_offset_sec": pred["pred_offset_sec"],
|
| 790 |
+
"pred_t_v": pred.get("pred_t_v"),
|
| 791 |
+
"pred_t_a": pred.get("pred_t_a"),
|
| 792 |
+
"pred_explanation": pred["pred_explanation"],
|
| 793 |
+
"parse_method": pred["parse_method"],
|
| 794 |
+
"raw_output": raw_output,
|
| 795 |
+
}
|
| 796 |
+
|
| 797 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 798 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 799 |
+
|
| 800 |
+
processed.add(item["video"])
|
| 801 |
+
gc.collect()
|
| 802 |
+
torch.cuda.empty_cache()
|
| 803 |
+
|
| 804 |
+
# Load all results and compute metrics
|
| 805 |
+
all_results = []
|
| 806 |
+
with open(results_jsonl) as f:
|
| 807 |
+
for line in f:
|
| 808 |
+
all_results.append(json.loads(line))
|
| 809 |
+
|
| 810 |
+
metrics = compute_metrics(all_results)
|
| 811 |
+
metrics["eval_config"] = {
|
| 812 |
+
"base_model": args.base_model,
|
| 813 |
+
"adapter": args.adapter,
|
| 814 |
+
"data_root": str(args.data_root),
|
| 815 |
+
"test_jsonl": str(test_jsonl),
|
| 816 |
+
"total_test_samples": len(test_data),
|
| 817 |
+
"max_new_tokens": args.max_new_tokens,
|
| 818 |
+
"temperature": args.temperature,
|
| 819 |
+
"gpt_judge": args.gpt_judge,
|
| 820 |
+
"gpt_model": args.gpt_model if args.gpt_judge else None,
|
| 821 |
+
}
|
| 822 |
+
|
| 823 |
+
with open(metrics_json, "w", encoding="utf-8") as f:
|
| 824 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 825 |
+
|
| 826 |
+
print_summary(metrics, label)
|
| 827 |
+
|
| 828 |
+
with open(summary_txt, "w", encoding="utf-8") as f:
|
| 829 |
+
import io, contextlib
|
| 830 |
+
buf = io.StringIO()
|
| 831 |
+
with contextlib.redirect_stdout(buf):
|
| 832 |
+
print_summary(metrics, label)
|
| 833 |
+
f.write(buf.getvalue())
|
| 834 |
+
|
| 835 |
+
print(f"\n[output] Results JSONL: {results_jsonl}")
|
| 836 |
+
print(f"[output] Metrics JSON: {metrics_json}")
|
| 837 |
+
print(f"[output] Summary: {summary_txt}")
|
| 838 |
+
|
| 839 |
+
|
| 840 |
+
if __name__ == "__main__":
|
| 841 |
+
main()
|
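
Note: each run of this script writes a self-contained metrics.json under <output-dir>/<label>/, so two checkpoints can be compared with a few lines of JSON plumbing. A minimal sketch, assuming the default output root above (the run labels are examples borrowed from the usage strings, not required names):

    import json
    from pathlib import Path

    root = Path("/home/ubuntu/eval_results/sync")
    for label in ["vanilla_qwen3omni", "dpo_v4_8632"]:  # example run labels
        m = json.loads((root / label / "metrics.json").read_text())
        print(label,
              f"sync/desync={m['sync_desync_accuracy']:.1%}",
              f"offset_mae={m['offset_mae_sec']}")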

eval/eval_lvbench.py ADDED
@@ -0,0 +1,647 @@
#!/usr/bin/env python3
"""
Evaluate Qwen3-Omni models on the LVBench benchmark.

LVBench: 1549 multiple-choice questions over 103 long videos.
Metrics: accuracy (overall, per video type).

Usage:
    python eval_lvbench.py \
        --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct \
        --video-dir /opt/dlami/nvme/lvbench \
        --max-samples 30 --label vanilla_qwen3omni

    python eval_lvbench.py \
        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
        --adapter /opt/dlami/nvme/LlamaFactory/saves/.../adapter \
        --label dpo_v4_8632
"""

from __future__ import annotations

import argparse
import gc
import json
import os
import re
import tempfile
import shutil
from pathlib import Path
from typing import Any, Dict, List, Optional

import torch
from tqdm import tqdm

DEFAULT_VIDEO_DIR = Path("/opt/dlami/nvme/lvbench")
DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/eval_results/lvbench")

VIDEO_TYPES = ["cartoon", "documentary", "live", "selfmedia", "sport", "tv"]

MCQ_PROMPT = (
    "Select the best answer to the following multiple-choice question "
    "based on the video. Respond with only the letter (A, B, C, or D) "
    "of the correct option.\n"
)


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Evaluate on LVBench benchmark.")
    p.add_argument("--base-model", type=str,
                   default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
    p.add_argument("--adapter", type=str, default=None)
    p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
    p.add_argument("--max-samples", type=int, default=-1)
    p.add_argument("--max-new-tokens", type=int, default=32)
    p.add_argument("--temperature", type=float, default=0.0)
    p.add_argument("--label", type=str, default=None)
    p.add_argument("--vllm", action="store_true", default=False,
                   help="Use vLLM offline batch inference instead of transformers (much faster).")
    p.add_argument("--tp", type=int, default=None,
                   help="Tensor parallel size for vLLM (default: all visible GPUs).")
    p.add_argument("--batch-size", type=int, default=16,
                   help="Batch size for vLLM generate().")
    p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
    p.add_argument("--max-model-len", type=int, default=65536)
    return p.parse_args()


# ---------------------------------------------------------------------------
# Model loading — identical to eval_videomme.py / eval_dpo_sync.py
# ---------------------------------------------------------------------------
def load_model(base_model: str, adapter: Optional[str]):
    import json as _json
    from transformers import (
        AutoConfig,
        AutoProcessor,
        Qwen3OmniMoeForConditionalGeneration,
        Qwen3OmniMoeThinkerConfig,
        Qwen3OmniMoeThinkerForConditionalGeneration,
    )

    print(f"[load] Base model: {base_model}")
    processor = AutoProcessor.from_pretrained(base_model, trust_remote_code=True)

    model_path = Path(base_model)
    cfg_path = model_path / "config.json" if model_path.exists() else None

    model_type = None
    if cfg_path and cfg_path.exists():
        with open(cfg_path, "r") as f:
            model_type = _json.load(f).get("model_type")
    if not model_type:
        try:
            from huggingface_hub import hf_hub_download
            cached = hf_hub_download(base_model, "config.json")
            with open(cached, "r") as f:
                model_type = _json.load(f).get("model_type")
        except Exception:
            pass

    print(f"[load] Detected model_type: {model_type}")

    if model_type == "qwen3_omni_moe_thinker":
        config = Qwen3OmniMoeThinkerConfig.from_pretrained(base_model)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )
    else:
        config = AutoConfig.from_pretrained(base_model, trust_remote_code=True)
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )

    if adapter:
        from peft import PeftModel
        from safetensors.torch import load_file, save_file

        print(f"[load] Loading LoRA adapter: {adapter}")
        adapter_cfg_path = Path(adapter) / "adapter_config.json"
        with open(adapter_cfg_path) as f:
            adapter_cfg = _json.loads(f.read())
        target_modules = adapter_cfg.get("target_modules", [])
        needs_remap = (
            any(t.startswith("model.layers.") for t in target_modules)
            and model_type != "qwen3_omni_moe_thinker"
        )

        if needs_remap:
            print("[load] Adapter trained on thinker-only; remapping keys...")
            tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
            for fn in Path(adapter).iterdir():
                if fn.is_dir():
                    continue
                if fn.name == "adapter_config.json":
                    new_targets = []
                    for t in target_modules:
                        if t.startswith("model.layers."):
                            new_targets.append("thinker." + t)
                        elif t[0].isdigit():
                            new_targets.append("thinker.model.layers." + t)
                        else:
                            new_targets.append(t)
                    adapter_cfg["target_modules"] = new_targets
                    with open(tmp_dir / "adapter_config.json", "w") as f:
                        json.dump(adapter_cfg, f, indent=2)
                elif fn.suffix == ".safetensors" and "adapter" in fn.name:
                    tensors = load_file(str(fn))
                    remapped = {}
                    for k, v in tensors.items():
                        if ".model.layers." in k and ".thinker." not in k:
                            new_k = k.replace(
                                "base_model.model.model.layers.",
                                "base_model.model.thinker.model.layers.",
                            )
                            remapped[new_k] = v
                        else:
                            remapped[k] = v
                    save_file(remapped, str(tmp_dir / fn.name))
                else:
                    shutil.copy2(str(fn), str(tmp_dir / fn.name))
            adapter = str(tmp_dir)

        model = PeftModel.from_pretrained(model, adapter)
        model = model.merge_and_unload()

    if hasattr(model, "disable_talker"):
        model.disable_talker()
    model.eval()
    print(f"[load] Model ready ({type(model).__name__}). Device: {model.device}")
    return model, processor
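
# Illustration of the remap rule above (not part of the runtime path; module
# names below are typical PEFT/LoRA keys, shown only as an example): a
# thinker-only adapter key such as
#     base_model.model.model.layers.0.self_attn.q_proj.lora_A.weight
# is rewritten to
#     base_model.model.thinker.model.layers.0.self_attn.q_proj.lora_A.weight
# so PEFT can match it against the full Qwen3OmniMoe module tree.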


# ---------------------------------------------------------------------------
# Inference
# ---------------------------------------------------------------------------
def run_inference(model, processor, video_path: str, prompt: str,
                  max_new_tokens: int, temperature: float) -> str:
    from qwen_omni_utils import process_mm_info

    tmp_dir = tempfile.mkdtemp(prefix="eval_lvb_")
    masked_video = os.path.join(tmp_dir, "clip.mp4")
    os.symlink(os.path.abspath(video_path), masked_video)

    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "video", "video": masked_video},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    text = processor.apply_chat_template(
        conversation, add_generation_prompt=True, tokenize=False,
    )
    audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
    inputs = processor(
        text=text, audio=audios, images=images, videos=videos,
        return_tensors="pt", padding=True, use_audio_in_video=False,
    )

    model_dtype = next(model.parameters()).dtype
    converted = {}
    for k, v in inputs.items():
        if hasattr(v, "to"):
            v = v.to(model.device)
            if torch.is_floating_point(v):
                v = v.to(model_dtype)
        converted[k] = v
    inputs = converted

    from transformers import Qwen3OmniMoeThinkerForConditionalGeneration
    is_thinker = isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration)
    if is_thinker:
        gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
    else:
        gen_kwargs = {
            "thinker_max_new_tokens": max_new_tokens,
            "use_audio_in_video": False,
            "return_audio": False,
            "do_sample": temperature > 0,
        }
    if temperature > 0:
        gen_kwargs["temperature"] = temperature
        gen_kwargs["top_p"] = 0.9

    with torch.inference_mode():
        output_ids = model.generate(**inputs, **gen_kwargs)

    if isinstance(output_ids, tuple):
        output_ids = output_ids[0]

    prompt_len = inputs["input_ids"].shape[1]
    response = processor.batch_decode(
        output_ids[:, prompt_len:], skip_special_tokens=True,
    )[0].strip()

    shutil.rmtree(tmp_dir, ignore_errors=True)
    return response


def preprocess_video_for_vllm(video_path: str):
    """Extract video frames using qwen_omni_utils, return numpy array for vLLM.
    Caps at 128 frames to stay within vLLM encoder cache (62720 tokens).
    """
    from qwen_omni_utils import process_mm_info
    import numpy as np

    messages = [{
        "role": "user",
        "content": [
            {"type": "video", "video": video_path, "nframes": 128},
            {"type": "text", "text": "placeholder"},
        ],
    }]
    audios, images, videos = process_mm_info(messages, use_audio_in_video=False)
    video_tensor = videos[0]
    video_np = (video_tensor * 255).byte().numpy()
    return video_np
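
# Sanity arithmetic for the 128-frame cap (illustrative): 62720 cache tokens
# / 128 frames = 490 vision tokens per frame, assuming each frame encodes to
# the same fixed-size patch grid.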


SYSTEM_PROMPT = (
    "You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
    "Group, capable of perceiving auditory and visual inputs, as well as "
    "generating text and speech."
)


def build_vllm_prompt(question: str) -> str:
    return (
        f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
        f"<|im_start|>user\n"
        f"<|vision_start|><|video_pad|><|vision_end|>"
        f"{question}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )


def extract_answer(text: str) -> str:
    """Extract a single letter A/B/C/D from model output."""
    text = text.strip()
    prefixes = [
        "The best answer is", "The correct answer is",
        "The answer is", "The answer", "Best answer:", "Best option:",
    ]
    for prefix in prefixes:
        text = text.replace(prefix, "")

    if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
        return ""
    m = re.search(r"[ABCD]", text)
    return m[0] if m else ""
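
# Quick sanity checks (illustrative, not executed):
#   extract_answer("The best answer is B.")        -> "B"
#   extract_answer("I think option (C) fits best") -> "C"
#   extract_answer("the clip mostly shows scenery and the speaker never "
#                  "commits to any of the four choices at all")  -> ""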


# ---------------------------------------------------------------------------
# Dataset loading
# ---------------------------------------------------------------------------
def load_lvbench(video_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
    from datasets import load_dataset
    ds = load_dataset("lmms-lab/LVBench", split="train")
    data = []
    skipped = 0
    for row in ds:
        vid = row["key"]
        video_path = video_dir / f"{vid}.mp4"
        if not video_path.exists():
            skipped += 1
            continue

        prompt = MCQ_PROMPT + row["question"] + "\nThe best answer is:"

        data.append({
            "uid": row["uid"],
            "video_id": vid,
            "video_path": str(video_path),
            "video_type": row["type"],
            "question_type": row["question_type"],
            "question": row["question"],
            "gt_answer": row["answer"],
            "time_reference": row.get("time_reference", ""),
            "prompt": prompt,
        })
    if skipped:
        print(f"[data] Skipped {skipped} questions (video not found)")
    if max_samples > 0:
        data = data[:max_samples]
    return data


# ---------------------------------------------------------------------------
# Metrics
# ---------------------------------------------------------------------------
def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    total = len(results)
    if total == 0:
        return {}

    correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
    overall_acc = correct / total

    def acc_for(items):
        if not items:
            return None
        c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
        return round(c / len(items), 4)

    per_type = {}
    for vt in VIDEO_TYPES:
        subset = [r for r in results if r["video_type"] == vt]
        if subset:
            per_type[vt] = {"accuracy": acc_for(subset), "count": len(subset)}

    q_types = set()
    for r in results:
        if isinstance(r.get("question_type"), list):
            q_types.update(r["question_type"])
        elif r.get("question_type"):
            q_types.add(r["question_type"])

    per_qtype = {}
    for qt in sorted(q_types):
        subset = [r for r in results if qt in (r.get("question_type", [])
                  if isinstance(r.get("question_type"), list) else [r.get("question_type")])]
        if subset:
            per_qtype[qt] = {"accuracy": acc_for(subset), "count": len(subset)}

    return {
        "total_samples": total,
        "overall_accuracy": round(overall_acc, 4),
        "per_video_type": per_type,
        "per_question_type": per_qtype,
    }
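
# Note on multi-tag questions (illustrative): LVBench may label one question
# with several types, e.g. question_type == ["key information retrieval",
# "temporal grounding"]. Such a result is counted once in every matching
# per_question_type bucket, so bucket counts can sum to more than total_samples.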


def print_summary(metrics: Dict[str, Any], label: str) -> None:
    print()
    print(f"{'=' * 65}")
    print(f" LVBench Summary: {label}")
    print(f"{'=' * 65}")
    print(f" Total samples: {metrics['total_samples']}")
    print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")

    print(f" ─── Per Video Type ───")
    for vt in VIDEO_TYPES:
        if vt in metrics.get("per_video_type", {}):
            d = metrics["per_video_type"][vt]
            print(f" {vt:15s}: {d['accuracy']:.1%} ({d['count']} questions)")

    print(f" ─── Per Question Type ───")
    for qt, d in sorted(metrics.get("per_question_type", {}).items()):
        print(f" {qt:30s}: {d['accuracy']:.1%} ({d['count']})")

    print(f"{'=' * 65}")


# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main() -> None:
    args = parse_args()
    label = args.label or (
        Path(args.adapter).name if args.adapter
        else Path(args.base_model).name
    )

    out_dir = args.output_dir / label
    out_dir.mkdir(parents=True, exist_ok=True)
    results_jsonl = out_dir / "eval_results.jsonl"
    metrics_json = out_dir / "metrics.json"
    summary_txt = out_dir / "summary.txt"

    print("[data] Loading LVBench dataset...")
    test_data = load_lvbench(args.video_dir, args.max_samples)
    print(f"[data] {len(test_data)} questions ready for evaluation")

    processed = set()
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                obj = json.loads(line)
                processed.add(obj["uid"])
        print(f"[resume] {len(processed)} already processed, skipping")

    use_vllm = args.vllm
    model = processor = llm = None
    vllm_preprocess_stats: Dict[str, int] | None = None

    if use_vllm:
        from vllm import LLM, SamplingParams
        tp = args.tp or torch.cuda.device_count()
        model_path = args.base_model
        print(f"[vllm] Loading {model_path} with tp={tp} ...")
        llm = LLM(
            model=model_path,
            tensor_parallel_size=tp,
            max_model_len=args.max_model_len,
            max_num_seqs=4,
            limit_mm_per_prompt={"video": 1},
            gpu_memory_utilization=args.gpu_memory_utilization,
            dtype="bfloat16",
            trust_remote_code=True,
        )
        sampling_params = SamplingParams(
            temperature=args.temperature if args.temperature > 0 else 0.0,
            top_p=0.9 if args.temperature > 0 else 1.0,
            max_tokens=args.max_new_tokens,
        )

        print("[vllm] Preprocessing videos ...")
        todo = [item for item in test_data if item["uid"] not in processed]
        preprocessed = {}
        preprocess_failed_paths: set[str] = set()

        for i, item in enumerate(todo):
            vp = item["video_path"]
            if vp in preprocessed or vp in preprocess_failed_paths:
                continue
            try:
                preprocessed[vp] = preprocess_video_for_vllm(vp)
            except Exception as e:
                preprocess_failed_paths.add(vp)
                print(f" [skip] preprocess error: {Path(vp).name}: {e}")
            if (i + 1) % 50 == 0:
                print(f" Preprocessed {i+1}/{len(todo)} ...")

        n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
        if preprocess_failed_paths:
            print(
                f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
                f"{n_pp_skip} question(s) will not use vLLM (run continues)."
            )
            vllm_preprocess_stats = {
                "preprocess_failed_videos": len(preprocess_failed_paths),
                "preprocess_skipped_questions": n_pp_skip,
            }

        vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
        fallback_items = []
        print(f"[vllm] {len(vllm_todo)} questions ready, running inference ...")

        for i, item in enumerate(vllm_todo):
            if item["uid"] in processed:
                continue
            inp = {
                "prompt": build_vllm_prompt(item["prompt"]),
                "multi_modal_data": {"video": preprocessed[item["video_path"]]},
            }
            try:
                outputs = llm.generate([inp], sampling_params=sampling_params)
                raw_output = outputs[0].outputs[0].text.strip()
                pred = extract_answer(raw_output)
                result = {
                    "uid": item["uid"],
                    "video_id": item["video_id"],
                    "video_type": item["video_type"],
                    "question_type": item["question_type"],
                    "gt_answer": item["gt_answer"],
                    "pred_answer": pred,
                    "correct": pred.upper() == item["gt_answer"].upper(),
                    "raw_output": raw_output,
                }
                with open(results_jsonl, "a", encoding="utf-8") as f:
                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
                processed.add(item["uid"])
            except (ValueError, RuntimeError) as exc:
                if "longer than the maximum model length" in str(exc):
                    print(f" [too long] {item['uid']} -> fallback")
                    fallback_items.append(item)
                else:
                    raise

            if (i + 1) % 50 == 0:
                print(f" [vllm] [{i+1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")

        preprocessed.clear()

        vllm_results = []
        if results_jsonl.exists():
            with open(results_jsonl) as f:
                for line in f:
                    vllm_results.append(json.loads(line))
        if vllm_results:
            vllm_metrics = compute_metrics(vllm_results)
            vllm_metrics_path = out_dir / "metrics_vllm.json"
            with open(vllm_metrics_path, "w", encoding="utf-8") as f:
                json.dump(vllm_metrics, f, indent=2, ensure_ascii=False)
            print(f"[vllm] Intermediate metrics saved to {vllm_metrics_path}")
            print_summary(vllm_metrics, label + " (vllm only)")

        if fallback_items:
            print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
            del llm
            gc.collect()
            torch.cuda.empty_cache()

            model, processor = load_model(args.base_model, args.adapter)
            for item in tqdm(fallback_items, desc="Fallback", unit="q"):
                if item["uid"] in processed:
                    continue
                try:
                    raw_output = run_inference(
                        model, processor, item["video_path"], item["prompt"],
                        args.max_new_tokens, args.temperature,
                    )
                except Exception as exc:
                    import traceback
                    print(f" [error] {item['uid']}: {exc}")
                    traceback.print_exc()
                    raw_output = ""

                pred = extract_answer(raw_output)
                result = {
                    "uid": item["uid"],
                    "video_id": item["video_id"],
                    "video_type": item["video_type"],
                    "question_type": item["question_type"],
                    "gt_answer": item["gt_answer"],
                    "pred_answer": pred,
                    "correct": pred.upper() == item["gt_answer"].upper(),
                    "raw_output": raw_output,
                }
                with open(results_jsonl, "a", encoding="utf-8") as f:
                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
                processed.add(item["uid"])
                gc.collect()
                torch.cuda.empty_cache()

    else:
        print("[model] Loading model...")
        model, processor = load_model(args.base_model, args.adapter)

        for item in tqdm(test_data, desc="LVBench", unit="q"):
            if item["uid"] in processed:
                continue

            try:
                raw_output = run_inference(
                    model, processor, item["video_path"], item["prompt"],
                    args.max_new_tokens, args.temperature,
                )
            except Exception as exc:
                import traceback
                print(f" [error] {item['uid']}: {exc}")
                traceback.print_exc()
                raw_output = ""

            pred = extract_answer(raw_output)

            result = {
                "uid": item["uid"],
                "video_id": item["video_id"],
                "video_type": item["video_type"],
                "question_type": item["question_type"],
                "gt_answer": item["gt_answer"],
                "pred_answer": pred,
                "correct": pred.upper() == item["gt_answer"].upper(),
                "raw_output": raw_output,
            }

            with open(results_jsonl, "a", encoding="utf-8") as f:
                f.write(json.dumps(result, ensure_ascii=False) + "\n")

            processed.add(item["uid"])
            gc.collect()
            torch.cuda.empty_cache()

    all_results = []
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                all_results.append(json.loads(line))

    if not all_results:
        print("[warn] No results to compute metrics from.")
        return

    metrics = compute_metrics(all_results)
    metrics["eval_config"] = {
        "base_model": args.base_model,
        "adapter": args.adapter,
        "video_dir": str(args.video_dir),
        "max_new_tokens": args.max_new_tokens,
        "temperature": args.temperature,
    }
    if vllm_preprocess_stats is not None:
        metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats

    with open(metrics_json, "w", encoding="utf-8") as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)

    print_summary(metrics, label)

    with open(summary_txt, "w", encoding="utf-8") as f:
        import io, contextlib
        buf = io.StringIO()
        with contextlib.redirect_stdout(buf):
            print_summary(metrics, label)
        f.write(buf.getvalue())

    print(f"\n[output] Results: {results_jsonl}")
    print(f"[output] Metrics: {metrics_json}")
    print(f"[output] Summary: {summary_txt}")


if __name__ == "__main__":
    main()

eval/eval_videomme.py ADDED
@@ -0,0 +1,679 @@
#!/usr/bin/env python3
"""
Evaluate Qwen3-Omni models on the Video-MME benchmark.

Video-MME: 2700 multiple-choice questions over 900 YouTube videos.
Metrics: accuracy (overall, per-duration, per-domain, per-task-type).

Usage:
    python eval_videomme.py \
        --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct \
        --video-dir /opt/dlami/nvme/videomme/data/data \
        --max-samples 30 --label vanilla_qwen3omni

    python eval_videomme.py \
        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
        --adapter /opt/dlami/nvme/LlamaFactory/saves/.../adapter \
        --label dpo_v4_8632
"""

from __future__ import annotations

import argparse
import gc
import json
import os
import re
import tempfile
import shutil
from pathlib import Path
from typing import Any, Dict, List, Optional

import torch
from tqdm import tqdm

DEFAULT_VIDEO_DIR = Path("/opt/dlami/nvme/videomme/data/data")
DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/eval_results/videomme")

VIDEO_TYPES = ["short", "medium", "long"]
CATEGORIES = [
    "Knowledge", "Film & Television", "Sports Competition",
    "Artistic Performance", "Life Record", "Multilingual",
]
TASK_CATEGORIES = [
    "Temporal Perception", "Spatial Perception", "Attribute Perception",
    "Action Recognition", "Object Recognition", "OCR Problems",
    "Counting Problem", "Temporal Reasoning", "Spatial Reasoning",
    "Action Reasoning", "Object Reasoning", "Information Synopsis",
]

MCQ_PROMPT = (
    "Select the best answer to the following multiple-choice question "
    "based on the video. Respond with only the letter (A, B, C, or D) "
    "of the correct option.\n"
)


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Evaluate on Video-MME benchmark.")
    p.add_argument("--base-model", type=str,
                   default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
    p.add_argument("--adapter", type=str, default=None)
    p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
    p.add_argument("--max-samples", type=int, default=-1)
    p.add_argument("--max-new-tokens", type=int, default=32)
    p.add_argument("--temperature", type=float, default=0.0)
    p.add_argument("--label", type=str, default=None)
    p.add_argument("--vllm", action="store_true", default=False,
                   help="Use vLLM offline batch inference instead of transformers (much faster).")
    p.add_argument("--tp", type=int, default=None,
                   help="Tensor parallel size for vLLM (default: all visible GPUs).")
    p.add_argument("--batch-size", type=int, default=32,
                   help="Batch size for vLLM generate().")
    p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
    p.add_argument("--max-model-len", type=int, default=65536)
    return p.parse_args()


# ---------------------------------------------------------------------------
# Model loading — reused from eval_dpo_sync.py
# ---------------------------------------------------------------------------
def load_model(base_model: str, adapter: Optional[str]):
    import json as _json
    from transformers import (
        AutoConfig,
        AutoProcessor,
        Qwen3OmniMoeForConditionalGeneration,
        Qwen3OmniMoeThinkerConfig,
        Qwen3OmniMoeThinkerForConditionalGeneration,
    )

    print(f"[load] Base model: {base_model}")
    processor = AutoProcessor.from_pretrained(base_model, trust_remote_code=True)

    model_path = Path(base_model)
    cfg_path = model_path / "config.json" if model_path.exists() else None

    model_type = None
    if cfg_path and cfg_path.exists():
        with open(cfg_path, "r") as f:
            model_type = _json.load(f).get("model_type")
    if not model_type:
        try:
            from huggingface_hub import hf_hub_download
            cached = hf_hub_download(base_model, "config.json")
            with open(cached, "r") as f:
                model_type = _json.load(f).get("model_type")
        except Exception:
            pass

    print(f"[load] Detected model_type: {model_type}")

    if model_type == "qwen3_omni_moe_thinker":
        config = Qwen3OmniMoeThinkerConfig.from_pretrained(base_model)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )
    else:
        config = AutoConfig.from_pretrained(base_model, trust_remote_code=True)
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )

    if adapter:
        from peft import PeftModel
        from safetensors.torch import load_file, save_file

        print(f"[load] Loading LoRA adapter: {adapter}")
        adapter_cfg_path = Path(adapter) / "adapter_config.json"
        with open(adapter_cfg_path) as f:
            adapter_cfg = _json.loads(f.read())
        target_modules = adapter_cfg.get("target_modules", [])
        needs_remap = (
            any(t.startswith("model.layers.") for t in target_modules)
            and model_type != "qwen3_omni_moe_thinker"
        )

        if needs_remap:
            print("[load] Adapter trained on thinker-only; remapping keys...")
            tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
            for fn in Path(adapter).iterdir():
                if fn.is_dir():
                    continue
                if fn.name == "adapter_config.json":
                    new_targets = []
                    for t in target_modules:
                        if t.startswith("model.layers."):
                            new_targets.append("thinker." + t)
                        elif t[0].isdigit():
                            new_targets.append("thinker.model.layers." + t)
                        else:
                            new_targets.append(t)
                    adapter_cfg["target_modules"] = new_targets
                    with open(tmp_dir / "adapter_config.json", "w") as f:
                        json.dump(adapter_cfg, f, indent=2)
                elif fn.suffix == ".safetensors" and "adapter" in fn.name:
                    tensors = load_file(str(fn))
                    remapped = {}
                    for k, v in tensors.items():
                        if ".model.layers." in k and ".thinker." not in k:
                            new_k = k.replace(
                                "base_model.model.model.layers.",
                                "base_model.model.thinker.model.layers.",
                            )
                            remapped[new_k] = v
                        else:
                            remapped[k] = v
                    save_file(remapped, str(tmp_dir / fn.name))
                else:
                    shutil.copy2(str(fn), str(tmp_dir / fn.name))
            adapter = str(tmp_dir)

        model = PeftModel.from_pretrained(model, adapter)
        model = model.merge_and_unload()

    if hasattr(model, "disable_talker"):
        model.disable_talker()
    model.eval()
    print(f"[load] Model ready ({type(model).__name__}). Device: {model.device}")
    return model, processor


# ---------------------------------------------------------------------------
# Inference
# ---------------------------------------------------------------------------
def run_inference(model, processor, video_path: str, prompt: str,
                  max_new_tokens: int, temperature: float) -> str:
    from qwen_omni_utils import process_mm_info

    tmp_dir = tempfile.mkdtemp(prefix="eval_vmme_")
    masked_video = os.path.join(tmp_dir, "clip.mp4")
    os.symlink(os.path.abspath(video_path), masked_video)

    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "video", "video": masked_video},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    text = processor.apply_chat_template(
        conversation, add_generation_prompt=True, tokenize=False,
    )
    audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
    inputs = processor(
        text=text, audio=audios, images=images, videos=videos,
        return_tensors="pt", padding=True, use_audio_in_video=False,
    )

    model_dtype = next(model.parameters()).dtype
    converted = {}
    for k, v in inputs.items():
        if hasattr(v, "to"):
            v = v.to(model.device)
            if torch.is_floating_point(v):
                v = v.to(model_dtype)
        converted[k] = v
    inputs = converted

    from transformers import Qwen3OmniMoeThinkerForConditionalGeneration
    is_thinker = isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration)
    if is_thinker:
        gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
    else:
        gen_kwargs = {
            "thinker_max_new_tokens": max_new_tokens,
            "use_audio_in_video": False,
            "return_audio": False,
            "do_sample": temperature > 0,
        }
    if temperature > 0:
        gen_kwargs["temperature"] = temperature
        gen_kwargs["top_p"] = 0.9

    with torch.inference_mode():
        output_ids = model.generate(**inputs, **gen_kwargs)

    if isinstance(output_ids, tuple):
        output_ids = output_ids[0]

    prompt_len = inputs["input_ids"].shape[1]
    response = processor.batch_decode(
        output_ids[:, prompt_len:], skip_special_tokens=True,
    )[0].strip()

    shutil.rmtree(tmp_dir, ignore_errors=True)
    return response


def preprocess_video_for_vllm(video_path: str):
    """Extract video frames using qwen_omni_utils, return numpy array for vLLM.
    Caps at 128 frames to stay within vLLM encoder cache (62720 tokens).
    """
    from qwen_omni_utils import process_mm_info
    import numpy as np

    messages = [{
        "role": "user",
        "content": [
            {"type": "video", "video": video_path, "nframes": 128},
            {"type": "text", "text": "placeholder"},
        ],
    }]
    audios, images, videos = process_mm_info(messages, use_audio_in_video=False)
    video_tensor = videos[0]
    video_np = (video_tensor * 255).byte().numpy()
    return video_np


SYSTEM_PROMPT = (
    "You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
    "Group, capable of perceiving auditory and visual inputs, as well as "
    "generating text and speech."
)


def build_vllm_prompt(question: str) -> str:
    return (
        f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
        f"<|im_start|>user\n"
        f"<|vision_start|><|video_pad|><|vision_end|>"
        f"{question}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )


def extract_answer(text: str) -> str:
    """Extract a single letter A/B/C/D from model output."""
    text = text.strip()
    prefixes = [
        "The best answer is", "The correct answer is",
        "The answer is", "The answer", "Best answer:", "Best option:",
    ]
    for prefix in prefixes:
        text = text.replace(prefix, "")

    if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
        return ""
    m = re.search(r"[ABCD]", text)
    return m[0] if m else ""


# ---------------------------------------------------------------------------
# Dataset loading
# ---------------------------------------------------------------------------
def load_videomme(video_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
    from datasets import load_dataset
    ds = load_dataset("lmms-lab/Video-MME", split="test")
    data = []
    skipped = 0
    for row in ds:
        vid = row["videoID"]
        video_path = video_dir / f"{vid}.mp4"
        if not video_path.exists():
            for ext in [".MP4", ".mkv"]:
                alt = video_dir / f"{vid}{ext}"
                if alt.exists():
                    video_path = alt
                    break
        if not video_path.exists():
            skipped += 1
            continue

        options_text = "\n".join(row["options"])
        prompt = MCQ_PROMPT + row["question"] + "\n" + options_text + "\nThe best answer is:"

        data.append({
            "question_id": row["question_id"],
            "video_id": vid,
            "video_path": str(video_path),
            "duration": row["duration"],
            "domain": row["domain"],
            "sub_category": row["sub_category"],
            "task_type": row["task_type"],
            "question": row["question"],
            "options": row["options"],
            "gt_answer": row["answer"],
            "prompt": prompt,
        })
    if skipped:
        print(f"[data] Skipped {skipped} questions (video not found)")
    if max_samples > 0:
        data = data[:max_samples]
    return data
| 348 |
+
|
| 349 |
+
|
| 350 |
+
# ---------------------------------------------------------------------------
|
| 351 |
+
# Metrics
|
| 352 |
+
# ---------------------------------------------------------------------------
|
| 353 |
+
def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
|
| 354 |
+
total = len(results)
|
| 355 |
+
if total == 0:
|
| 356 |
+
return {}
|
| 357 |
+
|
| 358 |
+
correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
|
| 359 |
+
overall_acc = correct / total
|
| 360 |
+
|
| 361 |
+
def acc_for(items):
|
| 362 |
+
if not items:
|
| 363 |
+
return None
|
| 364 |
+
c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
|
| 365 |
+
return round(c / len(items), 4)
|
| 366 |
+
|
| 367 |
+
per_duration = {}
|
| 368 |
+
for vt in VIDEO_TYPES:
|
| 369 |
+
subset = [r for r in results if r["duration"] == vt]
|
| 370 |
+
if subset:
|
| 371 |
+
per_duration[vt] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 372 |
+
|
| 373 |
+
per_domain = {}
|
| 374 |
+
for cat in CATEGORIES:
|
| 375 |
+
subset = [r for r in results if r["domain"] == cat]
|
| 376 |
+
if subset:
|
| 377 |
+
per_domain[cat] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 378 |
+
|
| 379 |
+
per_task = {}
|
| 380 |
+
for task in TASK_CATEGORIES:
|
| 381 |
+
subset = [r for r in results if r["task_type"] == task]
|
| 382 |
+
if subset:
|
| 383 |
+
per_task[task] = {"accuracy": acc_for(subset), "count": len(subset)}
|
| 384 |
+
|
| 385 |
+
return {
|
| 386 |
+
"total_samples": total,
|
| 387 |
+
"overall_accuracy": round(overall_acc, 4),
|
| 388 |
+
"per_duration": per_duration,
|
| 389 |
+
"per_domain": per_domain,
|
| 390 |
+
"per_task_type": per_task,
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
def print_summary(metrics: Dict[str, Any], label: str) -> None:
|
| 395 |
+
print()
|
| 396 |
+
print(f"{'=' * 65}")
|
| 397 |
+
print(f" Video-MME Summary: {label}")
|
| 398 |
+
print(f"{'=' * 65}")
|
| 399 |
+
print(f" Total samples: {metrics['total_samples']}")
|
| 400 |
+
print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")
|
| 401 |
+
|
| 402 |
+
print(f" ─── Per Duration ───")
|
| 403 |
+
for vt in VIDEO_TYPES:
|
| 404 |
+
if vt in metrics["per_duration"]:
|
| 405 |
+
d = metrics["per_duration"][vt]
|
| 406 |
+
print(f" {vt:8s}: {d['accuracy']:.1%} ({d['count']} questions)")
|
| 407 |
+
|
| 408 |
+
print(f" ─── Per Domain ───")
|
| 409 |
+
for cat in CATEGORIES:
|
| 410 |
+
if cat in metrics["per_domain"]:
|
| 411 |
+
d = metrics["per_domain"][cat]
|
| 412 |
+
print(f" {cat:25s}: {d['accuracy']:.1%} ({d['count']})")
|
| 413 |
+
|
| 414 |
+
print(f" ─── Per Task Type ───")
|
| 415 |
+
for task in TASK_CATEGORIES:
|
| 416 |
+
if task in metrics["per_task_type"]:
|
| 417 |
+
d = metrics["per_task_type"][task]
|
| 418 |
+
print(f" {task:25s}: {d['accuracy']:.1%} ({d['count']})")
|
| 419 |
+
|
| 420 |
+
print(f"{'=' * 65}")
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
# ---------------------------------------------------------------------------
|
| 424 |
+
# Main
|
| 425 |
+
# ---------------------------------------------------------------------------
|
| 426 |
+
def main() -> None:
|
| 427 |
+
args = parse_args()
|
| 428 |
+
label = args.label or (
|
| 429 |
+
Path(args.adapter).name if args.adapter
|
| 430 |
+
else Path(args.base_model).name
|
| 431 |
+
)
|
| 432 |
+
|
| 433 |
+
out_dir = args.output_dir / label
|
| 434 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 435 |
+
results_jsonl = out_dir / "eval_results.jsonl"
|
| 436 |
+
metrics_json = out_dir / "metrics.json"
|
| 437 |
+
summary_txt = out_dir / "summary.txt"
|
| 438 |
+
|
| 439 |
+
print("[data] Loading Video-MME dataset...")
|
| 440 |
+
test_data = load_videomme(args.video_dir, args.max_samples)
|
| 441 |
+
print(f"[data] {len(test_data)} questions ready for evaluation")
|
| 442 |
+
|
| 443 |
+
processed = set()
|
| 444 |
+
if results_jsonl.exists():
|
| 445 |
+
with open(results_jsonl) as f:
|
| 446 |
+
for line in f:
|
| 447 |
+
obj = json.loads(line)
|
| 448 |
+
processed.add(obj["question_id"])
|
| 449 |
+
print(f"[resume] {len(processed)} already processed, skipping")
|
| 450 |
+
|
| 451 |
+
use_vllm = args.vllm
|
| 452 |
+
model = processor = llm = None
|
| 453 |
+
vllm_preprocess_stats: Dict[str, int] | None = None
|
| 454 |
+
|
| 455 |
+
if use_vllm:
|
| 456 |
+
from vllm import LLM, SamplingParams
|
| 457 |
+
tp = args.tp or torch.cuda.device_count()
|
| 458 |
+
model_path = args.base_model
|
| 459 |
+
print(f"[vllm] Loading {model_path} with tp={tp} ...")
|
| 460 |
+
llm = LLM(
|
| 461 |
+
model=model_path,
|
| 462 |
+
tensor_parallel_size=tp,
|
| 463 |
+
max_model_len=args.max_model_len,
|
| 464 |
+
max_num_seqs=4,
|
| 465 |
+
limit_mm_per_prompt={"video": 1},
|
| 466 |
+
gpu_memory_utilization=args.gpu_memory_utilization,
|
| 467 |
+
dtype="bfloat16",
|
| 468 |
+
trust_remote_code=True,
|
| 469 |
+
)
|
| 470 |
+
sampling_params = SamplingParams(
|
| 471 |
+
temperature=args.temperature if args.temperature > 0 else 0.0,
|
| 472 |
+
top_p=0.9 if args.temperature > 0 else 1.0,
|
| 473 |
+
max_tokens=args.max_new_tokens,
|
| 474 |
+
)
|
| 475 |
+
|
| 476 |
+
print("[vllm] Preprocessing videos ...")
|
| 477 |
+
todo = [item for item in test_data if item["question_id"] not in processed]
|
| 478 |
+
preprocessed = {}
|
| 479 |
+
preprocess_failed_paths: set[str] = set()
|
| 480 |
+
|
| 481 |
+
for i, item in enumerate(todo):
|
| 482 |
+
vp = item["video_path"]
|
| 483 |
+
if vp in preprocessed or vp in preprocess_failed_paths:
|
| 484 |
+
continue
|
| 485 |
+
try:
|
| 486 |
+
preprocessed[vp] = preprocess_video_for_vllm(vp)
|
| 487 |
+
except Exception as e:
|
| 488 |
+
preprocess_failed_paths.add(vp)
|
| 489 |
+
print(f" [skip] preprocess error: {Path(vp).name}: {e}")
|
| 490 |
+
if (i + 1) % 100 == 0:
|
| 491 |
+
print(f" Preprocessed {i+1}/{len(todo)} ...")
|
| 492 |
+
|
| 493 |
+
n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
|
| 494 |
+
if preprocess_failed_paths:
|
| 495 |
+
print(
|
| 496 |
+
f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
|
| 497 |
+
f"{n_pp_skip} question(s) will not use vLLM (run continues)."
|
| 498 |
+
)
|
| 499 |
+
vllm_preprocess_stats = {
|
| 500 |
+
"preprocess_failed_videos": len(preprocess_failed_paths),
|
| 501 |
+
"preprocess_skipped_questions": n_pp_skip,
|
| 502 |
+
}
|
| 503 |
+
|
| 504 |
+
vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
|
| 505 |
+
fallback_items = []
|
| 506 |
+
print(f"[vllm] {len(vllm_todo)} questions ready, running inference ...")
|
| 507 |
+
|
| 508 |
+
for i, item in enumerate(vllm_todo):
|
| 509 |
+
if item["question_id"] in processed:
|
| 510 |
+
continue
|
| 511 |
+
inp = {
|
| 512 |
+
"prompt": build_vllm_prompt(item["prompt"]),
|
| 513 |
+
"multi_modal_data": {"video": preprocessed[item["video_path"]]},
|
| 514 |
+
}
|
| 515 |
+
try:
|
| 516 |
+
outputs = llm.generate([inp], sampling_params=sampling_params)
|
| 517 |
+
raw_output = outputs[0].outputs[0].text.strip()
|
| 518 |
+
pred = extract_answer(raw_output)
|
| 519 |
+
result = {
|
| 520 |
+
"question_id": item["question_id"],
|
| 521 |
+
"video_id": item["video_id"],
|
| 522 |
+
"duration": item["duration"],
|
| 523 |
+
"domain": item["domain"],
|
| 524 |
+
"sub_category": item["sub_category"],
|
| 525 |
+
"task_type": item["task_type"],
|
| 526 |
+
"gt_answer": item["gt_answer"],
|
| 527 |
+
"pred_answer": pred,
|
| 528 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 529 |
+
"raw_output": raw_output,
|
| 530 |
+
}
|
| 531 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 532 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 533 |
+
processed.add(item["question_id"])
|
| 534 |
+
except (ValueError, RuntimeError) as exc:
|
| 535 |
+
if "longer than the maximum model length" in str(exc):
|
| 536 |
+
print(f" [too long] {item['question_id']} -> fallback")
|
| 537 |
+
fallback_items.append(item)
|
| 538 |
+
else:
|
| 539 |
+
raise
|
| 540 |
+
|
| 541 |
+
if (i + 1) % 100 == 0:
|
| 542 |
+
print(f" [vllm] [{i+1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")
|
| 543 |
+
|
| 544 |
+
preprocessed.clear()
|
| 545 |
+
|
| 546 |
+
vllm_results = []
|
| 547 |
+
if results_jsonl.exists():
|
| 548 |
+
with open(results_jsonl) as f:
|
| 549 |
+
for line in f:
|
| 550 |
+
vllm_results.append(json.loads(line))
|
| 551 |
+
if vllm_results:
|
| 552 |
+
vllm_metrics = compute_metrics(vllm_results)
|
| 553 |
+
vllm_metrics_path = out_dir / "metrics_vllm.json"
|
| 554 |
+
with open(vllm_metrics_path, "w", encoding="utf-8") as f:
|
| 555 |
+
json.dump(vllm_metrics, f, indent=2, ensure_ascii=False)
|
| 556 |
+
print(f"[vllm] Intermediate metrics saved to {vllm_metrics_path}")
|
| 557 |
+
print_summary(vllm_metrics, label + " (vllm only)")
|
| 558 |
+
|
| 559 |
+
if fallback_items:
|
| 560 |
+
print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
|
| 561 |
+
del llm
|
| 562 |
+
gc.collect()
|
| 563 |
+
torch.cuda.empty_cache()
|
| 564 |
+
|
| 565 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 566 |
+
for item in tqdm(fallback_items, desc="Fallback", unit="q"):
|
| 567 |
+
if item["question_id"] in processed:
|
| 568 |
+
continue
|
| 569 |
+
try:
|
| 570 |
+
raw_output = run_inference(
|
| 571 |
+
model, processor, item["video_path"], item["prompt"],
|
| 572 |
+
args.max_new_tokens, args.temperature,
|
| 573 |
+
)
|
| 574 |
+
except Exception as exc:
|
| 575 |
+
import traceback
|
| 576 |
+
print(f" [error] {item['question_id']}: {exc}")
|
| 577 |
+
traceback.print_exc()
|
| 578 |
+
raw_output = ""
|
| 579 |
+
|
| 580 |
+
pred = extract_answer(raw_output)
|
| 581 |
+
result = {
|
| 582 |
+
"question_id": item["question_id"],
|
| 583 |
+
"video_id": item["video_id"],
|
| 584 |
+
"duration": item["duration"],
|
| 585 |
+
"domain": item["domain"],
|
| 586 |
+
"sub_category": item["sub_category"],
|
| 587 |
+
"task_type": item["task_type"],
|
| 588 |
+
"gt_answer": item["gt_answer"],
|
| 589 |
+
"pred_answer": pred,
|
| 590 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 591 |
+
"raw_output": raw_output,
|
| 592 |
+
}
|
| 593 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 594 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 595 |
+
processed.add(item["question_id"])
|
| 596 |
+
gc.collect()
|
| 597 |
+
torch.cuda.empty_cache()
|
| 598 |
+
|
| 599 |
+
else:
|
| 600 |
+
print("[model] Loading model...")
|
| 601 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 602 |
+
|
| 603 |
+
for item in tqdm(test_data, desc="Video-MME", unit="q"):
|
| 604 |
+
if item["question_id"] in processed:
|
| 605 |
+
continue
|
| 606 |
+
|
| 607 |
+
try:
|
| 608 |
+
raw_output = run_inference(
|
| 609 |
+
model, processor, item["video_path"], item["prompt"],
|
| 610 |
+
args.max_new_tokens, args.temperature,
|
| 611 |
+
)
|
| 612 |
+
except Exception as exc:
|
| 613 |
+
import traceback
|
| 614 |
+
print(f" [error] {item['question_id']}: {exc}")
|
| 615 |
+
traceback.print_exc()
|
| 616 |
+
raw_output = ""
|
| 617 |
+
|
| 618 |
+
pred = extract_answer(raw_output)
|
| 619 |
+
|
| 620 |
+
result = {
|
| 621 |
+
"question_id": item["question_id"],
|
| 622 |
+
"video_id": item["video_id"],
|
| 623 |
+
"duration": item["duration"],
|
| 624 |
+
"domain": item["domain"],
|
| 625 |
+
"sub_category": item["sub_category"],
|
| 626 |
+
"task_type": item["task_type"],
|
| 627 |
+
"gt_answer": item["gt_answer"],
|
| 628 |
+
"pred_answer": pred,
|
| 629 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 630 |
+
"raw_output": raw_output,
|
| 631 |
+
}
|
| 632 |
+
|
| 633 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 634 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 635 |
+
|
| 636 |
+
processed.add(item["question_id"])
|
| 637 |
+
gc.collect()
|
| 638 |
+
torch.cuda.empty_cache()
|
| 639 |
+
|
| 640 |
+
all_results = []
|
| 641 |
+
if results_jsonl.exists():
|
| 642 |
+
with open(results_jsonl) as f:
|
| 643 |
+
for line in f:
|
| 644 |
+
all_results.append(json.loads(line))
|
| 645 |
+
|
| 646 |
+
if not all_results:
|
| 647 |
+
print("[warn] No results to compute metrics from.")
|
| 648 |
+
return
|
| 649 |
+
|
| 650 |
+
metrics = compute_metrics(all_results)
|
| 651 |
+
metrics["eval_config"] = {
|
| 652 |
+
"base_model": args.base_model,
|
| 653 |
+
"adapter": args.adapter,
|
| 654 |
+
"video_dir": str(args.video_dir),
|
| 655 |
+
"max_new_tokens": args.max_new_tokens,
|
| 656 |
+
"temperature": args.temperature,
|
| 657 |
+
}
|
| 658 |
+
if vllm_preprocess_stats is not None:
|
| 659 |
+
metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats
|
| 660 |
+
|
| 661 |
+
with open(metrics_json, "w", encoding="utf-8") as f:
|
| 662 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 663 |
+
|
| 664 |
+
print_summary(metrics, label)
|
| 665 |
+
|
| 666 |
+
with open(summary_txt, "w", encoding="utf-8") as f:
|
| 667 |
+
import io, contextlib
|
| 668 |
+
buf = io.StringIO()
|
| 669 |
+
with contextlib.redirect_stdout(buf):
|
| 670 |
+
print_summary(metrics, label)
|
| 671 |
+
f.write(buf.getvalue())
|
| 672 |
+
|
| 673 |
+
print(f"\n[output] Results: {results_jsonl}")
|
| 674 |
+
print(f"[output] Metrics: {metrics_json}")
|
| 675 |
+
print(f"[output] Summary: {summary_txt}")
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
if __name__ == "__main__":
|
| 679 |
+
main()
|
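Note (added for reference, not part of the diff): the letter extraction above strips the known answer prefixes, then takes the first capital A-D it finds; long free-form text with no such letter is treated as unparseable and scored as incorrect. A quick illustration of that behaviour, calling extract_answer() exactly as defined in the hunk above:

    # Prefix phrases are removed first, so the first remaining A-D letter wins.
    assert extract_answer("The best answer is B.") == "B"
    assert extract_answer("(C) The chef plates the dish.") == "C"
    # More than 10 words and no capital A-D anywhere -> "" (unparseable).
    assert extract_answer("the video mostly shows someone cooking pasta in a small kitchen") == ""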
finevideo/download_finevideo_subset.py
ADDED
@@ -0,0 +1,254 @@
+#!/usr/bin/env python3
+"""
+Download only the filtered FineVideo subset (videos) from HuggingFace.
+
+Reads the candidate JSONL from filter_finevideo.py, then streams FineVideo
+and saves only matching videos to disk.
+
+**Resume:** Re-run the **same command** after Ctrl+C: already-downloaded, non-empty
+``finevideo_*.mp4`` files are skipped; only missing targets are streamed again.
+Writes are atomic (``*.mp4.part`` then rename) so interrupted downloads are not mistaken
+for complete files.
+
+With --max-videos N and more than N candidates in the file, subsampling uses the same
+stratified_sample as filter_finevideo.py (by content_parent_category, seed-controlled).
+
+Usage:
+    python download_finevideo_subset.py
+    python download_finevideo_subset.py --candidates /opt/dlami/nvme/finevideo/filtered_candidates.jsonl
+    python download_finevideo_subset.py --max-videos 5000
+    python download_finevideo_subset.py --max-videos 500 --no-stratify   # first N in file order
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import os
+import random
+from pathlib import Path
+
+from tqdm import tqdm
+
+# Same order-of-magnitude as filter_finevideo (full train split)
+FINEVIDEO_ROW_ESTIMATE = 43_751
+
+
+def _stratified_sample(candidates: list[dict], target: int, seed: int) -> list[dict]:
+    """Same logic as filter_finevideo.stratified_sample (by content_parent_category)."""
+    if target <= 0 or len(candidates) <= target:
+        return candidates
+
+    rng = random.Random(seed)
+    by_cat: dict[str, list[dict]] = {}
+    for c in candidates:
+        cat = c.get("content_parent_category", "Other")
+        by_cat.setdefault(cat, []).append(c)
+
+    per_cat = max(1, target // len(by_cat))
+    selected: list[dict] = []
+    remainder: list[dict] = []
+    for cat, items in by_cat.items():
+        rng.shuffle(items)
+        selected.extend(items[:per_cat])
+        remainder.extend(items[per_cat:])
+
+    if len(selected) < target:
+        rng.shuffle(remainder)
+        selected.extend(remainder[: target - len(selected)])
+
+    return selected[:target]
+
+DEFAULT_CANDIDATES = Path("/opt/dlami/nvme/finevideo/filtered_candidates.jsonl")
+DEFAULT_VIDEO_DIR = Path("/opt/dlami/nvme/finevideo/videos")
+
+
+def _video_path(video_dir: Path, idx: int) -> Path:
+    return video_dir / f"finevideo_{idx:06d}.mp4"
+
+
+def _part_path(video_path: Path) -> Path:
+    return video_path.parent / f"{video_path.stem}.mp4.part"
+
+
+def _is_complete_mp4(path: Path, min_bytes: int) -> bool:
+    try:
+        return path.is_file() and path.stat().st_size >= min_bytes
+    except OSError:
+        return False
+
+
+def _stems_complete_on_disk(video_dir: Path, min_bytes: int) -> set[str]:
+    """Stems finevideo_XXXXXX with a non-empty (>= min_bytes) mp4."""
+    stems: set[str] = set()
+    for p in video_dir.glob("finevideo_*.mp4"):
+        if _is_complete_mp4(p, min_bytes):
+            stems.add(p.stem)
+    return stems
+
+
+def parse_args() -> argparse.Namespace:
+    p = argparse.ArgumentParser(description="Download filtered FineVideo videos.")
+    p.add_argument("--candidates", type=Path, default=DEFAULT_CANDIDATES)
+    p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
+    p.add_argument("--max-videos", type=int, default=0,
+                   help="Cap downloads. If the file has more candidates, subsample to N: by default "
+                        "**stratified by content_parent_category** (same as filter_finevideo). "
+                        "0 = use every line in the file.")
+    p.add_argument("--seed", type=int, default=42, help="RNG seed for stratified --max-videos.")
+    p.add_argument(
+        "--no-stratify",
+        action="store_true",
+        help="With --max-videos, take the first N unique candidates in JSONL order instead of stratified sampling.",
+    )
+    p.add_argument("--no-tqdm", action="store_true", help="Disable progress bar while streaming the dataset.")
+    p.add_argument(
+        "--min-mp4-bytes",
+        type=int,
+        default=1024,
+        help="Treat existing mp4 as downloaded only if size >= this (avoids empty/corrupt stubs).",
+    )
+    p.add_argument(
+        "--force-redownload",
+        action="store_true",
+        help="Ignore existing mp4 files and overwrite (default: skip complete files = resume).",
+    )
+    return p.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+    args.video_dir.mkdir(parents=True, exist_ok=True)
+
+    candidates: list[dict] = []
+    with open(args.candidates, encoding="utf-8") as f:
+        for line in f:
+            line = line.strip()
+            if line:
+                candidates.append(json.loads(line))
+
+    # Dedupe by finevideo_idx, keep first occurrence (JSONL order).
+    seen_idx: set[int] = set()
+    uniq: list[dict] = []
+    for c in candidates:
+        idx = c["finevideo_idx"]
+        if idx in seen_idx:
+            continue
+        seen_idx.add(idx)
+        uniq.append(c)
+    candidates = uniq
+
+    n_in = len(candidates)
+    if args.max_videos > 0 and n_in > args.max_videos:
+        if args.no_stratify:
+            candidates = candidates[: args.max_videos]
+            print(f"[sample] First {args.max_videos} candidates in file order (of {n_in} unique).")
+        else:
+            candidates = _stratified_sample(candidates, args.max_videos, args.seed)
+            print(
+                f"[sample] Stratified subsample {len(candidates)} from {n_in} unique "
+                f"(content_parent_category, seed={args.seed})."
+            )
+
+    target_indices = {c["finevideo_idx"] for c in candidates}
+
+    if args.force_redownload:
+        complete_stems: set[str] = set()
+    else:
+        complete_stems = _stems_complete_on_disk(args.video_dir, args.min_mp4_bytes)
+
+    needed_indices = {
+        idx for idx in target_indices
+        if args.force_redownload or f"finevideo_{idx:06d}" not in complete_stems
+    }
+    n_have = len(target_indices) - len(needed_indices)
+
+    if not needed_indices:
+        print(f"[skip] All {len(target_indices)} target videos already complete in {args.video_dir} "
+              f"(>= {args.min_mp4_bytes} bytes each).")
+        return
+
+    print(
+        f"[resume] {n_have} of {len(target_indices)} targets already on disk (complete); "
+        f"{len(needed_indices)} left to fetch."
+    )
+    print("[download] Streaming dataset until those indices are filled ...")
+    print(
+        "[note] Streaming is sequential (row 0,1,2,...); there is no random access to finevideo_idx. "
+        f"The tqdm total ~{FINEVIDEO_ROW_ESTIMATE} counts rows scanned through the train split, "
+        f"not how many files you download. Only {len(needed_indices)} mp4(s) will be written; "
+        "if your indices are near the end you still must stream up to those rows."
+    )
+
+    from datasets import load_dataset
+
+    ds = load_dataset("HuggingFaceFV/finevideo", split="train", streaming=True)
+
+    row_iter = ds
+    pbar = None
+    if not args.no_tqdm:
+        pbar = tqdm(
+            ds,
+            total=FINEVIDEO_ROW_ESTIMATE,
+            desc="Scan rows (sequential)",
+            unit="row",
+            dynamic_ncols=True,
+        )
+        row_iter = pbar
+
+    downloaded = 0
+    skipped_existing = 0
+    n_need_start = len(needed_indices)
+    for idx, sample in enumerate(row_iter):
+        if pbar is not None and (idx % 4000 == 0 or idx in needed_indices):
+            pbar.set_postfix_str(
+                f"row={idx} new_files={downloaded}/{n_need_start} todo_idx={len(needed_indices)}"
+            )
+
+        if idx not in needed_indices:
+            continue
+
+        video_path = _video_path(args.video_dir, idx)
+
+        if not args.force_redownload and _is_complete_mp4(video_path, args.min_mp4_bytes):
+            skipped_existing += 1
+            needed_indices.discard(idx)
+            if not needed_indices:
+                break
+            continue
+
+        mp4_data = sample.get("mp4")
+        if mp4_data is None:
+            print(f"  [warn] idx={idx} has no mp4 data, skipping.")
+            needed_indices.discard(idx)
+            if not needed_indices:
+                break
+            continue
+
+        part_path = _part_path(video_path)
+        try:
+            part_path.unlink(missing_ok=True)
+            with open(part_path, "wb") as f:
+                f.write(mp4_data)
+            os.replace(part_path, video_path)
+        except Exception:
+            part_path.unlink(missing_ok=True)
+            raise
+
+        downloaded += 1
+        needed_indices.discard(idx)
+
+        if pbar is None and downloaded % 100 == 0:
+            print(f"  Downloaded {downloaded} / {n_need_start} ...")
+
+        if not needed_indices:
+            break
+
+    print(f"\n[done] Downloaded {downloaded} new videos to {args.video_dir}")
+    print(f"   Already on disk:  {skipped_existing}")
+    total_on_disk = sum(1 for _ in args.video_dir.glob("*.mp4"))
+    print(f"   Total videos on disk: {total_on_disk}")
+
+
+if __name__ == "__main__":
+    main()
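Side note on the download loop above (not part of the diff): writing to a *.part file and then calling os.replace() is the standard atomic-write pattern, so a Ctrl+C mid-write can only leave a stray .part file, never a truncated .mp4 that the resume check would mistake for complete. A minimal standalone sketch of the same idea; the helper name and example path are illustrative, not from the repo:

    import os
    from pathlib import Path

    def atomic_write_bytes(target: Path, data: bytes) -> None:
        # Write to a sibling .part file, then rename into place. os.replace()
        # is atomic for paths on the same filesystem, so `target` either
        # appears fully written or does not appear at all.
        part = target.parent / f"{target.name}.part"
        part.unlink(missing_ok=True)
        try:
            with open(part, "wb") as f:
                f.write(data)
            os.replace(part, target)
        except Exception:
            part.unlink(missing_ok=True)
            raise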
finevideo/extract_audio_finevideo.py
ADDED
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+"""
+Extract audio (.wav) from downloaded FineVideo .mp4 files using ffmpeg.
+
+Usage:
+    python extract_audio_finevideo.py
+    python extract_audio_finevideo.py --video-dir /opt/dlami/nvme/finevideo/videos --audio-dir /opt/dlami/nvme/finevideo/audio
+    python extract_audio_finevideo.py --sample-rate 16000
+"""
+
+from __future__ import annotations
+
+import argparse
+import subprocess
+from pathlib import Path
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+DEFAULT_VIDEO_DIR = Path("/opt/dlami/nvme/finevideo/videos")
+DEFAULT_AUDIO_DIR = Path("/opt/dlami/nvme/finevideo/audio")
+
+
+def parse_args() -> argparse.Namespace:
+    p = argparse.ArgumentParser(description="Extract audio from FineVideo mp4s.")
+    p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
+    p.add_argument("--audio-dir", type=Path, default=DEFAULT_AUDIO_DIR)
+    p.add_argument("--sample-rate", type=int, default=16000)
+    p.add_argument("--workers", type=int, default=8)
+    return p.parse_args()
+
+
+def extract_one(mp4: Path, wav: Path, sr: int) -> tuple[str, bool, str]:
+    if wav.exists():
+        return (mp4.name, True, "exists")
+    try:
+        subprocess.run(
+            [
+                "ffmpeg", "-y", "-i", str(mp4),
+                "-vn", "-acodec", "pcm_s16le",
+                "-ar", str(sr), "-ac", "1",
+                str(wav),
+            ],
+            capture_output=True, timeout=120,
+            check=True,
+        )
+        return (mp4.name, True, "ok")
+    except subprocess.CalledProcessError as e:
+        return (mp4.name, False, e.stderr.decode(errors="replace")[:200])
+    except subprocess.TimeoutExpired:
+        return (mp4.name, False, "timeout")
+
+
+def main() -> None:
+    args = parse_args()
+    args.audio_dir.mkdir(parents=True, exist_ok=True)
+
+    mp4s = sorted(args.video_dir.glob("*.mp4"))
+    if not mp4s:
+        print(f"[warn] No .mp4 files found in {args.video_dir}")
+        return
+
+    existing = sum(1 for mp4 in mp4s if (args.audio_dir / mp4.with_suffix(".wav").name).exists())
+    todo = [(mp4, args.audio_dir / mp4.with_suffix(".wav").name) for mp4 in mp4s
+            if not (args.audio_dir / mp4.with_suffix(".wav").name).exists()]
+
+    print(f"[extract] {len(mp4s)} videos, {existing} already extracted, {len(todo)} to process")
+
+    if not todo:
+        print("[skip] All audio already extracted.")
+        return
+
+    ok = 0
+    fail = 0
+    with ThreadPoolExecutor(max_workers=args.workers) as pool:
+        futures = {
+            pool.submit(extract_one, mp4, wav, args.sample_rate): mp4.name
+            for mp4, wav in todo
+        }
+        for i, future in enumerate(as_completed(futures), 1):
+            name, success, msg = future.result()
+            if success:
+                ok += 1
+            else:
+                fail += 1
+                print(f"  [fail] {name}: {msg}")
+            if i % 500 == 0:
+                print(f"  Processed {i}/{len(todo)} (ok={ok}, fail={fail})")
+
+    print(f"\n[done] Extracted {ok} new, {fail} failed, {existing} pre-existing")
+    total = sum(1 for _ in args.audio_dir.glob("*.wav"))
+    print(f"   Total .wav on disk: {total}")
+
+
+if __name__ == "__main__":
+    main()
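For reference (not part of the diff): the worker above is a thin wrapper around ffmpeg -y -i in.mp4 -vn -acodec pcm_s16le -ar 16000 -ac 1 out.wav, i.e. drop the video stream and write 16-bit mono PCM at the requested rate. One stdlib-only way to sanity-check an output file; the path below is illustrative:

    import wave

    with wave.open("/opt/dlami/nvme/finevideo/audio/finevideo_000001.wav", "rb") as w:
        assert w.getnchannels() == 1        # -ac 1       -> mono
        assert w.getframerate() == 16000    # -ar 16000   -> 16 kHz
        assert w.getsampwidth() == 2        # pcm_s16le   -> 16-bit samples
        print(f"duration: {w.getnframes() / w.getframerate():.1f}s")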
finevideo/filter_finevideo.py
ADDED
@@ -0,0 +1,431 @@
+#!/usr/bin/env python3
+"""
+Filter FineVideo metadata to select high-quality candidates for temporal grounding training.
+
+Streams or locally scans FineVideo parquet metadata (no video download), applies quality filters,
+and outputs a JSONL of selected video IDs + extracted metadata for downstream use.
+
+Speed: **default is streaming** (no full-dataset download; one sequential pass over ~44k rows).
+Use `--local-cache` only if you accept downloading/materializing the **entire** train split first
+(that step alone can take hours on a slow link); it can speed later **re-scans** if the cache
+is already on disk. `--num-proc` only applies with `--local-cache`.
+
+Checkpoint: by default each passing row is appended to <output_stem>_all_passing.jsonl as soon
+as it passes (flushed). That file holds **every** row that passes filters during the scan — it is
+**not** capped by --target-count until the scan ends (target-count only trims the final output
+after a full pass). Use --early-stop to stop scanning once you have enough passers.
+
+Filters:
+- audioVisualCorrelation >= threshold in at least one scene
+- duration_seconds between min and max
+- >= min_scenes scenes with activities
+
+Usage:
+    python filter_finevideo.py
+    python filter_finevideo.py --min-avc 0.6 --max-duration 240 --target-count 8000
+    python filter_finevideo.py --output /opt/dlami/nvme/finevideo/filtered_candidates.jsonl
+    python filter_finevideo.py --target-count 1000 --early-stop
+    python filter_finevideo.py --sample-from /path/filtered_candidates_all_passing.jsonl
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import os
+import random
+from collections import Counter
+from pathlib import Path
+
+from tqdm import tqdm
+
+# FineVideo dataset card: ~43,751 videos (used as tqdm total for streaming)
+FINEVIDEO_ROW_ESTIMATE = 43_751
+
+DEFAULT_OUTPUT = Path("/opt/dlami/nvme/finevideo/filtered_candidates.jsonl")
+
+
+def passing_checkpoint_path(output: Path) -> Path:
+    """Sidecar JSONL: every row that passes filters (before stratified sampling)."""
+    return output.parent / f"{output.stem}_all_passing.jsonl"
+
+
+def parse_args() -> argparse.Namespace:
+    p = argparse.ArgumentParser(description="Filter FineVideo for temporal grounding training.")
+    p.add_argument("--output", type=Path, default=DEFAULT_OUTPUT)
+    p.add_argument("--min-avc", type=float, default=0.7,
+                   help="Minimum audioVisualCorrelation in at least one scene.")
+    p.add_argument("--min-duration", type=int, default=30)
+    p.add_argument("--max-duration", type=int, default=180)
+    p.add_argument("--min-scenes", type=int, default=4,
+                   help="Minimum number of scenes with activities.")
+    p.add_argument("--target-count", type=int, default=10000,
+                   help="Cap on final JSONL size: after a **full** dataset scan, stratified sample down "
+                        "to this many (0=keep all passing). Does not stop the scan early unless --early-stop.")
+    p.add_argument("--seed", type=int, default=42)
+    p.add_argument("--no-tqdm", action="store_true", help="Disable progress bar.")
+    p.add_argument(
+        "--local-cache",
+        action="store_true",
+        help="Non-streaming load: downloads/materializes the FULL train split into the HF cache first "
+             "(can take many hours), then scans locally. Use only if that full download is acceptable "
+             "or the cache is already populated.",
+    )
+    p.add_argument(
+        "--no-checkpoint",
+        action="store_true",
+        help="Do not write incremental *_all_passing.jsonl (only write final output at end).",
+    )
+    p.add_argument(
+        "--checkpoint-flush-every",
+        type=int,
+        default=1,
+        help="Flush passing-checkpoint file every N rows (1 = durable; larger = faster).",
+    )
+    p.add_argument(
+        "--sample-from",
+        type=Path,
+        default=None,
+        help="Skip HuggingFace scan: load this JSONL, apply stratified sampling, write --output.",
+    )
+    p.add_argument(
+        "--num-proc",
+        type=int,
+        default=1,
+        help="Parallel workers for filter+extract (datasets.map). Only effective with --local-cache; "
+             "streaming stays single-threaded. Use 0 for all logical CPUs.",
+    )
+    p.add_argument(
+        "--early-stop",
+        action="store_true",
+        help="Stop as soon as --target-count rows have passed filters (streaming / single-threaded "
+             "scan only). *_all_passing.jsonl will have at most that many lines; skips full-dataset "
+             "stratified sampling (order follows dataset stream, not global category balance).",
+    )
+    args = p.parse_args()
+    if args.early_stop and args.target_count <= 0:
+        raise SystemExit("[error] --early-stop requires --target-count > 0")
+    return args
+
+
+def passes_filter(meta: dict, args: argparse.Namespace) -> bool:
+    duration = meta.get("duration_seconds", 0)
+    if not (args.min_duration <= duration <= args.max_duration):
+        return False
+
+    cm = meta.get("content_metadata")
+    if not cm or not isinstance(cm, dict):
+        return False
+
+    scenes = cm.get("scenes", [])
+    if not scenes:
+        return False
+
+    scenes_with_activities = sum(
+        1 for s in scenes
+        if s.get("activities") and len(s["activities"]) >= 1
+    )
+    if scenes_with_activities < args.min_scenes:
+        return False
+
+    max_avc = max(
+        (s.get("audioVisualCorrelation", 0.0) for s in scenes),
+        default=0.0,
+    )
+    if max_avc < args.min_avc:
+        return False
+
+    return True
+
+
+def extract_candidate(meta: dict, idx: int) -> dict:
+    """Extract the fields we need downstream from FineVideo metadata."""
+    cm = meta.get("content_metadata", {})
+    scenes = cm.get("scenes", [])
+
+    scene_summaries = []
+    for s in scenes:
+        avc = s.get("audioVisualCorrelation", 0.0)
+        activities = s.get("activities", [])
+        scene_summaries.append({
+            "sceneId": s.get("sceneId"),
+            "title": s.get("title", ""),
+            "start": s.get("timestamps", {}).get("start_timestamp"),
+            "end": s.get("timestamps", {}).get("end_timestamp"),
+            "audioVisualCorrelation": avc,
+            "activities": [
+                {
+                    "description": a.get("description", ""),
+                    "start": a.get("timestamp", {}).get("start_timestamp"),
+                    "end": a.get("timestamp", {}).get("end_timestamp"),
+                }
+                for a in activities
+            ],
+            "mood": s.get("mood", {}).get("description", ""),
+        })
+
+    tts = meta.get("timecoded_text_to_speech", [])
+    tts_summary = []
+    if isinstance(tts, list):
+        for seg in tts[:50]:
+            tts_summary.append({
+                "start": seg.get("start"),
+                "end": seg.get("end"),
+                "text": seg.get("text", ""),
+            })
+
+    return {
+        "finevideo_idx": idx,
+        "duration_seconds": meta.get("duration_seconds", 0),
+        "content_parent_category": meta.get("content_parent_category", ""),
+        "content_fine_category": meta.get("content_fine_category", ""),
+        "youtube_title": meta.get("youtube_title", ""),
+        "title": cm.get("title", ""),
+        "description": cm.get("description", ""),
+        "scenes": scene_summaries,
+        "timecoded_text_to_speech": tts_summary,
+        "qa": cm.get("qAndA", []),
+        "fps": cm.get("fps"),
+    }
+
+
+def _map_row_parallel(
+    example: dict,
+    idx: int,
+    *,
+    min_avc: float,
+    min_duration: int,
+    max_duration: int,
+    min_scenes: int,
+) -> dict:
+    """Top-level for multiprocessing; returns only _cand_jsonl (non-empty if passed)."""
+    ns = argparse.Namespace(
+        min_avc=min_avc,
+        min_duration=min_duration,
+        max_duration=max_duration,
+        min_scenes=min_scenes,
+    )
+    meta = example.get("json")
+    if not meta or not isinstance(meta, dict):
+        return {"_cand_jsonl": ""}
+    if not passes_filter(meta, ns):
+        return {"_cand_jsonl": ""}
+    cand = extract_candidate(meta, idx)
+    return {"_cand_jsonl": json.dumps(cand, ensure_ascii=False)}
+
+
+def _row_has_candidate(row: dict) -> bool:
+    return bool(row.get("_cand_jsonl"))
+
+
+def stratified_sample(candidates: list[dict], target: int, seed: int) -> list[dict]:
+    """Sample up to target candidates, stratified by content_parent_category."""
+    if target <= 0 or len(candidates) <= target:
+        return candidates
+
+    rng = random.Random(seed)
+    by_cat: dict[str, list[dict]] = {}
+    for c in candidates:
+        cat = c.get("content_parent_category", "Other")
+        by_cat.setdefault(cat, []).append(c)
+
+    per_cat = max(1, target // len(by_cat))
+    selected = []
+    remainder = []
+    for cat, items in by_cat.items():
+        rng.shuffle(items)
+        selected.extend(items[:per_cat])
+        remainder.extend(items[per_cat:])
+
+    if len(selected) < target:
+        rng.shuffle(remainder)
+        selected.extend(remainder[: target - len(selected)])
+
+    return selected[:target]
+
+
+def load_candidates_from_jsonl(path: Path) -> list[dict]:
+    rows: list[dict] = []
+    with open(path, encoding="utf-8") as f:
+        for line in f:
+            line = line.strip()
+            if not line:
+                continue
+            rows.append(json.loads(line))
+    return rows
+
+
+def write_final_output(candidates: list[dict], output: Path) -> None:
+    output.parent.mkdir(parents=True, exist_ok=True)
+    with open(output, "w", encoding="utf-8") as f:
+        for c in candidates:
+            f.write(json.dumps(c, ensure_ascii=False) + "\n")
+
+
+def main() -> None:
+    args = parse_args()
+    args.output.parent.mkdir(parents=True, exist_ok=True)
+
+    if args.sample_from is not None:
+        src = args.sample_from.expanduser().resolve()
+        if not src.is_file():
+            raise SystemExit(f"[error] --sample-from not found: {src}")
+        print(f"[sample-from] Loading {src} ...")
+        candidates = load_candidates_from_jsonl(src)
+        print(f"[sample-from] Loaded {len(candidates)} rows.")
+        if args.target_count > 0 and len(candidates) > args.target_count:
+            print(f"[sample] Stratified sampling {args.target_count} from {len(candidates)} ...")
+            candidates = stratified_sample(candidates, args.target_count, args.seed)
+        write_final_output(candidates, args.output)
+        print(f"\n[output] {len(candidates)} candidates written to {args.output}")
+        final_cats = Counter(c["content_parent_category"] for c in candidates)
+        print("  Category distribution:")
+        for cat, count in final_cats.most_common(20):
+            print(f"    {cat:30s}: {count}")
+        return
+
+    from datasets import load_dataset
+
+    ck_path = passing_checkpoint_path(args.output)
+    use_ck = not args.no_checkpoint
+    if use_ck:
+        print(f"[checkpoint] Passing rows -> {ck_path} (flush every {args.checkpoint_flush_every})")
+
+    print(f"[filter] min_avc={args.min_avc}, duration={args.min_duration}-{args.max_duration}s, "
+          f"min_scenes={args.min_scenes}")
+    num_proc = args.num_proc
+    if num_proc == 0:
+        num_proc = max(1, os.cpu_count() or 1)
+
+    if not args.local_cache and num_proc > 1:
+        print("[filter] --num-proc > 1 ignored in streaming mode (parallel map needs --local-cache).")
+        num_proc = 1
+
+    if args.early_stop and args.local_cache and num_proc > 1:
+        raise SystemExit(
+            "[error] --early-stop needs a sequential scan; use default streaming, or "
+            "--local-cache with --num-proc 1 (not parallel map)."
+        )
+
+    if args.local_cache:
+        print("[filter] Mode: local HF cache — full split download/materialization runs FIRST (can be very slow).")
+        ds = load_dataset("HuggingFaceFV/finevideo", split="train", streaming=False)
+        n_est = len(ds)
+    else:
+        print(f"[filter] Mode: streaming (~{FINEVIDEO_ROW_ESTIMATE} rows; network-bound, typical ~20–90+ min).")
+        ds = load_dataset("HuggingFaceFV/finevideo", split="train", streaming=True)
+        n_est = FINEVIDEO_ROW_ESTIMATE
+
+    if args.early_stop:
+        print(f"[filter] --early-stop: will stop after {args.target_count} passing rows (no full scan).")
+
+    candidates: list[dict] = []
+    total = 0
+    passed = 0
+    cat_counter: Counter = Counter()
+    ck_f = None
+    since_flush = 0
+
+    if use_ck:
+        ck_f = open(ck_path, "w", encoding="utf-8")
+
+    try:
+        if args.local_cache and num_proc > 1:
+            # Parallel CPU path: map writes an Arrow cache; checkpoint lines are flushed after map+filter.
+            print(f"[filter] Parallel map num_proc={num_proc} (HF datasets multiprocessing).")
+            if use_ck:
+                print("[checkpoint] Sidecar is written after parallel pass (not row-by-row during map).")
+            fn_kw = {
+                "min_avc": args.min_avc,
+                "min_duration": args.min_duration,
+                "max_duration": args.max_duration,
+                "min_scenes": args.min_scenes,
+            }
+            cols = list(ds.column_names)
+            ds_m = ds.map(
+                _map_row_parallel,
+                with_indices=True,
+                num_proc=num_proc,
+                fn_kwargs=fn_kw,
+                remove_columns=cols,
+                desc="FineVideo filter+extract",
+            )
+            ds_f = ds_m.filter(_row_has_candidate, num_proc=num_proc)
+            total = n_est
+            passed = len(ds_f)
+            it = ds_f
+            if not args.no_tqdm:
+                it = tqdm(ds_f, total=passed, desc="Write passing rows", unit="row", dynamic_ncols=True)
+            for row in it:
+                line = row["_cand_jsonl"]
+                cand = json.loads(line)
+                cat_counter[cand["content_parent_category"]] += 1
+                if args.no_checkpoint:
+                    candidates.append(cand)
+                else:
+                    assert ck_f is not None
+                    ck_f.write(line + "\n")
+                    since_flush += 1
+                    if since_flush >= args.checkpoint_flush_every:
+                        ck_f.flush()
+                        since_flush = 0
+        else:
+            base_iter = ds
+            if not args.no_tqdm:
+                base_iter = tqdm(
+                    ds,
+                    total=n_est,
+                    desc="FineVideo scan",
+                    unit="row",
+                    dynamic_ncols=True,
+                )
+
+            for idx, sample in enumerate(base_iter):
+                total += 1
+                meta = sample.get("json")
+                if not meta or not isinstance(meta, dict):
+                    continue
+
+                if passes_filter(meta, args):
+                    passed += 1
+                    cand = extract_candidate(meta, idx)
+                    if args.no_checkpoint:
+                        candidates.append(cand)
+                    else:
+                        assert ck_f is not None
+                        ck_f.write(json.dumps(cand, ensure_ascii=False) + "\n")
+                        since_flush += 1
+                        if since_flush >= args.checkpoint_flush_every:
+                            ck_f.flush()
+                            since_flush = 0
+                    cat_counter[cand["content_parent_category"]] += 1
+                    if args.early_stop and passed >= args.target_count:
+                        break
+    finally:
+        if ck_f is not None:
+            ck_f.flush()
+            ck_f.close()
+
+    print(f"[filter] Scanned {total} total, {passed} passed filters.")
+
+    if use_ck:
+        print(f"[checkpoint] Closing sidecar ({passed} lines). Loading for sampling ...")
+        candidates = load_candidates_from_jsonl(ck_path)
+
+    if args.target_count > 0 and len(candidates) > args.target_count:
+        print(f"[sample] Stratified sampling {args.target_count} from {len(candidates)} ...")
+        candidates = stratified_sample(candidates, args.target_count, args.seed)
+
+    write_final_output(candidates, args.output)
+
+    print(f"\n[output] {len(candidates)} candidates written to {args.output}")
+    if use_ck:
+        print(f"[checkpoint] Full passing list still at {ck_path} (delete if you only need final subset).")
+    print("  Category distribution:")
+    final_cats = Counter(c["content_parent_category"] for c in candidates)
+    for cat, count in final_cats.most_common(20):
+        print(f"    {cat:30s}: {count}")
+
+
+if __name__ == "__main__":
+    main()
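For intuition, a small worked example of the stratified_sample() allocation above (added here, not part of the diff; it assumes the script directory is importable so filter_finevideo loads as a module, and the categories are made up):

    from filter_finevideo import stratified_sample

    candidates = (
        [{"content_parent_category": "Sports", "finevideo_idx": i} for i in range(3)]
        + [{"content_parent_category": "Music", "finevideo_idx": i} for i in range(3, 5)]
        + [{"content_parent_category": "Education", "finevideo_idx": 5}]
    )
    # target=4 over 3 categories: per_cat = max(1, 4 // 3) = 1, so one item is
    # drawn from each shuffled category first; the last slot is filled from the
    # shuffled remainder pool. Seeded, so the pick is reproducible.
    picked = stratified_sample(candidates, target=4, seed=42)
    assert len(picked) == 4
    assert {c["content_parent_category"] for c in picked} == {"Sports", "Music", "Education"}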
finevideo/generate_finevideo_dpo.py
ADDED
@@ -0,0 +1,307 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Generate DPO preference pairs from FineVideo SFT data.
|
| 4 |
+
|
| 5 |
+
Takes the SFT QA pairs (finevideo_sft.jsonl) and creates DPO training data
|
| 6 |
+
where:
|
| 7 |
+
- chosen = the correct temporal grounding answer (from SFT)
|
| 8 |
+
- rejected = a plausible but WRONG answer generated by one of 4 strategies
|
| 9 |
+
|
| 10 |
+
Rejection strategies (applied round-robin to keep distribution balanced):
|
| 11 |
+
1. TIMESTAMP_SHIFT: Shift all timestamps in the answer by a random offset
|
| 12 |
+
2. EVENT_SWAP: Attribute audio to the wrong visual event (swap scene references)
|
| 13 |
+
3. VIDEO_ONLY: Rewrite to ignore audio entirely (visual-only answer)
|
| 14 |
+
4. FALSE_ALIGNMENT: Claim sync/match when the answer describes a mismatch, or vice versa
|
| 15 |
+
|
| 16 |
+
Strategies 1-2 use rule-based text manipulation.
|
| 17 |
+
Strategies 3-4 use an LLM (GPT/Gemini text-only) to rewrite the answer.
|
| 18 |
+
|
| 19 |
+
Usage:
|
| 20 |
+
python generate_finevideo_dpo.py
|
| 21 |
+
python generate_finevideo_dpo.py --sft-input /opt/dlami/nvme/finevideo/finevideo_sft.jsonl
|
| 22 |
+
python generate_finevideo_dpo.py --strategy all --api-key sk-...
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
from __future__ import annotations
|
| 26 |
+
|
| 27 |
+
import argparse
|
| 28 |
+
import json
|
| 29 |
+
import os
|
| 30 |
+
import random
|
| 31 |
+
import re
|
| 32 |
+
import threading
|
| 33 |
+
from pathlib import Path
|
| 34 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 35 |
+
from typing import Optional
|
| 36 |
+
|
| 37 |
+
DEFAULT_SFT_INPUT = Path("/opt/dlami/nvme/finevideo/finevideo_sft.jsonl")
|
| 38 |
+
DEFAULT_OUTPUT = Path("/opt/dlami/nvme/finevideo/finevideo_dpo.json")
|
| 39 |
+
|
| 40 |
+
STRATEGIES = ["timestamp_shift", "event_swap", "video_only", "false_alignment"]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Generate DPO data from FineVideo SFT.")
    p.add_argument("--sft-input", type=Path, default=DEFAULT_SFT_INPUT)
    p.add_argument("--output", type=Path, default=DEFAULT_OUTPUT)
    p.add_argument("--strategy", choices=STRATEGIES + ["all"], default="all",
                   help="Rejection strategy (default: round-robin all).")
    p.add_argument("--api-key", type=str, default=None,
                   help="API key for LLM-based strategies (video_only, false_alignment). "
                        "Falls back to OPENAI_API_KEY. Without a key, rule-based fallback is used.")
    p.add_argument("--api-base", type=str, default=None,
                   help="Custom base URL for OpenAI-compatible API (e.g. http://14.103.68.46/v1).")
    p.add_argument("--model", type=str, default="gpt-5-mini")
    p.add_argument("--workers", type=int, default=4)
    p.add_argument("--seed", type=int, default=42)
    return p.parse_args()


# ── Strategy 1: Timestamp shift ─────────────────────────────────────────────
def reject_timestamp_shift(answer: str, rng: random.Random) -> str:
    """Shift all timestamps in the answer by a random offset."""
    shift = rng.choice([-15, -10, -8, -5, 5, 8, 10, 15, 20, 25])

    def shift_mmss(m: re.Match) -> str:
        mins = int(m.group(1))
        secs = int(m.group(2))
        total = max(0, mins * 60 + secs + shift)
        new_m, new_s = divmod(total, 60)
        return f"{new_m}:{new_s:02d}"

    def shift_secs(m: re.Match) -> str:
        val = float(m.group(1))
        new_val = max(0.0, val + shift)
        if new_val == int(new_val):
            return f"{int(new_val)}s"
        return f"{new_val:.1f}s"

    result = re.sub(r"(\d+):(\d{2})", shift_mmss, answer)
    result = re.sub(r"(\d+\.?\d*)\s*s(?:econds?)?", shift_secs, result)

    if result == answer:
        result = f"Around the {abs(shift)}-second mark, " + answer[0].lower() + answer[1:]

    return result

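# Sketch of the transformation above (hypothetical input string, assuming the
# RNG picked shift = +10): both mm:ss and plain-seconds timestamps move together.
#   "The crash happens at 1:05, about 65s in."
#     -> "The crash happens at 1:15, about 75s in."
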
# ── Strategy 2: Event swap ──────────────────────────────────────────────────
def reject_event_swap(answer: str, rng: random.Random) -> str:
    """Swap audio and visual event descriptions to create a misalignment."""
    audio_kws = ["hear", "sound", "audio", "noise", "listen", "clap", "music",
                 "speak", "voice", "bang", "crash", "thud"]
    visual_kws = ["see", "show", "visual", "appear", "watch", "camera",
                  "scene", "display", "view", "look"]

    sentences = re.split(r'(?<=[.!?])\s+', answer)
    if len(sentences) < 2:
        return answer.replace("audio", "visual").replace("hear", "see") + \
            " The timing does not align with the actual events."

    audio_sents = [s for s in sentences if any(k in s.lower() for k in audio_kws)]
    visual_sents = [s for s in sentences if any(k in s.lower() for k in visual_kws)]

    # Guard against a sentence matching BOTH keyword lists: swapping it with
    # itself would return the chosen answer unchanged (a useless DPO pair).
    if audio_sents and visual_sents and audio_sents[0] != visual_sents[0]:
        swapped = list(sentences)
        ai = sentences.index(audio_sents[0])
        vi = sentences.index(visual_sents[0])
        swapped[ai], swapped[vi] = swapped[vi], swapped[ai]
        return " ".join(swapped)

    rng.shuffle(sentences)
    return " ".join(sentences)

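# Sketch (hypothetical two-sentence answer): the first audio-bearing and first
# visual-bearing sentences trade places, so the audio is narrated against the
# wrong visual moment:
#   "You hear a loud bang at 0:10. The camera shows the door at 0:40."
#     -> "The camera shows the door at 0:40. You hear a loud bang at 0:10."
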
# ── Strategy 3: Video-only (LLM rewrite) ────────────────────────────────────
VIDEO_ONLY_PROMPT = """\
Rewrite the following answer so that it ONLY describes what is VISIBLE in the \
video, completely removing all audio references. Replace any mentions of sounds, \
music, speech, or hearing with visual-only descriptions. Keep timestamps but \
change audio events to generic visual descriptions.

Original answer: {answer}

Return ONLY the rewritten answer, nothing else."""


def reject_video_only_llm(answer: str, api_key: str, model: str, base_url: str | None = None) -> Optional[str]:
    from openai import OpenAI
    client = OpenAI(api_key=api_key, **({"base_url": base_url} if base_url else {}))
    resp = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": VIDEO_ONLY_PROMPT.format(answer=answer)}],
        temperature=0.7,
        max_tokens=512,
    )
    return resp.choices[0].message.content.strip()


def reject_video_only_rule(answer: str, rng: random.Random) -> str:
    """Fallback rule-based: strip audio references."""
    replacements = [
        (r"\bhear\b", "see"), (r"\bheard\b", "seen"), (r"\bhearing\b", "seeing"),
        (r"\bsound\b", "movement"), (r"\bsounds\b", "movements"),
        (r"\bnoise\b", "motion"), (r"\baudio\b", "visual"),
        (r"\blistening\b", "watching"), (r"\blisten\b", "watch"),
        (r"\bclapping\b", "gesturing"), (r"\bmusic\b", "activity"),
        (r"\bspeech\b", "gestures"), (r"\bvoice\b", "figure"),
    ]
    result = answer
    for pat, repl in replacements:
        result = re.sub(pat, repl, result, flags=re.IGNORECASE)
    return result

# ── Strategy 4: False alignment (LLM rewrite) ───────────────────────────────
FALSE_ALIGN_PROMPT = """\
The following answer describes audio-visual alignment in a video. \
Rewrite it to give the OPPOSITE conclusion about alignment:
- If it says events are aligned/synced, rewrite to say they are misaligned.
- If it says events don't match, rewrite to say they match perfectly.
- Keep the same timestamps and event descriptions but flip the alignment judgment.

Original answer: {answer}

Return ONLY the rewritten answer, nothing else."""


def reject_false_alignment_llm(answer: str, api_key: str, model: str, base_url: str | None = None) -> Optional[str]:
    from openai import OpenAI
    client = OpenAI(api_key=api_key, **({"base_url": base_url} if base_url else {}))
    resp = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": FALSE_ALIGN_PROMPT.format(answer=answer)}],
        temperature=0.7,
        max_tokens=512,
    )
    return resp.choices[0].message.content.strip()


def reject_false_alignment_rule(answer: str, rng: random.Random) -> str:
    """Fallback: negate alignment language."""
    negations = [
        ("matches", "does not match"), ("confirms", "contradicts"),
        ("aligned", "misaligned"), ("synchronized", "out of sync"),
        ("corresponds", "does not correspond"), ("coincides", "does not coincide"),
        ("consistent", "inconsistent"),
    ]
    result = answer
    applied = False
    for pos, neg in negations:
        # Match whole words only, so e.g. "aligned" does not fire inside
        # "misaligned" (which would produce "mismisaligned").
        pat = r"\b" + re.escape(pos) + r"\b"
        if re.search(pat, result, flags=re.IGNORECASE):
            result = re.sub(pat, neg, result, count=1, flags=re.IGNORECASE)
            applied = True
            break
    if not applied:
        result = "The audio and visual events do not align here. " + result
    return result

def generate_rejection(
    answer: str,
    strategy: str,
    rng: random.Random,
    api_key: Optional[str],
    model: str,
    base_url: str | None = None,
) -> tuple[str, str]:
    """Generate a rejected answer. Returns (rejected_text, strategy_used)."""
    if strategy == "timestamp_shift":
        return reject_timestamp_shift(answer, rng), strategy
    elif strategy == "event_swap":
        return reject_event_swap(answer, rng), strategy
    elif strategy == "video_only":
        if api_key:
            try:
                r = reject_video_only_llm(answer, api_key, model, base_url)
                if r:
                    return r, strategy
            except Exception:
                pass
        return reject_video_only_rule(answer, rng), strategy + "_rule"
    elif strategy == "false_alignment":
        if api_key:
            try:
                r = reject_false_alignment_llm(answer, api_key, model, base_url)
                if r:
                    return r, strategy
            except Exception:
                pass
        return reject_false_alignment_rule(answer, rng), strategy + "_rule"
    else:
        return reject_timestamp_shift(answer, rng), "timestamp_shift"

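# Quick smoke test of the dispatcher (hypothetical answer text; the exact
# shifted timestamp depends on the RNG seed, and with api_key=None the two
# LLM strategies fall back to their rule-based variants, tagged "<strategy>_rule"):
#   rejected, used = generate_rejection("A bell rings at 0:30.", "video_only",
#                                       random.Random(0), None, "gpt-5-mini")
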
_write_lock = threading.Lock()


def main() -> None:
    args = parse_args()
    args.output.parent.mkdir(parents=True, exist_ok=True)

    api_key = args.api_key or os.environ.get("OPENAI_API_KEY")

    sft_entries = []
    with open(args.sft_input, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                sft_entries.append(json.loads(line))

    print(f"[dpo] Loaded {len(sft_entries)} SFT entries from {args.sft_input}")

    if args.strategy == "all":
        strategies = STRATEGIES
    else:
        strategies = [args.strategy]

    rng = random.Random(args.seed)
    dpo_entries = []
    errors = 0
    strat_dist: dict[str, int] = {}

    for i, entry in enumerate(sft_entries):
        msgs = entry.get("messages", [])
        if len(msgs) < 2:
            errors += 1
            continue

        question = msgs[0].get("content", "")
        chosen_answer = msgs[1].get("content", "")
        if not chosen_answer:
            errors += 1
            continue

        strategy = strategies[i % len(strategies)]
        rejected_answer, strat_used = generate_rejection(
            chosen_answer, strategy, rng, api_key, args.model, args.api_base,
        )
        # Count the strategy that was actually applied (including "_rule"
        # fallbacks). Re-deriving it from entry order after the loop is wrong
        # whenever entries were skipped, since that shifts the mapping.
        strat_dist[strat_used] = strat_dist.get(strat_used, 0) + 1

        dpo_entry = {
            "messages": [{"role": "user", "content": question}],
            "chosen": {"role": "assistant", "content": chosen_answer},
            "rejected": {"role": "assistant", "content": rejected_answer},
            "videos": entry.get("videos", []),
            "audios": entry.get("audios", []),
        }
        dpo_entries.append(dpo_entry)

        if (i + 1) % 1000 == 0:
            print(f"  [{i+1}/{len(sft_entries)}] generated {len(dpo_entries)} DPO pairs")

    print(f"[dpo] Generated {len(dpo_entries)} DPO pairs ({errors} skipped)")

    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(dpo_entries, f, indent=2, ensure_ascii=False)

    print(f"[output] {args.output} ({len(dpo_entries)} entries)")
    print(f"  Strategy distribution: {strat_dist}")


if __name__ == "__main__":
    main()

finevideo/generate_finevideo_sft.py
ADDED
@@ -0,0 +1,606 @@
#!/usr/bin/env python3
"""
Generate SFT training data for dense temporal grounding from FineVideo.

Gemini watches + listens to each video and generates 5 QA pairs
(1 DESCRIBE + 2 LOCATE + 2 ATTRIBUTE). Within each type, Gemini varies the
phrasing naturally — no two questions are asked in the same way.

QA types:
  DESCRIBE:  "Describe what you see and hear segment by segment"
             (Gemini enriches the simple metadata descriptions into detailed
             audio-visual narratives with timestamps)
  LOCATE:    "At what point / When do you hear [sound]?"
             (sound identified by Gemini from actually listening to the video)
  ATTRIBUTE: "What is making the [sound] at [timestamp]?"
             (connect an audio event to its visual source)

Input:  filtered_candidates.jsonl + downloaded .mp4 files
Output: finevideo_sft.jsonl (LlamaFactory SFT format, append-only, resume-safe)

Usage:
    export GEMINI_API_KEY=...
    python generate_finevideo_sft.py
    python generate_finevideo_sft.py --max-samples 500 --workers 4
    python generate_finevideo_sft.py --model gemini-2.5-pro
    python generate_finevideo_sft.py --api-key KEY1,KEY2,KEY3
    python generate_finevideo_sft.py --api-key KEY1 --api-key KEY2 --api-key KEY3

Multi-key: pass comma-separated keys or repeat --api-key. When a key hits its rate
limit (429/ResourceExhausted), the script rotates to the next key. When ALL keys are
exhausted it stops gracefully (resume-safe: re-run with new keys to continue).
"""

from __future__ import annotations

import argparse
import json
import os
import random
import re
import threading
import time
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed

DEFAULT_CANDIDATES = Path("/opt/dlami/nvme/finevideo/filtered_candidates.jsonl")
DEFAULT_VIDEO_DIR = Path("/opt/dlami/nvme/finevideo/videos")
DEFAULT_AUDIO_DIR = Path("/opt/dlami/nvme/finevideo/audio")
DEFAULT_OUTPUT = Path("/opt/dlami/nvme/finevideo/finevideo_sft.jsonl")

SYSTEM_PROMPT = """\
You are an expert multimodal annotator. You will watch and listen to a video, \
then generate training data for an AI model that must learn to understand \
BOTH audio and visual content together.

You must actually perceive the video — describe real sounds you hear (speech, \
music, impacts, ambient noise, machinery, nature, etc.) and real visual events \
you see. Be specific about timestamps. Never fabricate content.
"""

QA_GENERATION_PROMPT = """\
Watch and LISTEN to this video carefully. Pay attention to ALL sounds — not just \
speech, but also environmental sounds, music, impacts, machinery, animals, etc.

Reference metadata (scene boundaries and basic descriptions):
{metadata_text}

This video has {n_scenes} distinct scenes with activities. Your job: generate \
EXACTLY {n_total} question-answer pairs as a JSON array, using VARIED and NATURAL \
phrasing — do NOT repeat the same question template across pairs.

Breakdown:
- 1 DESCRIBE pair (covers the full video)
- {n_locate} LOCATE pairs (each about a DIFFERENT sound at a DIFFERENT time)
- {n_attribute} ATTRIBUTE pairs (each about a DIFFERENT sound source at a DIFFERENT time)

=== TYPE 1: DESCRIBE (1 pair) ===
Ask the viewer to describe what they see AND hear across the video (or a portion). \
The answer should be a RICH, DETAILED segment-by-segment narration with timestamps, \
covering both visual actions and audio content (speech, sounds, music).

Go far beyond the simple metadata descriptions — be specific about:
- What exact sounds are present (not just "someone talks", but what they say, \
their tone, accent, background sounds)
- Visual details the metadata didn't capture
- How audio and visual elements interact moment by moment

Example question phrasings (vary these, never repeat):
- "Walk me through what happens in this video, describing both what you see and hear."
- "Give a detailed audio-visual breakdown of this clip."
- "Describe the sequence of events, including all sounds and visuals, with timestamps."

=== TYPE 2: LOCATE ({n_locate} pairs) ===
Ask WHEN a specific sound or audio event occurs. You must pick a real sound you \
actually hear in the video. The answer gives the timestamp and describes what is \
happening visually at that moment.

IMPORTANT: each LOCATE pair must be about a DIFFERENT audio event from a DIFFERENT \
part of the video. Spread them across the timeline.

Example question phrasings (vary these):
- "At what point in the video do you hear [specific sound]?"
- "When does the [sound/voice/music] first appear?"
- "Can you identify the moment when [audio event] happens?"
- "Around what timestamp do you notice [sound]?"

=== TYPE 3: ATTRIBUTE ({n_attribute} pairs) ===
Ask what visual element is PRODUCING a specific sound at a specific time. \
The answer identifies the sound source by connecting audio to visual evidence.

IMPORTANT: each ATTRIBUTE pair must be about a DIFFERENT sound source at a DIFFERENT \
timestamp. Cover different scenes.

Example question phrasings (vary these):
- "What is causing the [sound] heard around [time]?"
- "At [time], there's a [sound] — what in the scene is responsible?"
- "Can you identify the source of the [sound] at [time]?"
- "What object or person is making the [sound] near [time]?"

=== OUTPUT FORMAT ===
Return ONLY a valid JSON array, nothing else. Example for {n_total} pairs:
[
  {{"type": "DESCRIBE", "question": "...", "answer": "..."}},
  {{"type": "LOCATE", "question": "...", "answer": "..."}},
  ...
  {{"type": "ATTRIBUTE", "question": "...", "answer": "..."}}
]

Rules:
- Answers must be DETAILED (3-8 sentences for DESCRIBE, 2-4 for LOCATE/ATTRIBUTE)
- Use approximate timestamps naturally ("around 0:45", "at roughly 1:20")
- Describe REAL audio you hear — never guess or copy metadata blindly
- Every answer must reference both what is SEEN and what is HEARD
- Do NOT duplicate events — each pair covers a DISTINCT moment/sound
"""

def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Generate FineVideo SFT data via Gemini.")
    p.add_argument("--candidates", type=Path, default=DEFAULT_CANDIDATES)
    p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
    p.add_argument("--audio-dir", type=Path, default=DEFAULT_AUDIO_DIR)
    p.add_argument("--output", type=Path, default=DEFAULT_OUTPUT)
    p.add_argument("--max-samples", type=int, default=500)
    p.add_argument("--workers", type=int, default=4)
    p.add_argument("--api-key", type=str, default=None, action="append",
                   help="API key(s). Repeat for multiple keys, or pass comma-separated. "
                        "Falls back to GEMINI_API_KEY (or OPENROUTER_API_KEY with --openrouter).")
    p.add_argument("--model", type=str, default="gemini-3.1-pro-preview")
    p.add_argument("--seed", type=int, default=42)
    p.add_argument("--retry", type=int, default=3,
                   help="Retries per video on transient errors (default 3).")
    p.add_argument("--skip-errors-from", type=Path, default=None,
                   help="Path to a *_errors.jsonl from a previous run; skip those finevideo_idx.")
    p.add_argument(
        "--openrouter",
        action="store_true",
        help="Use OpenRouter (OpenAI-compatible) instead of the native Gemini API. "
             "Video is sent as a base64 data URL. Model names like google/gemini-2.5-pro-preview.",
    )
    p.add_argument(
        "--api-base",
        type=str,
        default="https://openrouter.ai/api/v1",
        help="Base URL for OpenAI-compatible API (used with --openrouter).",
    )
    return p.parse_args()


def build_metadata_text(candidate: dict) -> str:
    lines = [
        f"Title: {candidate.get('title') or candidate.get('youtube_title', '')}",
        f"Duration: {candidate.get('duration_seconds', '?')}s",
        f"Category: {candidate.get('content_parent_category', '')} / {candidate.get('content_fine_category', '')}",
        "",
    ]

    for s in candidate.get("scenes", []):
        start = s.get("start", "?")
        end = s.get("end", "?")
        lines.append(f"[{start} - {end}] Scene: {s.get('title', '')}")
        for a in s.get("activities", []):
            lines.append(f"  Activity: {a.get('description', '')} "
                         f"[{a.get('start', '?')} - {a.get('end', '?')}]")

    tts = candidate.get("timecoded_text_to_speech", [])
    if tts:
        lines.append("")
        lines.append("Speech transcript:")
        for seg in tts[:20]:
            lines.append(f"  [{seg.get('start', '?')} - {seg.get('end', '?')}] "
                         f"{seg.get('text', '')}")
        if len(tts) > 20:
            lines.append(f"  ... ({len(tts) - 20} more)")

    return "\n".join(lines)


def parse_qa_response(text: str) -> list[dict]:
    for pat in [
        re.compile(r"```(?:json)?\s*(\[.*?\])\s*```", re.DOTALL),
        re.compile(r"(\[\s*\{.*\}\s*\])", re.DOTALL),
    ]:
        m = pat.search(text)
        if m:
            try:
                arr = json.loads(m.group(1))
                if isinstance(arr, list) and all(
                    isinstance(x, dict) and "question" in x and "answer" in x
                    for x in arr
                ):
                    return arr
            except (json.JSONDecodeError, TypeError):
                continue
    return []

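# parse_qa_response accepts either a ```json fenced block or a bare JSON array.
# Minimal input it would parse (hypothetical model output):
#   '[{"type": "LOCATE", "question": "When do you hear the bell?",
#      "answer": "Around 0:12, as the door opens."}]'
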
def call_gemini(video_path: str, prompt: str, api_key: str, model: str) -> str:
    from google import genai
    from google.genai import types as genai_types

    client = genai.Client(api_key=api_key)

    # Upload via the Files API and poll until Gemini finishes server-side
    # processing before referencing the file in the request.
    video_file = client.files.upload(file=video_path)
    while video_file.state.name == "PROCESSING":
        time.sleep(2)
        video_file = client.files.get(name=video_file.name)
    if video_file.state.name == "FAILED":
        raise RuntimeError(f"Gemini file processing failed: {video_file.state}")

    response = client.models.generate_content(
        model=model,
        contents=[
            genai_types.Content(
                role="user",
                parts=[
                    genai_types.Part.from_uri(
                        file_uri=video_file.uri,
                        mime_type=video_file.mime_type,
                    ),
                    genai_types.Part.from_text(text=prompt),
                ],
            )
        ],
        config=genai_types.GenerateContentConfig(
            system_instruction=SYSTEM_PROMPT,
            temperature=0.5,
            max_output_tokens=4000,
        ),
    )
    # Best-effort cleanup of the uploaded file; failures here are non-fatal.
    try:
        client.files.delete(name=video_file.name)
    except Exception:
        pass
    return response.text


def call_openrouter(
    video_path: str, prompt: str, api_key: str, model: str, api_base: str,
) -> str:
    import base64
    from openai import OpenAI

    with open(video_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    data_url = f"data:video/mp4;base64,{b64}"

    client = OpenAI(api_key=api_key, base_url=api_base)
    resp = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {
                "role": "user",
                "content": [
                    {"type": "video_url", "video_url": {"url": data_url}},
                    {"type": "text", "text": prompt},
                ],
            },
        ],
        temperature=0.5,
        max_tokens=4000,
    )
    return resp.choices[0].message.content


def process_one(
    candidate: dict,
    video_dir: Path,
    audio_dir: Path,
    key_rotator: KeyRotator,
    model: str,
    max_retry: int,
    use_openrouter: bool = False,
    api_base: str = "",
) -> tuple[list[dict] | None, str]:
    """Returns (entries_or_None, error_reason). error_reason is '' on success."""
    idx = candidate["finevideo_idx"]
    video_path = video_dir / f"finevideo_{idx:06d}.mp4"
    audio_path = audio_dir / f"finevideo_{idx:06d}.wav"

    if not video_path.exists():
        return None, f"video_not_found: {video_path}"

    scenes = candidate.get("scenes", [])
    n_scenes = sum(1 for s in scenes if s.get("activities"))
    n_locate = 2
    n_attribute = 2
    n_total = 1 + n_locate + n_attribute

    metadata_text = build_metadata_text(candidate)
    prompt = QA_GENERATION_PROMPT.format(
        metadata_text=metadata_text,
        n_scenes=n_scenes,
        n_locate=n_locate,
        n_attribute=n_attribute,
        n_total=n_total,
    )

    qa_pairs = []
    last_error = ""
    for attempt in range(1 + max_retry):
        api_key = key_rotator.get()  # raises AllKeysExhausted when no key is usable
        try:
            if use_openrouter:
                raw = call_openrouter(str(video_path), prompt, api_key, model, api_base)
            else:
                raw = call_gemini(str(video_path), prompt, api_key, model)
            qa_pairs = parse_qa_response(raw)
            if qa_pairs:
                break
            last_error = f"parse_fail: model returned text but no valid QA JSON (len={len(raw)})"
            if attempt < max_retry:
                time.sleep(2)
        except AllKeysExhausted:
            raise
        except Exception as e:
            last_error = f"{type(e).__name__}: {e}"
            if KeyRotator.is_rate_limit(e):
                key_rotator.mark_exhausted(api_key)
                if key_rotator.alive == 0:
                    raise AllKeysExhausted("All API keys exhausted (rate-limited).")
                continue  # retry immediately with the next key
            if attempt < max_retry:
                time.sleep(3 * (attempt + 1))
            else:
                print(f"  [error] idx={idx}: {last_error}")
                return None, last_error

    if not qa_pairs:
        print(f"  [parse_fail] idx={idx}: {last_error}")
        return None, last_error

    results = []
    for qa in qa_pairs:
        entry = {
            "messages": [
                {"role": "user", "content": f"<video><audio>{qa['question']}"},
                {"role": "assistant", "content": qa["answer"]},
            ],
            "videos": [str(video_path)],
            "audios": [str(audio_path)] if audio_path.exists() else [],
            "_meta": {
                "finevideo_idx": idx,
                "qa_type": qa.get("type", "?"),
                "category": candidate.get("content_parent_category", ""),
            },
        }
        results.append(entry)

    return results, ""

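# Each JSONL line written from `results` looks like this (values illustrative):
#   {"messages": [{"role": "user", "content": "<video><audio>When do you hear the bell?"},
#                 {"role": "assistant", "content": "Around 0:12, as the door opens."}],
#    "videos": ["/opt/dlami/nvme/finevideo/videos/finevideo_000123.mp4"],
#    "audios": ["/opt/dlami/nvme/finevideo/audio/finevideo_000123.wav"],
#    "_meta": {"finevideo_idx": 123, "qa_type": "LOCATE", "category": "..."}}
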
_write_lock = threading.Lock()


class AllKeysExhausted(Exception):
    """Raised when every API key has hit its rate limit."""


class KeyRotator:
    """Thread-safe round-robin over API keys; marks keys exhausted on 429."""

    def __init__(self, keys: list[str]):
        self._keys = list(keys)
        self._exhausted: set[int] = set()
        self._idx = 0
        self._lock = threading.Lock()

    @property
    def total(self) -> int:
        return len(self._keys)

    @property
    def alive(self) -> int:
        with self._lock:
            return len(self._keys) - len(self._exhausted)

    def get(self) -> str:
        with self._lock:
            if len(self._exhausted) >= len(self._keys):
                raise AllKeysExhausted("All API keys exhausted (rate-limited).")
            for _ in range(len(self._keys)):
                i = self._idx % len(self._keys)
                self._idx += 1
                if i not in self._exhausted:
                    return self._keys[i]
            raise AllKeysExhausted("All API keys exhausted (rate-limited).")

    def mark_exhausted(self, key: str) -> None:
        with self._lock:
            for i, k in enumerate(self._keys):
                if k == key:
                    if i not in self._exhausted:
                        self._exhausted.add(i)
                        remaining = len(self._keys) - len(self._exhausted)
                        masked = key[:6] + "..." + key[-4:]
                        print(f"  [key-exhausted] {masked} rate-limited; {remaining} key(s) remaining.")
                    break

    @staticmethod
    def is_rate_limit(exc: Exception) -> bool:
        msg = str(exc).lower()
        if "429" in msg or "resource_exhausted" in msg or "resourceexhausted" in msg:
            return True
        if "quota" in msg and ("exceeded" in msg or "limit" in msg):
            return True
        return False

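# Sketch of the rotation behaviour (hypothetical keys):
#   rot = KeyRotator(["keyA", "keyB"])
#   rot.get()                  -> "keyA"
#   rot.get()                  -> "keyB"
#   rot.mark_exhausted("keyA")
#   rot.get()                  -> "keyB"   (keyA is skipped from now on)
#   rot.mark_exhausted("keyB")
#   rot.get()                  -> raises AllKeysExhausted
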
def _error_log_path(output: Path) -> Path:
    return output.parent / f"{output.stem}_errors.jsonl"


def append_results(output: Path, entries: list[dict]) -> None:
    with _write_lock:
        with open(output, "a", encoding="utf-8") as f:
            for e in entries:
                f.write(json.dumps(e, ensure_ascii=False) + "\n")


def append_error(error_log: Path, idx: int, reason: str, detail: str) -> None:
    with _write_lock:
        with open(error_log, "a", encoding="utf-8") as f:
            f.write(json.dumps({
                "finevideo_idx": idx,
                "reason": reason,
                "detail": detail[:500],
            }, ensure_ascii=False) + "\n")


def main() -> None:
    args = parse_args()
    args.output.parent.mkdir(parents=True, exist_ok=True)

    # Collect API keys: --api-key can be repeated and/or comma-separated.
    raw_keys: list[str] = []
    if args.api_key:
        for k in args.api_key:
            raw_keys.extend(part.strip() for part in k.split(",") if part.strip())
    if not raw_keys:
        env_var = "OPENROUTER_API_KEY" if args.openrouter else "GEMINI_API_KEY"
        env_key = os.environ.get(env_var, "").strip()
        if env_key:
            raw_keys.extend(part.strip() for part in env_key.split(",") if part.strip())
    if not raw_keys:
        hint = "--api-key or $OPENROUTER_API_KEY" if args.openrouter else "--api-key or $GEMINI_API_KEY"
        print(f"[error] No API key. Set {hint}")
        raise SystemExit(1)
    provider = "OpenRouter" if args.openrouter else "Gemini"
    print(f"[keys] {len(raw_keys)} API key(s) loaded ({provider}).")
    key_rotator = KeyRotator(raw_keys)

    candidates = []
    with open(args.candidates, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                candidates.append(json.loads(line))

    # Only process candidates whose video is actually on disk (i.e. the subset
    # chosen by download_finevideo_subset.py's stratified sampling).
    before = len(candidates)
    candidates = [
        c for c in candidates
        if (args.video_dir / f"finevideo_{c['finevideo_idx']:06d}.mp4").exists()
    ]
    print(f"[candidates] {before} in file, {len(candidates)} have downloaded mp4 -> processing all of them.")
    if args.max_samples > 0 and len(candidates) > args.max_samples:
        rng = random.Random(args.seed)
        rng.shuffle(candidates)
        candidates = candidates[:args.max_samples]
        print(f"[candidates] Capped to --max-samples {args.max_samples}")

    processed_ids: set[int] = set()
    if args.output.exists():
        with open(args.output, encoding="utf-8") as f:
            for line in f:
                try:
                    fid = json.loads(line).get("_meta", {}).get("finevideo_idx")
                    if fid is not None:
                        processed_ids.add(fid)
                except json.JSONDecodeError:
                    pass
        print(f"[resume] {len(processed_ids)} videos already in output (will skip)")

    skip_ids: set[int] = set()
    if args.skip_errors_from and args.skip_errors_from.exists():
        with open(args.skip_errors_from, encoding="utf-8") as f:
            for line in f:
                try:
                    fid = json.loads(line).get("finevideo_idx")
                    if fid is not None:
                        skip_ids.add(fid)
                except json.JSONDecodeError:
                    pass
        print(f"[skip-errors] {len(skip_ids)} videos marked as known failures (will skip)")

    todo = [c for c in candidates
            if c["finevideo_idx"] not in processed_ids
            and c["finevideo_idx"] not in skip_ids]
    print(f"[sft] {len(todo)} videos to process "
          f"(model={args.model}, workers={args.workers})")

    if not todo:
        print("[skip] All candidates already processed.")
        return

    total_qa = 0
    errors = 0
    all_exhausted = False
    error_log = _error_log_path(args.output)

    with ThreadPoolExecutor(max_workers=args.workers) as pool:
        futures = {
            pool.submit(
                process_one, c, args.video_dir, args.audio_dir,
                key_rotator, args.model, args.retry,
                args.openrouter, args.api_base,
            ): c["finevideo_idx"]
            for c in todo
        }
        for i, future in enumerate(as_completed(futures), 1):
            fid = futures[future]
            try:
                entries, err_reason = future.result()
                if entries:
                    append_results(args.output, entries)
                    total_qa += len(entries)
                else:
                    errors += 1
                    append_error(error_log, fid, err_reason, "")
            except AllKeysExhausted:
                all_exhausted = True
                print(f"\n[stop] All {key_rotator.total} API key(s) hit rate limit. "
                      "Stopping gracefully (progress is saved).")
                pool.shutdown(wait=False, cancel_futures=True)
                break
            except Exception as exc:
                errors += 1
                print(f"  [exception] idx={fid}: {exc}")
                append_error(error_log, fid, "exception", str(exc))

            if i % 20 == 0:
                print(f"  [{i}/{len(todo)}] {total_qa} QA pairs, {errors} errors, "
                      f"keys alive: {key_rotator.alive}/{key_rotator.total}")

    total_lines = sum(1 for _ in open(args.output)) if args.output.exists() else 0
    print(f"\n[done] {total_qa} new QA pairs ({errors} errors)")
    print(f"  Total entries in file: {total_lines}")

    type_counts: dict[str, int] = {}
    if args.output.exists():
        with open(args.output) as f:
            for line in f:
                try:
                    qt = json.loads(line).get("_meta", {}).get("qa_type", "?")
                    type_counts[qt] = type_counts.get(qt, 0) + 1
                except json.JSONDecodeError:
                    pass
        print(f"  Type distribution: {type_counts}")

    if errors > 0 and error_log.exists():
        reason_counts: dict[str, int] = {}
        with open(error_log) as f:
            for line in f:
                try:
                    r = json.loads(line).get("reason", "unknown")
                    tag = r.split(":")[0].strip()
                    reason_counts[tag] = reason_counts.get(tag, 0) + 1
                except (json.JSONDecodeError, AttributeError):
                    pass
        print(f"  Error breakdown: {reason_counts}")
        print(f"  Full error log: {error_log}")
        print(f"  To skip known failures next run: --skip-errors-from {error_log}")

    if all_exhausted:
        print("\n[hint] Re-run the SAME command with new/additional --api-key to continue "
              "(resume picks up where it left off).")


if __name__ == "__main__":
    main()

hf_upload/COMMANDS.md
ADDED
@@ -0,0 +1,168 @@
# Commands (you run these yourself)

Assume you cloned the repo and ran `bash setup_env.sh` (installs Anaconda under `~/anaconda3` if conda is missing, then creates the env). On first login after install, run `source ~/anaconda3/etc/profile.d/conda.sh` before `conda activate`.

```bash
conda activate video   # or whatever CONDA_ENV you used

export REPO_ROOT="$(pwd)"   # top of CleverHans-Evaluation clone
export SCRIPTS="${REPO_ROOT}/scripts"
export SYNC_TEST="${REPO_ROOT}/data/kto_training_data_v2_test.jsonl"

# Layout (fixed across your machines):
#   Data (videos, merged weights, sync media) → fast disk
#   Eval JSONL / metrics / summaries          → ubuntu home
export WORK_ROOT="${WORK_ROOT:-/opt/dlami/nvme}"
export EVAL_ROOT="${EVAL_ROOT:-/home/ubuntu/eval_results}"

export VIDEOMME_DIR="${WORK_ROOT}/videomme"
export VIDEOMME_VIDEOS="${WORK_ROOT}/videomme/data/data"
export LVBENCH_VIDEOS="${WORK_ROOT}/lvbench"
export MERGED_DIR="${WORK_ROOT}/merged_models"
export DATA_ROOT="${WORK_ROOT}/video_source"

# vLLM: Qwen3-Omni audio encoder has 20 heads — use a tp that divides 20 (e.g. 4, not 8).
export TP="${TP:-4}"
export GPUS="${GPUS:-0,1,2,3}"
```

## 1) Download all data (once per machine)

```bash
bash setup_data.sh
# Downloads Video-MME, LVBench, sync videos + audio to /opt/dlami/nvme.
# Or override: WORK_ROOT=/my/disk bash setup_data.sh
```

Or download individually:

```bash
python "${SCRIPTS}/download_videomme.py" --output-dir "${VIDEOMME_DIR}"
python "${SCRIPTS}/download_lvbench.py" --output-dir "${LVBENCH_VIDEOS}"
```

## 2) Merge DPO LoRA → full model

Base for merge:

```bash
export BASE_SFT="Rakancorle11/qwen3omni_full_sft_revised_thinker_key"
```

```bash
mkdir -p "${MERGED_DIR}"

python "${SCRIPTS}/merge_adapter.py" \
  --base-model "${BASE_SFT}" \
  --adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v2_8632 \
  --output "${MERGED_DIR}/dpo_v2_8632"

python "${SCRIPTS}/merge_adapter.py" \
  --base-model "${BASE_SFT}" \
  --adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v3_8632 \
  --output "${MERGED_DIR}/dpo_v3_8632"

python "${SCRIPTS}/merge_adapter.py" \
  --base-model "${BASE_SFT}" \
  --adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v4_8632 \
  --output "${MERGED_DIR}/dpo_v4_8632"

python "${SCRIPTS}/merge_adapter.py" \
  --base-model "${BASE_SFT}" \
  --adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v5_12075 \
  --output "${MERGED_DIR}/dpo_v5_12075"
```

## 3) Video-MME — pick model + label

**vLLM (fast)** — `--base-model` must be a **merged** full checkpoint path or a full model id:

```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
  --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct \
  --video-dir "${VIDEOMME_VIDEOS}" \
  --output-dir "${EVAL_ROOT}/videomme" \
  --vllm --tp "${TP}" \
  --max-samples -1 --label vmme_vanilla
```

```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
  --base-model "${BASE_SFT}" \
  --video-dir "${VIDEOMME_VIDEOS}" \
  --output-dir "${EVAL_ROOT}/videomme" \
  --vllm --tp "${TP}" \
  --max-samples -1 --label vmme_full_sft
```

```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
  --base-model "${MERGED_DIR}/dpo_v4_8632" \
  --video-dir "${VIDEOMME_VIDEOS}" \
  --output-dir "${EVAL_ROOT}/videomme" \
  --vllm --tp "${TP}" \
  --max-samples -1 --label vmme_dpo_v4_8632
```

**Transformers only** (no `--vllm`):

```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_videomme.py" \
  --base-model "${BASE_SFT}" \
  --adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v4_8632 \
  --video-dir "${VIDEOMME_VIDEOS}" \
  --output-dir "${EVAL_ROOT}/videomme" \
  --max-samples -1 --label vmme_dpo_v4_adapter
```

## 4) LVBench — same pattern

```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_lvbench.py" \
  --base-model "${MERGED_DIR}/dpo_v4_8632" \
  --video-dir "${LVBENCH_VIDEOS}" \
  --output-dir "${EVAL_ROOT}/lvbench" \
  --vllm --tp "${TP}" \
  --max-samples -1 --label lvb_dpo_v4_8632
```

## 5) In-domain sync — transformers (`--data-root` + `--test-jsonl`)

```bash
CUDA_VISIBLE_DEVICES="${GPUS}" python "${SCRIPTS}/eval_dpo_sync.py" \
  --data-root "${DATA_ROOT}" \
  --base-model "${BASE_SFT}" \
  --adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v4_8632 \
  --test-jsonl "${SYNC_TEST}" \
  --output-dir "${EVAL_ROOT}/sync" \
  --label sync_dpo_v4_8632
```

Omit `--video-dir` / `--output-dir` on Video-MME & LVBench if you keep the same layout (the scripts default to the nvme videos + `/home/ubuntu/eval_results/...`). Omit `--test-jsonl` if you copied the test file to `${DATA_ROOT}/kto_training_data_v2_test.jsonl`; omit `--output-dir` on sync to use `/home/ubuntu/eval_results/sync`.

Optional GPT judge for parsing:

```bash
export OPENAI_API_KEY=sk-...
python "${SCRIPTS}/eval_dpo_sync.py" \
  --data-root "${DATA_ROOT}" \
  --base-model "${BASE_SFT}" \
  --test-jsonl "${SYNC_TEST}" \
  --output-dir "${EVAL_ROOT}/sync" \
  --label sync_full_sft \
  --gpt-judge
```

## 6) Recompute Video-MME metrics from `eval_results.jsonl`

```bash
python "${SCRIPTS}/compute_videomme_metrics_from_jsonl.py" \
  --jsonl "${EVAL_ROOT}/videomme/vmme_vanilla/eval_results.jsonl" \
  --out "${EVAL_ROOT}/videomme/vmme_vanilla/metrics.json"
```

Results for each run live under:

- `${EVAL_ROOT}/videomme/<label>/`
- `${EVAL_ROOT}/lvbench/<label>/`
- `${EVAL_ROOT}/sync/<label>/`

hf_upload/README.md
ADDED
@@ -0,0 +1,80 @@
---
license: apache-2.0
language:
- en
tags:
- evaluation
- video
- multimodal
---

# CleverHans-Evaluation

Scripts and a small in-domain test set to evaluate Qwen3-Omni variants on **Video-MME**, **LVBench**, and **audio–video sync** (custom JSONL).

## What’s in the repo

| Path | Purpose |
|------|---------|
| `setup_env.sh` | Installs **Anaconda** if conda is missing, then creates `video` (or `CONDA_ENV`) and pip-installs eval deps |
| `setup_data.sh` | Downloads **all** eval data: Video-MME, LVBench, and sync eval videos + audio (to `/opt/dlami/nvme`) |
| `COMMANDS.md` | Copy-paste commands: data download, merge, eval per model/dataset |
| `data/kto_training_data_v2_test.jsonl` | In-domain sync test (426 lines) |
| `scripts/*.py` | Download, merge, eval, metrics helpers |

## Quick start

```bash
git clone https://huggingface.co/Rakancorle11/CleverHans-Evaluation
cd CleverHans-Evaluation

huggingface-cli login   # if needed for gated models

chmod +x setup_env.sh setup_data.sh
bash setup_env.sh                           # on a machine with no conda: downloads Anaconda to ~/anaconda3 first
source ~/anaconda3/etc/profile.d/conda.sh   # if this is your first shell after install
conda activate video

bash setup_data.sh   # downloads Video-MME, LVBench, sync videos + audio to /opt/dlami/nvme

# Then follow COMMANDS.md — you choose which model on which benchmark.
```

**Fresh OS notes:** install `wget` before running (`sudo apt install -y wget`). System **ffmpeg** is recommended (`sudo apt install -y ffmpeg`). Override `INSTALL_DIR` / `ANACONDA_VERSION` / `CUDA_INDEX_URL` via environment variables if needed (see comments in `setup_env.sh`).

## Models (HF IDs)

| Role | Model |
|------|--------|
| Vanilla | `Qwen/Qwen3-Omni-30B-A3B-Instruct` |
| Full SFT (merge / eval base) | `Rakancorle11/qwen3omni_full_sft_revised_thinker_key` |
| DPO LoRA | `Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v2_8632`, `_v3_8632`, `_v4_8632`, `_v5_12075` |

Merge LoRA into a full checkpoint for **vLLM** with `scripts/merge_adapter.py`. For **transformers-only** Video-MME/LVBench you can pass `--adapter` instead of merging; a minimal merge invocation is sketched below.

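A minimal example (the model IDs and output path here mirror the v4 merge in `COMMANDS.md`; adjust to your layout):

```bash
python scripts/merge_adapter.py \
  --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
  --adapter Rakancorle11/Qwen3Omni-onpolicy-dpo-lora-w_audio_v4_8632 \
  --output /opt/dlami/nvme/merged_models/dpo_v4_8632
```
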
## Data

- **Video-MME / LVBench / Sync eval data**: all downloaded by `bash setup_data.sh`.
- **Sync eval media** (original oops videos, random-shift videos, extracted audio): pulled from `hasnat79/ual_bench`, `Rakancorle11/random_shift_video`, `Rakancorle11/extracted_audio` into `/opt/dlami/nvme/video_source/`.

## Default paths (convention)

Scripts assume a fixed split on every machine:

| What | Where |
|------|--------|
| Benchmark videos, merged full models, sync `video_source/` (original + shifted + audio) | `/opt/dlami/nvme/...` |
| Eval outputs (`eval_results.jsonl`, `metrics.json`, …) | `/home/ubuntu/eval_results/videomme`, `.../lvbench`, `.../sync` |

Override with `--video-dir`, `--output-dir`, `--data-root` if your layout differs.

## Requirements

- Strong GPU(s), ~200GB+ disk for benchmarks + merged weights
- vLLM: `--tp` must divide **20** (audio encoder heads); e.g. `--tp 4`, not 8
- `setup_env.sh` uses CUDA 12.4 PyTorch wheels by default; override `CUDA_INDEX_URL` if needed

## Notes

- Eval scripts **resume** from existing `eval_results.jsonl`.
- In-domain sync: use `--data-root` so paths are not tied to `/home/ubuntu/video_source`.

hf_upload/data/kto_training_data_v2_test.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
hf_upload/scripts/compute_videomme_metrics_from_jsonl.py
ADDED
@@ -0,0 +1,177 @@
#!/usr/bin/env python3
"""
Recompute Video-MME metrics from eval_results.jsonl (same schema as eval_videomme.py).

Example:
    python compute_videomme_metrics_from_jsonl.py \\
        --jsonl /home/ubuntu/video_source/eval_results/videomme/vmme_vanilla/eval_results.jsonl \\
        --out /home/ubuntu/video_source/eval_results/videomme/vmme_vanilla/metrics.json
"""

from __future__ import annotations

import argparse
import json
from pathlib import Path
from typing import Any, Dict, List

VIDEO_TYPES = ["short", "medium", "long"]
CATEGORIES = [
    "Knowledge",
    "Film & Television",
    "Sports Competition",
    "Artistic Performance",
    "Life Record",
    "Multilingual",
]
TASK_CATEGORIES = [
    "Temporal Perception",
    "Spatial Perception",
    "Attribute Perception",
    "Action Recognition",
    "Object Recognition",
    "OCR Problems",
    "Counting Problem",
    "Temporal Reasoning",
    "Spatial Reasoning",
    "Action Reasoning",
    "Object Reasoning",
    "Information Synopsis",
]


def load_results(path: Path) -> List[Dict[str, Any]]:
    rows = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            rows.append(json.loads(line))
    return rows


def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    total = len(results)
    if total == 0:
        return {}

    correct = sum(
        1 for r in results if str(r.get("pred_answer", "")).upper() == str(r["gt_answer"]).upper()
    )
    overall_acc = correct / total

    def acc_for(items: List[Dict[str, Any]]) -> float | None:
        if not items:
            return None
        c = sum(
            1
            for r in items
            if str(r.get("pred_answer", "")).upper() == str(r["gt_answer"]).upper()
        )
        return round(c / len(items), 4)

    per_duration: Dict[str, Any] = {}
    for vt in VIDEO_TYPES:
        subset = [r for r in results if r.get("duration") == vt]
        if subset:
            per_duration[vt] = {"accuracy": acc_for(subset), "count": len(subset)}

    per_domain: Dict[str, Any] = {}
    for cat in CATEGORIES:
        subset = [r for r in results if r.get("domain") == cat]
        if subset:
            per_domain[cat] = {"accuracy": acc_for(subset), "count": len(subset)}

    per_task: Dict[str, Any] = {}
    for task in TASK_CATEGORIES:
        subset = [r for r in results if r.get("task_type") == task]
        if subset:
            per_task[task] = {"accuracy": acc_for(subset), "count": len(subset)}

    return {
        "total_samples": total,
        "overall_accuracy": round(overall_acc, 4),
        "per_duration": per_duration,
        "per_domain": per_domain,
        "per_task_type": per_task,
    }

def print_summary(metrics: Dict[str, Any], label: str) -> None:
|
| 102 |
+
print()
|
| 103 |
+
print("=" * 65)
|
| 104 |
+
print(f" Video-MME Summary: {label}")
|
| 105 |
+
print("=" * 65)
|
| 106 |
+
print(f" Total samples: {metrics['total_samples']}")
|
| 107 |
+
print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")
|
| 108 |
+
print(" --- Per Duration ---")
|
| 109 |
+
for vt in VIDEO_TYPES:
|
| 110 |
+
if vt in metrics["per_duration"]:
|
| 111 |
+
d = metrics["per_duration"][vt]
|
| 112 |
+
print(f" {vt:8s}: {d['accuracy']:.1%} ({d['count']} questions)")
|
| 113 |
+
print(" --- Per Domain ---")
|
| 114 |
+
for cat in CATEGORIES:
|
| 115 |
+
if cat in metrics["per_domain"]:
|
| 116 |
+
d = metrics["per_domain"][cat]
|
| 117 |
+
print(f" {cat:25s}: {d['accuracy']:.1%} ({d['count']})")
|
| 118 |
+
print(" --- Per Task Type ---")
|
| 119 |
+
for task in TASK_CATEGORIES:
|
| 120 |
+
if task in metrics["per_task_type"]:
|
| 121 |
+
d = metrics["per_task_type"][task]
|
| 122 |
+
print(f" {task:25s}: {d['accuracy']:.1%} ({d['count']})")
|
| 123 |
+
print("=" * 65)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def main() -> None:
|
| 127 |
+
p = argparse.ArgumentParser()
|
| 128 |
+
p.add_argument(
|
| 129 |
+
"--jsonl",
|
| 130 |
+
type=Path,
|
| 131 |
+
default=Path("/home/ubuntu/eval_results/videomme/vmme_vanilla/eval_results.jsonl"),
|
| 132 |
+
)
|
| 133 |
+
p.add_argument(
|
| 134 |
+
"--out",
|
| 135 |
+
type=Path,
|
| 136 |
+
default=None,
|
| 137 |
+
help="Write metrics JSON here (default: <jsonl_dir>/metrics.json)",
|
| 138 |
+
)
|
| 139 |
+
p.add_argument("--label", type=str, default="from_jsonl")
|
| 140 |
+
p.add_argument("--base-model", type=str, default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
|
| 141 |
+
p.add_argument("--adapter", type=str, default=None)
|
| 142 |
+
p.add_argument(
|
| 143 |
+
"--video-dir",
|
| 144 |
+
type=str,
|
| 145 |
+
default="/opt/dlami/nvme/videomme/data/data",
|
| 146 |
+
)
|
| 147 |
+
p.add_argument("--max-new-tokens", type=int, default=32)
|
| 148 |
+
p.add_argument("--temperature", type=float, default=0.0)
|
| 149 |
+
args = p.parse_args()
|
| 150 |
+
|
| 151 |
+
out_path = args.out or (args.jsonl.parent / "metrics.json")
|
| 152 |
+
|
| 153 |
+
results = load_results(args.jsonl)
|
| 154 |
+
metrics = compute_metrics(results)
|
| 155 |
+
if not metrics:
|
| 156 |
+
print("[warn] No rows in jsonl.")
|
| 157 |
+
return
|
| 158 |
+
|
| 159 |
+
metrics["eval_config"] = {
|
| 160 |
+
"base_model": args.base_model,
|
| 161 |
+
"adapter": args.adapter,
|
| 162 |
+
"video_dir": args.video_dir,
|
| 163 |
+
"max_new_tokens": args.max_new_tokens,
|
| 164 |
+
"temperature": args.temperature,
|
| 165 |
+
"source_jsonl": str(args.jsonl.resolve()),
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
out_path.parent.mkdir(parents=True, exist_ok=True)
|
| 169 |
+
with open(out_path, "w", encoding="utf-8") as f:
|
| 170 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 171 |
+
|
| 172 |
+
print_summary(metrics, args.label)
|
| 173 |
+
print(f"\n[written] {out_path}")
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
if __name__ == "__main__":
|
| 177 |
+
main()
|
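The script above only consumes eval_results.jsonl and emits metrics.json. As a quick sanity check, here is a minimal sketch of reading that file back downstream; the key names are exactly those returned by compute_metrics() above, while the relative path is illustrative only:

# Minimal sketch (not part of the script): load a written metrics.json and
# print the headline numbers. Keys mirror compute_metrics()'s return dict.
import json
from pathlib import Path

metrics = json.loads(Path("metrics.json").read_text(encoding="utf-8"))
print(f"overall: {metrics['overall_accuracy']:.1%} over {metrics['total_samples']} questions")
for duration, d in metrics["per_duration"].items():
    print(f"  {duration}: {d['accuracy']:.1%} ({d['count']})")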
hf_upload/scripts/download_lvbench.py
ADDED
@@ -0,0 +1,62 @@
#!/usr/bin/env python3
"""
Download LVBench videos from Hugging Face (lmms-lab/LVBench).

Downloads chunked zip files and extracts them.

Usage:
    python download_lvbench.py
    python download_lvbench.py --output-dir /path/to/lvbench
"""

from __future__ import annotations

import argparse
import zipfile
from pathlib import Path

from huggingface_hub import hf_hub_download

REPO_ID = "lmms-lab/LVBench"
DEFAULT_OUTPUT = Path("/opt/dlami/nvme/lvbench")

VIDEO_ZIPS = [f"video_chunks/videos_chunk_{i:03d}.zip" for i in range(1, 15)]


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Download LVBench from Hugging Face.")
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT)
    return p.parse_args()


def download_and_extract(repo_file: str, extract_to: Path, label: str) -> None:
    extract_to.mkdir(parents=True, exist_ok=True)
    print(f" Downloading {label}: {repo_file} ...")
    local_zip = hf_hub_download(
        repo_id=REPO_ID, filename=repo_file, repo_type="dataset",
    )
    print(f" Extracting to {extract_to} ...")
    with zipfile.ZipFile(local_zip, "r") as zf:
        zf.extractall(extract_to)
    print(f" Done: {repo_file}")


def main() -> None:
    args = parse_args()
    out_dir = args.output_dir

    existing = sum(1 for f in out_dir.iterdir() if f.suffix == ".mp4") if out_dir.exists() else 0
    if existing >= 100:
        print(f"[skip] Already have {existing} videos in {out_dir}")
    else:
        print(f"Downloading {len(VIDEO_ZIPS)} video chunks...")
        for i, zf in enumerate(VIDEO_ZIPS, 1):
            download_and_extract(zf, out_dir, f"chunk {i}/{len(VIDEO_ZIPS)}")

    video_count = sum(1 for f in out_dir.iterdir() if f.suffix == ".mp4") if out_dir.exists() else 0
    print(f"\nDone. Total videos on disk: {video_count}")
    print(f" Videos: {out_dir}")


if __name__ == "__main__":
    main()
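Note that both the skip check and the final count above use out_dir.iterdir(), which only sees top-level files. A hedged recursive variant follows, under the assumption (not verified here) that some chunk zips may unpack into subdirectories, in which case the top-level count would read 0 and re-trigger downloads:

# Hedged sketch: recursive video count that also catches nested layouts.
from pathlib import Path

def count_videos(out_dir: Path) -> int:
    # rglob walks subdirectories, unlike iterdir() used in main() above.
    if not out_dir.exists():
        return 0
    return sum(1 for f in out_dir.rglob("*.mp4") if f.is_file())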
hf_upload/scripts/download_videomme.py
ADDED
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
"""
Download Video-MME videos from Hugging Face (lmms-lab/Video-MME).

Downloads chunked zip files and extracts them.

Usage:
    python download_videomme.py
    python download_videomme.py --output-dir /path/to/videomme
"""

from __future__ import annotations

import argparse
import zipfile
from pathlib import Path

from huggingface_hub import hf_hub_download

REPO_ID = "lmms-lab/Video-MME"
DEFAULT_OUTPUT = Path("/opt/dlami/nvme/videomme")

VIDEO_ZIPS = [f"videos_chunked_{i:02d}.zip" for i in range(1, 21)]
SUBTITLE_ZIP = "subtitle.zip"


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Download Video-MME from Hugging Face.")
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT)
    p.add_argument("--skip-subtitles", action="store_true")
    return p.parse_args()


def download_and_extract(repo_file: str, extract_to: Path, label: str) -> None:
    extract_to.mkdir(parents=True, exist_ok=True)
    print(f" Downloading {label}: {repo_file} ...")
    local_zip = hf_hub_download(
        repo_id=REPO_ID, filename=repo_file, repo_type="dataset",
    )
    print(f" Extracting to {extract_to} ...")
    with zipfile.ZipFile(local_zip, "r") as zf:
        zf.extractall(extract_to)
    print(f" Done: {repo_file}")


def main() -> None:
    args = parse_args()
    data_dir = args.output_dir / "data"
    sub_dir = args.output_dir / "subtitle"

    existing = sum(1 for f in data_dir.iterdir() if f.suffix in (".mp4", ".MP4", ".mkv")) if data_dir.exists() else 0
    if existing >= 900:
        print(f"[skip] Already have {existing} videos in {data_dir}")
    else:
        print(f"[1/2] Downloading {len(VIDEO_ZIPS)} video chunks...")
        for i, zf in enumerate(VIDEO_ZIPS, 1):
            download_and_extract(zf, data_dir, f"chunk {i}/{len(VIDEO_ZIPS)}")

    if args.skip_subtitles:
        print("[skip] Subtitle download skipped (--skip-subtitles)")
    else:
        existing_subs = sum(1 for f in sub_dir.iterdir() if f.suffix == ".srt") if sub_dir.exists() else 0
        if existing_subs >= 800:
            print(f"[skip] Already have {existing_subs} subtitles in {sub_dir}")
        else:
            print("[2/2] Downloading subtitles...")
            download_and_extract(SUBTITLE_ZIP, sub_dir, "subtitles")

    video_count = sum(1 for f in data_dir.iterdir() if f.suffix in (".mp4", ".MP4", ".mkv")) if data_dir.exists() else 0
    print(f"\nDone. Total videos on disk: {video_count}")
    print(f" Videos: {data_dir}")
    print(f" Subtitles: {sub_dir}")


if __name__ == "__main__":
    main()
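Downstream consumers will typically need to pair an extracted video with its subtitle. A small sketch of doing that by filename stem; the <videoID>.srt naming is an assumption about the extracted layout, not something this script checks:

# Hedged sketch: match data/<videoID>.mp4 with subtitle/<videoID>.srt,
# assuming subtitle files share the video's stem (unverified here).
from pathlib import Path
from typing import Optional

def subtitle_for(video: Path, sub_dir: Path) -> Optional[Path]:
    candidate = sub_dir / (video.stem + ".srt")
    return candidate if candidate.exists() else None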
hf_upload/scripts/eval_dpo_sync.py
ADDED
@@ -0,0 +1,841 @@
#!/usr/bin/env python3
"""
Automated evaluation script for audio-video sync DPO models.

Workflow:
1. Load test set (kto_training_data_v2_test.jsonl)
2. Parse ground truth from video filenames (synced / delay Xs / early Xs)
3. Run model inference with a structured JSON prompt
4. Extract predictions: JSON parse -> GPT-5.4 judge (optional) -> regex fallback
5. Compute metrics: Sync/Desync Accuracy, Direction Accuracy, Offset MAE
6. Output a summary table + per-sample JSONL for analysis

Usage:
    python eval_dpo_sync.py \
        --base-model Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo \
        --adapter /opt/dlami/nvme/LlamaFactory/saves/Qwen3-Omni-Instruct/dpo/qwen3omni_dpo_lora_with_audio_v4_data_8632

    # With GPT-5.4 judge fallback:
    python eval_dpo_sync.py \
        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
        --adapter /opt/dlami/nvme/.../adapter \
        --gpt-judge --openai-api-key sk-xxx

    # Evaluate base model without adapter (SFT baseline):
    python eval_dpo_sync.py \
        --base-model Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo

    # Evaluate vanilla Qwen3-Omni (no SFT, no DPO):
    python eval_dpo_sync.py \
        --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct
"""

from __future__ import annotations

import argparse
import gc
import json
import os
import re
import time
from pathlib import Path
from statistics import mean, median
from typing import Any, Dict, List, Optional, Tuple

import torch
from tqdm import tqdm

# ---------------------------------------------------------------------------
# GPT-5.4 judge helper
# ---------------------------------------------------------------------------
_openai_client = None

GPT_JUDGE_SYSTEM = """\
You are a structured-output extractor. The user will give you a model's free-text \
response about audio-video synchronization. Extract the following fields and return \
ONLY valid JSON (no markdown, no explanation):

{"synced": <bool>, "direction": "none"|"delay"|"early", "offset_sec": <float>, "t_v": <float or null>, "t_a": <float or null>, "explanation": "<one sentence>"}

Rules:
- synced: true if the model says audio and video are synchronized, false otherwise.
- direction: "delay" means audio comes AFTER the visual event; "early" means audio \
comes BEFORE the visual event; "none" if synced is true.
- offset_sec: estimated time gap in seconds. 0.0 if synced.
- t_v: the timestamp (in seconds) the model attributes to the VISUAL event. null if not mentioned.
- t_a: the timestamp (in seconds) the model attributes to the AUDIO event. null if not mentioned.
- If you cannot determine a field, use the default (true / "none" / 0.0 / null / "").
"""


def _get_openai_client(api_key: Optional[str] = None):
    global _openai_client
    if _openai_client is not None:
        return _openai_client
    key = api_key or os.environ.get("OPENAI_API_KEY")
    if not key:
        return None
    from openai import OpenAI
    _openai_client = OpenAI(api_key=key)
    return _openai_client


def gpt_extract_prediction(
    raw_output: str,
    api_key: Optional[str] = None,
    model: str = "gpt-5.4",
) -> Optional[Dict[str, Any]]:
    """Call GPT to parse raw model output into structured prediction."""
    client = _get_openai_client(api_key)
    if client is None:
        return None
    try:
        resp = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": GPT_JUDGE_SYSTEM},
                {"role": "user", "content": raw_output},
            ],
            temperature=0.0,
            max_completion_tokens=200,
        )
        text = resp.choices[0].message.content.strip()
        for pat in [
            re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
            re.compile(r"(\{.*?\})", re.DOTALL),
        ]:
            m = pat.search(text)
            if m:
                obj = json.loads(m.group(1))
                synced = obj.get("synced")
                if isinstance(synced, str):
                    synced = synced.lower() in ("true", "yes", "1")
                direction = str(obj.get("direction", "none")).lower().strip()
                if direction not in ("delay", "early", "none"):
                    direction = "none"
                t_v_raw = obj.get("t_v")
                t_a_raw = obj.get("t_a")
                pred_t_v = float(t_v_raw) if t_v_raw is not None else None
                pred_t_a = float(t_a_raw) if t_a_raw is not None else None
                return {
                    "pred_synced": bool(synced),
                    "pred_direction": direction,
                    "pred_offset_sec": float(obj.get("offset_sec", 0.0)),
                    "pred_t_v": pred_t_v,
                    "pred_t_a": pred_t_a,
                    "pred_explanation": str(obj.get("explanation", "")),
                    "parse_method": "gpt_judge",
                }
    except Exception as exc:
        print(f" [gpt-judge] API error: {exc}", flush=True)
    return None

DATA_ROOT = Path("/opt/dlami/nvme/video_source")
ORIGINAL_ROOT = DATA_ROOT / "original"
AUDIO_ROOT = DATA_ROOT / "extracted_audio" / "original"


def set_data_root(root: Path) -> None:
    """Point path resolution at a new server layout (original/, random_shift_video/, extracted_audio/)."""
    global DATA_ROOT, ORIGINAL_ROOT, AUDIO_ROOT
    DATA_ROOT = root.resolve()
    ORIGINAL_ROOT = DATA_ROOT / "original"
    AUDIO_ROOT = DATA_ROOT / "extracted_audio" / "original"

EVAL_PROMPT = """\
Watch this video and listen to its audio carefully. \
Determine whether the audio and video tracks are synchronized. \
If they are not synchronized, identify the direction of the offset \
(audio delayed or audio early relative to video) and estimate the offset in seconds. \
Explain your reasoning."""


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Evaluate DPO sync model on test set.")
    p.add_argument("--base-model", type=str,
                   default="Rakancorle11/qwen3omni_full_sft_with_audio_for_dpo")
    p.add_argument("--adapter", type=str, default=None,
                   help="Path to LoRA adapter. If omitted, evaluates base model only.")
    p.add_argument(
        "--data-root",
        type=Path,
        default=Path("/opt/dlami/nvme/video_source"),
        help="Directory containing original/, random_shift_video/, extracted_audio/ (video+audio for sync eval).",
    )
    p.add_argument(
        "--test-jsonl",
        type=Path,
        default=None,
        help="Test JSONL (default: <data-root>/kto_training_data_v2_test.jsonl).",
    )
    p.add_argument(
        "--output-dir",
        type=Path,
        default=None,
        help="Eval output root (default: /home/ubuntu/eval_results/sync).",
    )
    p.add_argument("--max-samples", type=int, default=-1,
                   help="Limit number of test samples (-1 = all).")
    p.add_argument("--max-new-tokens", type=int, default=256)
    p.add_argument("--temperature", type=float, default=0.0)
    p.add_argument("--batch-size", type=int, default=1)
    p.add_argument("--label", type=str, default=None,
                   help="Label for this eval run (used in output filenames).")
    p.add_argument("--gpt-judge", action="store_true", default=False,
                   help="Use GPT-5.4 as fallback parser when JSON extraction fails.")
    p.add_argument("--openai-api-key", type=str, default=None,
                   help="OpenAI API key. Falls back to OPENAI_API_KEY env var.")
    p.add_argument("--gpt-model", type=str, default="gpt-5.4",
                   help="OpenAI model to use for GPT judge (default: gpt-5.4).")
    return p.parse_args()


def parse_ground_truth(video_field: str) -> Dict[str, Any]:
    """Extract ground truth sync label from video filename."""
    m_delay = re.search(r"_delay_([\d.]+)s\.mp4", video_field)
    m_early = re.search(r"_early_([\d.]+)s\.mp4", video_field)
    if m_delay:
        return {"synced": False, "direction": "delay", "offset_sec": float(m_delay.group(1))}
    elif m_early:
        return {"synced": False, "direction": "early", "offset_sec": float(m_early.group(1))}
    else:
        return {"synced": True, "direction": "none", "offset_sec": 0.0}


def resolve_video_path(video_field: str) -> str:
    if os.path.isabs(video_field) and os.path.exists(video_field):
        return video_field
    candidate_dirs = [
        ORIGINAL_ROOT / "uag_oops",
        DATA_ROOT / "random_shift_video" / "delay",
        DATA_ROOT / "random_shift_video" / "early",
        ORIGINAL_ROOT,
    ]
    for d in candidate_dirs:
        c = d / video_field
        if c.exists():
            return str(c)
    return str(ORIGINAL_ROOT / "uag_oops" / video_field)


def resolve_audio_path(video_path: str) -> str:
    video_p = Path(video_path)
    try:
        rel = video_p.relative_to(DATA_ROOT)
    except ValueError:
        rel = Path(video_p.name)
    audio_path = DATA_ROOT / "extracted_audio" / rel.with_suffix(".wav")
    if audio_path.exists():
        return str(audio_path)
    base_stem = re.sub(r"_(delay|early)_[\d.]+s$", "", video_p.stem)
    fallback = DATA_ROOT / "extracted_audio" / "original" / "uag_oops" / (base_stem + ".wav")
    if fallback.exists():
        return str(fallback)
    return str(audio_path)


def extract_timestamps(text: str) -> Tuple[Optional[float], Optional[float]]:
    """Extract (t_visual, t_audio) from text using 'at/around/about Xs' pattern.

    If >= 2 timestamps found, first is t_v, second is t_a.
    If exactly 1 timestamp found (common for synced samples), treat as both t_v and t_a.
    """
    text_lower = text.lower()
    all_times = [(m.start(), float(m.group(1)))
                 for m in re.finditer(r"(?:at|around|about)\s+([\d]+\.?\d*)\s*s", text_lower)]
    if len(all_times) >= 2:
        return (all_times[0][1], all_times[1][1])
    if len(all_times) == 1:
        return (all_times[0][1], all_times[0][1])
    return (None, None)


def load_test_data(path: Path, max_samples: int) -> List[Dict[str, Any]]:
    data = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            obj = json.loads(line)
            video_path = resolve_video_path(obj["video"])
            audio_path = resolve_audio_path(video_path)
            gt = parse_ground_truth(obj["video"])
            gt_t_v, gt_t_a = extract_timestamps(obj.get("chosen", ""))
            data.append({
                "video": obj["video"],
                "video_path": video_path,
                "audio_path": audio_path,
                "prompt": obj["prompt"],
                "chosen": obj["chosen"],
                "rejected": obj["rejected"],
                "gt_synced": gt["synced"],
                "gt_direction": gt["direction"],
                "gt_offset_sec": gt["offset_sec"],
                "gt_t_v": gt_t_v,
                "gt_t_a": gt_t_a,
            })
    if max_samples > 0:
        data = data[:max_samples]
    return data


def extract_prediction(text: str) -> Dict[str, Any]:
    """Extract structured prediction from model output text."""
    text = text.strip()

    # Try JSON extraction
    for pattern in [
        re.compile(r"```(?:json)?\s*(\{.*?\})\s*```", re.DOTALL),
        re.compile(r"(\{[^{}]*\"synced\"[^{}]*\})", re.DOTALL),
        re.compile(r"(\{.*?\})", re.DOTALL),
    ]:
        m = pattern.search(text)
        if m:
            try:
                obj = json.loads(m.group(1))
                synced = obj.get("synced")
                if isinstance(synced, str):
                    synced = synced.lower() in ("true", "yes", "1")
                direction = str(obj.get("direction", "none")).lower().strip()
                if direction not in ("delay", "early", "none"):
                    direction = "none"
                offset = float(obj.get("offset_sec", 0.0))
                explanation = str(obj.get("explanation", ""))
                t_v_raw = obj.get("t_v")
                t_a_raw = obj.get("t_a")
                return {
                    "pred_synced": bool(synced),
                    "pred_direction": direction,
                    "pred_offset_sec": offset,
                    "pred_t_v": float(t_v_raw) if t_v_raw is not None else None,
                    "pred_t_a": float(t_a_raw) if t_a_raw is not None else None,
                    "pred_explanation": explanation,
                    "parse_method": "json",
                }
            except (json.JSONDecodeError, ValueError, TypeError):
                continue

    # Fallback: structured extraction from free text
    text_lower = text.lower()
    synced = None
    direction = "none"
    offset = 0.0

    pred_t_v, pred_t_a = extract_timestamps(text)

    # Step 1: Determine sync vs desync
    desync_kws = [
        "not synchronized", "not aligned", "desync", "mismatch", "misalign",
        "not in sync", "out of sync", "clearly not", "not well aligned",
        "are not aligned", "audio and visual event are clearly not",
    ]
    sync_kws = [
        "synchronized", "well aligned", "well-aligned", "in sync",
        "appear synchronized", "appears synchronized", "closely aligned",
        "audio and video are aligned", "matches closely",
    ]
    if any(kw in text_lower for kw in desync_kws):
        synced = False
    elif any(kw in text_lower for kw in sync_kws):
        synced = True

    if synced is False:
        # Step 2a: Try explicit direction keywords
        delay_kws = ["audio delayed", "audio lags", "audio comes after", "sound comes after",
                     "sound is heard later", "audio is delayed", "sound follows"]
        early_kws = ["audio early", "audio leads", "audio comes before", "sound comes before",
                     "audio precedes", "sound is heard before", "sound precedes", "audio is early"]
        if any(kw in text_lower for kw in delay_kws):
            direction = "delay"
        elif any(kw in text_lower for kw in early_kws):
            direction = "early"

        # Step 2b: If no explicit keyword, use extracted timestamps to infer direction
        if direction == "none" and pred_t_v is not None and pred_t_a is not None and pred_t_v != pred_t_a:
            if pred_t_a > pred_t_v:
                direction = "delay"
            else:
                direction = "early"
            offset = abs(pred_t_a - pred_t_v)

        # Step 2c: Extract offset from gap/separation mentions if not yet found
        if offset == 0.0:
            offset_match = re.search(
                r"(?:gap|separation|offset|mismatch|differ\w*)\s*(?:of\s+)?(?:about\s+|roughly\s+|approximately\s+)?"
                r"([\d]+\.?\d*)\s*s",
                text_lower,
            )
            if not offset_match:
                offset_match = re.search(
                    r"(?:about\s+|roughly\s+|approximately\s+)?([\d]+\.?\d*)\s*s\s*"
                    r"(?:gap|separation|offset|mismatch|differ)",
                    text_lower,
                )
            if offset_match:
                offset = float(offset_match.group(1))

    if synced is None:
        synced = True

    return {
        "pred_synced": synced,
        "pred_direction": direction,
        "pred_offset_sec": offset,
        "pred_t_v": pred_t_v,
        "pred_t_a": pred_t_a,
        "pred_explanation": "",
        "parse_method": "regex_fallback",
    }


def load_model(base_model: str, adapter: Optional[str]):
    import json as _json
    from transformers import (
        AutoConfig,
        AutoProcessor,
        Qwen3OmniMoeForConditionalGeneration,
        Qwen3OmniMoeThinkerConfig,
        Qwen3OmniMoeThinkerForConditionalGeneration,
    )

    print(f"[load] Base model: {base_model}")
    processor = AutoProcessor.from_pretrained(base_model, trust_remote_code=True)

    model_path = Path(base_model)
    cfg_path = model_path / "config.json" if model_path.exists() else None

    model_type = None
    if cfg_path and cfg_path.exists():
        with open(cfg_path, "r") as f:
            model_type = _json.load(f).get("model_type")

    if not model_type:
        try:
            from huggingface_hub import hf_hub_download
            cached = hf_hub_download(base_model, "config.json")
            with open(cached, "r") as f:
                model_type = _json.load(f).get("model_type")
        except Exception:
            pass

    print(f"[load] Detected model_type: {model_type}")

    if model_type == "qwen3_omni_moe_thinker":
        config = Qwen3OmniMoeThinkerConfig.from_pretrained(base_model)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )
    else:
        config = AutoConfig.from_pretrained(base_model, trust_remote_code=True)
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )

    if adapter:
        from peft import PeftModel
        from safetensors.torch import load_file, save_file
        import tempfile, shutil

        print(f"[load] Loading LoRA adapter: {adapter}")

        adapter_cfg_path = Path(adapter) / "adapter_config.json"
        with open(adapter_cfg_path) as f:
            adapter_cfg = json.loads(f.read())
        target_modules = adapter_cfg.get("target_modules", [])
        needs_remap = any(t.startswith("model.layers.") for t in target_modules) and model_type != "qwen3_omni_moe_thinker"

        if needs_remap:
            print("[load] Adapter was trained on thinker-only model; remapping keys to full model...")
            tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
            for fn in Path(adapter).iterdir():
                if fn.is_dir():
                    continue
                if fn.name == "adapter_config.json":
                    new_targets = []
                    for t in target_modules:
                        if t.startswith("model.layers."):
                            new_targets.append("thinker." + t)
                        elif t[0].isdigit():
                            new_targets.append("thinker.model.layers." + t)
                        else:
                            new_targets.append(t)
                    adapter_cfg["target_modules"] = new_targets
                    with open(tmp_dir / "adapter_config.json", "w") as f:
                        json.dump(adapter_cfg, f, indent=2)
                elif fn.suffix == ".safetensors" and "adapter" in fn.name:
                    tensors = load_file(str(fn))
                    remapped = {}
                    for k, v in tensors.items():
                        if ".model.layers." in k and ".thinker." not in k:
                            new_k = k.replace("base_model.model.model.layers.", "base_model.model.thinker.model.layers.")
                            remapped[new_k] = v
                        else:
                            remapped[k] = v
                    save_file(remapped, str(tmp_dir / fn.name))
                else:
                    shutil.copy2(str(fn), str(tmp_dir / fn.name))
            adapter = str(tmp_dir)

        model = PeftModel.from_pretrained(model, adapter)
        model = model.merge_and_unload()

    if hasattr(model, "disable_talker"):
        model.disable_talker()
    model.eval()
    print(f"[load] Model ready ({type(model).__name__}). Device: {model.device}")
    return model, processor


def run_inference(model, processor, video_path: str, audio_path: str,
                  max_new_tokens: int, temperature: float) -> str:
    from qwen_omni_utils import process_mm_info
    import tempfile

    tmp_dir = tempfile.mkdtemp(prefix="eval_masked_")
    masked_video = os.path.join(tmp_dir, "clip.mp4")
    masked_audio = os.path.join(tmp_dir, "clip.wav")
    os.symlink(os.path.abspath(video_path), masked_video)
    os.symlink(os.path.abspath(audio_path), masked_audio)

    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "video", "video": masked_video},
                {"type": "audio", "audio": masked_audio},
                {"type": "text", "text": EVAL_PROMPT},
            ],
        }
    ]

    text = processor.apply_chat_template(
        conversation, add_generation_prompt=True, tokenize=False,
    )
    audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
    inputs = processor(
        text=text, audio=audios, images=images, videos=videos,
        return_tensors="pt", padding=True, use_audio_in_video=False,
    )

    model_dtype = next(model.parameters()).dtype
    converted = {}
    for k, v in inputs.items():
        if hasattr(v, "to"):
            v = v.to(model.device)
            if torch.is_floating_point(v):
                v = v.to(model_dtype)
        converted[k] = v
    inputs = converted

    from transformers import Qwen3OmniMoeThinkerForConditionalGeneration
    is_thinker = isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration)
    if is_thinker:
        gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
    else:
        gen_kwargs = {
            "thinker_max_new_tokens": max_new_tokens,
            "use_audio_in_video": False,
            "return_audio": False,
            "do_sample": temperature > 0,
        }
    if temperature > 0:
        gen_kwargs["temperature"] = temperature
        gen_kwargs["top_p"] = 0.9

    with torch.inference_mode():
        output_ids = model.generate(**inputs, **gen_kwargs)

    prompt_len = inputs["input_ids"].shape[1]
    response = processor.batch_decode(
        output_ids[:, prompt_len:], skip_special_tokens=True,
    )[0].strip()

    import shutil
    shutil.rmtree(tmp_dir, ignore_errors=True)

    return response


def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Compute all evaluation metrics."""
    total = len(results)
    if total == 0:
        return {}

    # 1. Sync/Desync binary accuracy
    sync_correct = sum(1 for r in results if r["pred_synced"] == r["gt_synced"])
    sync_acc = sync_correct / total

    # 2. Direction accuracy (on desync samples only)
    desync_samples = [r for r in results if not r["gt_synced"]]
    if desync_samples:
        dir_correct = sum(1 for r in desync_samples if r["pred_direction"] == r["gt_direction"])
        dir_acc = dir_correct / len(desync_samples)
    else:
        dir_acc = None

    # 3. Full 3-class accuracy (synced / delay / early)
    def label(r, prefix):
        if r[f"{prefix}synced"]:
            return "synced"
        return r[f"{prefix}direction"]
    three_class_correct = sum(1 for r in results if label(r, "pred_") == label(r, "gt_"))
    three_class_acc = three_class_correct / total

    # 4. Offset MAE (on desync samples where model also predicts desync)
    offset_errors = []
    for r in desync_samples:
        if not r["pred_synced"] and r["pred_offset_sec"] > 0:
            offset_errors.append(abs(r["pred_offset_sec"] - r["gt_offset_sec"]))
    offset_mae = mean(offset_errors) if offset_errors else None
    offset_median = median(offset_errors) if offset_errors else None

    # 5. Per-category breakdown
    synced_samples = [r for r in results if r["gt_synced"]]
    delay_samples = [r for r in results if r["gt_direction"] == "delay"]
    early_samples = [r for r in results if r["gt_direction"] == "early"]

    synced_acc = (sum(1 for r in synced_samples if r["pred_synced"]) / len(synced_samples)) if synced_samples else None
    delay_acc = (sum(1 for r in delay_samples if not r["pred_synced"] and r["pred_direction"] == "delay") / len(delay_samples)) if delay_samples else None
    early_acc = (sum(1 for r in early_samples if not r["pred_synced"] and r["pred_direction"] == "early") / len(early_samples)) if early_samples else None

    # 6. Tolerance-based offset accuracy
    within_05 = sum(1 for e in offset_errors if e <= 0.5) if offset_errors else 0
    within_10 = sum(1 for e in offset_errors if e <= 1.0) if offset_errors else 0

    # 7. Parse method stats
    json_parsed = sum(1 for r in results if r.get("parse_method") == "json")
    regex_parsed = sum(1 for r in results if r.get("parse_method") == "regex_fallback")
    gpt_parsed = sum(1 for r in results if r.get("parse_method") == "gpt_judge")

    # 8. Timestamp MAE (t_v and t_a separately)
    tv_errors = []
    ta_errors = []
    for r in results:
        gt_tv = r.get("gt_t_v")
        gt_ta = r.get("gt_t_a")
        pred_tv = r.get("pred_t_v")
        pred_ta = r.get("pred_t_a")
        if gt_tv is not None and pred_tv is not None:
            tv_errors.append(abs(pred_tv - gt_tv))
        if gt_ta is not None and pred_ta is not None:
            ta_errors.append(abs(pred_ta - gt_ta))
    tv_mae = round(mean(tv_errors), 4) if tv_errors else None
    ta_mae = round(mean(ta_errors), 4) if ta_errors else None
    tv_median = round(median(tv_errors), 4) if tv_errors else None
    ta_median = round(median(ta_errors), 4) if ta_errors else None

    return {
        "total_samples": total,
        "sync_desync_accuracy": round(sync_acc, 4),
        "three_class_accuracy": round(three_class_acc, 4),
        "direction_accuracy_on_desync": round(dir_acc, 4) if dir_acc is not None else None,
        "per_category": {
            "synced_accuracy": round(synced_acc, 4) if synced_acc is not None else None,
            "delay_accuracy": round(delay_acc, 4) if delay_acc is not None else None,
            "early_accuracy": round(early_acc, 4) if early_acc is not None else None,
            "synced_count": len(synced_samples),
            "delay_count": len(delay_samples),
            "early_count": len(early_samples),
        },
        "offset_mae_sec": round(offset_mae, 4) if offset_mae is not None else None,
        "offset_median_sec": round(offset_median, 4) if offset_median is not None else None,
        "offset_within_0.5s": within_05,
        "offset_within_1.0s": within_10,
        "offset_evaluated_count": len(offset_errors),
        "timestamp_tv_mae_sec": tv_mae,
        "timestamp_ta_mae_sec": ta_mae,
        "timestamp_tv_median_sec": tv_median,
        "timestamp_ta_median_sec": ta_median,
        "timestamp_evaluated_tv": len(tv_errors),
        "timestamp_evaluated_ta": len(ta_errors),
        "parse_stats": {"json": json_parsed, "regex_fallback": regex_parsed, "gpt_judge": gpt_parsed},
    }


def print_summary(metrics: Dict[str, Any], label: str) -> None:
    print()
    print(f"{'=' * 60}")
    print(f" Eval Summary: {label}")
    print(f"{'=' * 60}")
    print(f" Total samples: {metrics['total_samples']}")
    print(f" Sync/Desync Accuracy: {metrics['sync_desync_accuracy']:.1%}")
    print(f" 3-Class Accuracy: {metrics['three_class_accuracy']:.1%}")
    if metrics["direction_accuracy_on_desync"] is not None:
        print(f" Direction Acc (desync): {metrics['direction_accuracy_on_desync']:.1%}")
    print(f" ─── Per Category ───")
    pc = metrics["per_category"]
    if pc["synced_accuracy"] is not None:
        print(f" Synced correct: {pc['synced_accuracy']:.1%} ({pc['synced_count']} samples)")
    if pc["delay_accuracy"] is not None:
        print(f" Delay correct: {pc['delay_accuracy']:.1%} ({pc['delay_count']} samples)")
    if pc["early_accuracy"] is not None:
        print(f" Early correct: {pc['early_accuracy']:.1%} ({pc['early_count']} samples)")
    print(f" ─── Offset Estimation ───")
    if metrics["offset_mae_sec"] is not None:
        print(f" MAE: {metrics['offset_mae_sec']:.3f}s")
        print(f" Median Error: {metrics['offset_median_sec']:.3f}s")
        print(f" Within 0.5s: {metrics['offset_within_0.5s']} / {metrics['offset_evaluated_count']}")
        print(f" Within 1.0s: {metrics['offset_within_1.0s']} / {metrics['offset_evaluated_count']}")
    else:
        print(f" (no valid offset predictions)")
    print(f" ─── Timestamp Estimation ───")
    if metrics.get("timestamp_tv_mae_sec") is not None:
        print(f" t_v MAE: {metrics['timestamp_tv_mae_sec']:.3f}s ({metrics['timestamp_evaluated_tv']} samples)")
        print(f" t_v Median Error: {metrics['timestamp_tv_median_sec']:.3f}s")
    else:
        print(f" t_v: (no valid pairs)")
    if metrics.get("timestamp_ta_mae_sec") is not None:
        print(f" t_a MAE: {metrics['timestamp_ta_mae_sec']:.3f}s ({metrics['timestamp_evaluated_ta']} samples)")
        print(f" t_a Median Error: {metrics['timestamp_ta_median_sec']:.3f}s")
    else:
        print(f" t_a: (no valid pairs)")
    print(f" ─── Parse Stats ───")
    ps = metrics["parse_stats"]
    print(f" JSON parsed: {ps['json']}")
    print(f" GPT judge: {ps.get('gpt_judge', 0)}")
    print(f" Regex fallback: {ps['regex_fallback']}")
    print(f"{'=' * 60}")


def main() -> None:
    args = parse_args()
    set_data_root(args.data_root)
    test_jsonl = args.test_jsonl or (DATA_ROOT / "kto_training_data_v2_test.jsonl")
    output_dir = args.output_dir or Path("/home/ubuntu/eval_results/sync")

    if args.gpt_judge:
        client = _get_openai_client(args.openai_api_key)
        if client is None:
            print("[ERROR] --gpt-judge requires OPENAI_API_KEY env var or --openai-api-key argument.")
            raise SystemExit(1)
        try:
            test_resp = client.chat.completions.create(
                model=args.gpt_model,
                messages=[{"role": "user", "content": "Say OK"}],
                max_completion_tokens=5,
            )
            print(f"[gpt-judge] API verified. Model: {args.gpt_model}")
        except Exception as exc:
            print(f"[ERROR] GPT API check failed: {exc}")
            raise SystemExit(1)

    label = args.label or (Path(args.adapter).name if args.adapter else Path(args.base_model).name)

    out_dir = output_dir / label
    out_dir.mkdir(parents=True, exist_ok=True)
    results_jsonl = out_dir / "eval_results.jsonl"
    metrics_json = out_dir / "metrics.json"
    summary_txt = out_dir / "summary.txt"

    # Load data
    test_data = load_test_data(test_jsonl, args.max_samples)
    print(f"[data] Loaded {len(test_data)} test samples")

    # Resume support
    processed = set()
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                obj = json.loads(line)
                processed.add(obj["video"])
        print(f"[resume] {len(processed)} already processed, skipping")

    # Load model
    model, processor = load_model(args.base_model, args.adapter)

    # Inference loop
    for item in tqdm(test_data, desc="Evaluating", unit="sample"):
        if item["video"] in processed:
            continue

        if not os.path.exists(item["video_path"]):
            print(f" [skip] Video not found: {item['video_path']}")
            continue

        try:
            raw_output = run_inference(
                model, processor, item["video_path"], item["audio_path"],
                args.max_new_tokens, args.temperature,
            )
        except Exception as exc:
            import traceback
            print(f" [error] {item['video']}: {exc}")
            traceback.print_exc()
            raw_output = ""

        if args.gpt_judge and raw_output:
            gpt_pred = gpt_extract_prediction(
                raw_output, api_key=args.openai_api_key, model=args.gpt_model,
            )
            if gpt_pred is not None:
                pred = gpt_pred
            else:
                pred = extract_prediction(raw_output)
        else:
            pred = extract_prediction(raw_output)

        result = {
            "video": item["video"],
            "video_path": item["video_path"],
            "gt_synced": item["gt_synced"],
            "gt_direction": item["gt_direction"],
            "gt_offset_sec": item["gt_offset_sec"],
            "gt_t_v": item["gt_t_v"],
            "gt_t_a": item["gt_t_a"],
            "pred_synced": pred["pred_synced"],
            "pred_direction": pred["pred_direction"],
            "pred_offset_sec": pred["pred_offset_sec"],
            "pred_t_v": pred.get("pred_t_v"),
            "pred_t_a": pred.get("pred_t_a"),
            "pred_explanation": pred["pred_explanation"],
            "parse_method": pred["parse_method"],
            "raw_output": raw_output,
        }

        with open(results_jsonl, "a", encoding="utf-8") as f:
            f.write(json.dumps(result, ensure_ascii=False) + "\n")

        processed.add(item["video"])
        gc.collect()
        torch.cuda.empty_cache()

    # Load all results and compute metrics
    all_results = []
    with open(results_jsonl) as f:
        for line in f:
            all_results.append(json.loads(line))

    metrics = compute_metrics(all_results)
    metrics["eval_config"] = {
        "base_model": args.base_model,
        "adapter": args.adapter,
        "data_root": str(args.data_root),
        "test_jsonl": str(test_jsonl),
        "total_test_samples": len(test_data),
        "max_new_tokens": args.max_new_tokens,
        "temperature": args.temperature,
        "gpt_judge": args.gpt_judge,
        "gpt_model": args.gpt_model if args.gpt_judge else None,
    }

    with open(metrics_json, "w", encoding="utf-8") as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)

    print_summary(metrics, label)

    with open(summary_txt, "w", encoding="utf-8") as f:
        import io, contextlib
        buf = io.StringIO()
        with contextlib.redirect_stdout(buf):
            print_summary(metrics, label)
        f.write(buf.getvalue())

    print(f"\n[output] Results JSONL: {results_jsonl}")
    print(f"[output] Metrics JSON: {metrics_json}")
    print(f"[output] Summary: {summary_txt}")


if __name__ == "__main__":
    main()
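Worked examples of the filename and timestamp conventions the parsers above implement. The filenames and the sentence are illustrative, but each output follows directly from the regexes in parse_ground_truth and extract_timestamps:

# Illustrative inputs; outputs follow from the regexes defined above.
parse_ground_truth("clip_0001_delay_1.5s.mp4")
# -> {"synced": False, "direction": "delay", "offset_sec": 1.5}
parse_ground_truth("clip_0001_early_0.8s.mp4")
# -> {"synced": False, "direction": "early", "offset_sec": 0.8}
parse_ground_truth("clip_0001.mp4")
# -> {"synced": True, "direction": "none", "offset_sec": 0.0}

extract_timestamps("The ball hits the ground at 2.0s but the thud is heard around 3.5s.")
# -> (2.0, 3.5)   # first mention is taken as t_v, second as t_a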
hf_upload/scripts/eval_lvbench.py
ADDED
@@ -0,0 +1,647 @@
+#!/usr/bin/env python3
+"""
+Evaluate Qwen3-Omni models on the LVBench benchmark.
+
+LVBench: 1549 multiple-choice questions over 103 long videos.
+Metrics: accuracy (overall, per video type).
+
+Usage:
+    python eval_lvbench.py \
+        --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct \
+        --video-dir /opt/dlami/nvme/lvbench \
+        --max-samples 30 --label vanilla_qwen3omni
+
+    python eval_lvbench.py \
+        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
+        --adapter /opt/dlami/nvme/LlamaFactory/saves/.../adapter \
+        --label dpo_v4_8632
+"""
+
+from __future__ import annotations
+
+import argparse
+import gc
+import json
+import os
+import re
+import tempfile
+import shutil
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import torch
+from tqdm import tqdm
+
+DEFAULT_VIDEO_DIR = Path("/opt/dlami/nvme/lvbench")
+DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/eval_results/lvbench")
+
+VIDEO_TYPES = ["cartoon", "documentary", "live", "selfmedia", "sport", "tv"]
+
+MCQ_PROMPT = (
+    "Select the best answer to the following multiple-choice question "
+    "based on the video. Respond with only the letter (A, B, C, or D) "
+    "of the correct option.\n"
+)
+
+
+def parse_args() -> argparse.Namespace:
+    p = argparse.ArgumentParser(description="Evaluate on LVBench benchmark.")
+    p.add_argument("--base-model", type=str,
+                   default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
+    p.add_argument("--adapter", type=str, default=None)
+    p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
+    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
+    p.add_argument("--max-samples", type=int, default=-1)
+    p.add_argument("--max-new-tokens", type=int, default=32)
+    p.add_argument("--temperature", type=float, default=0.0)
+    p.add_argument("--label", type=str, default=None)
+    p.add_argument("--vllm", action="store_true", default=False,
+                   help="Use vLLM offline batch inference instead of transformers (much faster).")
+    p.add_argument("--tp", type=int, default=None,
+                   help="Tensor parallel size for vLLM (default: all visible GPUs).")
+    p.add_argument("--batch-size", type=int, default=16,
+                   help="Batch size for vLLM generate().")
+    p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
+    p.add_argument("--max-model-len", type=int, default=65536)
+    return p.parse_args()
+
+
+# ---------------------------------------------------------------------------
+# Model loading — identical to eval_videomme.py / eval_dpo_sync.py
+# ---------------------------------------------------------------------------
+def load_model(base_model: str, adapter: Optional[str]):
+    import json as _json
+    from transformers import (
+        AutoConfig,
+        AutoProcessor,
+        Qwen3OmniMoeForConditionalGeneration,
+        Qwen3OmniMoeThinkerConfig,
+        Qwen3OmniMoeThinkerForConditionalGeneration,
+    )
+
+    print(f"[load] Base model: {base_model}")
+    processor = AutoProcessor.from_pretrained(base_model, trust_remote_code=True)
+
+    model_path = Path(base_model)
+    cfg_path = model_path / "config.json" if model_path.exists() else None
+
+    model_type = None
+    if cfg_path and cfg_path.exists():
+        with open(cfg_path, "r") as f:
+            model_type = _json.load(f).get("model_type")
+    if not model_type:
+        try:
+            from huggingface_hub import hf_hub_download
+            cached = hf_hub_download(base_model, "config.json")
+            with open(cached, "r") as f:
+                model_type = _json.load(f).get("model_type")
+        except Exception:
+            pass
+
+    print(f"[load] Detected model_type: {model_type}")
+
+    if model_type == "qwen3_omni_moe_thinker":
+        config = Qwen3OmniMoeThinkerConfig.from_pretrained(base_model)
+        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
+            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
+        )
+    else:
+        config = AutoConfig.from_pretrained(base_model, trust_remote_code=True)
+        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
+            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
+        )
+
+    if adapter:
+        from peft import PeftModel
+        from safetensors.torch import load_file, save_file
+
+        print(f"[load] Loading LoRA adapter: {adapter}")
+        adapter_cfg_path = Path(adapter) / "adapter_config.json"
+        with open(adapter_cfg_path) as f:
+            adapter_cfg = _json.loads(f.read())
+        target_modules = adapter_cfg.get("target_modules", [])
+        needs_remap = (
+            any(t.startswith("model.layers.") for t in target_modules)
+            and model_type != "qwen3_omni_moe_thinker"
+        )
+
+        if needs_remap:
+            print("[load] Adapter trained on thinker-only; remapping keys...")
+            tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
+            for fn in Path(adapter).iterdir():
+                if fn.is_dir():
+                    continue
+                if fn.name == "adapter_config.json":
+                    new_targets = []
+                    for t in target_modules:
+                        if t.startswith("model.layers."):
+                            new_targets.append("thinker." + t)
+                        elif t[0].isdigit():
+                            new_targets.append("thinker.model.layers." + t)
+                        else:
+                            new_targets.append(t)
+                    adapter_cfg["target_modules"] = new_targets
+                    with open(tmp_dir / "adapter_config.json", "w") as f:
+                        json.dump(adapter_cfg, f, indent=2)
+                elif fn.suffix == ".safetensors" and "adapter" in fn.name:
+                    tensors = load_file(str(fn))
+                    remapped = {}
+                    for k, v in tensors.items():
+                        if ".model.layers." in k and ".thinker." not in k:
+                            new_k = k.replace(
+                                "base_model.model.model.layers.",
+                                "base_model.model.thinker.model.layers.",
+                            )
+                            remapped[new_k] = v
+                        else:
+                            remapped[k] = v
+                    save_file(remapped, str(tmp_dir / fn.name))
+                else:
+                    shutil.copy2(str(fn), str(tmp_dir / fn.name))
+            adapter = str(tmp_dir)
+
+        model = PeftModel.from_pretrained(model, adapter)
+        model = model.merge_and_unload()
+
+    if hasattr(model, "disable_talker"):
+        model.disable_talker()
+    model.eval()
+    print(f"[load] Model ready ({type(model).__name__}). Device: {model.device}")
+    return model, processor
+
+
+# ---------------------------------------------------------------------------
+# Inference
+# ---------------------------------------------------------------------------
+def run_inference(model, processor, video_path: str, prompt: str,
+                  max_new_tokens: int, temperature: float) -> str:
+    from qwen_omni_utils import process_mm_info
+
|
| 180 |
+
tmp_dir = tempfile.mkdtemp(prefix="eval_lvb_")
|
| 181 |
+
masked_video = os.path.join(tmp_dir, "clip.mp4")
|
| 182 |
+
os.symlink(os.path.abspath(video_path), masked_video)
|
| 183 |
+
|
| 184 |
+
conversation = [
|
| 185 |
+
{
|
| 186 |
+
"role": "user",
|
| 187 |
+
"content": [
|
| 188 |
+
{"type": "video", "video": masked_video},
|
| 189 |
+
{"type": "text", "text": prompt},
|
| 190 |
+
],
|
| 191 |
+
}
|
| 192 |
+
]
|
| 193 |
+
|
| 194 |
+
text = processor.apply_chat_template(
|
| 195 |
+
conversation, add_generation_prompt=True, tokenize=False,
|
| 196 |
+
)
|
| 197 |
+
audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
|
| 198 |
+
inputs = processor(
|
| 199 |
+
text=text, audio=audios, images=images, videos=videos,
|
| 200 |
+
return_tensors="pt", padding=True, use_audio_in_video=False,
|
| 201 |
+
)
|
| 202 |
+
|
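+    # Move inputs onto the model's device and cast floating-point tensors to its dtype.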
+    model_dtype = next(model.parameters()).dtype
+    converted = {}
+    for k, v in inputs.items():
+        if hasattr(v, "to"):
+            v = v.to(model.device)
+            if torch.is_floating_point(v):
+                v = v.to(model_dtype)
+        converted[k] = v
+    inputs = converted
+
+    from transformers import Qwen3OmniMoeThinkerForConditionalGeneration
+    is_thinker = isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration)
+    if is_thinker:
+        gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
+    else:
+        gen_kwargs = {
+            "thinker_max_new_tokens": max_new_tokens,
+            "use_audio_in_video": False,
+            "return_audio": False,
+            "do_sample": temperature > 0,
+        }
+    if temperature > 0:
+        gen_kwargs["temperature"] = temperature
+        gen_kwargs["top_p"] = 0.9
+
+    with torch.inference_mode():
+        output_ids = model.generate(**inputs, **gen_kwargs)
+
+    if isinstance(output_ids, tuple):
+        output_ids = output_ids[0]
+
+    prompt_len = inputs["input_ids"].shape[1]
+    response = processor.batch_decode(
+        output_ids[:, prompt_len:], skip_special_tokens=True,
+    )[0].strip()
+
+    shutil.rmtree(tmp_dir, ignore_errors=True)
+    return response
+
+
+def preprocess_video_for_vllm(video_path: str):
+    """Extract video frames using qwen_omni_utils, return numpy array for vLLM.
+    Caps at 128 frames to stay within vLLM encoder cache (62720 tokens).
+    """
+    from qwen_omni_utils import process_mm_info
+    import numpy as np
+
+    messages = [{
+        "role": "user",
+        "content": [
+            {"type": "video", "video": video_path, "nframes": 128},
+            {"type": "text", "text": "placeholder"},
+        ],
+    }]
+    audios, images, videos = process_mm_info(messages, use_audio_in_video=False)
+    video_tensor = videos[0]
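+    # Convert float frames to uint8 pixel values for vLLM's multi_modal_data video input.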
+    video_np = (video_tensor * 255).byte().numpy()
+    return video_np
+
+
+SYSTEM_PROMPT = (
+    "You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
+    "Group, capable of perceiving auditory and visual inputs, as well as "
+    "generating text and speech."
+)
+
+
+def build_vllm_prompt(question: str) -> str:
+    return (
+        f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
+        f"<|im_start|>user\n"
+        f"<|vision_start|><|video_pad|><|vision_end|>"
+        f"{question}<|im_end|>\n"
+        f"<|im_start|>assistant\n"
+    )
+
+
+def extract_answer(text: str) -> str:
+    """Extract a single letter A/B/C/D from model output."""
+    text = text.strip()
+    prefixes = [
+        "The best answer is", "The correct answer is",
+        "The answer is", "The answer", "Best answer:", "Best option:",
+    ]
+    for prefix in prefixes:
+        text = text.replace(prefix, "")
+
+    if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
+        return ""
+    m = re.search(r"[ABCD]", text)
+    return m[0] if m else ""
+
+
+# ---------------------------------------------------------------------------
+# Dataset loading
+# ---------------------------------------------------------------------------
+def load_lvbench(video_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
+    from datasets import load_dataset
+    ds = load_dataset("lmms-lab/LVBench", split="train")
+    data = []
+    skipped = 0
+    for row in ds:
+        vid = row["key"]
+        video_path = video_dir / f"{vid}.mp4"
+        if not video_path.exists():
+            skipped += 1
+            continue
+
+        prompt = MCQ_PROMPT + row["question"] + "\nThe best answer is:"
+
+        data.append({
+            "uid": row["uid"],
+            "video_id": vid,
+            "video_path": str(video_path),
+            "video_type": row["type"],
+            "question_type": row["question_type"],
+            "question": row["question"],
+            "gt_answer": row["answer"],
+            "time_reference": row.get("time_reference", ""),
+            "prompt": prompt,
+        })
+    if skipped:
+        print(f"[data] Skipped {skipped} questions (video not found)")
+    if max_samples > 0:
+        data = data[:max_samples]
+    return data
+
+
+# ---------------------------------------------------------------------------
+# Metrics
+# ---------------------------------------------------------------------------
+def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
+    total = len(results)
+    if total == 0:
+        return {}
+
+    correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
+    overall_acc = correct / total
+
+    def acc_for(items):
+        if not items:
+            return None
+        c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
+        return round(c / len(items), 4)
+
+    per_type = {}
+    for vt in VIDEO_TYPES:
+        subset = [r for r in results if r["video_type"] == vt]
+        if subset:
+            per_type[vt] = {"accuracy": acc_for(subset), "count": len(subset)}
+
+    q_types = set()
+    for r in results:
+        if isinstance(r.get("question_type"), list):
+            q_types.update(r["question_type"])
+        elif r.get("question_type"):
+            q_types.add(r["question_type"])
+
+    per_qtype = {}
+    for qt in sorted(q_types):
+        subset = [r for r in results if qt in (r.get("question_type", [])
+                  if isinstance(r.get("question_type"), list) else [r.get("question_type")])]
+        if subset:
+            per_qtype[qt] = {"accuracy": acc_for(subset), "count": len(subset)}
+
+    return {
+        "total_samples": total,
+        "overall_accuracy": round(overall_acc, 4),
+        "per_video_type": per_type,
+        "per_question_type": per_qtype,
+    }
+
+
+def print_summary(metrics: Dict[str, Any], label: str) -> None:
+    print()
+    print(f"{'=' * 65}")
+    print(f" LVBench Summary: {label}")
+    print(f"{'=' * 65}")
+    print(f" Total samples: {metrics['total_samples']}")
+    print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")
+
+    print(f" ─── Per Video Type ───")
+    for vt in VIDEO_TYPES:
+        if vt in metrics.get("per_video_type", {}):
+            d = metrics["per_video_type"][vt]
+            print(f" {vt:15s}: {d['accuracy']:.1%} ({d['count']} questions)")
+
+    print(f" ─── Per Question Type ───")
+    for qt, d in sorted(metrics.get("per_question_type", {}).items()):
+        print(f" {qt:30s}: {d['accuracy']:.1%} ({d['count']})")
+
+    print(f"{'=' * 65}")
+
+
+# ---------------------------------------------------------------------------
+# Main
+# ---------------------------------------------------------------------------
+def main() -> None:
+    args = parse_args()
+    label = args.label or (
+        Path(args.adapter).name if args.adapter
+        else Path(args.base_model).name
+    )
+
+    out_dir = args.output_dir / label
+    out_dir.mkdir(parents=True, exist_ok=True)
+    results_jsonl = out_dir / "eval_results.jsonl"
+    metrics_json = out_dir / "metrics.json"
+    summary_txt = out_dir / "summary.txt"
+
+    print("[data] Loading LVBench dataset...")
+    test_data = load_lvbench(args.video_dir, args.max_samples)
+    print(f"[data] {len(test_data)} questions ready for evaluation")
+
+    processed = set()
+    if results_jsonl.exists():
+        with open(results_jsonl) as f:
+            for line in f:
+                obj = json.loads(line)
+                processed.add(obj["uid"])
+        print(f"[resume] {len(processed)} already processed, skipping")
+
+    use_vllm = args.vllm
+    model = processor = llm = None
+    vllm_preprocess_stats: Dict[str, int] | None = None
+
+    if use_vllm:
+        from vllm import LLM, SamplingParams
+        tp = args.tp or torch.cuda.device_count()
+        model_path = args.base_model
+        print(f"[vllm] Loading {model_path} with tp={tp} ...")
+        llm = LLM(
+            model=model_path,
+            tensor_parallel_size=tp,
+            max_model_len=args.max_model_len,
+            max_num_seqs=4,
+            limit_mm_per_prompt={"video": 1},
+            gpu_memory_utilization=args.gpu_memory_utilization,
+            dtype="bfloat16",
+            trust_remote_code=True,
+        )
+        sampling_params = SamplingParams(
+            temperature=args.temperature if args.temperature > 0 else 0.0,
+            top_p=0.9 if args.temperature > 0 else 1.0,
+            max_tokens=args.max_new_tokens,
+        )
+
+        print("[vllm] Preprocessing videos ...")
+        todo = [item for item in test_data if item["uid"] not in processed]
+        preprocessed = {}
+        preprocess_failed_paths: set[str] = set()
+
+        for i, item in enumerate(todo):
+            vp = item["video_path"]
+            if vp in preprocessed or vp in preprocess_failed_paths:
+                continue
+            try:
+                preprocessed[vp] = preprocess_video_for_vllm(vp)
+            except Exception as e:
+                preprocess_failed_paths.add(vp)
+                print(f"  [skip] preprocess error: {Path(vp).name}: {e}")
+            if (i + 1) % 50 == 0:
+                print(f"  Preprocessed {i+1}/{len(todo)} ...")
+
+        n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
+        if preprocess_failed_paths:
+            print(
+                f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
+                f"{n_pp_skip} question(s) will not use vLLM (run continues)."
+            )
+            vllm_preprocess_stats = {
+                "preprocess_failed_videos": len(preprocess_failed_paths),
+                "preprocess_skipped_questions": n_pp_skip,
+            }
+
+        vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
+        fallback_items = []
+        print(f"[vllm] {len(vllm_todo)} questions ready, running inference ...")
+
+        for i, item in enumerate(vllm_todo):
+            if item["uid"] in processed:
+                continue
+            inp = {
+                "prompt": build_vllm_prompt(item["prompt"]),
+                "multi_modal_data": {"video": preprocessed[item["video_path"]]},
+            }
+            try:
+                outputs = llm.generate([inp], sampling_params=sampling_params)
+                raw_output = outputs[0].outputs[0].text.strip()
+                pred = extract_answer(raw_output)
+                result = {
+                    "uid": item["uid"],
+                    "video_id": item["video_id"],
+                    "video_type": item["video_type"],
+                    "question_type": item["question_type"],
+                    "gt_answer": item["gt_answer"],
+                    "pred_answer": pred,
+                    "correct": pred.upper() == item["gt_answer"].upper(),
+                    "raw_output": raw_output,
+                }
+                with open(results_jsonl, "a", encoding="utf-8") as f:
+                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
+                processed.add(item["uid"])
+            except (ValueError, RuntimeError) as exc:
+                if "longer than the maximum model length" in str(exc):
+                    print(f"  [too long] {item['uid']} -> fallback")
+                    fallback_items.append(item)
+                else:
+                    raise
+
+            if (i + 1) % 50 == 0:
+                print(f"  [vllm] [{i+1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")
+
+        preprocessed.clear()
+
+        vllm_results = []
+        if results_jsonl.exists():
+            with open(results_jsonl) as f:
+                for line in f:
+                    vllm_results.append(json.loads(line))
+        if vllm_results:
+            vllm_metrics = compute_metrics(vllm_results)
+            vllm_metrics_path = out_dir / "metrics_vllm.json"
+            with open(vllm_metrics_path, "w", encoding="utf-8") as f:
+                json.dump(vllm_metrics, f, indent=2, ensure_ascii=False)
+            print(f"[vllm] Intermediate metrics saved to {vllm_metrics_path}")
+            print_summary(vllm_metrics, label + " (vllm only)")
+
+        if fallback_items:
+            print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
+            del llm
+            gc.collect()
+            torch.cuda.empty_cache()
+
+            model, processor = load_model(args.base_model, args.adapter)
+            for item in tqdm(fallback_items, desc="Fallback", unit="q"):
+                if item["uid"] in processed:
+                    continue
+                try:
+                    raw_output = run_inference(
+                        model, processor, item["video_path"], item["prompt"],
+                        args.max_new_tokens, args.temperature,
+                    )
+                except Exception as exc:
+                    import traceback
+                    print(f"  [error] {item['uid']}: {exc}")
+                    traceback.print_exc()
+                    raw_output = ""
+
+                pred = extract_answer(raw_output)
+                result = {
+                    "uid": item["uid"],
+                    "video_id": item["video_id"],
+                    "video_type": item["video_type"],
+                    "question_type": item["question_type"],
+                    "gt_answer": item["gt_answer"],
+                    "pred_answer": pred,
+                    "correct": pred.upper() == item["gt_answer"].upper(),
+                    "raw_output": raw_output,
+                }
+                with open(results_jsonl, "a", encoding="utf-8") as f:
+                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
+                processed.add(item["uid"])
+                gc.collect()
+                torch.cuda.empty_cache()
+
+    else:
+        print("[model] Loading model...")
+        model, processor = load_model(args.base_model, args.adapter)
+
+        for item in tqdm(test_data, desc="LVBench", unit="q"):
+            if item["uid"] in processed:
+                continue
+
+            try:
+                raw_output = run_inference(
+                    model, processor, item["video_path"], item["prompt"],
+                    args.max_new_tokens, args.temperature,
+                )
+            except Exception as exc:
+                import traceback
+                print(f"  [error] {item['uid']}: {exc}")
+                traceback.print_exc()
+                raw_output = ""
+
+            pred = extract_answer(raw_output)
+
+            result = {
+                "uid": item["uid"],
+                "video_id": item["video_id"],
+                "video_type": item["video_type"],
+                "question_type": item["question_type"],
+                "gt_answer": item["gt_answer"],
+                "pred_answer": pred,
+                "correct": pred.upper() == item["gt_answer"].upper(),
+                "raw_output": raw_output,
+            }
+
+            with open(results_jsonl, "a", encoding="utf-8") as f:
+                f.write(json.dumps(result, ensure_ascii=False) + "\n")
+
+            processed.add(item["uid"])
+            gc.collect()
+            torch.cuda.empty_cache()
+
+    all_results = []
+    if results_jsonl.exists():
+        with open(results_jsonl) as f:
+            for line in f:
+                all_results.append(json.loads(line))
+
+    if not all_results:
+        print("[warn] No results to compute metrics from.")
+        return
+
+    metrics = compute_metrics(all_results)
+    metrics["eval_config"] = {
+        "base_model": args.base_model,
+        "adapter": args.adapter,
+        "video_dir": str(args.video_dir),
+        "max_new_tokens": args.max_new_tokens,
+        "temperature": args.temperature,
+    }
+    if vllm_preprocess_stats is not None:
+        metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats
+
+    with open(metrics_json, "w", encoding="utf-8") as f:
+        json.dump(metrics, f, indent=2, ensure_ascii=False)
+
+    print_summary(metrics, label)
+
+    with open(summary_txt, "w", encoding="utf-8") as f:
+        import io, contextlib
+        buf = io.StringIO()
+        with contextlib.redirect_stdout(buf):
+            print_summary(metrics, label)
+        f.write(buf.getvalue())
+
+    print(f"\n[output] Results: {results_jsonl}")
+    print(f"[output] Metrics: {metrics_json}")
+    print(f"[output] Summary: {summary_txt}")
+
+
+if __name__ == "__main__":
+    main()
hf_upload/scripts/eval_videomme.py
ADDED
@@ -0,0 +1,679 @@
+#!/usr/bin/env python3
+"""
+Evaluate Qwen3-Omni models on the Video-MME benchmark.
+
+Video-MME: 2700 multiple-choice questions over 900 YouTube videos.
+Metrics: accuracy (overall, per-duration, per-domain, per-task-type).
+
+Usage:
+    python eval_videomme.py \
+        --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct \
+        --video-dir /opt/dlami/nvme/videomme/data/data \
+        --max-samples 30 --label vanilla_qwen3omni
+
+    python eval_videomme.py \
+        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
+        --adapter /opt/dlami/nvme/LlamaFactory/saves/.../adapter \
+        --label dpo_v4_8632
+"""
+
+from __future__ import annotations
+
+import argparse
+import gc
+import json
+import os
+import re
+import tempfile
+import shutil
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import torch
+from tqdm import tqdm
+
+DEFAULT_VIDEO_DIR = Path("/opt/dlami/nvme/videomme/data/data")
+DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/eval_results/videomme")
+
+VIDEO_TYPES = ["short", "medium", "long"]
+CATEGORIES = [
+    "Knowledge", "Film & Television", "Sports Competition",
+    "Artistic Performance", "Life Record", "Multilingual",
+]
+TASK_CATEGORIES = [
+    "Temporal Perception", "Spatial Perception", "Attribute Perception",
+    "Action Recognition", "Object Recognition", "OCR Problems",
+    "Counting Problem", "Temporal Reasoning", "Spatial Reasoning",
+    "Action Reasoning", "Object Reasoning", "Information Synopsis",
+]
+
+MCQ_PROMPT = (
+    "Select the best answer to the following multiple-choice question "
+    "based on the video. Respond with only the letter (A, B, C, or D) "
+    "of the correct option.\n"
+)
+
+
+def parse_args() -> argparse.Namespace:
+    p = argparse.ArgumentParser(description="Evaluate on Video-MME benchmark.")
+    p.add_argument("--base-model", type=str,
+                   default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
+    p.add_argument("--adapter", type=str, default=None)
+    p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
+    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
+    p.add_argument("--max-samples", type=int, default=-1)
+    p.add_argument("--max-new-tokens", type=int, default=32)
+    p.add_argument("--temperature", type=float, default=0.0)
+    p.add_argument("--label", type=str, default=None)
+    p.add_argument("--vllm", action="store_true", default=False,
+                   help="Use vLLM offline batch inference instead of transformers (much faster).")
+    p.add_argument("--tp", type=int, default=None,
+                   help="Tensor parallel size for vLLM (default: all visible GPUs).")
+    p.add_argument("--batch-size", type=int, default=32,
+                   help="Batch size for vLLM generate().")
+    p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
+    p.add_argument("--max-model-len", type=int, default=65536)
+    return p.parse_args()
+
+
+# ---------------------------------------------------------------------------
+# Model loading — reused from eval_dpo_sync.py
+# ---------------------------------------------------------------------------
+def load_model(base_model: str, adapter: Optional[str]):
+    import json as _json
+    from transformers import (
+        AutoConfig,
+        AutoProcessor,
+        Qwen3OmniMoeForConditionalGeneration,
+        Qwen3OmniMoeThinkerConfig,
+        Qwen3OmniMoeThinkerForConditionalGeneration,
+    )
+
+    print(f"[load] Base model: {base_model}")
+    processor = AutoProcessor.from_pretrained(base_model, trust_remote_code=True)
+
+    model_path = Path(base_model)
+    cfg_path = model_path / "config.json" if model_path.exists() else None
+
+    model_type = None
+    if cfg_path and cfg_path.exists():
+        with open(cfg_path, "r") as f:
+            model_type = _json.load(f).get("model_type")
+    if not model_type:
+        try:
+            from huggingface_hub import hf_hub_download
+            cached = hf_hub_download(base_model, "config.json")
+            with open(cached, "r") as f:
+                model_type = _json.load(f).get("model_type")
+        except Exception:
+            pass
+
+    print(f"[load] Detected model_type: {model_type}")
+
+    if model_type == "qwen3_omni_moe_thinker":
+        config = Qwen3OmniMoeThinkerConfig.from_pretrained(base_model)
+        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
+            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
+        )
+    else:
+        config = AutoConfig.from_pretrained(base_model, trust_remote_code=True)
+        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
+            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
+        )
+
+    if adapter:
+        from peft import PeftModel
+        from safetensors.torch import load_file, save_file
+
+        print(f"[load] Loading LoRA adapter: {adapter}")
+        adapter_cfg_path = Path(adapter) / "adapter_config.json"
+        with open(adapter_cfg_path) as f:
+            adapter_cfg = _json.loads(f.read())
+        target_modules = adapter_cfg.get("target_modules", [])
+        needs_remap = (
+            any(t.startswith("model.layers.") for t in target_modules)
+            and model_type != "qwen3_omni_moe_thinker"
+        )
+
+        if needs_remap:
+            print("[load] Adapter trained on thinker-only; remapping keys...")
+            tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
+            for fn in Path(adapter).iterdir():
+                if fn.is_dir():
+                    continue
+                if fn.name == "adapter_config.json":
+                    new_targets = []
+                    for t in target_modules:
+                        if t.startswith("model.layers."):
+                            new_targets.append("thinker." + t)
+                        elif t[0].isdigit():
+                            new_targets.append("thinker.model.layers." + t)
+                        else:
+                            new_targets.append(t)
+                    adapter_cfg["target_modules"] = new_targets
+                    with open(tmp_dir / "adapter_config.json", "w") as f:
+                        json.dump(adapter_cfg, f, indent=2)
+                elif fn.suffix == ".safetensors" and "adapter" in fn.name:
+                    tensors = load_file(str(fn))
+                    remapped = {}
+                    for k, v in tensors.items():
+                        if ".model.layers." in k and ".thinker." not in k:
+                            new_k = k.replace(
+                                "base_model.model.model.layers.",
+                                "base_model.model.thinker.model.layers.",
+                            )
+                            remapped[new_k] = v
+                        else:
+                            remapped[k] = v
+                    save_file(remapped, str(tmp_dir / fn.name))
+                else:
+                    shutil.copy2(str(fn), str(tmp_dir / fn.name))
+            adapter = str(tmp_dir)
+
+        model = PeftModel.from_pretrained(model, adapter)
+        model = model.merge_and_unload()
+
+    if hasattr(model, "disable_talker"):
+        model.disable_talker()
+    model.eval()
+    print(f"[load] Model ready ({type(model).__name__}). Device: {model.device}")
+    return model, processor
+
+
+# ---------------------------------------------------------------------------
+# Inference
+# ---------------------------------------------------------------------------
+def run_inference(model, processor, video_path: str, prompt: str,
+                  max_new_tokens: int, temperature: float) -> str:
+    from qwen_omni_utils import process_mm_info
+
+    tmp_dir = tempfile.mkdtemp(prefix="eval_vmme_")
+    masked_video = os.path.join(tmp_dir, "clip.mp4")
+    os.symlink(os.path.abspath(video_path), masked_video)
+
+    conversation = [
+        {
+            "role": "user",
+            "content": [
+                {"type": "video", "video": masked_video},
+                {"type": "text", "text": prompt},
+            ],
+        }
+    ]
+
+    text = processor.apply_chat_template(
+        conversation, add_generation_prompt=True, tokenize=False,
+    )
+    audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
+    inputs = processor(
+        text=text, audio=audios, images=images, videos=videos,
+        return_tensors="pt", padding=True, use_audio_in_video=False,
+    )
+
+    model_dtype = next(model.parameters()).dtype
+    converted = {}
+    for k, v in inputs.items():
+        if hasattr(v, "to"):
+            v = v.to(model.device)
+            if torch.is_floating_point(v):
+                v = v.to(model_dtype)
+        converted[k] = v
+    inputs = converted
+
+    from transformers import Qwen3OmniMoeThinkerForConditionalGeneration
+    is_thinker = isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration)
+    if is_thinker:
+        gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
+    else:
+        gen_kwargs = {
+            "thinker_max_new_tokens": max_new_tokens,
+            "use_audio_in_video": False,
+            "return_audio": False,
+            "do_sample": temperature > 0,
+        }
+    if temperature > 0:
+        gen_kwargs["temperature"] = temperature
+        gen_kwargs["top_p"] = 0.9
+
+    with torch.inference_mode():
+        output_ids = model.generate(**inputs, **gen_kwargs)
+
+    if isinstance(output_ids, tuple):
+        output_ids = output_ids[0]
+
+    prompt_len = inputs["input_ids"].shape[1]
+    response = processor.batch_decode(
+        output_ids[:, prompt_len:], skip_special_tokens=True,
+    )[0].strip()
+
+    shutil.rmtree(tmp_dir, ignore_errors=True)
+    return response
+
+
+def preprocess_video_for_vllm(video_path: str):
+    """Extract video frames using qwen_omni_utils, return numpy array for vLLM.
+    Caps at 128 frames to stay within vLLM encoder cache (62720 tokens).
+    """
+    from qwen_omni_utils import process_mm_info
+    import numpy as np
+
+    messages = [{
+        "role": "user",
+        "content": [
+            {"type": "video", "video": video_path, "nframes": 128},
+            {"type": "text", "text": "placeholder"},
+        ],
+    }]
+    audios, images, videos = process_mm_info(messages, use_audio_in_video=False)
+    video_tensor = videos[0]
+    video_np = (video_tensor * 255).byte().numpy()
+    return video_np
+
+
+SYSTEM_PROMPT = (
+    "You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
+    "Group, capable of perceiving auditory and visual inputs, as well as "
+    "generating text and speech."
+)
+
+
+def build_vllm_prompt(question: str) -> str:
+    return (
+        f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
+        f"<|im_start|>user\n"
+        f"<|vision_start|><|video_pad|><|vision_end|>"
+        f"{question}<|im_end|>\n"
+        f"<|im_start|>assistant\n"
+    )
+
+
+def extract_answer(text: str) -> str:
+    """Extract a single letter A/B/C/D from model output."""
+    text = text.strip()
+    prefixes = [
+        "The best answer is", "The correct answer is",
+        "The answer is", "The answer", "Best answer:", "Best option:",
+    ]
+    for prefix in prefixes:
+        text = text.replace(prefix, "")
+
+    if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
+        return ""
+    m = re.search(r"[ABCD]", text)
+    return m[0] if m else ""
+
+
+# ---------------------------------------------------------------------------
+# Dataset loading
+# ---------------------------------------------------------------------------
+def load_videomme(video_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
+    from datasets import load_dataset
+    ds = load_dataset("lmms-lab/Video-MME", split="test")
+    data = []
+    skipped = 0
+    for row in ds:
+        vid = row["videoID"]
+        video_path = video_dir / f"{vid}.mp4"
+        if not video_path.exists():
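+            # Probe common alternate extensions before giving up on this video.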
+            for ext in [".MP4", ".mkv"]:
+                alt = video_dir / f"{vid}{ext}"
+                if alt.exists():
+                    video_path = alt
+                    break
+        if not video_path.exists():
+            skipped += 1
+            continue
+
+        options_text = "\n".join(row["options"])
+        prompt = MCQ_PROMPT + row["question"] + "\n" + options_text + "\nThe best answer is:"
+
+        data.append({
+            "question_id": row["question_id"],
+            "video_id": vid,
+            "video_path": str(video_path),
+            "duration": row["duration"],
+            "domain": row["domain"],
+            "sub_category": row["sub_category"],
+            "task_type": row["task_type"],
+            "question": row["question"],
+            "options": row["options"],
+            "gt_answer": row["answer"],
+            "prompt": prompt,
+        })
+    if skipped:
+        print(f"[data] Skipped {skipped} questions (video not found)")
+    if max_samples > 0:
+        data = data[:max_samples]
+    return data
+
+
+# ---------------------------------------------------------------------------
+# Metrics
+# ---------------------------------------------------------------------------
+def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
+    total = len(results)
+    if total == 0:
+        return {}
+
+    correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
+    overall_acc = correct / total
+
+    def acc_for(items):
+        if not items:
+            return None
+        c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
+        return round(c / len(items), 4)
+
+    per_duration = {}
+    for vt in VIDEO_TYPES:
+        subset = [r for r in results if r["duration"] == vt]
+        if subset:
+            per_duration[vt] = {"accuracy": acc_for(subset), "count": len(subset)}
+
+    per_domain = {}
+    for cat in CATEGORIES:
+        subset = [r for r in results if r["domain"] == cat]
+        if subset:
+            per_domain[cat] = {"accuracy": acc_for(subset), "count": len(subset)}
+
+    per_task = {}
+    for task in TASK_CATEGORIES:
+        subset = [r for r in results if r["task_type"] == task]
+        if subset:
+            per_task[task] = {"accuracy": acc_for(subset), "count": len(subset)}
+
+    return {
+        "total_samples": total,
+        "overall_accuracy": round(overall_acc, 4),
+        "per_duration": per_duration,
+        "per_domain": per_domain,
+        "per_task_type": per_task,
+    }
+
+
+def print_summary(metrics: Dict[str, Any], label: str) -> None:
+    print()
+    print(f"{'=' * 65}")
+    print(f" Video-MME Summary: {label}")
+    print(f"{'=' * 65}")
+    print(f" Total samples: {metrics['total_samples']}")
+    print(f" Overall Accuracy: {metrics['overall_accuracy']:.1%}")
+
+    print(f" ─── Per Duration ───")
+    for vt in VIDEO_TYPES:
+        if vt in metrics["per_duration"]:
+            d = metrics["per_duration"][vt]
+            print(f" {vt:8s}: {d['accuracy']:.1%} ({d['count']} questions)")
+
+    print(f" ─── Per Domain ───")
+    for cat in CATEGORIES:
+        if cat in metrics["per_domain"]:
+            d = metrics["per_domain"][cat]
+            print(f" {cat:25s}: {d['accuracy']:.1%} ({d['count']})")
+
+    print(f" ─── Per Task Type ───")
+    for task in TASK_CATEGORIES:
+        if task in metrics["per_task_type"]:
+            d = metrics["per_task_type"][task]
+            print(f" {task:25s}: {d['accuracy']:.1%} ({d['count']})")
+
+    print(f"{'=' * 65}")
+
+
+# ---------------------------------------------------------------------------
+# Main
+# ---------------------------------------------------------------------------
+def main() -> None:
+    args = parse_args()
+    label = args.label or (
+        Path(args.adapter).name if args.adapter
+        else Path(args.base_model).name
+    )
+
+    out_dir = args.output_dir / label
+    out_dir.mkdir(parents=True, exist_ok=True)
+    results_jsonl = out_dir / "eval_results.jsonl"
+    metrics_json = out_dir / "metrics.json"
+    summary_txt = out_dir / "summary.txt"
+
+    print("[data] Loading Video-MME dataset...")
+    test_data = load_videomme(args.video_dir, args.max_samples)
+    print(f"[data] {len(test_data)} questions ready for evaluation")
+
+    processed = set()
+    if results_jsonl.exists():
+        with open(results_jsonl) as f:
+            for line in f:
+                obj = json.loads(line)
+                processed.add(obj["question_id"])
+        print(f"[resume] {len(processed)} already processed, skipping")
+
+    use_vllm = args.vllm
+    model = processor = llm = None
+    vllm_preprocess_stats: Dict[str, int] | None = None
+
+    if use_vllm:
+        from vllm import LLM, SamplingParams
+        tp = args.tp or torch.cuda.device_count()
+        model_path = args.base_model
+        print(f"[vllm] Loading {model_path} with tp={tp} ...")
+        llm = LLM(
+            model=model_path,
+            tensor_parallel_size=tp,
+            max_model_len=args.max_model_len,
+            max_num_seqs=4,
+            limit_mm_per_prompt={"video": 1},
+            gpu_memory_utilization=args.gpu_memory_utilization,
+            dtype="bfloat16",
+            trust_remote_code=True,
+        )
+        sampling_params = SamplingParams(
+            temperature=args.temperature if args.temperature > 0 else 0.0,
+            top_p=0.9 if args.temperature > 0 else 1.0,
+            max_tokens=args.max_new_tokens,
+        )
+
+        print("[vllm] Preprocessing videos ...")
+        todo = [item for item in test_data if item["question_id"] not in processed]
+        preprocessed = {}
+        preprocess_failed_paths: set[str] = set()
+
+        for i, item in enumerate(todo):
+            vp = item["video_path"]
+            if vp in preprocessed or vp in preprocess_failed_paths:
+                continue
+            try:
+                preprocessed[vp] = preprocess_video_for_vllm(vp)
+            except Exception as e:
+                preprocess_failed_paths.add(vp)
+                print(f"  [skip] preprocess error: {Path(vp).name}: {e}")
+            if (i + 1) % 100 == 0:
+                print(f"  Preprocessed {i+1}/{len(todo)} ...")
+
+        n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
+        if preprocess_failed_paths:
+            print(
+                f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
+                f"{n_pp_skip} question(s) will not use vLLM (run continues)."
+            )
+            vllm_preprocess_stats = {
+                "preprocess_failed_videos": len(preprocess_failed_paths),
+                "preprocess_skipped_questions": n_pp_skip,
+            }
+
+        vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
+        fallback_items = []
+        print(f"[vllm] {len(vllm_todo)} questions ready, running inference ...")
+
+        for i, item in enumerate(vllm_todo):
+            if item["question_id"] in processed:
+                continue
+            inp = {
+                "prompt": build_vllm_prompt(item["prompt"]),
+                "multi_modal_data": {"video": preprocessed[item["video_path"]]},
+            }
+            try:
+                outputs = llm.generate([inp], sampling_params=sampling_params)
+                raw_output = outputs[0].outputs[0].text.strip()
+                pred = extract_answer(raw_output)
+                result = {
+                    "question_id": item["question_id"],
+                    "video_id": item["video_id"],
+                    "duration": item["duration"],
+                    "domain": item["domain"],
+                    "sub_category": item["sub_category"],
+                    "task_type": item["task_type"],
+                    "gt_answer": item["gt_answer"],
+                    "pred_answer": pred,
+                    "correct": pred.upper() == item["gt_answer"].upper(),
+                    "raw_output": raw_output,
+                }
+                with open(results_jsonl, "a", encoding="utf-8") as f:
+                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
+                processed.add(item["question_id"])
+            except (ValueError, RuntimeError) as exc:
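+                # Prompts exceeding max_model_len are deferred to the transformers fallback pass.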
| 535 |
+
if "longer than the maximum model length" in str(exc):
|
| 536 |
+
print(f" [too long] {item['question_id']} -> fallback")
|
| 537 |
+
fallback_items.append(item)
|
| 538 |
+
else:
|
| 539 |
+
raise
|
| 540 |
+
|
| 541 |
+
if (i + 1) % 100 == 0:
|
| 542 |
+
print(f" [vllm] [{i+1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")
|
| 543 |
+
|
| 544 |
+
preprocessed.clear()
|
| 545 |
+
|
| 546 |
+
vllm_results = []
|
| 547 |
+
if results_jsonl.exists():
|
| 548 |
+
with open(results_jsonl) as f:
|
| 549 |
+
for line in f:
|
| 550 |
+
vllm_results.append(json.loads(line))
|
| 551 |
+
if vllm_results:
|
| 552 |
+
vllm_metrics = compute_metrics(vllm_results)
|
| 553 |
+
vllm_metrics_path = out_dir / "metrics_vllm.json"
|
| 554 |
+
with open(vllm_metrics_path, "w", encoding="utf-8") as f:
|
| 555 |
+
json.dump(vllm_metrics, f, indent=2, ensure_ascii=False)
|
| 556 |
+
print(f"[vllm] Intermediate metrics saved to {vllm_metrics_path}")
|
| 557 |
+
print_summary(vllm_metrics, label + " (vllm only)")
|
| 558 |
+
|
| 559 |
+
if fallback_items:
|
| 560 |
+
print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
|
| 561 |
+
del llm
|
| 562 |
+
gc.collect()
|
| 563 |
+
torch.cuda.empty_cache()
|
| 564 |
+
|
| 565 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 566 |
+
for item in tqdm(fallback_items, desc="Fallback", unit="q"):
|
| 567 |
+
if item["question_id"] in processed:
|
| 568 |
+
continue
|
| 569 |
+
try:
|
| 570 |
+
raw_output = run_inference(
|
| 571 |
+
model, processor, item["video_path"], item["prompt"],
|
| 572 |
+
args.max_new_tokens, args.temperature,
|
| 573 |
+
)
|
| 574 |
+
except Exception as exc:
|
| 575 |
+
import traceback
|
| 576 |
+
print(f" [error] {item['question_id']}: {exc}")
|
| 577 |
+
traceback.print_exc()
|
| 578 |
+
raw_output = ""
|
| 579 |
+
|
| 580 |
+
pred = extract_answer(raw_output)
|
| 581 |
+
result = {
|
| 582 |
+
"question_id": item["question_id"],
|
| 583 |
+
"video_id": item["video_id"],
|
| 584 |
+
"duration": item["duration"],
|
| 585 |
+
"domain": item["domain"],
|
| 586 |
+
"sub_category": item["sub_category"],
|
| 587 |
+
"task_type": item["task_type"],
|
| 588 |
+
"gt_answer": item["gt_answer"],
|
| 589 |
+
"pred_answer": pred,
|
| 590 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 591 |
+
"raw_output": raw_output,
|
| 592 |
+
}
|
| 593 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 594 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 595 |
+
processed.add(item["question_id"])
|
| 596 |
+
gc.collect()
|
| 597 |
+
torch.cuda.empty_cache()
|
| 598 |
+
|
| 599 |
+
else:
|
| 600 |
+
print("[model] Loading model...")
|
| 601 |
+
model, processor = load_model(args.base_model, args.adapter)
|
| 602 |
+
|
| 603 |
+
for item in tqdm(test_data, desc="Video-MME", unit="q"):
|
| 604 |
+
if item["question_id"] in processed:
|
| 605 |
+
continue
|
| 606 |
+
|
| 607 |
+
try:
|
| 608 |
+
raw_output = run_inference(
|
| 609 |
+
model, processor, item["video_path"], item["prompt"],
|
| 610 |
+
args.max_new_tokens, args.temperature,
|
| 611 |
+
)
|
| 612 |
+
except Exception as exc:
|
| 613 |
+
import traceback
|
| 614 |
+
print(f" [error] {item['question_id']}: {exc}")
|
| 615 |
+
traceback.print_exc()
|
| 616 |
+
raw_output = ""
|
| 617 |
+
|
| 618 |
+
pred = extract_answer(raw_output)
|
| 619 |
+
|
| 620 |
+
result = {
|
| 621 |
+
"question_id": item["question_id"],
|
| 622 |
+
"video_id": item["video_id"],
|
| 623 |
+
"duration": item["duration"],
|
| 624 |
+
"domain": item["domain"],
|
| 625 |
+
"sub_category": item["sub_category"],
|
| 626 |
+
"task_type": item["task_type"],
|
| 627 |
+
"gt_answer": item["gt_answer"],
|
| 628 |
+
"pred_answer": pred,
|
| 629 |
+
"correct": pred.upper() == item["gt_answer"].upper(),
|
| 630 |
+
"raw_output": raw_output,
|
| 631 |
+
}
|
| 632 |
+
|
| 633 |
+
with open(results_jsonl, "a", encoding="utf-8") as f:
|
| 634 |
+
f.write(json.dumps(result, ensure_ascii=False) + "\n")
|
| 635 |
+
|
| 636 |
+
processed.add(item["question_id"])
|
| 637 |
+
gc.collect()
|
| 638 |
+
torch.cuda.empty_cache()
|
| 639 |
+
|
| 640 |
+
all_results = []
|
| 641 |
+
if results_jsonl.exists():
|
| 642 |
+
with open(results_jsonl) as f:
|
| 643 |
+
for line in f:
|
| 644 |
+
all_results.append(json.loads(line))
|
| 645 |
+
|
| 646 |
+
if not all_results:
|
| 647 |
+
print("[warn] No results to compute metrics from.")
|
| 648 |
+
return
|
| 649 |
+
|
| 650 |
+
metrics = compute_metrics(all_results)
|
| 651 |
+
metrics["eval_config"] = {
|
| 652 |
+
"base_model": args.base_model,
|
| 653 |
+
"adapter": args.adapter,
|
| 654 |
+
"video_dir": str(args.video_dir),
|
| 655 |
+
"max_new_tokens": args.max_new_tokens,
|
| 656 |
+
"temperature": args.temperature,
|
| 657 |
+
}
|
| 658 |
+
if vllm_preprocess_stats is not None:
|
| 659 |
+
metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats
|
| 660 |
+
|
| 661 |
+
with open(metrics_json, "w", encoding="utf-8") as f:
|
| 662 |
+
json.dump(metrics, f, indent=2, ensure_ascii=False)
|
| 663 |
+
|
| 664 |
+
print_summary(metrics, label)
|
| 665 |
+
|
| 666 |
+
with open(summary_txt, "w", encoding="utf-8") as f:
|
| 667 |
+
import io, contextlib
|
| 668 |
+
buf = io.StringIO()
|
| 669 |
+
with contextlib.redirect_stdout(buf):
|
| 670 |
+
print_summary(metrics, label)
|
| 671 |
+
f.write(buf.getvalue())
|
| 672 |
+
|
| 673 |
+
print(f"\n[output] Results: {results_jsonl}")
|
| 674 |
+
print(f"[output] Metrics: {metrics_json}")
|
| 675 |
+
print(f"[output] Summary: {summary_txt}")
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
if __name__ == "__main__":
|
| 679 |
+
main()
|
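Each evaluated question is appended to `results_jsonl` as one JSON object carrying `duration`, `correct`, and the other fields built in the `result` dict above, which is what makes resuming and post-hoc slicing cheap. A minimal sketch (not part of the diff) of recomputing per-duration accuracy from that file; the `results.jsonl` path below is a placeholder, not a path from the repo:

```python
import json
from collections import defaultdict

# duration bucket -> [correct, total]; field names match the result dict above
buckets = defaultdict(lambda: [0, 0])
with open("results.jsonl") as f:  # placeholder path
    for line in f:
        r = json.loads(line)
        buckets[r["duration"]][0] += int(r["correct"])
        buckets[r["duration"]][1] += 1

for duration, (correct, total) in sorted(buckets.items()):
    print(f"{duration}: {correct}/{total} = {correct / total:.1%}")
```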
hf_upload/scripts/merge_adapter.py
ADDED
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+"""
+Merge a LoRA adapter into the base Qwen3-Omni model and save full weights.
+
+Handles thinker-only adapter key remapping automatically.
+
+Usage:
+    python merge_adapter.py \
+        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
+        --adapter /opt/dlami/nvme/LlamaFactory/saves/Qwen3-Omni-Instruct/dpo/qwen3omni_dpo_lora_with_audio_v4_data_8632 \
+        --output /opt/dlami/nvme/merged_models/dpo_v4_8632
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import shutil
+import tempfile
+from pathlib import Path
+
+import torch
+
+
+def parse_args() -> argparse.Namespace:
+    p = argparse.ArgumentParser(description="Merge LoRA adapter into base model.")
+    p.add_argument("--base-model", type=str, required=True)
+    p.add_argument("--adapter", type=str, required=True)
+    p.add_argument("--output", type=str, required=True)
+    return p.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    from transformers import (
+        AutoConfig,
+        AutoProcessor,
+        Qwen3OmniMoeForConditionalGeneration,
+        Qwen3OmniMoeThinkerConfig,
+        Qwen3OmniMoeThinkerForConditionalGeneration,
+    )
+
+    print(f"[1/5] Loading processor from {args.base_model} ...")
+    processor = AutoProcessor.from_pretrained(args.base_model, trust_remote_code=True)
+
+    model_path = Path(args.base_model)
+    cfg_path = model_path / "config.json" if model_path.exists() else None
+    model_type = None
+    if cfg_path and cfg_path.exists():
+        with open(cfg_path) as f:
+            model_type = json.load(f).get("model_type")
+    if not model_type:
+        try:
+            from huggingface_hub import hf_hub_download
+            cached = hf_hub_download(args.base_model, "config.json")
+            with open(cached) as f:
+                model_type = json.load(f).get("model_type")
+        except Exception:
+            pass
+    print(f" model_type: {model_type}")
+
+    print("[2/5] Loading base model ...")
+    if model_type == "qwen3_omni_moe_thinker":
+        config = Qwen3OmniMoeThinkerConfig.from_pretrained(args.base_model)
+        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
+            args.base_model, config=config, torch_dtype=torch.bfloat16, device_map="cpu",
+        )
+    else:
+        config = AutoConfig.from_pretrained(args.base_model, trust_remote_code=True)
+        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
+            args.base_model, config=config, torch_dtype=torch.bfloat16, device_map="cpu",
+        )
+
+    print(f"[3/5] Loading and remapping LoRA adapter: {args.adapter} ...")
+    from peft import PeftModel
+    from safetensors.torch import load_file, save_file
+
+    adapter_cfg_path = Path(args.adapter) / "adapter_config.json"
+    with open(adapter_cfg_path) as f:
+        adapter_cfg = json.load(f)
+    target_modules = adapter_cfg.get("target_modules", [])
+    needs_remap = (
+        any(t.startswith("model.layers.") for t in target_modules)
+        and model_type != "qwen3_omni_moe_thinker"
+    )
+
+    adapter_path = args.adapter
+    if needs_remap:
+        print(" Adapter was trained on thinker-only model; remapping keys...")
+        tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
+        for fn in Path(args.adapter).iterdir():
+            if fn.is_dir():
+                continue
+            if fn.name == "adapter_config.json":
+                new_targets = []
+                for t in target_modules:
+                    if t.startswith("model.layers."):
+                        new_targets.append("thinker." + t)
+                    elif t[0].isdigit():
+                        new_targets.append("thinker.model.layers." + t)
+                    else:
+                        new_targets.append(t)
+                adapter_cfg["target_modules"] = new_targets
+                with open(tmp_dir / "adapter_config.json", "w") as f:
+                    json.dump(adapter_cfg, f, indent=2)
+            elif fn.suffix == ".safetensors" and "adapter" in fn.name:
+                tensors = load_file(str(fn))
+                remapped = {}
+                for k, v in tensors.items():
+                    if ".model.layers." in k and ".thinker." not in k:
+                        new_k = k.replace(
+                            "base_model.model.model.layers.",
+                            "base_model.model.thinker.model.layers.",
+                        )
+                        remapped[new_k] = v
+                    else:
+                        remapped[k] = v
+                save_file(remapped, str(tmp_dir / fn.name))
+            else:
+                shutil.copy2(str(fn), str(tmp_dir / fn.name))
+        adapter_path = str(tmp_dir)
+
+    model = PeftModel.from_pretrained(model, adapter_path)
+
+    print("[4/5] Merging and unloading LoRA weights ...")
+    model = model.merge_and_unload()
+
+    out_path = Path(args.output)
+    out_path.mkdir(parents=True, exist_ok=True)
+    print(f"[5/5] Saving merged model to {out_path} ...")
+    model.save_pretrained(out_path, safe_serialization=True)
+    processor.save_pretrained(out_path)
+
+    print(f"\nDone. Merged model saved to: {out_path}")
+
+
+if __name__ == "__main__":
+    main()
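The remap step exists because a LoRA adapter trained against the thinker-only model stores keys like `base_model.model.model.layers.*`, while the full `Qwen3OmniMoeForConditionalGeneration` expects the extra `thinker.` segment. A small self-contained illustration of the exact substitution the script applies (the sample key is made up for demonstration):

```python
# Illustration only: the key remap rule merge_adapter.py applies to a
# thinker-only LoRA checkpoint before merging into the full model.
def remap_key(k: str) -> str:
    # Same substitution as in the script: insert the `thinker.` segment.
    if ".model.layers." in k and ".thinker." not in k:
        return k.replace(
            "base_model.model.model.layers.",
            "base_model.model.thinker.model.layers.",
        )
    return k

assert remap_key(
    "base_model.model.model.layers.0.self_attn.q_proj.lora_A.weight"
) == "base_model.model.thinker.model.layers.0.self_attn.q_proj.lora_A.weight"
```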
hf_upload/setup_data.sh
ADDED
@@ -0,0 +1,179 @@
+#!/usr/bin/env bash
+# Download all evaluation data to /opt/dlami/nvme (or WORK_ROOT).
+#
+# Downloads:
+#   1) Video-MME benchmark videos
+#   2) LVBench benchmark videos
+#   3) Sync eval: original oops videos, random-shift videos, extracted audio
+#   4) kto_training_data_v2_test.jsonl (sync eval test set, also in repo data/)
+#
+# Requires: conda env already set up (bash setup_env.sh first).
+#
+# Usage:
+#   bash setup_data.sh
+#   WORK_ROOT=/my/fast/disk bash setup_data.sh
+#
+set -euo pipefail
+
+WORK_ROOT="${WORK_ROOT:-/opt/dlami/nvme}"
+CONDA_ENV="${CONDA_ENV:-video}"
+INSTALL_DIR="${INSTALL_DIR:-${HOME}/anaconda3}"
+HF_CACHE="${HF_CACHE:-${HOME}/hf_datasets}"
+REPO_ROOT="$(cd "$(dirname "$0")" && pwd)"
+
+# Sync eval paths (align with eval_dpo_sync.py --data-root default)
+DATA_ROOT="${WORK_ROOT}/video_source"
+ORIGINAL_ROOT="${DATA_ROOT}/original"
+ORIGINAL_OOPS_DIR="${ORIGINAL_ROOT}/uag_oops"
+RANDOM_SHIFT_DIR="${DATA_ROOT}/random_shift_video"
+EXTRACTED_AUDIO_DIR="${DATA_ROOT}/extracted_audio"
+
+log() { echo -e "\n\033[1;36m>>> $*\033[0m"; }
+
+# ── Activate conda ──────────────────────────────────────────────────────────
+if [[ -f "${INSTALL_DIR}/etc/profile.d/conda.sh" ]]; then
+  source "${INSTALL_DIR}/etc/profile.d/conda.sh"
+elif command -v conda &>/dev/null; then
+  eval "$(conda shell.bash hook)"
+else
+  echo "Error: conda not found. Run setup_env.sh first."
+  exit 1
+fi
+conda activate "${CONDA_ENV}"
+
+# ── Detect HF CLI ───────────────────────────────────────────────────────────
+if command -v hf &>/dev/null; then
+  HF_CLI="hf"
+elif command -v huggingface-cli &>/dev/null; then
+  HF_CLI="huggingface-cli"
+else
+  echo "Error: neither 'hf' nor 'huggingface-cli' found. Run setup_env.sh first."
+  exit 1
+fi
+echo "Using HF CLI: ${HF_CLI}"
+
+# ── Helpers ─────────────────────────────────────────────────────────────────
+has_mp4_files() { compgen -G "$1/*.mp4" &>/dev/null; }
+has_wav_files() { find "$1" -name "*.wav" -print -quit 2>/dev/null | grep -q .; }
+
+flatten_mp4s() {
+  local dir="$1"
+  shopt -s globstar nullglob
+  for src in "${dir}"/**/*.mp4; do
+    [[ "$(dirname "$src")" != "${dir}" ]] && mv -n "$src" "${dir}/"
+  done
+  shopt -u globstar nullglob
+}
+
+###############################################################################
+# 1) Video-MME
+###############################################################################
+log "[1/5] Video-MME"
+VIDEOMME_DIR="${WORK_ROOT}/videomme"
+if [[ -d "${VIDEOMME_DIR}/data/data" ]] && has_mp4_files "${VIDEOMME_DIR}/data/data"; then
+  echo " Already present at ${VIDEOMME_DIR}/data/data; skipping."
+else
+  python "${REPO_ROOT}/scripts/download_videomme.py" --output-dir "${VIDEOMME_DIR}"
+fi
+
+###############################################################################
+# 2) LVBench
+###############################################################################
+log "[2/5] LVBench"
+LVBENCH_DIR="${WORK_ROOT}/lvbench"
+if [[ -d "${LVBENCH_DIR}" ]] && has_mp4_files "${LVBENCH_DIR}"; then
+  echo " Already present at ${LVBENCH_DIR}; skipping."
+else
+  python "${REPO_ROOT}/scripts/download_lvbench.py" --output-dir "${LVBENCH_DIR}"
+fi
+
+###############################################################################
+# 3) Original oops videos (hasnat79/ual_bench → original/uag_oops/)
+###############################################################################
+log "[3/5] Original oops videos (sync eval)"
+mkdir -p "${ORIGINAL_ROOT}" "${HF_CACHE}/ual_bench"
+if has_mp4_files "${ORIGINAL_OOPS_DIR}"; then
+  echo " Already present at ${ORIGINAL_OOPS_DIR}; skipping."
+else
+  "${HF_CLI}" download hasnat79/ual_bench \
+    --repo-type dataset \
+    --include "uag_oops.tar" \
+    --local-dir "${HF_CACHE}/ual_bench"
+  tar -xf "${HF_CACHE}/ual_bench/uag_oops.tar" -C "${ORIGINAL_ROOT}"
+fi
+
+###############################################################################
+# 4) Random-shift videos (Rakancorle11/random_shift_video → random_shift_video/{delay,early}/)
+###############################################################################
+log "[4/5] Random-shift videos (sync eval)"
+mkdir -p "${RANDOM_SHIFT_DIR}" "${HF_CACHE}/random_shift_video"
+if has_mp4_files "${RANDOM_SHIFT_DIR}/delay" && has_mp4_files "${RANDOM_SHIFT_DIR}/early"; then
+  echo " Already present; skipping."
+else
+  "${HF_CLI}" download Rakancorle11/random_shift_video \
+    --repo-type dataset \
+    --local-dir "${HF_CACHE}/random_shift_video"
+
+  mkdir -p "${RANDOM_SHIFT_DIR}/delay" "${RANDOM_SHIFT_DIR}/early"
+  tar -xzf "${HF_CACHE}/random_shift_video/delay.tar.gz" -C "${RANDOM_SHIFT_DIR}/delay"
+  tar -xzf "${HF_CACHE}/random_shift_video/early.tar.gz" -C "${RANDOM_SHIFT_DIR}/early"
+
+  if [[ -f "${HF_CACHE}/random_shift_video/metadata/shift_metadata.jsonl" ]]; then
+    cp "${HF_CACHE}/random_shift_video/metadata/shift_metadata.jsonl" "${RANDOM_SHIFT_DIR}/"
+  fi
+
+  flatten_mp4s "${RANDOM_SHIFT_DIR}/delay"
+  flatten_mp4s "${RANDOM_SHIFT_DIR}/early"
+fi
+
+###############################################################################
+# 5) Extracted audio WAVs (Rakancorle11/extracted_audio → extracted_audio/)
+###############################################################################
+log "[5/5] Extracted audio (sync eval)"
+mkdir -p "${EXTRACTED_AUDIO_DIR}" "${HF_CACHE}/extracted_audio"
+if has_wav_files "${EXTRACTED_AUDIO_DIR}"; then
+  echo " Already present; skipping."
+else
+  "${HF_CLI}" download Rakancorle11/extracted_audio \
+    --repo-type dataset \
+    --include "extracted_audio.tar.gz" \
+    --local-dir "${HF_CACHE}/extracted_audio"
+  tar -xzf "${HF_CACHE}/extracted_audio/extracted_audio.tar.gz" -C "${DATA_ROOT}"
+fi
+
+###############################################################################
+# Copy test JSONL into DATA_ROOT (so --test-jsonl can default)
+###############################################################################
+TEST_JSONL_SRC="${REPO_ROOT}/data/kto_training_data_v2_test.jsonl"
+TEST_JSONL_DST="${DATA_ROOT}/kto_training_data_v2_test.jsonl"
+if [[ -f "${TEST_JSONL_SRC}" ]] && [[ ! -f "${TEST_JSONL_DST}" ]]; then
+  cp "${TEST_JSONL_SRC}" "${TEST_JSONL_DST}"
+  echo "Copied test JSONL → ${TEST_JSONL_DST}"
+fi
+
+###############################################################################
+# Summary
+###############################################################################
+echo ""
+echo "========================================"
+echo " Data setup complete"
+echo "========================================"
+echo " Video-MME videos:     ${VIDEOMME_DIR}/data/data/"
+echo " LVBench videos:       ${LVBENCH_DIR}/"
+echo " Sync originals:       ${ORIGINAL_OOPS_DIR}/"
+echo " Sync random_shift:    ${RANDOM_SHIFT_DIR}/{delay,early}/"
+echo " Sync extracted_audio: ${EXTRACTED_AUDIO_DIR}/"
+echo " Sync test JSONL:      ${TEST_JSONL_DST}"
+echo ""
+echo " original mp4:        $(find "${ORIGINAL_OOPS_DIR}" -name '*.mp4' 2>/dev/null | wc -l)"
+echo " random_shift delay:  $(find "${RANDOM_SHIFT_DIR}/delay" -name '*.mp4' 2>/dev/null | wc -l)"
+echo " random_shift early:  $(find "${RANDOM_SHIFT_DIR}/early" -name '*.mp4' 2>/dev/null | wc -l)"
+echo " extracted_audio wav: $(find "${EXTRACTED_AUDIO_DIR}" -name '*.wav' 2>/dev/null | wc -l)"
+echo ""
+echo "FineVideo pipeline: run these manually (requires Gemini API keys):"
+echo "  python code-202604/finevideo/filter_finevideo.py --target-count 1000 --early-stop"
+echo "  python code-202604/finevideo/download_finevideo_subset.py --max-videos 500"
+echo "  python code-202604/finevideo/extract_audio_finevideo.py"
+echo "  python code-202604/finevideo/generate_finevideo_sft.py --api-key YOUR_KEY"
+echo "  python code-202604/finevideo/generate_finevideo_dpo.py"
+echo "========================================"
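After the script finishes, the summary block already prints `find | wc -l` counts; a tiny Python sketch for the same layout check, useful when debugging a partial download (the root assumes the default `WORK_ROOT=/opt/dlami/nvme` used above):

```python
from pathlib import Path

# Mirrors the directory layout setup_data.sh is expected to leave behind.
root = Path("/opt/dlami/nvme/video_source")
for sub, pattern in [
    ("original/uag_oops", "*.mp4"),
    ("random_shift_video/delay", "*.mp4"),
    ("random_shift_video/early", "*.mp4"),
    ("extracted_audio", "**/*.wav"),
]:
    n = len(list((root / sub).glob(pattern)))
    print(f"{sub}: {n} files")
```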
hf_upload/setup_env.sh
ADDED
@@ -0,0 +1,126 @@
+#!/usr/bin/env bash
+# CleverHans-Evaluation — environment only (no data, no eval).
+#
+# On a machine *without* conda: downloads and installs Anaconda under INSTALL_DIR,
+# then creates CONDA_ENV and pip-installs eval dependencies (aligned with ~/set_up_env.sh).
+#
+# On a machine that already has conda: skips the installer and only creates/updates the env.
+#
+# Usage:
+#   bash setup_env.sh
+#   INSTALL_DIR=/opt/conda CONDA_ENV=cleverhans bash setup_env.sh
+#   CUDA_INDEX_URL=https://download.pytorch.org/whl/cu124 bash setup_env.sh
+#
+set -euo pipefail
+
+# ----- Config (override with env vars) -----
+CONDA_ENV="${CONDA_ENV:-video}"
+PYTHON_VER="${PYTHON_VER:-3.12}"
+INSTALL_DIR="${INSTALL_DIR:-${HOME}/anaconda3}"
+INSTALLER_PATH="${INSTALLER_PATH:-${HOME}/anaconda3_installer.sh}"
+
+ANACONDA_VERSION="${ANACONDA_VERSION:-Anaconda3-2025.12-2-Linux-x86_64.sh}"
+ANACONDA_URL="${ANACONDA_URL:-https://repo.anaconda.com/archive/${ANACONDA_VERSION}}"
+
+CUDA_INDEX_URL="${CUDA_INDEX_URL:-https://download.pytorch.org/whl/cu124}"
+# -------------------------------------------
+
+log() { echo "[setup_env] $*"; }
+
+log "Checking architecture..."
+ARCH="$(uname -m)"
+if [[ "${ARCH}" != "x86_64" ]]; then
+  echo "Error: this Anaconda installer path supports x86_64 only (found ${ARCH})."
+  echo "Install conda manually for your arch, then re-run this script."
+  exit 1
+fi
+
+bootstrap_conda() {
+  if command -v conda &>/dev/null; then
+    log "conda already on PATH: $(command -v conda)"
+    return 0
+  fi
+
+  if [[ -x "${INSTALL_DIR}/bin/conda" ]]; then
+    log "Using existing conda at ${INSTALL_DIR}"
+    # shellcheck source=/dev/null
+    source "${INSTALL_DIR}/etc/profile.d/conda.sh"
+    return 0
+  fi
+
+  log "No conda found. Downloading Anaconda installer..."
+  if ! command -v wget &>/dev/null; then
+    echo "Error: wget not found. Install wget (e.g. apt install wget) and retry."
+    exit 1
+  fi
+
+  wget -O "${INSTALLER_PATH}" "${ANACONDA_URL}"
+
+  log "Installing Anaconda to ${INSTALL_DIR} (batch mode)..."
+  bash "${INSTALLER_PATH}" -b -p "${INSTALL_DIR}"
+
+  # shellcheck source=/dev/null
+  source "${INSTALL_DIR}/etc/profile.d/conda.sh"
+
+  log "Initializing conda for bash (future shells)..."
+  "${INSTALL_DIR}/bin/conda" init bash >/dev/null 2>&1 || true
+
+  log "Anaconda installed. You can remove the installer: rm -f ${INSTALLER_PATH}"
+}
+
+bootstrap_conda
+
+# Ensure hook is available whether conda came from PATH or fresh install
+if ! command -v conda &>/dev/null; then
+  if [[ -f "${INSTALL_DIR}/etc/profile.d/conda.sh" ]]; then
+    # shellcheck source=/dev/null
+    source "${INSTALL_DIR}/etc/profile.d/conda.sh"
+  else
+    echo "Error: conda still not available after install."
+    exit 1
+  fi
+else
+  eval "$(conda shell.bash hook)"
+fi
+
+log "Accepting Anaconda channel Terms of Service (if supported by this conda)..."
+conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/main 2>/dev/null || true
+conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/r 2>/dev/null || true
+
+log "Creating conda environment: ${CONDA_ENV} (python=${PYTHON_VER})..."
+if conda env list | awk '{print $1}' | grep -Fxq "${CONDA_ENV}"; then
+  log "Environment '${CONDA_ENV}' already exists; activating."
+  conda activate "${CONDA_ENV}"
+else
+  conda create -n "${CONDA_ENV}" "python=${PYTHON_VER}" -y
+  conda activate "${CONDA_ENV}"
+fi
+
+log "Installing Python packages for Video-MME / LVBench / sync eval + vLLM..."
+python -m pip install --upgrade pip
+
+# GPU PyTorch (adjust CUDA_INDEX_URL if your driver needs another cu12x build)
+pip install torch torchvision torchaudio --index-url "${CUDA_INDEX_URL}"
+
+pip install transformers accelerate peft safetensors
+pip install -U "huggingface_hub[cli]"
+pip install datasets tqdm openai
+pip install qwen-omni-utils
+pip install soundfile librosa
+pip install google-genai
+# Helpful for video decode paths used by torch/qwen stack (optional but recommended)
+pip install decord opencv-python-headless
+pip install torchcodec || echo "[setup_env] Warning: torchcodec failed; decode may fall back to torchvision."
+
+pip install vllm
+
+log "Done."
+echo ""
+echo "  Active env: ${CONDA_ENV}"
+echo "  Python:     $(command -v python)"
+echo "  Conda base: ${INSTALL_DIR}"
+echo ""
+echo "Next: conda activate ${CONDA_ENV}"
+echo "      Then follow README.md and COMMANDS.md (data download + eval commands)."
+echo ""
+echo "Tip: install system ffmpeg if missing (Ubuntu: sudo apt install -y ffmpeg)."
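A quick post-install sanity check to run inside the freshly created env; a convenience sketch, not part of setup_env.sh. It confirms the GPU PyTorch build and that the key eval dependencies installed above resolve:

```python
import importlib

import torch

print("torch", torch.__version__, "| CUDA available:", torch.cuda.is_available())
# Modules drawn from the pip installs in setup_env.sh
for mod in ["transformers", "peft", "vllm", "soundfile", "librosa"]:
    try:
        importlib.import_module(mod)
        print(f"{mod}: OK")
    except ImportError as exc:
        print(f"{mod}: MISSING ({exc})")
```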
merge_adapter.py
ADDED
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+"""
+Merge a LoRA adapter into the base Qwen3-Omni model and save full weights.
+
+Handles thinker-only adapter key remapping automatically.
+
+Usage:
+    python merge_adapter.py \
+        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
+        --adapter /opt/dlami/nvme/LlamaFactory/saves/Qwen3-Omni-Instruct/dpo/qwen3omni_dpo_lora_with_audio_v4_data_8632 \
+        --output /opt/dlami/nvme/merged_models/dpo_v4_8632
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import shutil
+import tempfile
+from pathlib import Path
+
+import torch
+
+
+def parse_args() -> argparse.Namespace:
+    p = argparse.ArgumentParser(description="Merge LoRA adapter into base model.")
+    p.add_argument("--base-model", type=str, required=True)
+    p.add_argument("--adapter", type=str, required=True)
+    p.add_argument("--output", type=str, required=True)
+    return p.parse_args()
+
+
+def main() -> None:
+    args = parse_args()
+
+    from transformers import (
+        AutoConfig,
+        AutoProcessor,
+        Qwen3OmniMoeForConditionalGeneration,
+        Qwen3OmniMoeThinkerConfig,
+        Qwen3OmniMoeThinkerForConditionalGeneration,
+    )
+
+    print(f"[1/5] Loading processor from {args.base_model} ...")
+    processor = AutoProcessor.from_pretrained(args.base_model, trust_remote_code=True)
+
+    model_path = Path(args.base_model)
+    cfg_path = model_path / "config.json" if model_path.exists() else None
+    model_type = None
+    if cfg_path and cfg_path.exists():
+        with open(cfg_path) as f:
+            model_type = json.load(f).get("model_type")
+    if not model_type:
+        try:
+            from huggingface_hub import hf_hub_download
+            cached = hf_hub_download(args.base_model, "config.json")
+            with open(cached) as f:
+                model_type = json.load(f).get("model_type")
+        except Exception:
+            pass
+    print(f" model_type: {model_type}")
+
+    print("[2/5] Loading base model ...")
+    if model_type == "qwen3_omni_moe_thinker":
+        config = Qwen3OmniMoeThinkerConfig.from_pretrained(args.base_model)
+        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
+            args.base_model, config=config, torch_dtype=torch.bfloat16, device_map="cpu",
+        )
+    else:
+        config = AutoConfig.from_pretrained(args.base_model, trust_remote_code=True)
+        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
+            args.base_model, config=config, torch_dtype=torch.bfloat16, device_map="cpu",
+        )
+
+    print(f"[3/5] Loading and remapping LoRA adapter: {args.adapter} ...")
+    from peft import PeftModel
+    from safetensors.torch import load_file, save_file
+
+    adapter_cfg_path = Path(args.adapter) / "adapter_config.json"
+    with open(adapter_cfg_path) as f:
+        adapter_cfg = json.load(f)
+    target_modules = adapter_cfg.get("target_modules", [])
+    needs_remap = (
+        any(t.startswith("model.layers.") for t in target_modules)
+        and model_type != "qwen3_omni_moe_thinker"
+    )
+
+    adapter_path = args.adapter
+    if needs_remap:
+        print(" Adapter was trained on thinker-only model; remapping keys...")
+        tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
+        for fn in Path(args.adapter).iterdir():
+            if fn.is_dir():
+                continue
+            if fn.name == "adapter_config.json":
+                new_targets = []
+                for t in target_modules:
+                    if t.startswith("model.layers."):
+                        new_targets.append("thinker." + t)
+                    elif t[0].isdigit():
+                        new_targets.append("thinker.model.layers." + t)
+                    else:
+                        new_targets.append(t)
+                adapter_cfg["target_modules"] = new_targets
+                with open(tmp_dir / "adapter_config.json", "w") as f:
+                    json.dump(adapter_cfg, f, indent=2)
+            elif fn.suffix == ".safetensors" and "adapter" in fn.name:
+                tensors = load_file(str(fn))
+                remapped = {}
+                for k, v in tensors.items():
+                    if ".model.layers." in k and ".thinker." not in k:
+                        new_k = k.replace(
+                            "base_model.model.model.layers.",
+                            "base_model.model.thinker.model.layers.",
+                        )
+                        remapped[new_k] = v
+                    else:
+                        remapped[k] = v
+                save_file(remapped, str(tmp_dir / fn.name))
+            else:
+                shutil.copy2(str(fn), str(tmp_dir / fn.name))
+        adapter_path = str(tmp_dir)
+
+    model = PeftModel.from_pretrained(model, adapter_path)
+
+    print("[4/5] Merging and unloading LoRA weights ...")
+    model = model.merge_and_unload()
+
+    out_path = Path(args.output)
+    out_path.mkdir(parents=True, exist_ok=True)
+    print(f"[5/5] Saving merged model to {out_path} ...")
+    model.save_pretrained(out_path, safe_serialization=True)
+    processor.save_pretrained(out_path)
+
+    print(f"\nDone. Merged model saved to: {out_path}")
+
+
+if __name__ == "__main__":
+    main()
run_qwen3omni_sync_diag_3prompts.sh
ADDED
@@ -0,0 +1,94 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Usage:
+#   bash /home/ubuntu/run_qwen_sync_diagnostics.sh \
+#     "/home/ubuntu/data_pair/34-5" \
+#     "34 Funny Kid Nominees - FailArmy Hall Of Fame (May 2017)5"
+#
+# It expects these 3 files in the folder:
+#   <base>.mp4
+#   <base>_early2s.mp4
+#   <base>_delay2s.mp4
+
+TARGET_DIR="${1:-/home/ubuntu/data_pair/34-5}"
+BASE_NAME="${2:-34 Funny Kid Nominees - FailArmy Hall Of Fame (May 2017)5}"
+OUT_DIR="${TARGET_DIR}/results_sync_eval"
+
+ORIG="${TARGET_DIR}/${BASE_NAME}.mp4"
+EARLY="${TARGET_DIR}/${BASE_NAME}_early2s.mp4"
+DELAY="${TARGET_DIR}/${BASE_NAME}_delay2s.mp4"
+
+mkdir -p "${OUT_DIR}"
+
+for f in "${ORIG}" "${EARLY}" "${DELAY}"; do
+  if [[ ! -f "${f}" ]]; then
+    echo "Missing file: ${f}"
+    exit 1
+  fi
+done
+
+read -r -d '' PROMPT_1 <<'EOF' || true
+You are evaluating audio-video temporal consistency.
+Task:
+1) Briefly describe key visual events in time order.
+2) Identify the most salient sound event and when it occurs.
+3) Judge whether audio is synchronized with visuals.
+4) If not synchronized, estimate signed offset in seconds:
+   - Positive offset => audio lags behind video.
+   - Negative offset => audio leads video.
+Return strictly in this format:
+SYNC_JUDGMENT: <synced|not_synced|uncertain>
+ESTIMATED_OFFSET_SECONDS: <number or unknown>
+EVIDENCE: <short explanation>
+EOF
+
+read -r -d '' PROMPT_2 <<'EOF' || true
+Check whether this video looks manipulated in audio timing.
+Focus on impact moments (falls, hits, collisions) and whether corresponding sounds align.
+Return strictly:
+MANIPULATION_SUSPECTED: <yes|no|uncertain>
+LIKELY_TYPE: <audio_early|audio_delay|none|unknown>
+ESTIMATED_SHIFT_SECONDS: <number or unknown>
+CONFIDENCE: <0-100>
+REASON: <one short paragraph>
+EOF
+
+read -r -d '' PROMPT_3 <<'EOF' || true
+Evaluate the audio-visual synchronization of this video. Choose ONLY ONE of the following options and provide a one-sentence justification:
+[A] Normal: Audio and video are perfectly synchronized.
+[B] Early Audio: The sound of the main event happens BEFORE the visual action.
+[C] Delayed Audio: The sound of the main event happens AFTER the visual action.
+
+Your choice:
+EOF
+
+run_one() {
+  local label="$1"
+  local video="$2"
+  local prompt_id="$3"
+  local prompt_text="$4"
+  local out_file="${OUT_DIR}/${label}_prompt${prompt_id}.txt"
+
+  python /home/ubuntu/test_qwen3omni_audio_video.py \
+    --video "${video}" \
+    --cases av \
+    --prompt "${prompt_text}" \
+    | tee "${out_file}"
+}
+
+echo "Running sync diagnostics into: ${OUT_DIR}"
+
+run_one "original" "${ORIG}" "1" "${PROMPT_1}"
+run_one "early2s" "${EARLY}" "1" "${PROMPT_1}"
+run_one "delay2s" "${DELAY}" "1" "${PROMPT_1}"
+
+run_one "original" "${ORIG}" "2" "${PROMPT_2}"
+run_one "early2s" "${EARLY}" "2" "${PROMPT_2}"
+run_one "delay2s" "${DELAY}" "2" "${PROMPT_2}"
+
+run_one "original" "${ORIG}" "3" "${PROMPT_3}"
+run_one "early2s" "${EARLY}" "3" "${PROMPT_3}"
+run_one "delay2s" "${DELAY}" "3" "${PROMPT_3}"
+
+echo "Done. Results in: ${OUT_DIR}"
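Because PROMPT_1 forces a strict `SYNC_JUDGMENT:` / `ESTIMATED_OFFSET_SECONDS:` format, the per-video `.txt` files that `run_one` tees out are easy to tabulate afterwards. A minimal parsing sketch; the directory path mirrors the script's default `OUT_DIR` and is otherwise an assumption:

```python
import re
from pathlib import Path

# Default OUT_DIR from the script above (TARGET_DIR/results_sync_eval)
out_dir = Path("/home/ubuntu/data_pair/34-5/results_sync_eval")
for txt in sorted(out_dir.glob("*_prompt1.txt")):
    text = txt.read_text()
    judgment = re.search(r"SYNC_JUDGMENT:\s*(\S+)", text)
    offset = re.search(r"ESTIMATED_OFFSET_SECONDS:\s*(\S+)", text)
    print(txt.name,
          judgment.group(1) if judgment else "?",
          offset.group(1) if offset else "?")
```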
sft_model_convert_and_onpolicy_dpo/convert_thinker_to_full.py
ADDED
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+"""
+Convert a LlamaFactory SFT thinker-only checkpoint back into a full
+Qwen3OmniMoeForConditionalGeneration checkpoint that AutoModel, vLLM,
+and other tools can load directly.
+
+What it does:
+  1. Reads SFT thinker weights (keys like model.layers.xxx)
+  2. Adds 'thinker.' prefix to every key
+  3. Copies talker/code2wav weights from the vanilla model
+  4. Uses vanilla config/tokenizer/processor (model_type: qwen3_omni_moe)
+  5. Writes everything to a new output directory
+
+The original SFT checkpoint is NEVER modified.
+
+Usage:
+    python convert_thinker_to_full.py
+    python convert_thinker_to_full.py --output /path/to/output
+"""
+
+import argparse
+import json
+import os
+import shutil
+from collections import OrderedDict
+from pathlib import Path
+
+VANILLA = Path("/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen3-Omni-30B-A3B-Instruct/snapshots/26291f793822fb6be9555850f06dfe95f2d7e695")
+SFT_THINKER = Path("/opt/dlami/nvme/qwen3omni_sft_thinker_backup")
+DEFAULT_OUTPUT = Path("/opt/dlami/nvme/qwen3omni_full_sft_converted")
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Convert thinker checkpoint to full Qwen3OmniMoe")
+    parser.add_argument("--sft", type=Path, default=SFT_THINKER, help="SFT thinker checkpoint dir")
+    parser.add_argument("--vanilla", type=Path, default=VANILLA, help="Vanilla full model dir")
+    parser.add_argument("--output", type=Path, default=DEFAULT_OUTPUT, help="Output directory")
+    args = parser.parse_args()
+
+    from safetensors import safe_open
+    from safetensors.torch import save_file
+
+    out = args.output
+    out.mkdir(parents=True, exist_ok=True)
+    print(f"SFT thinker: {args.sft}")
+    print(f"Vanilla: {args.vanilla}")
+    print(f"Output: {out}")
+
+    # --- Step 1: Copy config/tokenizer/processor from vanilla ---
+    config_files = [
+        "config.json", "generation_config.json", "tokenizer.json",
+        "tokenizer_config.json", "processor_config.json",
+        "chat_template.jinja", "preprocessor_config.json",
+    ]
+    for fname in config_files:
+        src = args.vanilla / fname
+        if src.exists():
+            shutil.copy2(src, out / fname)
+            print(f" Copied config: {fname}")
+
+    # --- Step 2: Load index files ---
+    sft_index = json.load(open(args.sft / "model.safetensors.index.json"))
+    vanilla_index = json.load(open(args.vanilla / "model.safetensors.index.json"))
+
+    sft_wm = sft_index["weight_map"]
+    vanilla_wm = vanilla_index["weight_map"]
+
+    sft_shards = sorted(set(sft_wm.values()))
+    vanilla_shards = sorted(set(vanilla_wm.values()))
+
+    print(f"\n SFT: {len(sft_wm)} keys in {len(sft_shards)} shards")
+    print(f" Vanilla: {len(vanilla_wm)} keys in {len(vanilla_shards)} shards")
+
+    # --- Step 3: Write SFT weights with thinker. prefix ---
+    new_weight_map = OrderedDict()
+    shard_counter = 1
+
+    for sft_shard in sft_shards:
+        new_shard_name = f"model-{shard_counter:05d}-of-99999.safetensors"
+        print(f"\n Processing SFT shard: {sft_shard} -> {new_shard_name}")
+
+        tensors = OrderedDict()
+        with safe_open(args.sft / sft_shard, framework="pt", device="cpu") as f:
+            for key in f.keys():
+                new_key = f"thinker.{key}"
+                tensors[new_key] = f.get_tensor(key)
+                new_weight_map[new_key] = new_shard_name
+
+        save_file(tensors, out / new_shard_name)
+        size_gb = os.path.getsize(out / new_shard_name) / 1e9
+        print(f" Wrote {len(tensors)} tensors ({size_gb:.1f} GB)")
+        del tensors
+        shard_counter += 1
+
+    # --- Step 4: Copy non-thinker weights from vanilla ---
+    non_thinker_keys = {k: v for k, v in vanilla_wm.items() if not k.startswith("thinker.")}
+    non_thinker_shards = sorted(set(non_thinker_keys.values()))
+
+    for vanilla_shard in non_thinker_shards:
+        keys_in_shard = [k for k, v in vanilla_wm.items() if v == vanilla_shard]
+        nt_keys = [k for k in keys_in_shard if not k.startswith("thinker.")]
+        t_keys = [k for k in keys_in_shard if k.startswith("thinker.")]
+
+        new_shard_name = f"model-{shard_counter:05d}-of-99999.safetensors"
+
+        if t_keys:
+            # Mixed shard: extract only non-thinker tensors
+            print(f"\n Extracting non-thinker from mixed shard: {vanilla_shard} -> {new_shard_name}")
+            tensors = OrderedDict()
+            with safe_open(args.vanilla / vanilla_shard, framework="pt", device="cpu") as f:
+                for key in nt_keys:
+                    tensors[key] = f.get_tensor(key)
+                    new_weight_map[key] = new_shard_name
+            save_file(tensors, out / new_shard_name)
+            size_gb = os.path.getsize(out / new_shard_name) / 1e9
+            print(f" Wrote {len(tensors)} tensors ({size_gb:.1f} GB)")
+            del tensors
+        else:
+            # Pure non-thinker shard: symlink
+            print(f"\n Symlinking non-thinker shard: {vanilla_shard} -> {new_shard_name}")
+            os.symlink(args.vanilla / vanilla_shard, out / new_shard_name)
+            for key in nt_keys:
+                new_weight_map[key] = new_shard_name
+
+        shard_counter += 1
+
+    # --- Step 5: Fix shard count in filenames ---
+    total_shards = shard_counter - 1
+    final_weight_map = OrderedDict()
+    for key, shard_name in new_weight_map.items():
+        old_num = int(shard_name.split("-")[1])
+        new_name = f"model-{old_num:05d}-of-{total_shards:05d}.safetensors"
+        final_weight_map[key] = new_name
+
+    # Rename files
+    for i in range(1, total_shards + 1):
+        old_name = f"model-{i:05d}-of-99999.safetensors"
+        new_name = f"model-{i:05d}-of-{total_shards:05d}.safetensors"
+        old_path = out / old_name
+        new_path = out / new_name
+        if old_path.exists() or old_path.is_symlink():
+            old_path.rename(new_path)
+
+    # --- Step 6: Write index ---
+    new_index = {
+        "metadata": {"total_size": sum(
+            os.path.getsize(out / f) if not os.path.islink(out / f)
+            else os.path.getsize(os.readlink(out / f))
+            for f in set(final_weight_map.values())
+        )},
+        "weight_map": final_weight_map,
+    }
+    with open(out / "model.safetensors.index.json", "w") as f:
+        json.dump(new_index, f, indent=2)
+
+    print(f"\n Index: {len(final_weight_map)} keys across {total_shards} shards")
+
+    # --- Step 7: Verify key counts ---
+    thinker_count = sum(1 for k in final_weight_map if k.startswith("thinker."))
+    talker_count = sum(1 for k in final_weight_map if k.startswith("talker."))
+    code2wav_count = sum(1 for k in final_weight_map if k.startswith("code2wav."))
+    print(f" thinker: {thinker_count} talker: {talker_count} code2wav: {code2wav_count}")
+
+    vanilla_thinker = sum(1 for k in vanilla_wm if k.startswith("thinker."))
+    vanilla_talker = sum(1 for k in vanilla_wm if k.startswith("talker."))
+    vanilla_code2wav = sum(1 for k in vanilla_wm if k.startswith("code2wav."))
+
+    ok = True
+    if thinker_count != vanilla_thinker:
+        print(f" WARNING: thinker count mismatch! expected {vanilla_thinker}, got {thinker_count}")
+        ok = False
+    if talker_count != vanilla_talker:
+        print(f" WARNING: talker count mismatch! expected {vanilla_talker}, got {talker_count}")
+        ok = False
+    if code2wav_count != vanilla_code2wav:
+        print(f" WARNING: code2wav count mismatch! expected {vanilla_code2wav}, got {code2wav_count}")
+        ok = False
+
+    if ok:
+        print("\n All key counts match vanilla. Conversion successful!")
+    else:
+        print("\n KEY COUNT MISMATCH - check output carefully!")
+
+    print(f"\nOutput directory: {out}")
+    print("Done.")
+
+
+if __name__ == "__main__":
+    main()
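The Step-7 check can also be rerun offline against the emitted index, which is handy after moving the converted checkpoint around. A sketch; only the output directory is an assumption here, while the key prefixes (`thinker.` / `talker.` / `code2wav.`) are exactly what the script verifies:

```python
import json
from collections import Counter
from pathlib import Path

# DEFAULT_OUTPUT from convert_thinker_to_full.py
out = Path("/opt/dlami/nvme/qwen3omni_full_sft_converted")
index = json.loads((out / "model.safetensors.index.json").read_text())

# Count weight-map keys by their top-level prefix
counts = Counter(k.split(".", 1)[0] for k in index["weight_map"])
print({p: counts[p] for p in ("thinker", "talker", "code2wav")})
```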
sft_model_convert_and_onpolicy_dpo/generate_onpolicy_dpo.py
ADDED
@@ -0,0 +1,422 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
On-policy DPO data augmentation pipeline.
|
| 4 |
+
|
| 5 |
+
Three stages:
|
| 6 |
+
Stage 1: Load SFT-trained Qwen3-Omni, run inference on training videos
|
| 7 |
+
to collect model responses. Output: raw_responses.jsonl
|
| 8 |
+
Stage 2: Use GPT-5.4 as judge to evaluate each model response against
|
| 9 |
+
ground truth label (synced / delay Xs / early Xs).
|
| 10 |
+
Output: judged_responses.jsonl
|
| 11 |
+
Stage 3: Filter wrong responses as rejected, pair with ground truth chosen,
|
| 12 |
+
merge with existing DPO data. Output: dpo_video_train_onpolicy.json
|
| 13 |
+
|
| 14 |
+
Usage:
|
| 15 |
+
python generate_onpolicy_dpo.py --stage 1 # run inference (GPU needed)
|
| 16 |
+
python generate_onpolicy_dpo.py --stage 2 # run LLM judge (OPENAI_API_KEY needed)
|
| 17 |
+
python generate_onpolicy_dpo.py --stage 3 # filter & merge
|
| 18 |
+
python generate_onpolicy_dpo.py --stage all # run all stages
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
from __future__ import annotations
|
| 22 |
+
|
| 23 |
+
import argparse
|
| 24 |
+
import gc
|
| 25 |
+
import json
|
| 26 |
+
import logging
|
| 27 |
+
import os
|
| 28 |
+
import re
|
| 29 |
+
import time
|
| 30 |
+
import traceback
|
| 31 |
+
from pathlib import Path
|
| 32 |
+
|
| 33 |
+
import torch
|
| 34 |
+
from tqdm import tqdm
|
| 35 |
+
|
| 36 |
+
LOG = logging.getLogger("onpolicy_dpo")
|
| 37 |
+
|
| 38 |
+
# ── Paths ──
|
| 39 |
+
DATA_ROOT = Path("/home/ubuntu/video_source")
|
| 40 |
+
ORIGINAL_ROOT = DATA_ROOT / "original"
|
| 41 |
+
INPUT_JSONL = DATA_ROOT / "kto_training_data_v2_train.jsonl"
|
| 42 |
+
EXISTING_DPO = Path("/home/ubuntu/LlamaFactory/data/dpo_video_train_with_audio.json")
|
| 43 |
+
RAW_RESPONSES = DATA_ROOT / "onpolicy_raw_responses.jsonl"
|
| 44 |
+
JUDGED_RESPONSES = DATA_ROOT / "onpolicy_judged_responses.jsonl"
|
| 45 |
+
OUTPUT_DPO = Path("/home/ubuntu/LlamaFactory/data/dpo_video_train_onpolicy.json")
|
| 46 |
+
EXTRACTED_AUDIO_ROOT = DATA_ROOT / "extracted_audio"
|
| 47 |
+
|
| 48 |
+
VANILLA_MODEL = "Qwen/Qwen3-Omni-30B-A3B-Instruct"
|
| 49 |
+
SFT_MODEL_PATH = "/opt/dlami/nvme/qwen3omni_full_sft_converted"
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def resolve_video_path(video_field: str) -> str:
|
| 53 |
+
if os.path.isabs(video_field):
|
| 54 |
+
return video_field
|
| 55 |
+
return str(ORIGINAL_ROOT / video_field)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def get_audio_path(video_path: str) -> str:
|
| 59 |
+
rel = os.path.relpath(video_path, str(DATA_ROOT))
|
| 60 |
+
return str(EXTRACTED_AUDIO_ROOT / (os.path.splitext(rel)[0] + ".wav"))
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def parse_ground_truth_label(video_path: str) -> dict:
|
| 64 |
+
if "/original/" in video_path:
|
| 65 |
+
return {"is_synced": True, "direction": None, "offset_sec": 0.0}
|
| 66 |
+
m = re.search(r"_(delay|early)_([\d.]+)s\.", video_path)
|
| 67 |
+
if m:
|
| 68 |
+
return {"is_synced": False, "direction": m.group(1), "offset_sec": float(m.group(2))}
|
| 69 |
+
return {"is_synced": True, "direction": None, "offset_sec": 0.0}
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def read_input_data() -> list[dict]:
|
| 73 |
+
data = []
|
| 74 |
+
with open(INPUT_JSONL) as f:
|
| 75 |
+
for line in f:
|
| 76 |
+
line = line.strip()
|
| 77 |
+
if not line:
|
| 78 |
+
continue
|
| 79 |
+
obj = json.loads(line)
|
| 80 |
+
obj["video"] = resolve_video_path(obj["video"])
|
| 81 |
+
data.append(obj)
|
| 82 |
+
return data
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# ═══════════════════════════════════════════════════════════════
|
| 86 |
+
# Stage 1: Inference
|
| 87 |
+
# ═══════════════════════════════════════════════════════════════
|
| 88 |
+
|
| 89 |
+
def load_model(model_choice: str):
|
| 90 |
+
"""Returns (model, processor).
|
| 91 |
+
|
| 92 |
+
Both vanilla and SFT are full Qwen3OmniMoeForConditionalGeneration
|
| 93 |
+
(SFT was converted from thinker-only via convert_thinker_to_full.py).
|
| 94 |
+
"""
|
| 95 |
+
from transformers import AutoProcessor, Qwen3OmniMoeForConditionalGeneration
|
| 96 |
+
|
| 97 |
+
model_path = VANILLA_MODEL if model_choice == "vanilla" else SFT_MODEL_PATH
|
| 98 |
+
LOG.info("Loading model: %s", model_path)
|
| 99 |
+
processor = AutoProcessor.from_pretrained(model_path)
|
| 100 |
+
model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
|
| 101 |
+
model_path, torch_dtype=torch.bfloat16, device_map="auto",
|
| 102 |
+
)
|
| 103 |
+
model.eval()
|
| 104 |
+
return model, processor
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def run_inference(model, processor,
|
| 108 |
+
video_path: str, audio_path: str, question: str,
|
| 109 |
+
temperature: float, max_new_tokens: int) -> str:
|
| 110 |
+
"""Single-sample inference using full Qwen3OmniMoeForConditionalGeneration."""
|
| 111 |
+
from qwen_omni_utils import process_mm_info
|
| 112 |
+
|
| 113 |
+
messages = [
|
| 114 |
+
{
|
| 115 |
+
"role": "user",
|
| 116 |
+
"content": [
|
| 117 |
+
{"type": "video", "video": video_path},
|
| 118 |
+
{"type": "audio", "audio": audio_path},
|
| 119 |
+
{"type": "text", "text": question},
|
| 120 |
+
],
|
| 121 |
+
},
|
| 122 |
+
]
|
| 123 |
+
|
| 124 |
+
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 125 |
+
audios, images, videos = process_mm_info(messages, use_audio_in_video=False)
|
| 126 |
+
inputs = processor(
|
| 127 |
+
text=[text], audio=audios, images=images, videos=videos,
|
| 128 |
+
return_tensors="pt", padding=True,
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
model_dtype = next(model.parameters()).dtype
|
| 132 |
+
for k, v in list(inputs.items()):
|
| 133 |
+
if hasattr(v, "to"):
|
| 134 |
+
v = v.to(model.device)
|
| 135 |
+
if torch.is_floating_point(v):
|
| 136 |
+
v = v.to(model_dtype)
|
| 137 |
+
inputs[k] = v
|
| 138 |
+
|
| 139 |
+
do_sample = temperature > 0
|
| 140 |
+
gen_kwargs = {
|
| 141 |
+
"thinker_max_new_tokens": max_new_tokens,
|
| 142 |
+
"return_audio": False,
|
| 143 |
+
"use_audio_in_video": False,
|
| 144 |
+
"do_sample": do_sample,
|
| 145 |
+
}
|
| 146 |
+
if do_sample:
|
| 147 |
+
gen_kwargs["temperature"] = temperature
|
| 148 |
+
gen_kwargs["top_p"] = 0.9
|
| 149 |
+
|
| 150 |
+
with torch.inference_mode():
|
| 151 |
+
result = model.generate(**inputs, **gen_kwargs)
|
| 152 |
+
|
| 153 |
+
output_ids = result[0] if isinstance(result, tuple) else result
|
| 154 |
+
prompt_len = inputs["input_ids"].shape[1]
|
| 155 |
+
return processor.batch_decode(
|
| 156 |
+
output_ids[:, prompt_len:], skip_special_tokens=True
|
| 157 |
+
)[0].strip()
|
| 158 |
+
|
| 159 |
+
|

def stage1_inference(args):
    LOG.info("=== Stage 1: Inference ===")
    data = read_input_data()
    LOG.info("Loaded %d samples from %s", len(data), INPUT_JSONL)

    model, processor = load_model(args.model_choice)
    LOG.info("Model loaded.")

    processed = set()
    processed_count = 0
    if RAW_RESPONSES.exists():
        with open(RAW_RESPONSES) as f:
            for line in f:
                obj = json.loads(line)
                if obj.get("responses") and any(
                    isinstance(r, dict) and r.get("response") for r in obj["responses"]
                ):
                    processed.add(obj["video"])
        LOG.info("Resuming: %d already done", len(processed))

    temperatures = args.temperatures

    for item in tqdm(data, desc="Inference"):
        video_path = item["video"]
        if video_path in processed:
            continue

        audio_path = get_audio_path(video_path)
        if not os.path.exists(audio_path):
            LOG.warning("Audio not found, skipping: %s", audio_path)
            continue

        question = item["prompt"]
        ground_truth = item["chosen"]
        label = parse_ground_truth_label(video_path)

        responses = []
        for temp in temperatures:
            try:
                resp = run_inference(
                    model, processor,
                    video_path, audio_path, question,
                    temperature=temp, max_new_tokens=args.max_new_tokens,
                )
                responses.append({"temperature": temp, "response": resp})
            except Exception:
                LOG.warning("Failed on %s (temp=%.1f):", video_path, temp)
                traceback.print_exc()

        gc.collect()
        torch.cuda.empty_cache()

        out = {
            "video": video_path,
            "audio": audio_path,
            "question": question,
            "ground_truth": ground_truth,
            "label": label,
            "responses": responses,
        }
        with open(RAW_RESPONSES, "a") as f:
            f.write(json.dumps(out, ensure_ascii=False) + "\n")
        if responses:
            processed.add(video_path)
            processed_count += 1
        if args.max_samples > 0 and processed_count >= args.max_samples:
            break

    LOG.info("Stage 1 done. %d samples. Wrote %s", processed_count, RAW_RESPONSES)


# ═══════════════════════════════════════════════════════════════
# Stage 2: LLM Judge (GPT-5.4)
# ═══════════════════════════════════════════════════════════════

OFFSET_TOL_SEC = 1.0

JUDGE_PROMPT = """\
You are evaluating a model's response about audio-video synchronization.

## Ground truth about this video:
- Synchronized: {is_synced}
- If not synchronized — direction: {direction}, offset: {offset_sec}s

## Model's response:
\"\"\"{response}\"\"\"

## Task:
Determine if the model's response is CORRECT or INCORRECT.

A response is CORRECT if:
1. For synced videos: model says it's synchronized/aligned (minor tolerance claims are OK)
2. For not-synced videos: model says it's NOT synchronized AND gets the direction right (delay vs early).
   Additionally, if the model mentions/estimates the audio-video gap (numeric or clearly implied), it should be within ±{offset_tol_sec}s of the true offset.

A response is INCORRECT if:
1. For synced videos: model says it's not synchronized or claims a significant gap
2. For not-synced videos: model says it's synchronized, or gets the direction wrong
   OR (if the model provides a gap estimate) the absolute gap error is > {offset_tol_sec}s.

Output ONLY a JSON object, no markdown fences:
{{"correct": true or false, "reason": "brief explanation"}}"""


def judge_with_openai(response: str, label: dict, client) -> dict:
    prompt = JUDGE_PROMPT.format(
        is_synced=label["is_synced"],
        direction=label.get("direction") or "N/A",
        offset_sec=label.get("offset_sec", 0),
        offset_tol_sec=OFFSET_TOL_SEC,
        response=response,
    )

    result = client.chat.completions.create(
        model="gpt-5.4",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,
        max_tokens=200,
    )

    text = result.choices[0].message.content.strip()
    text = re.sub(r"^```json\s*", "", text)
    text = re.sub(r"\s*```$", "", text)

    try:
        return json.loads(text)
    except json.JSONDecodeError:
        LOG.warning("Failed to parse judge output: %s", text)
        return {"correct": None, "reason": text}

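# Example verdict (illustrative): for a label {"is_synced": False,
# "direction": "delay", "offset_sec": 2.0} and a response claiming the clip is
# perfectly in sync, the judge is expected to return something like:
#   {"correct": false, "reason": "Video is not synced; audio is delayed by ~2s."}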

def stage2_judge(args):
    LOG.info("=== Stage 2: LLM Judge (GPT-5.4) ===")

    from openai import OpenAI
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("Set OPENAI_API_KEY environment variable")
    client = OpenAI(api_key=api_key)

    raw_data = []
    with open(RAW_RESPONSES) as f:
        for line in f:
            raw_data.append(json.loads(line))
    LOG.info("Loaded %d raw response records", len(raw_data))

    already_judged = set()
    if JUDGED_RESPONSES.exists():
        with open(JUDGED_RESPONSES) as f:
            for line in f:
                obj = json.loads(line)
                already_judged.add((obj["video"], obj["response"][:80]))
        LOG.info("Resuming: %d already judged", len(already_judged))

    for record in tqdm(raw_data, desc="Judging"):
        label = record["label"]
        for resp_obj in record["responses"]:
            resp_text = resp_obj["response"]
            key = (record["video"], resp_text[:80])
            if key in already_judged:
                continue

            try:
                verdict = judge_with_openai(resp_text, label, client)
            except Exception as e:
                LOG.warning("Judge failed for %s: %s", record["video"], e)
                time.sleep(2)
                continue

            out = {
                "video": record["video"],
                "audio": record["audio"],
                "question": record["question"],
                "ground_truth": record["ground_truth"],
                "label": label,
                "response": resp_text,
                "temperature": resp_obj["temperature"],
                "judge": verdict,
            }
            with open(JUDGED_RESPONSES, "a") as f:
                f.write(json.dumps(out, ensure_ascii=False) + "\n")
            already_judged.add(key)

            time.sleep(0.05)

    LOG.info("Stage 2 done. Wrote %s", JUDGED_RESPONSES)


# ═══════════════════════════════════════════════════════════════
# Stage 3: Filter & Merge
# ═══════════════════════════════════════════════════════════════

def stage3_merge(args):
    LOG.info("=== Stage 3: Filter & Merge ===")

    judged = []
    with open(JUDGED_RESPONSES) as f:
        for line in f:
            judged.append(json.loads(line))
    LOG.info("Loaded %d judged records", len(judged))

    new_pairs = []
    seen = set()
    for record in judged:
        verdict = record.get("judge", {})
        if verdict.get("correct") is not False:
            continue

        dedup_key = (record["video"], record["response"][:80])
        if dedup_key in seen:
            continue
        seen.add(dedup_key)

        pair = {
            "messages": [
                {"role": "user", "content": "<video><audio>" + record["question"]}
            ],
            "chosen": {"role": "assistant", "content": record["ground_truth"]},
            "rejected": {"role": "assistant", "content": record["response"]},
            "videos": [record["video"]],
            "audios": [record["audio"]],
        }
        new_pairs.append(pair)

    LOG.info("New on-policy DPO pairs: %d (from %d judged)", len(new_pairs), len(judged))

    with open(EXISTING_DPO) as f:
        existing = json.load(f)
    LOG.info("Existing DPO pairs: %d", len(existing))

    merged = existing + new_pairs
    LOG.info("Merged total: %d pairs", len(merged))

    with open(OUTPUT_DPO, "w") as f:
        json.dump(merged, f, indent=2, ensure_ascii=False)
    LOG.info("Wrote %s", OUTPUT_DPO)

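# Shape of each merged element (values illustrative), matching the pair dict
# built above:
#   {"messages": [{"role": "user", "content": "<video><audio>Is the audio in sync?"}],
#    "chosen":   {"role": "assistant", "content": "<ground-truth answer>"},
#    "rejected": {"role": "assistant", "content": "<model's judged-incorrect answer>"},
#    "videos":   ["/path/to/clip.mp4"],
#    "audios":   ["/path/to/clip.wav"]}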

# ═══════════════════════════════════════════════════════════════

def main():
    logging.basicConfig(level=logging.INFO, format="%(asctime)s | %(levelname)s | %(message)s")

    parser = argparse.ArgumentParser(description="On-policy DPO data augmentation")
    parser.add_argument("--stage", type=str, default="all", choices=["1", "2", "3", "all"])
    parser.add_argument("--model-choice", type=str, default="sft", choices=["vanilla", "sft"],
                        help="Which model to use for stage 1 inference")
    parser.add_argument("--temperatures", type=float, nargs="+", default=[0.3, 0.7, 1.0])
    parser.add_argument("--max-new-tokens", type=int, default=256)
    parser.add_argument("--max-samples", type=int, default=-1,
                        help="Stage 1 only: max videos to process (-1 = all)")
    args = parser.parse_args()

    if args.stage in ("1", "all"):
        stage1_inference(args)
    if args.stage in ("2", "all"):
        stage2_judge(args)
    if args.stage in ("3", "all"):
        stage3_merge(args)


if __name__ == "__main__":
    main()
sft_model_convert_and_onpolicy_dpo/stage1_vllm.py
ADDED
@@ -0,0 +1,269 @@
#!/usr/bin/env python3
"""
Stage 1 inference using vLLM for massive speedup.
Output format identical to generate_onpolicy_dpo.py Stage 1.

Uses qwen_omni_utils.process_mm_info() for video/audio preprocessing
(same as run_onpolicy_inference.py / HF path) so frame sampling, resize,
audio loading are identical. Only the generation engine differs (vLLM
continuous batching vs HF sequential generate).

Usage:
    # SFT thinker checkpoint, all GPUs, all data:
    python stage1_vllm.py

    # Vanilla model, 3 samples only:
    python stage1_vllm.py --model vanilla --max-samples 3

    # Control tensor parallelism:
    python stage1_vllm.py --tp 4 --batch-size 64

After this finishes, run:
    python generate_onpolicy_dpo.py --stage 2   # LLM judge
    python generate_onpolicy_dpo.py --stage 3   # filter & merge
"""

import argparse
import json
import os
import re
from pathlib import Path

import numpy as np

DATA_ROOT = Path("/home/ubuntu/video_source")
ORIGINAL_ROOT = DATA_ROOT / "original"
INPUT_JSONL = DATA_ROOT / "kto_training_data_v2_train.jsonl"
AUDIO_ROOT = DATA_ROOT / "extracted_audio"
OUTPUT_JSONL = DATA_ROOT / "onpolicy_raw_responses.jsonl"

VANILLA_MODEL = "Qwen/Qwen3-Omni-30B-A3B-Instruct"
SFT_MODEL_PATH = "Rakancorle11/qwen3omni_full_sft_revised_thinker_key"

SYSTEM_PROMPT = (
    "You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
    "Group, capable of perceiving auditory and visual inputs, as well as "
    "generating text and speech."
)


def resolve_video(v: str) -> str:
    return v if os.path.isabs(v) else str(ORIGINAL_ROOT / v)


def get_audio(video_path: str) -> str:
    rel = os.path.relpath(video_path, str(DATA_ROOT))
    return str(AUDIO_ROOT / (os.path.splitext(rel)[0] + ".wav"))


def parse_label(video_path: str) -> dict:
    if "/original/" in video_path:
        return {"is_synced": True, "direction": None, "offset_sec": 0.0}
    m = re.search(r"_(delay|early)_([\d.]+)s\.", video_path)
    if m:
        return {"is_synced": False, "direction": m.group(1), "offset_sec": float(m.group(2))}
    return {"is_synced": True, "direction": None, "offset_sec": 0.0}

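# Filename convention assumed by parse_label (example paths hypothetical):
#   .../original/clip001.mp4      -> synced, offset 0.0
#   .../clip001_delay_2.0s.mp4    -> not synced, direction "delay", 2.0s
#   .../clip001_early_1.5s.mp4    -> not synced, direction "early", 1.5s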

def preprocess_sample(video_path: str, audio_path: str):
    """
    Use qwen_omni_utils.process_mm_info to extract video frames + audio
    exactly the same way as run_onpolicy_inference.py / HF path.

    Returns (video_np, audio_tuple) or raises on failure.
    - video_np: np.ndarray shape (T, C, H, W) uint8
    - audio_tuple: (np.ndarray, sample_rate)
    """
    from qwen_omni_utils import process_mm_info

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "video", "video": video_path},
                {"type": "audio", "audio": audio_path},
                {"type": "text", "text": "placeholder"},
            ],
        },
    ]
    audios, images, videos = process_mm_info(messages, use_audio_in_video=False)

    # videos is a list of torch.Tensor (T, C, H, W) float32 [0-1] range from
    # qwen_omni_utils. vLLM expects np.ndarray uint8 (T, C, H, W) or (T, H, W, C).
    # Convert to numpy, keeping (T, C, H, W) which is what vLLM's Qwen3-Omni
    # processor expects (it does its own normalization).
    video_tensor = videos[0]  # torch.Tensor (T, C, H, W), float [0, 1]
    video_np = (video_tensor * 255).byte().numpy()  # uint8 (T, C, H, W)

    # audios is a list of np.ndarray from process_audio_info.
    # vLLM expects (np.ndarray, sample_rate). Qwen3-Omni audio encoder uses 16kHz.
    audio_np = audios[0]  # np.ndarray float32
    if hasattr(audio_np, "numpy"):
        audio_np = audio_np.numpy()
    audio_tuple = (audio_np, 16000)

    return video_np, audio_tuple

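# Minimal sketch of feeding one preprocessed sample to vLLM, mirroring the
# batched loop in main() below (paths hypothetical):
#
#   video_np, audio_tuple = preprocess_sample("/path/clip.mp4", "/path/clip.wav")
#   out = llm.generate(
#       [{"prompt": build_prompt("Is the audio in sync with the video?"),
#         "multi_modal_data": {"video": video_np, "audio": audio_tuple}}],
#       sampling_params=SamplingParams(temperature=0.0, max_tokens=64),
#   )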

def build_prompt(question: str) -> str:
    """Build raw prompt with placeholder tokens. vLLM expands pad tokens to correct count."""
    return (
        f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
        f"<|im_start|>user\n"
        f"<|vision_start|><|video_pad|><|vision_end|>"
        f"<|audio_start|><|audio_pad|><|audio_end|>"
        f"{question}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

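# Note: build_prompt emits a single <|video_pad|> / <|audio_pad|> placeholder
# each; per the docstring above, vLLM expands those pad tokens to the actual
# frame/audio feature counts at request time, so no manual token counting is
# needed here.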

def main():
    parser = argparse.ArgumentParser(description="Stage 1 with vLLM")
    parser.add_argument("--model", choices=["vanilla", "sft"], default="sft")
    parser.add_argument("--tp", type=int, default=None,
                        help="Tensor parallel size (default: all visible GPUs)")
    parser.add_argument("--max-samples", type=int, default=-1)
    parser.add_argument("--max-new-tokens", type=int, default=256)
    parser.add_argument("--temperatures", type=float, nargs="+", default=[0.3, 0.7, 1.0])
    parser.add_argument("--batch-size", type=int, default=64,
                        help="How many prompts to submit per llm.generate() call")
    parser.add_argument("--max-model-len", type=int, default=12800)
    parser.add_argument("--gpu-memory-utilization", type=float, default=0.90)
    args = parser.parse_args()

    import torch
    from vllm import LLM, SamplingParams

    if args.tp is None:
        args.tp = torch.cuda.device_count()

    model_path = SFT_MODEL_PATH if args.model == "sft" else VANILLA_MODEL
    print(f"Loading model: {model_path} (tp={args.tp})")

    llm = LLM(
        model=model_path,
        tensor_parallel_size=args.tp,
        max_model_len=args.max_model_len,
        max_num_seqs=args.batch_size,
        limit_mm_per_prompt={"audio": 1, "video": 1},
        gpu_memory_utilization=args.gpu_memory_utilization,
        dtype="bfloat16",
        trust_remote_code=True,
    )

    # Load input data
    data = []
    with open(INPUT_JSONL) as f:
        for line in f:
            line = line.strip()
            if line:
                obj = json.loads(line)
                obj["video"] = resolve_video(obj["video"])
                data.append(obj)
    print(f"Loaded {len(data)} samples")

    # Resume
    processed = set()
    if OUTPUT_JSONL.exists():
        with open(OUTPUT_JSONL) as f:
            for line in f:
                obj = json.loads(line)
                if obj.get("responses") and any(
                    isinstance(r, dict) and r.get("response") for r in obj["responses"]
                ):
                    processed.add(obj["video"])
        print(f"Resuming: {len(processed)} already done")

    # Filter to unprocessed
    todo = []
    for item in data:
        if item["video"] in processed:
            continue
        audio_path = get_audio(item["video"])
        if not os.path.exists(audio_path):
            continue
        todo.append(item)
    if args.max_samples > 0:
        todo = todo[:args.max_samples]
    print(f"To process: {len(todo)} samples")

    # Pre-extract all video/audio features (same as HF path)
    print("Pre-extracting video/audio features with qwen_omni_utils...")
    preprocessed = {}
    skipped = 0
    for i, item in enumerate(todo):
        video_path = item["video"]
        audio_path = get_audio(video_path)
        try:
            video_np, audio_tuple = preprocess_sample(video_path, audio_path)
            preprocessed[video_path] = (video_np, audio_tuple)
        except Exception as e:
            print(f"  [{i+1}/{len(todo)}] Skip (preprocess error): {video_path}: {e}")
            skipped += 1
    print(f"Preprocessed: {len(preprocessed)} ok, {skipped} skipped")

    # Filter todo to only successfully preprocessed
    todo = [item for item in todo if item["video"] in preprocessed]

    for temp in args.temperatures:
        sampling_params = SamplingParams(
            temperature=temp if temp > 0 else 0.0,
            top_p=0.9 if temp > 0 else 1.0,
            max_tokens=args.max_new_tokens,
        )

        for batch_start in range(0, len(todo), args.batch_size):
            batch = todo[batch_start:batch_start + args.batch_size]
            inputs_list = []

            for item in batch:
                video_np, audio_tuple = preprocessed[item["video"]]
                inputs_list.append({
                    "prompt": build_prompt(item["prompt"]),
                    "multi_modal_data": {
                        "video": video_np,
                        "audio": audio_tuple,
                    },
                })

            outputs = llm.generate(inputs_list, sampling_params=sampling_params)

            for out_idx, item in enumerate(batch):
                resp_text = outputs[out_idx].outputs[0].text.strip()

                if temp == args.temperatures[0]:
                    video_path = item["video"]
                    record = {
                        "video": video_path,
                        "audio": get_audio(video_path),
                        "question": item["prompt"],
                        "ground_truth": item["chosen"],
                        "label": parse_label(video_path),
                        "responses": [{"temperature": temp, "response": resp_text}],
                    }
                    item["_record"] = record
                else:
                    if "_record" in item:
                        item["_record"]["responses"].append(
                            {"temperature": temp, "response": resp_text}
                        )

            done = batch_start + len(batch)
            print(f"  temp={temp:.1f} done={done}/{len(todo)}")

    written = 0
    with open(OUTPUT_JSONL, "a") as f:
        for item in todo:
            rec = item.get("_record")
            if rec and rec["responses"]:
                f.write(json.dumps(rec, ensure_ascii=False) + "\n")
                written += 1

    # Free preprocessed data
    preprocessed.clear()

    print(f"Done. Wrote {written} records to {OUTPUT_JSONL}")


if __name__ == "__main__":
    main()
sync_research/audio_llm_benchmark.py
ADDED
@@ -0,0 +1,1463 @@
#!/usr/bin/env python3
from audio_llm_benchmark_shifted import main


if __name__ == "__main__":
    main()
'''
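# The triple-quoted string opened above keeps everything below inert: the live
# entry point is the thin wrapper at the top, which delegates to
# audio_llm_benchmark_shifted.main(). The stashed text preserves earlier
# standalone revisions of this script.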
"""
Audio-LLM benchmark on shifted delay/early sets (126 videos from 63 source ids).

Outputs align with previous stress-test style:
- delay_results.jsonl
- early_results.jsonl
- summary.json
- missing_or_errors.json

And organized into four subfolders under output root:
- audio/  (extracted audio files)
- gemini/ (Gemini-only per-split results)
- qwen2/  (Qwen2-only per-split results)
- qwen3/  (Qwen3-only per-split results)
"""

from __future__ import annotations

import argparse
import csv
import json
import os
import re
import subprocess
import time
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import librosa
import numpy as np
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoProcessor

try:
    import google.generativeai as genai
except Exception:  # pragma: no cover
    genai = None


UNIVERSAL_PROMPT = (
    "Listen to this audio clip carefully. You are an expert in acoustic forensics. "
    "Locate the EXACT timestamp (in seconds) of the loudest, most energetic PHYSICAL IMPACT sound "
    "(e.g., body hitting ground, object breaking). Actively ignore background music, laughter, and speech. "
    'Output strictly in JSON format: {"audio_timestamp": 2.5, "sound_description": "brief description"}'
)

DEFAULT_SOURCE_CSV = Path(
    "/home/ubuntu/video_source/result/gemini_video_oc_frame/gemini_validated_accept_lt1p0.csv"
)
DEFAULT_DELAY_DIR = Path("/home/ubuntu/video_source/shifted/delay")
DEFAULT_EARLY_DIR = Path("/home/ubuntu/video_source/shifted/early")
DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/video_source/result/audio_llm_benchmark")

DEFAULT_AUDIO_SR = 16000
DEFAULT_AUDIO_FORMAT = "wav"
DEFAULT_SEED = 42
DEFAULT_SAMPLE_SIZE = 0  # 0 = all rows

GEMINI_MODEL = "gemini-3.1-pro-preview"
QWEN2_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
QWEN3_MODEL_ID = "Qwen/Qwen3-Omni-30B-A3B-Instruct"

GEMINI_FILE_READY_TIMEOUT_SECONDS = 120
GEMINI_FILE_POLL_SECONDS = 2


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Benchmark Gemini/Qwen2/Qwen3 on shifted audio grounding.")
    p.add_argument("--source-csv", type=Path, default=DEFAULT_SOURCE_CSV)
    p.add_argument("--delay-dir", type=Path, default=DEFAULT_DELAY_DIR)
    p.add_argument("--early-dir", type=Path, default=DEFAULT_EARLY_DIR)
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
    p.add_argument("--audio-sr", type=int, default=DEFAULT_AUDIO_SR)
    p.add_argument("--audio-format", choices=["wav", "mp3"], default=DEFAULT_AUDIO_FORMAT)
    p.add_argument("--sample-size", type=int, default=DEFAULT_SAMPLE_SIZE, help="0 means all rows in source CSV")
    p.add_argument("--seed", type=int, default=DEFAULT_SEED)
    p.add_argument("--delay-shift", type=float, default=2.0)
    p.add_argument("--early-shift", type=float, default=-2.0)
    p.add_argument("--disable-local-models", action="store_true")
    p.add_argument("--overwrite-output", action="store_true")
    return p.parse_args()


def ensure_ffmpeg() -> None:
    subprocess.run(
        ["ffmpeg", "-version"],
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )


def _extract_json_object(text: str) -> Dict[str, Any]:
    text = text.strip()
    fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL)
    if fence_match:
        return json.loads(fence_match.group(1))
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        pass
    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end != -1 and end > start:
        return json.loads(text[start : end + 1])
    raise ValueError("No valid JSON object found in model output.")

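# _extract_json_object tolerates three output shapes (examples illustrative):
#   '```json\n{"audio_timestamp": 2.5, "sound_description": "thud"}\n```'
#   '{"audio_timestamp": 2.5, "sound_description": "thud"}'
#   'Impact at 2.5s: {"audio_timestamp": 2.5, "sound_description": "thud"}'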

def _coerce_audio_result(data: Dict[str, Any]) -> Tuple[float, str]:
    if "audio_timestamp" not in data or "sound_description" not in data:
        raise ValueError(f"Missing keys in JSON output: {data}")
    ts = float(data["audio_timestamp"])
    if not np.isfinite(ts):
        raise ValueError(f"audio_timestamp is not finite: {ts}")
    desc = str(data["sound_description"]).strip()
    if not desc:
        raise ValueError("sound_description is empty.")
    return ts, desc


def append_jsonl(path: Path, payload: Dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(payload, ensure_ascii=False) + "\n")


def load_source_rows(path: Path, sample_size: int, seed: int) -> List[Tuple[str, float]]:
    if not path.exists():
        raise FileNotFoundError(f"source CSV not found: {path}")
    rows: List[Tuple[str, float]] = []
    with path.open("r", encoding="utf-8", newline="") as f:
        reader = csv.DictReader(f)
        fields = reader.fieldnames or []
        if "video_id" not in fields or "t_gemini" not in fields:
            raise ValueError("source CSV must include columns: video_id, t_gemini")
        for r in reader:
            video_id = str(r.get("video_id", "")).strip()
            if not video_id:
                continue
            try:
                t_visual = float(r.get("t_gemini", ""))
            except (TypeError, ValueError):
                continue
            rows.append((video_id, t_visual))
    if not rows:
        raise ValueError("No valid rows parsed from source CSV.")

    if sample_size > 0 and len(rows) > sample_size:
        rng = np.random.default_rng(seed)
        idx = rng.choice(len(rows), size=sample_size, replace=False)
        rows = [rows[int(i)] for i in idx]
    rows.sort(key=lambda x: x[0])
    return rows


def index_by_filename(root: Path) -> Dict[str, Path]:
    if not root.exists():
        raise FileNotFoundError(f"directory not found: {root}")
    out: Dict[str, Path] = {}
    for p in root.rglob("*.mp4"):
        out.setdefault(p.name, p)
    return out


def shifted_filename(video_id: str, split: str) -> str:
    p = Path(video_id)
    suffix = p.suffix if p.suffix else ".mp4"
    stem = p.stem if p.suffix else p.name
    if split == "delay":
        return f"{stem}_delay2s{suffix}"
    if split == "early":
        return f"{stem}_early2s{suffix}"
    raise ValueError(f"Unknown split: {split}")

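# Examples (traced from the function above):
#   shifted_filename("clip001.mp4", "delay") -> "clip001_delay2s.mp4"
#   shifted_filename("clip001", "early")     -> "clip001_early2s.mp4"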

def extract_audio_file(video_path: Path, dst_path: Path, sr: int, fmt: str) -> None:
    dst_path.parent.mkdir(parents=True, exist_ok=True)
    cmd = [
        "ffmpeg",
        "-y",
        "-i",
        str(video_path),
        "-vn",
        "-ac",
        "1",
        "-ar",
        str(sr),
    ]
    if fmt == "wav":
        cmd += ["-c:a", "pcm_s16le"]
    else:
        cmd += ["-c:a", "libmp3lame", "-q:a", "2"]
    cmd.append(str(dst_path))
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

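# For wav output the assembled command is equivalent to (paths illustrative):
#   ffmpeg -y -i in.mp4 -vn -ac 1 -ar 16000 -c:a pcm_s16le out.wav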

def wait_for_gemini_file_active(file_name: str) -> None:
    deadline = time.time() + GEMINI_FILE_READY_TIMEOUT_SECONDS
    while time.time() < deadline:
        file_obj = genai.get_file(file_name)
        state_obj = getattr(file_obj, "state", None)
        state_name = str(getattr(state_obj, "name", state_obj))
        if state_name == "ACTIVE":
            return
        if state_name in {"FAILED", "CANCELLED"}:
            raise RuntimeError(f"Gemini file processing failed: state={state_name}")
        time.sleep(GEMINI_FILE_POLL_SECONDS)
    raise TimeoutError("Gemini file did not become ACTIVE in time.")


def gemini_audio_grounding(audio_path: Path, gemini_api_key: str) -> Tuple[float, str]:
    if genai is None:
        raise RuntimeError("google-generativeai is not installed.")
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is missing.")
    genai.configure(api_key=gemini_api_key)
    model = genai.GenerativeModel(GEMINI_MODEL)
    uploaded = genai.upload_file(path=str(audio_path))
    try:
        wait_for_gemini_file_active(uploaded.name)
        resp = model.generate_content([UNIVERSAL_PROMPT, uploaded])
        raw_text = getattr(resp, "text", "") or ""
        if not raw_text:
            raise ValueError("Gemini response text is empty.")
        parsed = _extract_json_object(raw_text)
        return _coerce_audio_result(parsed)
    finally:
        try:
            genai.delete_file(uploaded.name)
        except Exception:
            pass


def load_qwen_model_and_processor(model_id: str) -> Tuple[Any, Any]:
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        trust_remote_code=True,
    )
    model.eval()
    return model, processor


def _run_qwen_audio_chat(model: Any, processor: Any, audio_path: Path, prompt: str, label: str) -> str:
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "audio", "audio": str(audio_path)},
                {"type": "text", "text": prompt},
            ],
        }
    ]
    try:
        text_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    except Exception:
        text_prompt = prompt

    audio_arr, sr = librosa.load(str(audio_path), sr=None, mono=True)
    if audio_arr.size == 0:
        raise RuntimeError(f"{label}: extracted audio is empty.")

    last_exc: Optional[Exception] = None
    inputs = None
    for kw in [
        {"text": [text_prompt], "audio": [audio_arr], "sampling_rate": sr},
        {"text": [text_prompt], "audios": [audio_arr], "sampling_rate": sr},
    ]:
        try:
            inputs = processor(return_tensors="pt", padding=True, **kw)
            break
        except Exception as exc:
            last_exc = exc
    if inputs is None:
        raise RuntimeError(f"{label}: failed to build processor inputs: {last_exc}")

    dev = model.device if hasattr(model, "device") else torch.device("cuda" if torch.cuda.is_available() else "cpu")
    for k, v in list(inputs.items()):
        if torch.is_tensor(v):
            inputs[k] = v.to(dev)

    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)
    input_len = inputs["input_ids"].shape[1] if "input_ids" in inputs else 0
    gen_ids = output_ids[:, input_len:] if input_len > 0 else output_ids
    return processor.batch_decode(gen_ids, skip_special_tokens=True)[0].strip()


def qwen2_audio_grounding(audio_path: Path, model: Any, processor: Any) -> Tuple[float, str]:
    parsed = _extract_json_object(_run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen2-Audio"))
    return _coerce_audio_result(parsed)


def qwen3_audio_grounding(audio_path: Path, model: Any, processor: Any) -> Tuple[float, str]:
    parsed = _extract_json_object(_run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen3-Omni"))
    return _coerce_audio_result(parsed)


def safe_call(fn, *args, **kwargs) -> Tuple[Optional[Tuple[float, str]], Optional[str]]:
    try:
        return fn(*args, **kwargs), None
    except Exception as exc:
        return None, str(exc)


def summarize_predictions(rows: List[Dict[str, Any]], key: str) -> Dict[str, Any]:
    vals = [r.get(key) for r in rows]
    with_pred = [v for v in vals if v is not None]
    return {
        "count": len(rows),
        "with_prediction": len(with_pred),
        "without_prediction": len(rows) - len(with_pred),
    }

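# Example return value (numbers illustrative) for a split with 63 rows of
# which 60 carry a prediction:
#   {"count": 63, "with_prediction": 60, "without_prediction": 3}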
| 327 |
+
def main() -> None:
|
| 328 |
+
args = parse_args()
|
| 329 |
+
ensure_ffmpeg()
|
| 330 |
+
|
| 331 |
+
gemini_api_key = os.getenv("GEMINI_API_KEY", "").strip()
|
| 332 |
+
if not gemini_api_key:
|
| 333 |
+
raise RuntimeError("GEMINI_API_KEY is required.")
|
| 334 |
+
|
| 335 |
+
source_rows = load_source_rows(args.source_csv, args.sample_size, args.seed)
|
| 336 |
+
delay_index = index_by_filename(args.delay_dir)
|
| 337 |
+
early_index = index_by_filename(args.early_dir)
|
| 338 |
+
|
| 339 |
+
out_root = args.output_dir
|
| 340 |
+
audio_dir = out_root / "audio"
|
| 341 |
+
gemini_dir = out_root / "gemini"
|
| 342 |
+
qwen2_dir = out_root / "qwen2"
|
| 343 |
+
qwen3_dir = out_root / "qwen3"
|
| 344 |
+
for d in [audio_dir, gemini_dir, qwen2_dir, qwen3_dir]:
|
| 345 |
+
d.mkdir(parents=True, exist_ok=True)
|
| 346 |
+
|
| 347 |
+
delay_results_path = out_root / "delay_results.jsonl"
|
| 348 |
+
early_results_path = out_root / "early_results.jsonl"
|
| 349 |
+
missing_path = out_root / "missing_or_errors.json"
|
| 350 |
+
summary_path = out_root / "summary.json"
|
| 351 |
+
sampled_path = out_root / "sampled_10_from_gemini.csv"
|
| 352 |
+
|
| 353 |
+
model_out = {
|
| 354 |
+
"gemini_delay": gemini_dir / "delay_results.jsonl",
|
| 355 |
+
"gemini_early": gemini_dir / "early_results.jsonl",
|
| 356 |
+
"qwen2_delay": qwen2_dir / "delay_results.jsonl",
|
| 357 |
+
"qwen2_early": qwen2_dir / "early_results.jsonl",
|
| 358 |
+
"qwen3_delay": qwen3_dir / "delay_results.jsonl",
|
| 359 |
+
"qwen3_early": qwen3_dir / "early_results.jsonl",
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
if args.overwrite_output:
|
| 363 |
+
for p in [delay_results_path, early_results_path, missing_path, summary_path, sampled_path, *model_out.values()]:
|
| 364 |
+
if p.exists():
|
| 365 |
+
p.unlink()
|
| 366 |
+
|
| 367 |
+
qwen2_model = qwen2_processor = None
|
| 368 |
+
qwen3_model = qwen3_processor = None
|
| 369 |
+
if not args.disable_local_models:
|
| 370 |
+
try:
|
| 371 |
+
qwen2_model, qwen2_processor = load_qwen_model_and_processor(QWEN2_MODEL_ID)
|
| 372 |
+
except Exception:
|
| 373 |
+
qwen2_model = qwen2_processor = None
|
| 374 |
+
try:
|
| 375 |
+
qwen3_model, qwen3_processor = load_qwen_model_and_processor(QWEN3_MODEL_ID)
|
| 376 |
+
except Exception:
|
| 377 |
+
qwen3_model = qwen3_processor = None
|
| 378 |
+
|
| 379 |
+
with sampled_path.open("w", encoding="utf-8", newline="") as f:
|
| 380 |
+
writer = csv.DictWriter(f, fieldnames=["video_id", "t_visual"])
|
| 381 |
+
writer.writeheader()
|
| 382 |
+
for video_id, t_visual in source_rows:
|
| 383 |
+
writer.writerow({"video_id": video_id, "t_visual": round(t_visual, 3)})
|
| 384 |
+
|
| 385 |
+
delay_rows: List[Dict[str, Any]] = []
|
| 386 |
+
early_rows: List[Dict[str, Any]] = []
|
| 387 |
+
missing: List[Dict[str, Any]] = []
|
| 388 |
+
|
| 389 |
+
for video_id, t_visual in tqdm(source_rows, desc="Audio LLM Benchmark", unit="video"):
|
| 390 |
+
for split, shift, idx, combined_list, combined_path in [
|
| 391 |
+
("delay", args.delay_shift, delay_index, delay_rows, delay_results_path),
|
| 392 |
+
("early", args.early_shift, early_index, early_rows, early_results_path),
|
| 393 |
+
]:
|
| 394 |
+
shifted_name = shifted_filename(video_id, split)
|
| 395 |
+
video_path = idx.get(shifted_name)
|
| 396 |
+
if video_path is None:
|
| 397 |
+
missing.append({"split": split, "video_id": video_id, "reason": "file_not_found"})
|
| 398 |
+
continue
|
| 399 |
+
|
| 400 |
+
audio_path = audio_dir / split / f"{Path(video_id).stem}.{args.audio_format}"
|
| 401 |
+
try:
|
| 402 |
+
extract_audio_file(video_path, audio_path, args.audio_sr, args.audio_format)
|
| 403 |
+
except Exception as exc:
|
| 404 |
+
missing.append({"split": split, "video_id": video_id, "reason": f"audio_extract_failed: {exc}"})
|
| 405 |
+
continue
|
| 406 |
+
|
| 407 |
+
gemini_res, gemini_err = safe_call(gemini_audio_grounding, audio_path, gemini_api_key)
|
| 408 |
+
qwen2_res = qwen2_err = None
|
| 409 |
+
qwen3_res = qwen3_err = None
|
| 410 |
+
if qwen2_model is not None and qwen2_processor is not None:
|
| 411 |
+
qwen2_res, qwen2_err = safe_call(qwen2_audio_grounding, audio_path, qwen2_model, qwen2_processor)
|
| 412 |
+
if qwen3_model is not None and qwen3_processor is not None:
|
| 413 |
+
qwen3_res, qwen3_err = safe_call(qwen3_audio_grounding, audio_path, qwen3_model, qwen3_processor)
|
| 414 |
+
|
| 415 |
+
if gemini_err:
|
| 416 |
+
missing.append({"split": split, "video_id": video_id, "reason": f"gemini_error: {gemini_err}"})
|
| 417 |
+
if qwen2_err:
|
| 418 |
+
missing.append({"split": split, "video_id": video_id, "reason": f"qwen2_error: {qwen2_err}"})
|
| 419 |
+
if qwen3_err:
|
| 420 |
+
missing.append({"split": split, "video_id": video_id, "reason": f"qwen3_error: {qwen3_err}"})
|
| 421 |
+
|
| 422 |
+
expected_t_audio = float(t_visual) + float(shift)
|
| 423 |
+
combined_row = {
|
| 424 |
+
"video_path": str(video_path),
|
| 425 |
+
"video_id": video_path.name,
|
| 426 |
+
"source_video_id": video_id,
|
| 427 |
+
"t_visual": float(t_visual),
|
| 428 |
+
"shift_amount": float(shift),
|
| 429 |
+
"expected_t_audio": expected_t_audio,
|
| 430 |
+
"gemini_t": round(gemini_res[0], 3) if gemini_res else None,
|
| 431 |
+
"qwen2_t": round(qwen2_res[0], 3) if qwen2_res else None,
|
| 432 |
+
"qwen3_t": round(qwen3_res[0], 3) if qwen3_res else None,
|
| 433 |
+
"gemini_desc": gemini_res[1] if gemini_res else None,
|
| 434 |
+
"qwen2_desc": qwen2_res[1] if qwen2_res else None,
|
| 435 |
+
"qwen3_desc": qwen3_res[1] if qwen3_res else None,
|
| 436 |
+
"audio_path": str(audio_path),
|
| 437 |
+
}
|
| 438 |
+
append_jsonl(combined_path, combined_row)
|
| 439 |
+
combined_list.append(combined_row)
|
| 440 |
+
|
| 441 |
+
append_jsonl(
|
| 442 |
+
model_out[f"gemini_{split}"],
|
| 443 |
+
{
|
| 444 |
+
"video_id": video_path.name,
|
| 445 |
+
"source_video_id": video_id,
|
| 446 |
+
"t_visual": float(t_visual),
|
| 447 |
+
"shift_amount": float(shift),
|
| 448 |
+
"expected_t_audio": expected_t_audio,
|
| 449 |
+
"detected_t_audio": round(gemini_res[0], 3) if gemini_res else None,
|
| 450 |
+
"sound_description": gemini_res[1] if gemini_res else None,
|
| 451 |
+
"audio_path": str(audio_path),
|
| 452 |
+
},
|
| 453 |
+
)
|
| 454 |
+
append_jsonl(
|
| 455 |
+
model_out[f"qwen2_{split}"],
|
| 456 |
+
{
|
| 457 |
+
"video_id": video_path.name,
|
| 458 |
+
"source_video_id": video_id,
|
| 459 |
+
"t_visual": float(t_visual),
|
| 460 |
+
"shift_amount": float(shift),
|
| 461 |
+
"expected_t_audio": expected_t_audio,
|
| 462 |
+
"detected_t_audio": round(qwen2_res[0], 3) if qwen2_res else None,
|
| 463 |
+
"sound_description": qwen2_res[1] if qwen2_res else None,
|
| 464 |
+
"audio_path": str(audio_path),
|
| 465 |
+
},
|
| 466 |
+
)
|
| 467 |
+
append_jsonl(
|
| 468 |
+
model_out[f"qwen3_{split}"],
|
| 469 |
+
{
|
| 470 |
+
"video_id": video_path.name,
|
| 471 |
+
"source_video_id": video_id,
|
| 472 |
+
"t_visual": float(t_visual),
|
| 473 |
+
"shift_amount": float(shift),
|
| 474 |
+
"expected_t_audio": expected_t_audio,
|
| 475 |
+
"detected_t_audio": round(qwen3_res[0], 3) if qwen3_res else None,
|
| 476 |
+
"sound_description": qwen3_res[1] if qwen3_res else None,
|
| 477 |
+
"audio_path": str(audio_path),
|
| 478 |
+
},
|
| 479 |
+
)
|
| 480 |
+
|
| 481 |
+
with missing_path.open("w", encoding="utf-8") as f:
|
| 482 |
+
json.dump(missing, f, ensure_ascii=False, indent=2)
|
| 483 |
+
|
| 484 |
+
summary = {
|
| 485 |
+
"source_csv": str(args.source_csv),
|
| 486 |
+
"sample_size": len(source_rows),
|
| 487 |
+
"seed": args.seed,
|
| 488 |
+
"delay_shift": args.delay_shift,
|
| 489 |
+
"early_shift": args.early_shift,
|
| 490 |
+
"delay_summary": {
|
| 491 |
+
"count": len(delay_rows),
|
| 492 |
+
"gemini": summarize_predictions(delay_rows, "gemini_t"),
|
| 493 |
+
"qwen2": summarize_predictions(delay_rows, "qwen2_t"),
|
| 494 |
+
"qwen3": summarize_predictions(delay_rows, "qwen3_t"),
|
| 495 |
+
},
|
| 496 |
+
"early_summary": {
|
| 497 |
+
"count": len(early_rows),
|
| 498 |
+
"gemini": summarize_predictions(early_rows, "gemini_t"),
|
| 499 |
+
"qwen2": summarize_predictions(early_rows, "qwen2_t"),
|
| 500 |
+
"qwen3": summarize_predictions(early_rows, "qwen3_t"),
|
| 501 |
+
},
|
| 502 |
+
"missing_or_errors_count": len(missing),
|
| 503 |
+
"outputs": {
|
| 504 |
+
"delay_jsonl": str(delay_results_path),
|
| 505 |
+
"early_jsonl": str(early_results_path),
|
| 506 |
+
"missing_or_errors_json": str(missing_path),
|
| 507 |
+
"sampled_csv": str(sampled_path),
|
| 508 |
+
"audio_dir": str(audio_dir),
|
| 509 |
+
"gemini_dir": str(gemini_dir),
|
| 510 |
+
"qwen2_dir": str(qwen2_dir),
|
| 511 |
+
"qwen3_dir": str(qwen3_dir),
|
| 512 |
+
},
|
| 513 |
+
}
|
| 514 |
+
with summary_path.open("w", encoding="utf-8") as f:
|
| 515 |
+
json.dump(summary, f, ensure_ascii=False, indent=2)
|
| 516 |
+
|
| 517 |
+
print("========== Audio LLM Benchmark Summary ==========")
|
| 518 |
+
print(f"Sampled source videos: {len(source_rows)}")
|
| 519 |
+
print(f"Delay evaluated rows: {len(delay_rows)}")
|
| 520 |
+
print(f"Early evaluated rows: {len(early_rows)}")
|
| 521 |
+
print(f"Missing/errors: {len(missing)}")
|
| 522 |
+
print(f"Summary JSON: {summary_path}")
|
| 523 |
+
print("================================================")
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
if __name__ == "__main__":
|
| 527 |
+
main()
|
| 528 |
+
#!/usr/bin/env python3
|
| 529 |
+
"""
|
| 530 |
+
Audio-LLM benchmark on shifted delay/early sets (126 videos from 63 source ids).
|
| 531 |
+
|
| 532 |
+
Outputs are aligned with previous stress-test style:
|
| 533 |
+
- delay_results.jsonl
|
| 534 |
+
- early_results.jsonl
|
| 535 |
+
- summary.json
|
| 536 |
+
- missing_or_errors.json
|
| 537 |
+
|
| 538 |
+
And results are organized under four subfolders in output root:
|
| 539 |
+
- audio/ (extracted wav files)
|
| 540 |
+
- gemini/ (model-only jsonl)
|
| 541 |
+
- qwen2/ (model-only jsonl)
|
| 542 |
+
- qwen3/ (model-only jsonl)
|
| 543 |
+
"""
|

from __future__ import annotations

import argparse
import csv
import json
import os
import re
import subprocess
import time
from pathlib import Path
from statistics import mean
from typing import Any, Dict, List, Optional, Tuple

import librosa
import numpy as np
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoProcessor

try:
    import google.generativeai as genai
except Exception:  # pragma: no cover
    genai = None


UNIVERSAL_PROMPT = (
    "Listen to this audio clip carefully. You are an expert in acoustic forensics. "
    "Locate the EXACT timestamp (in seconds) of the loudest, most energetic PHYSICAL IMPACT sound "
    "(e.g., body hitting ground, object breaking). Actively ignore background music, laughter, and speech. "
    'Output strictly in JSON format: {"audio_timestamp": 2.5, "sound_description": "brief description"}'
)

DEFAULT_SOURCE_CSV = Path(
    "/home/ubuntu/video_source/result/gemini_video_oc_frame/gemini_validated_accept_lt1p0.csv"
)
DEFAULT_DELAY_DIR = Path("/home/ubuntu/video_source/shifted/delay")
DEFAULT_EARLY_DIR = Path("/home/ubuntu/video_source/shifted/early")
DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/video_source/result/audio_llm_benchmark")

DEFAULT_AUDIO_SR = 16000
DEFAULT_AUDIO_FORMAT = "wav"
DEFAULT_SEED = 42
DEFAULT_SAMPLE_SIZE = 0  # 0 means all

GEMINI_MODEL = "gemini-3.1-pro-preview"
QWEN2_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
QWEN3_MODEL_ID = "Qwen/Qwen3-Omni-30B-A3B-Instruct"

GEMINI_FILE_READY_TIMEOUT_SECONDS = 120
GEMINI_FILE_POLL_SECONDS = 2


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Benchmark Gemini/Qwen2/Qwen3 on shifted audio grounding.")
    p.add_argument("--source-csv", type=Path, default=DEFAULT_SOURCE_CSV)
    p.add_argument("--delay-dir", type=Path, default=DEFAULT_DELAY_DIR)
    p.add_argument("--early-dir", type=Path, default=DEFAULT_EARLY_DIR)
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
    p.add_argument("--audio-sr", type=int, default=DEFAULT_AUDIO_SR)
    p.add_argument("--audio-format", choices=["wav", "mp3"], default=DEFAULT_AUDIO_FORMAT)
    p.add_argument("--sample-size", type=int, default=DEFAULT_SAMPLE_SIZE, help="0 means all rows in source-csv")
    p.add_argument("--seed", type=int, default=DEFAULT_SEED)
    p.add_argument("--delay-shift", type=float, default=2.0)
    p.add_argument("--early-shift", type=float, default=-2.0)
    p.add_argument("--disable-local-models", action="store_true")
    p.add_argument("--overwrite-output", action="store_true")
    return p.parse_args()


def ensure_ffmpeg() -> None:
    subprocess.run(
        ["ffmpeg", "-version"],
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )


def _extract_json_object(text: str) -> Dict[str, Any]:
    text = text.strip()
    fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL)
    if fence_match:
        return json.loads(fence_match.group(1))
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        pass
    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end != -1 and end > start:
        return json.loads(text[start : end + 1])
    raise ValueError("No valid JSON object found in model output.")
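# Quick sanity check of the fallback order above (hypothetical model outputs,
# runnable in a REPL against the function):
#   _extract_json_object('```json\n{"audio_timestamp": 2.5, "sound_description": "thud"}\n```')
#   _extract_json_object('Sure! {"audio_timestamp": 2.5, "sound_description": "thud"}')
# both return {'audio_timestamp': 2.5, 'sound_description': 'thud'}; text with
# no parseable {...} raises ValueError.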


def _coerce_audio_result(data: Dict[str, Any]) -> Tuple[float, str]:
    if "audio_timestamp" not in data or "sound_description" not in data:
        raise ValueError(f"Missing keys in JSON output: {data}")
    ts = float(data["audio_timestamp"])
    if not np.isfinite(ts):
        raise ValueError(f"audio_timestamp is not finite: {ts}")
    desc = str(data["sound_description"]).strip()
    if not desc:
        raise ValueError("sound_description is empty.")
    return ts, desc


def append_jsonl(path: Path, payload: Dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(payload, ensure_ascii=False) + "\n")


def load_source_rows(path: Path, sample_size: int, seed: int) -> List[Tuple[str, float]]:
    if not path.exists():
        raise FileNotFoundError(f"source CSV not found: {path}")

    rows: List[Tuple[str, float]] = []
    with path.open("r", encoding="utf-8", newline="") as f:
        reader = csv.DictReader(f)
        fields = reader.fieldnames or []
        if "video_id" not in fields or "t_gemini" not in fields:
            raise ValueError("source CSV must include columns: video_id, t_gemini")
        for r in reader:
            video_id = str(r.get("video_id", "")).strip()
            if not video_id:
                continue
            try:
                t_visual = float(r.get("t_gemini", ""))
            except (TypeError, ValueError):
                continue
            rows.append((video_id, t_visual))

    if not rows:
        raise ValueError("No valid rows parsed from source CSV.")

    if sample_size > 0 and len(rows) > sample_size:
        rng = np.random.default_rng(seed)
        idx = rng.choice(len(rows), size=sample_size, replace=False)
        rows = [rows[int(i)] for i in idx]
    rows.sort(key=lambda x: x[0])
    return rows


def index_by_filename(root: Path) -> Dict[str, Path]:
    if not root.exists():
        raise FileNotFoundError(f"directory not found: {root}")
    out: Dict[str, Path] = {}
    for p in root.rglob("*.mp4"):
        out.setdefault(p.name, p)
    return out


def shifted_filename(video_id: str, split: str) -> str:
    p = Path(video_id)
    suffix = p.suffix if p.suffix else ".mp4"
    stem = p.stem if p.suffix else p.name
    if split == "delay":
        return f"{stem}_delay2s{suffix}"
    if split == "early":
        return f"{stem}_early2s{suffix}"
    raise ValueError(f"Unknown split: {split}")
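# Filename mapping implied by the branches above, e.g.:
#   shifted_filename("abc123.mp4", "delay") -> "abc123_delay2s.mp4"
#   shifted_filename("abc123", "early")     -> "abc123_early2s.mp4"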


def extract_audio_file(video_path: Path, dst_path: Path, sr: int, fmt: str) -> None:
    dst_path.parent.mkdir(parents=True, exist_ok=True)
    cmd = [
        "ffmpeg",
        "-y",
        "-i",
        str(video_path),
        "-vn",
        "-ac",
        "1",
        "-ar",
        str(sr),
    ]
    if fmt == "wav":
        cmd += ["-c:a", "pcm_s16le"]
    else:
        cmd += ["-c:a", "libmp3lame", "-q:a", "2"]
    cmd.append(str(dst_path))
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
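# The cmd list above is equivalent to running, for the wav branch:
#   ffmpeg -y -i <video.mp4> -vn -ac 1 -ar 16000 -c:a pcm_s16le <out.wav>
# (mono, 16 kHz PCM at the defaults; the mp3 branch swaps in libmp3lame -q:a 2).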


def wait_for_gemini_file_active(file_name: str) -> None:
    deadline = time.time() + GEMINI_FILE_READY_TIMEOUT_SECONDS
    while time.time() < deadline:
        file_obj = genai.get_file(file_name)
        state_obj = getattr(file_obj, "state", None)
        state_name = str(getattr(state_obj, "name", state_obj))
        if state_name == "ACTIVE":
            return
        if state_name in {"FAILED", "CANCELLED"}:
            raise RuntimeError(f"Gemini file processing failed: state={state_name}")
        time.sleep(GEMINI_FILE_POLL_SECONDS)
    raise TimeoutError("Gemini file did not become ACTIVE in time.")


def gemini_audio_grounding(audio_path: Path, gemini_api_key: str) -> Tuple[float, str]:
    if genai is None:
        raise RuntimeError("google-generativeai is not installed.")
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is missing.")

    genai.configure(api_key=gemini_api_key)
    model = genai.GenerativeModel(GEMINI_MODEL)
    uploaded = genai.upload_file(path=str(audio_path))
    try:
        wait_for_gemini_file_active(uploaded.name)
        resp = model.generate_content([UNIVERSAL_PROMPT, uploaded])
        raw_text = getattr(resp, "text", "") or ""
        if not raw_text:
            raise ValueError("Gemini response text is empty.")
        parsed = _extract_json_object(raw_text)
        return _coerce_audio_result(parsed)
    finally:
        try:
            genai.delete_file(uploaded.name)
        except Exception:
            pass


def load_qwen_model_and_processor(model_id: str) -> Tuple[Any, Any]:
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        trust_remote_code=True,
    )
    model.eval()
    return model, processor


def _run_qwen_audio_chat(model: Any, processor: Any, audio_path: Path, prompt: str, label: str) -> str:
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "audio", "audio": str(audio_path)},
                {"type": "text", "text": prompt},
            ],
        }
    ]
    try:
        text_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    except Exception:
        text_prompt = prompt

    audio_arr, sr = librosa.load(str(audio_path), sr=None, mono=True)
    if audio_arr.size == 0:
        raise RuntimeError(f"{label}: extracted audio is empty.")

    last_exc: Optional[Exception] = None
    candidate_kwargs = [
        {"text": [text_prompt], "audio": [audio_arr], "sampling_rate": sr},
        {"text": [text_prompt], "audios": [audio_arr], "sampling_rate": sr},
    ]
    inputs = None
    for kw in candidate_kwargs:
        try:
            inputs = processor(return_tensors="pt", padding=True, **kw)
            break
        except Exception as exc:
            last_exc = exc
    if inputs is None:
        raise RuntimeError(f"{label}: failed to build processor inputs: {last_exc}")

    dev = model.device if hasattr(model, "device") else torch.device("cuda" if torch.cuda.is_available() else "cpu")
    for k, v in list(inputs.items()):
        if torch.is_tensor(v):
            inputs[k] = v.to(dev)

    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)
    input_len = inputs["input_ids"].shape[1] if "input_ids" in inputs else 0
    gen_ids = output_ids[:, input_len:] if input_len > 0 else output_ids
    text = processor.batch_decode(gen_ids, skip_special_tokens=True)[0].strip()
    return text


def qwen2_audio_grounding(audio_path: Path, model: Any, processor: Any) -> Tuple[float, str]:
    raw_text = _run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen2-Audio")
    parsed = _extract_json_object(raw_text)
    return _coerce_audio_result(parsed)


def qwen3_audio_grounding(audio_path: Path, model: Any, processor: Any) -> Tuple[float, str]:
    raw_text = _run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen3-Omni")
    parsed = _extract_json_object(raw_text)
    return _coerce_audio_result(parsed)


def safe_call(fn, *args, **kwargs) -> Tuple[Optional[Tuple[float, str]], Optional[str]]:
    try:
        return fn(*args, **kwargs), None
    except Exception as exc:
        return None, str(exc)


def summarize_errors(rows: List[Dict[str, Any]], model_key: str) -> Dict[str, Any]:
    vals = [r.get(model_key) for r in rows]
    with_pred = [float(v) for v in vals if v is not None]
    return {
        "count": len(rows),
        "with_prediction": len(with_pred),
        "without_prediction": len(rows) - len(with_pred),
    }
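# summarize_errors() reports prediction coverage only, e.g. (hypothetical counts):
#   {"count": 63, "with_prediction": 60, "without_prediction": 3}
# Timestamp accuracy itself is left to downstream analysis of the jsonl rows.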


def main() -> None:
    args = parse_args()
    ensure_ffmpeg()

    gemini_api_key = os.getenv("GEMINI_API_KEY", "").strip()
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is required.")

    rows = load_source_rows(args.source_csv, args.sample_size, args.seed)
    delay_index = index_by_filename(args.delay_dir)
    early_index = index_by_filename(args.early_dir)

    out_root = args.output_dir
    audio_dir = out_root / "audio"
    gemini_dir = out_root / "gemini"
    qwen2_dir = out_root / "qwen2"
    qwen3_dir = out_root / "qwen3"
    for d in [audio_dir, gemini_dir, qwen2_dir, qwen3_dir]:
        d.mkdir(parents=True, exist_ok=True)

    delay_results_path = out_root / "delay_results.jsonl"
    early_results_path = out_root / "early_results.jsonl"
    missing_path = out_root / "missing_or_errors.json"
    summary_path = out_root / "summary.json"
    sampled_path = out_root / "sampled_10_from_gemini.csv"

    model_out = {
        "gemini_delay": gemini_dir / "delay_results.jsonl",
        "gemini_early": gemini_dir / "early_results.jsonl",
        "qwen2_delay": qwen2_dir / "delay_results.jsonl",
        "qwen2_early": qwen2_dir / "early_results.jsonl",
        "qwen3_delay": qwen3_dir / "delay_results.jsonl",
        "qwen3_early": qwen3_dir / "early_results.jsonl",
    }

    if args.overwrite_output:
        for p in [delay_results_path, early_results_path, missing_path, summary_path, sampled_path, *model_out.values()]:
            if p.exists():
                p.unlink()

    qwen2_model = qwen2_processor = None
    qwen3_model = qwen3_processor = None
    if not args.disable_local_models:
        try:
            qwen2_model, qwen2_processor = load_qwen_model_and_processor(QWEN2_MODEL_ID)
        except Exception:
            qwen2_model = qwen2_processor = None
        try:
            qwen3_model, qwen3_processor = load_qwen_model_and_processor(QWEN3_MODEL_ID)
        except Exception:
            qwen3_model = qwen3_processor = None

    delay_rows: List[Dict[str, Any]] = []
    early_rows: List[Dict[str, Any]] = []
    missing: List[Dict[str, Any]] = []

    with sampled_path.open("w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["video_id", "t_visual"])
        writer.writeheader()
        for video_id, t_visual in rows:
            writer.writerow({"video_id": video_id, "t_visual": round(t_visual, 3)})

    for video_id, t_visual in tqdm(rows, desc="Audio LLM Benchmark", unit="video"):
        for split, shift, idx, out_path, combined_rows, model_suffix in [
            ("delay", args.delay_shift, delay_index, delay_results_path, delay_rows, "delay"),
            ("early", args.early_shift, early_index, early_results_path, early_rows, "early"),
        ]:
            shifted_name = shifted_filename(video_id, split)
            video_path = idx.get(shifted_name)
            if video_path is None:
                missing.append({"split": split, "video_id": video_id, "reason": "file_not_found"})
                continue

            audio_path = audio_dir / split / f"{Path(video_id).stem}.{args.audio_format}"
            try:
                extract_audio_file(video_path, audio_path, args.audio_sr, args.audio_format)
            except Exception as exc:
                missing.append({"split": split, "video_id": video_id, "reason": f"audio_extract_failed: {exc}"})
                continue

            gemini_res, gemini_err = safe_call(gemini_audio_grounding, audio_path, gemini_api_key)
            qwen2_res = qwen2_err = None
            qwen3_res = qwen3_err = None

            if qwen2_model is not None and qwen2_processor is not None:
                qwen2_res, qwen2_err = safe_call(qwen2_audio_grounding, audio_path, qwen2_model, qwen2_processor)
            if qwen3_model is not None and qwen3_processor is not None:
                qwen3_res, qwen3_err = safe_call(qwen3_audio_grounding, audio_path, qwen3_model, qwen3_processor)

            if gemini_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"gemini_error: {gemini_err}"})
            if qwen2_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"qwen2_error: {qwen2_err}"})
            if qwen3_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"qwen3_error: {qwen3_err}"})

            expected_t_audio = float(t_visual) + float(shift)
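            # Ground truth under the shift: with the default +2.0 s delay, an
            # impact visible at t_visual = 10.0 should be heard at 12.0; the
            # early split uses -2.0 s, so the same impact maps to 8.0.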
            row = {
                "video_path": str(video_path),
                "video_id": video_path.name,
                "source_video_id": video_id,
                "t_visual": float(t_visual),
                "shift_amount": float(shift),
                "expected_t_audio": expected_t_audio,
                "gemini_t": round(gemini_res[0], 3) if gemini_res else None,
                "qwen2_t": round(qwen2_res[0], 3) if qwen2_res else None,
                "qwen3_t": round(qwen3_res[0], 3) if qwen3_res else None,
                "gemini_desc": gemini_res[1] if gemini_res else None,
                "qwen2_desc": qwen2_res[1] if qwen2_res else None,
                "qwen3_desc": qwen3_res[1] if qwen3_res else None,
                "audio_path": str(audio_path),
            }
            append_jsonl(out_path, row)
            combined_rows.append(row)

            # Per-model jsonl outputs under dedicated subfolders.
            append_jsonl(
                model_out[f"gemini_{model_suffix}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(gemini_res[0], 3) if gemini_res else None,
                    "sound_description": gemini_res[1] if gemini_res else None,
                    "audio_path": str(audio_path),
                },
            )
            append_jsonl(
                model_out[f"qwen2_{model_suffix}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(qwen2_res[0], 3) if qwen2_res else None,
                    "sound_description": qwen2_res[1] if qwen2_res else None,
                    "audio_path": str(audio_path),
                },
            )
            append_jsonl(
                model_out[f"qwen3_{model_suffix}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(qwen3_res[0], 3) if qwen3_res else None,
                    "sound_description": qwen3_res[1] if qwen3_res else None,
                    "audio_path": str(audio_path),
                },
            )

    with missing_path.open("w", encoding="utf-8") as f:
        json.dump(missing, f, ensure_ascii=False, indent=2)

    summary = {
        "source_csv": str(args.source_csv),
        "sample_size": len(rows),
        "seed": args.seed,
        "delay_shift": args.delay_shift,
        "early_shift": args.early_shift,
        "delay_summary": {
            "gemini": summarize_errors(delay_rows, "gemini_t"),
            "qwen2": summarize_errors(delay_rows, "qwen2_t"),
            "qwen3": summarize_errors(delay_rows, "qwen3_t"),
            "count": len(delay_rows),
        },
        "early_summary": {
            "gemini": summarize_errors(early_rows, "gemini_t"),
            "qwen2": summarize_errors(early_rows, "qwen2_t"),
            "qwen3": summarize_errors(early_rows, "qwen3_t"),
            "count": len(early_rows),
        },
        "missing_or_errors_count": len(missing),
        "outputs": {
            "delay_jsonl": str(delay_results_path),
            "early_jsonl": str(early_results_path),
            "missing_or_errors_json": str(missing_path),
            "sampled_csv": str(sampled_path),
            "audio_dir": str(audio_dir),
            "gemini_dir": str(gemini_dir),
            "qwen2_dir": str(qwen2_dir),
            "qwen3_dir": str(qwen3_dir),
        },
    }
    with summary_path.open("w", encoding="utf-8") as f:
        json.dump(summary, f, ensure_ascii=False, indent=2)

    print("========== Audio LLM Benchmark Summary ==========")
    print(f"Sampled source videos: {len(rows)}")
    print(f"Delay evaluated rows: {len(delay_rows)}")
    print(f"Early evaluated rows: {len(early_rows)}")
    print(f"Missing/errors: {len(missing)}")
    print(f"Summary: {summary_path}")
    print("================================================")


if __name__ == "__main__":
    main()
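# A minimal invocation sketch for the script above (flags are the ones defined
# in parse_args; the module filename is assumed here for illustration):
#   GEMINI_API_KEY=... python audio_llm_benchmark.py \
#       --sample-size 0 --overwrite-output \
#       --output-dir /home/ubuntu/video_source/result/audio_llm_benchmark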
#!/usr/bin/env python3
"""
Audio-LLM benchmark for physical-impact timestamp grounding.

Models:
- Gemini 3.1 Pro (google-generativeai, API)
- Qwen2-Audio-7B-Instruct (local transformers)
- Qwen3-Omni-30B-A3B-Instruct (local transformers)
"""

from __future__ import annotations

import argparse
import json
import os
import re
import subprocess
import tempfile
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import librosa
import numpy as np
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoProcessor

try:
    import google.generativeai as genai
except Exception:  # pragma: no cover
    genai = None


UNIVERSAL_PROMPT = (
    "Listen to this audio clip carefully. You are an expert in acoustic forensics. "
    "Locate the EXACT timestamp (in seconds) of the loudest, most energetic PHYSICAL IMPACT sound "
    "(e.g., body hitting ground, object breaking). Actively ignore background music, laughter, and speech. "
    'Output strictly in JSON format: {"audio_timestamp": 2.5, "sound_description": "brief description"}'
)

DEFAULT_INPUT_DIR = Path("/home/ubuntu/video_source/shifted/delay")
DEFAULT_OUTPUT_JSONL = Path("/home/ubuntu/video_source/result/audio_llm_benchmark.jsonl")
DEFAULT_MAX_VIDEOS = 0
DEFAULT_AUDIO_SR = 16000
DEFAULT_AUDIO_FORMAT = "wav"
DEFAULT_SEED = 42

GEMINI_MODEL = "gemini-3.1-pro-preview"
QWEN2_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
QWEN3_MODEL_ID = "Qwen/Qwen3-Omni-30B-A3B-Instruct"

GEMINI_FILE_READY_TIMEOUT_SECONDS = 120
GEMINI_FILE_POLL_SECONDS = 2


@dataclass
class AudioResult:
    audio_timestamp: float
    sound_description: str


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Benchmark Gemini/Qwen2/Qwen3 on audio grounding.")
    p.add_argument("--input-dir", type=Path, default=DEFAULT_INPUT_DIR, help="Folder of videos (.mp4)")
    p.add_argument("--output-jsonl", type=Path, default=DEFAULT_OUTPUT_JSONL, help="Benchmark output JSONL")
    p.add_argument("--audio-sr", type=int, default=DEFAULT_AUDIO_SR, help="Extracted audio sample rate")
    p.add_argument(
        "--audio-format",
        choices=["wav", "mp3"],
        default=DEFAULT_AUDIO_FORMAT,
        help="Temporary extracted audio format",
    )
    p.add_argument("--max-videos", type=int, default=DEFAULT_MAX_VIDEOS, help="0 means all videos")
    p.add_argument("--seed", type=int, default=DEFAULT_SEED, help="Random seed for selection")
    p.add_argument("--overwrite-output", action="store_true", help="Delete existing output JSONL before run")
    p.add_argument(
        "--save-extracted-audio-dir",
        type=Path,
        default=None,
        help="If set, keep extracted audio files in this directory for inspection",
    )
    p.add_argument(
        "--disable-local-models",
        action="store_true",
        help="Only run Gemini (skip Qwen2/Qwen3 loading)",
    )
    return p.parse_args()


def ensure_ffmpeg() -> None:
    subprocess.run(
        ["ffmpeg", "-version"],
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )


def _extract_json_object(text: str) -> Dict[str, Any]:
    text = text.strip()
    fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL)
    if fence_match:
        return json.loads(fence_match.group(1))
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        pass
    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end != -1 and end > start:
        return json.loads(text[start : end + 1])
    raise ValueError("No valid JSON object found in model output.")


def _coerce_audio_result(data: Dict[str, Any]) -> AudioResult:
    if "audio_timestamp" not in data or "sound_description" not in data:
        raise ValueError(f"Missing keys in JSON output: {data}")
    ts = float(data["audio_timestamp"])
    if not np.isfinite(ts):
        raise ValueError(f"audio_timestamp is not finite: {ts}")
    desc = str(data["sound_description"]).strip()
    if not desc:
        raise ValueError("sound_description is empty.")
    return AudioResult(audio_timestamp=ts, sound_description=desc)


def append_jsonl(path: Path, payload: Dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(payload, ensure_ascii=False) + "\n")


def extract_audio_file(video_path: Path, sr: int, fmt: str, save_dir: Optional[Path]) -> Path:
    suffix = f".{fmt}"
    if save_dir is not None:
        save_dir.mkdir(parents=True, exist_ok=True)
        safe_name = f"{video_path.stem}.{fmt}"
        out_path = save_dir / safe_name
    else:
        tmp = tempfile.NamedTemporaryFile(prefix="bench_audio_", suffix=suffix, delete=False)
        out_path = Path(tmp.name)
        tmp.close()

    cmd = [
        "ffmpeg",
        "-y",
        "-i",
        str(video_path),
        "-vn",
        "-ac",
        "1",
        "-ar",
        str(sr),
    ]
    if fmt == "wav":
        cmd += ["-c:a", "pcm_s16le"]
    else:
        cmd += ["-c:a", "libmp3lame", "-q:a", "2"]
    cmd.append(str(out_path))
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    return out_path


def wait_for_gemini_file_active(file_name: str) -> None:
    deadline = time.time() + GEMINI_FILE_READY_TIMEOUT_SECONDS
    while time.time() < deadline:
        file_obj = genai.get_file(file_name)
        state_obj = getattr(file_obj, "state", None)
        state_name = str(getattr(state_obj, "name", state_obj))
        if state_name == "ACTIVE":
            return
        if state_name in {"FAILED", "CANCELLED"}:
            raise RuntimeError(f"Gemini file processing failed: state={state_name}")
        time.sleep(GEMINI_FILE_POLL_SECONDS)
    raise TimeoutError("Gemini file did not become ACTIVE in time.")


def gemini_audio_grounding(audio_path: Path, gemini_api_key: str) -> AudioResult:
    if genai is None:
        raise RuntimeError("google-generativeai is not installed.")
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is missing.")
    genai.configure(api_key=gemini_api_key)
    model = genai.GenerativeModel(GEMINI_MODEL)
    uploaded = genai.upload_file(path=str(audio_path))
    try:
        wait_for_gemini_file_active(uploaded.name)
        resp = model.generate_content([UNIVERSAL_PROMPT, uploaded])
        raw_text = getattr(resp, "text", "") or ""
        if not raw_text:
            raise ValueError("Gemini response text is empty.")
        parsed = _extract_json_object(raw_text)
        return _coerce_audio_result(parsed)
    finally:
        try:
            genai.delete_file(uploaded.name)
        except Exception:
            pass


def load_qwen_model_and_processor(model_id: str) -> Tuple[Any, Any]:
    """
    Best-effort universal loader for Qwen audio models with remote code.
    """
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        trust_remote_code=True,
    )
    model.eval()
    return model, processor


def _run_qwen_audio_chat(
    model: Any,
    processor: Any,
    audio_path: Path,
    prompt: str,
    model_label: str,
) -> str:
    """
    Generic chat-template path used by both Qwen2-Audio and Qwen3-Omni.

    This intentionally keeps robust fallbacks because Qwen audio model
    templates differ across transformers/model revisions.
    """
    # 1) Build multimodal chat message.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "audio", "audio": str(audio_path)},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    # 2) Render chat text by processor template (if available).
    try:
        text_prompt = processor.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
    except Exception:
        text_prompt = prompt

    # 3) Load audio as waveform for processor input.
    audio_arr, sr = librosa.load(str(audio_path), sr=None, mono=True)
    if audio_arr.size == 0:
        raise RuntimeError(f"{model_label}: extracted audio is empty.")

    # 4) Try common processor signatures across versions.
    last_exc: Optional[Exception] = None
    candidate_kwargs = [
        {"text": [text_prompt], "audio": [audio_arr], "sampling_rate": sr},
        {"text": [text_prompt], "audios": [audio_arr], "sampling_rate": sr},
    ]

    for kw in candidate_kwargs:
        try:
            inputs = processor(return_tensors="pt", padding=True, **kw)
            break
        except Exception as exc:
            last_exc = exc
    else:
        raise RuntimeError(f"{model_label}: failed to build processor inputs: {last_exc}")

    # Move tensor inputs to model device when possible.
    if hasattr(model, "device"):
        dev = model.device
    else:
        dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    for k, v in list(inputs.items()):
        if torch.is_tensor(v):
            inputs[k] = v.to(dev)

    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)

    # Decode only newly generated tokens when possible.
    input_len = inputs["input_ids"].shape[1] if "input_ids" in inputs else 0
    gen_ids = output_ids[:, input_len:] if input_len > 0 else output_ids
    text = processor.batch_decode(gen_ids, skip_special_tokens=True)[0].strip()
    return text


def qwen2_audio_grounding(audio_path: Path, model: Any, processor: Any) -> AudioResult:
    raw_text = _run_qwen_audio_chat(
        model=model,
        processor=processor,
        audio_path=audio_path,
        prompt=UNIVERSAL_PROMPT,
        model_label="Qwen2-Audio",
    )
    parsed = _extract_json_object(raw_text)
    return _coerce_audio_result(parsed)


def qwen3_omni_grounding(audio_path: Path, model: Any, processor: Any) -> AudioResult:
    raw_text = _run_qwen_audio_chat(
        model=model,
        processor=processor,
        audio_path=audio_path,
        prompt=UNIVERSAL_PROMPT,
        model_label="Qwen3-Omni",
    )
    parsed = _extract_json_object(raw_text)
    return _coerce_audio_result(parsed)


def safe_call(fn, *args, **kwargs) -> Optional[AudioResult]:
    try:
        return fn(*args, **kwargs)
    except Exception:
        return None


def select_videos(input_dir: Path, max_videos: int, seed: int) -> List[Path]:
    if not input_dir.exists():
        raise FileNotFoundError(f"Input folder not found: {input_dir}")
    videos = sorted(input_dir.rglob("*.mp4"))
    if not videos:
        raise FileNotFoundError(f"No mp4 files found under: {input_dir}")
    if max_videos and len(videos) > max_videos:
        rng = np.random.default_rng(seed)
        idx = rng.choice(len(videos), size=max_videos, replace=False)
        videos = sorted([videos[int(i)] for i in idx], key=lambda p: p.name)
    return videos


def main() -> None:
    args = parse_args()
    ensure_ffmpeg()

    gemini_api_key = os.getenv("GEMINI_API_KEY", "").strip()
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is required.")

    if args.overwrite_output and args.output_jsonl.exists():
        args.output_jsonl.unlink()

    videos = select_videos(args.input_dir, args.max_videos, args.seed)

    qwen2_model = qwen2_processor = None
    qwen3_model = qwen3_processor = None
    if not args.disable_local_models:
        try:
            qwen2_model, qwen2_processor = load_qwen_model_and_processor(QWEN2_MODEL_ID)
        except Exception:
            qwen2_model = qwen2_processor = None
        try:
            qwen3_model, qwen3_processor = load_qwen_model_and_processor(QWEN3_MODEL_ID)
        except Exception:
            qwen3_model = qwen3_processor = None

    for video_path in tqdm(videos, desc="Audio LLM Benchmark", unit="video"):
        audio_path = None
        try:
            audio_path = extract_audio_file(
                video_path,
                sr=args.audio_sr,
                fmt=args.audio_format,
                save_dir=args.save_extracted_audio_dir,
            )

            gemini_res = safe_call(gemini_audio_grounding, audio_path, gemini_api_key)

            qwen2_res = None
            if qwen2_model is not None and qwen2_processor is not None:
                qwen2_res = safe_call(qwen2_audio_grounding, audio_path, qwen2_model, qwen2_processor)

            qwen3_res = None
            if qwen3_model is not None and qwen3_processor is not None:
                qwen3_res = safe_call(qwen3_omni_grounding, audio_path, qwen3_model, qwen3_processor)

            row = {
                "video": video_path.name,
                "gemini_t": round(gemini_res.audio_timestamp, 3) if gemini_res else None,
                "qwen2_t": round(qwen2_res.audio_timestamp, 3) if qwen2_res else None,
                "qwen3_t": round(qwen3_res.audio_timestamp, 3) if qwen3_res else None,
                "gemini_desc": gemini_res.sound_description if gemini_res else None,
                "qwen2_desc": qwen2_res.sound_description if qwen2_res else None,
                "qwen3_desc": qwen3_res.sound_description if qwen3_res else None,
            }
            append_jsonl(args.output_jsonl, row)
        finally:
            if (
                audio_path is not None
                and audio_path.exists()
                and args.save_extracted_audio_dir is None
            ):
                audio_path.unlink(missing_ok=True)

    print(f"Done. Results written to: {args.output_jsonl}")


if __name__ == "__main__":
    main()
'''
sync_research/audio_llm_benchmark_shifted.py
ADDED
@@ -0,0 +1,621 @@
#!/usr/bin/env python3
from __future__ import annotations

import argparse
import ast
import csv
import json
import os
import re
import subprocess
import time
from pathlib import Path
from statistics import mean  # used by summarize() below; missing in the original
from typing import Any, Dict, List, Optional, Tuple

import librosa
import numpy as np
import torch
from tqdm import tqdm
from transformers import (
    AutoProcessor,
    Qwen2AudioForConditionalGeneration,
    Qwen3OmniMoeProcessor,
    Qwen3OmniMoeForConditionalGeneration,
)
from qwen_omni_utils import process_mm_info

try:
    from google import genai as google_genai
except Exception:  # pragma: no cover
    google_genai = None


UNIVERSAL_PROMPT = (
    "Listen to this audio clip carefully. You are an expert in acoustic forensics. "
    "Locate the EXACT timestamp (in seconds) of the loudest, most energetic PHYSICAL IMPACT sound "
    "(e.g., body hitting ground, object breaking). Actively ignore background music, laughter, and speech. "
    'Output strictly in JSON format: {"audio_timestamp": 2.5, "sound_description": "brief description"}'
)

DEFAULT_SOURCE_CSV = Path(
    "/home/ubuntu/video_source/result/gemini_video_oc_frame/gemini_validated_accept_lt1p0.csv"
)
DEFAULT_DELAY_DIR = Path("/home/ubuntu/video_source/shifted/delay")
DEFAULT_EARLY_DIR = Path("/home/ubuntu/video_source/shifted/early")
DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/video_source/result/audio_llm_benchmark")

GEMINI_MODEL = "gemini-3.1-pro-preview"
QWEN2_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
QWEN3_MODEL_ID = "Qwen/Qwen3-Omni-30B-A3B-Instruct"


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Shifted audio benchmark for Gemini/Qwen2/Qwen3.")
    p.add_argument("--source-csv", type=Path, default=DEFAULT_SOURCE_CSV)
    p.add_argument("--delay-dir", type=Path, default=DEFAULT_DELAY_DIR)
    p.add_argument("--early-dir", type=Path, default=DEFAULT_EARLY_DIR)
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
    p.add_argument("--audio-sr", type=int, default=16000)
    p.add_argument("--audio-format", choices=["wav", "mp3"], default="wav")
    p.add_argument("--sample-size", type=int, default=0, help="0 = use all source rows")
    p.add_argument("--seed", type=int, default=42)
    p.add_argument("--delay-shift", type=float, default=2.0)
    p.add_argument("--early-shift", type=float, default=-2.0)
    p.add_argument("--disable-local-models", action="store_true")
    p.add_argument("--overwrite-output", action="store_true")
    return p.parse_args()


def ensure_ffmpeg() -> None:
    subprocess.run(
        ["ffmpeg", "-version"],
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )


def _extract_json_object(text: str) -> Dict[str, Any]:
    text = text.strip()
    fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL)
    if fence_match:
        return json.loads(fence_match.group(1))

    def _parse_json_like(candidate: str) -> Dict[str, Any]:
        obj = json.loads(candidate)
        if isinstance(obj, str):
            obj = json.loads(obj)
        if not isinstance(obj, dict):
            raise ValueError("Parsed value is not a JSON object")
        return obj

    # strict JSON first
    try:
        return _parse_json_like(text)
    except Exception:
        pass

    # common Qwen escaped JSON variant: {\"audio_timestamp\": \"2.50\", ...}
    unescaped = text.replace('\\"', '"')
    try:
        return _parse_json_like(unescaped)
    except Exception:
        pass

    # parse JSON object substring if model adds extra prose
    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end != -1 and end > start:
        snippet = text[start : end + 1]
        for candidate in (snippet, snippet.replace('\\"', '"')):
            try:
                return _parse_json_like(candidate)
            except Exception:
                continue
        # fallback: python-dict-like output with single quotes
        try:
            obj = ast.literal_eval(snippet)
            if isinstance(obj, dict):
                return obj
        except Exception:
            pass

    raise ValueError(f"No valid JSON object found in model output: {text[:240]}")
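# The ladder above tries, in order (hypothetical model outputs):
#   '{"audio_timestamp": 2.5, "sound_description": "thud"}'       -> strict JSON
#   '{\"audio_timestamp\": \"2.50\", \"sound_description\": ...}' -> unescape, then JSON
#   "{'audio_timestamp': 2.5, 'sound_description': 'thud'}"       -> ast.literal_eval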


def _coerce_result(data: Dict[str, Any]) -> Tuple[float, str]:
    ts = float(data["audio_timestamp"])
    desc = str(data["sound_description"]).strip()
    if not np.isfinite(ts):
        raise ValueError(f"Invalid audio_timestamp: {ts}")
    if not desc:
        raise ValueError("sound_description is empty")
    return ts, desc


def append_jsonl(path: Path, row: Dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(row, ensure_ascii=False) + "\n")


def append_jsonl_line(path: Path, row: Dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("a", encoding="utf-8") as f:
        f.write(json.dumps(row, ensure_ascii=False) + "\n")


def load_source_rows(path: Path, sample_size: int, seed: int) -> List[Tuple[str, float]]:
    if not path.exists():
        raise FileNotFoundError(f"source csv not found: {path}")
    out: List[Tuple[str, float]] = []
    with path.open("r", encoding="utf-8", newline="") as f:
        reader = csv.DictReader(f)
        fields = reader.fieldnames or []
        if "video_id" not in fields or "t_gemini" not in fields:
            raise ValueError("source csv must include video_id and t_gemini")
        for r in reader:
            video_id = str(r.get("video_id", "")).strip()
            if not video_id:
                continue
            try:
                t_visual = float(r.get("t_gemini", ""))
            except (TypeError, ValueError):
                continue
            out.append((video_id, t_visual))
    if not out:
        raise ValueError("no valid rows in source csv")
    if sample_size > 0 and len(out) > sample_size:
        rng = np.random.default_rng(seed)
        idx = rng.choice(len(out), size=sample_size, replace=False)
        out = [out[int(i)] for i in idx]
    out.sort(key=lambda x: x[0])
    return out


def index_by_filename(root: Path) -> Dict[str, Path]:
    if not root.exists():
        raise FileNotFoundError(f"directory not found: {root}")
    out: Dict[str, Path] = {}
    for p in root.rglob("*.mp4"):
        out.setdefault(p.name, p)
    return out


def shifted_filename(video_id: str, split: str) -> str:
    p = Path(video_id)
    suffix = p.suffix if p.suffix else ".mp4"
    stem = p.stem if p.suffix else p.name
    if split == "delay":
        return f"{stem}_delay2s{suffix}"
    if split == "early":
        return f"{stem}_early2s{suffix}"
    raise ValueError(f"Unknown split: {split}")


def extract_audio_file(video_path: Path, out_path: Path, sr: int, fmt: str) -> None:
    out_path.parent.mkdir(parents=True, exist_ok=True)
    cmd = ["ffmpeg", "-y", "-i", str(video_path), "-vn", "-ac", "1", "-ar", str(sr)]
    if fmt == "wav":
        cmd += ["-c:a", "pcm_s16le"]
    else:
        cmd += ["-c:a", "libmp3lame", "-q:a", "2"]
    cmd.append(str(out_path))
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)


def gemini_infer(audio_path: Path, api_key: str) -> Tuple[float, str]:
    if google_genai is None:
        raise RuntimeError("google-genai is not installed (pip install google-genai)")
    if not api_key:
        raise RuntimeError("GEMINI_API_KEY is missing")
    client = google_genai.Client(api_key=api_key)
    uploaded = client.files.upload(file=str(audio_path))
    try:
        last_exc: Optional[Exception] = None
        for _ in range(8):
            try:
                resp = client.models.generate_content(
                    model=GEMINI_MODEL,
                    contents=[UNIVERSAL_PROMPT, uploaded],
                )
                raw = (getattr(resp, "text", "") or "").strip()
                parsed = _extract_json_object(raw)
                return _coerce_result(parsed)
            except Exception as exc:  # transient "file not ready" is common
                last_exc = exc
                time.sleep(2)
        raise RuntimeError(f"Gemini inference failed after retries: {last_exc}")
    finally:
        try:
            client.files.delete(name=uploaded.name)
        except Exception:
            pass
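# Note on gemini_infer above: a freshly uploaded file can take a few seconds to
# become readable, so generate_content is retried up to 8 times with a 2 s
# sleep between attempts (roughly 16 s worst case) before giving up.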
| 236 |
+
|
| 237 |
+
def load_qwen2(model_id: str) -> Tuple[Any, Any]:
|
| 238 |
+
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
|
| 239 |
+
model = Qwen2AudioForConditionalGeneration.from_pretrained(
|
| 240 |
+
model_id,
|
| 241 |
+
torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
|
| 242 |
+
device_map="auto" if torch.cuda.is_available() else None,
|
| 243 |
+
trust_remote_code=True,
|
| 244 |
+
)
|
| 245 |
+
model.eval()
|
| 246 |
+
return model, processor
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def load_qwen3(model_id: str) -> Tuple[Any, Any]:
|
| 250 |
+
processor = Qwen3OmniMoeProcessor.from_pretrained(model_id, trust_remote_code=True)
|
| 251 |
+
model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
|
| 252 |
+
model_id,
|
| 253 |
+
dtype="auto",
|
| 254 |
+
device_map="auto" if torch.cuda.is_available() else None,
|
| 255 |
+
trust_remote_code=True,
|
| 256 |
+
)
|
| 257 |
+
# Official recommendation when only text output is needed.
|
| 258 |
+
model.disable_talker()
|
| 259 |
+
model.eval()
|
| 260 |
+
return model, processor
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def qwen_infer(audio_path: Path, model: Any, processor: Any, label: str) -> Tuple[float, str]:
|
| 264 |
+
messages = [
|
| 265 |
+
{
|
| 266 |
+
"role": "user",
|
| 267 |
+
"content": [{"type": "audio", "audio": str(audio_path)}, {"type": "text", "text": UNIVERSAL_PROMPT}],
|
| 268 |
+
}
|
| 269 |
+
]
|
| 270 |
+
try:
|
| 271 |
+
text_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 272 |
+
except Exception:
|
| 273 |
+
text_prompt = UNIVERSAL_PROMPT
|
| 274 |
+
|
| 275 |
+
audio_arr, sr = librosa.load(str(audio_path), sr=None, mono=True)
|
| 276 |
+
if audio_arr.size == 0:
|
| 277 |
+
raise RuntimeError(f"{label}: empty audio")
|
| 278 |
+
|
| 279 |
+
inputs = None
|
| 280 |
+
last_exc: Optional[Exception] = None
|
| 281 |
+
for kw in [
|
| 282 |
+
{"text": [text_prompt], "audio": [audio_arr], "sampling_rate": sr},
|
| 283 |
+
{"text": [text_prompt], "audios": [audio_arr], "sampling_rate": sr},
|
| 284 |
+
]:
|
| 285 |
+
try:
|
| 286 |
+
inputs = processor(return_tensors="pt", padding=True, **kw)
|
| 287 |
+
break
|
| 288 |
+
except Exception as exc:
|
| 289 |
+
last_exc = exc
|
| 290 |
+
if inputs is None:
|
| 291 |
+
raise RuntimeError(f"{label}: processor input failed: {last_exc}")
|
| 292 |
+
|
| 293 |
+
dev = model.device if hasattr(model, "device") else torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 294 |
+
model_dtype = getattr(model, "dtype", None)
|
| 295 |
+
if model_dtype is None:
|
| 296 |
+
try:
|
| 297 |
+
model_dtype = next(model.parameters()).dtype
|
| 298 |
+
except Exception:
|
| 299 |
+
model_dtype = None
|
| 300 |
+
for k, v in list(inputs.items()):
|
| 301 |
+
if torch.is_tensor(v):
|
| 302 |
+
tensor = v.to(dev)
|
| 303 |
+
# Qwen3-Omni expects floating features to match model dtype (e.g., bfloat16 on GPU).
|
| 304 |
+
if tensor.is_floating_point() and model_dtype is not None:
|
| 305 |
+
tensor = tensor.to(model_dtype)
|
| 306 |
+
inputs[k] = tensor
|
| 307 |
+
|
| 308 |
+
with torch.no_grad():
|
| 309 |
+
out_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)
|
| 310 |
+
in_len = inputs["input_ids"].shape[1] if "input_ids" in inputs else 0
|
| 311 |
+
gen_ids = out_ids[:, in_len:] if in_len > 0 else out_ids
|
| 312 |
+
raw = processor.batch_decode(gen_ids, skip_special_tokens=True)[0].strip()
|
| 313 |
+
parsed = _extract_json_object(raw)
|
| 314 |
+
return _coerce_result(parsed)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def qwen3_infer(audio_path: Path, model: Any, processor: Any, label: str) -> Tuple[float, str]:
    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "audio", "audio": str(audio_path)},
                {"type": "text", "text": UNIVERSAL_PROMPT},
            ],
        }
    ]
    text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
    audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
    inputs = processor(
        text=text,
        audio=audios,
        images=images,
        videos=videos,
        return_tensors="pt",
        padding=True,
        use_audio_in_video=False,
    )
    inputs = inputs.to(model.device).to(model.dtype)
    with torch.no_grad():
        out_ids = model.generate(
            **inputs,
            return_audio=False,
            use_audio_in_video=False,
            max_new_tokens=256,
            do_sample=False,
        )
    in_len = inputs["input_ids"].shape[1] if "input_ids" in inputs else 0
    gen_ids = out_ids[:, in_len:] if in_len > 0 else out_ids
    raw = processor.batch_decode(gen_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0].strip()
    parsed = _extract_json_object(raw)
    return _coerce_result(parsed)


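Note: `process_mm_info` comes from the qwen_omni_utils helper package (`pip install qwen-omni-utils`) that accompanies the Qwen-Omni examples; it resolves the audio/image/video entries in the conversation into arrays the processor accepts. `return_audio=False` keeps generation text-only, consistent with the `disable_talker()` call in the loader above.
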
def safe_call(fn, *args, **kwargs) -> Tuple[Optional[Tuple[float, str]], Optional[str]]:
    try:
        return fn(*args, **kwargs), None
    except Exception as exc:
        return None, str(exc)


def summarize(rows: List[Dict[str, Any]], key: str) -> Dict[str, Any]:
    vals = [r.get(key) for r in rows]
    ok = [v for v in vals if v is not None]
    errors: List[float] = []
    for r in rows:
        pred = r.get(key)
        exp = r.get("expected_t_audio")
        if pred is None or exp is None:
            continue
        errors.append(abs(float(pred) - float(exp)))
    errors_sorted = sorted(errors)
    return {
        "count": len(rows),
        "with_prediction": len(ok),
        "without_prediction": len(rows) - len(ok),
        "mae_sec": round(mean(errors), 4) if errors else None,
        "median_like_sec": round(errors_sorted[len(errors_sorted) // 2], 4) if errors_sorted else None,
        "within_0_3_sec": sum(1 for e in errors if e <= 0.3),
        "within_0_5_sec": sum(1 for e in errors if e <= 0.5),
        "within_1_0_sec": sum(1 for e in errors if e <= 1.0),
    }


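For a concrete sense of what summarize() reports, here is a toy call with two rows, one prediction 0.2 s off and one missing (the values are arbitrary):

# Toy check of summarize(): one prediction off by ~0.2 s, one missing.
rows = [
    {"expected_t_audio": 3.0, "gemini_t": 3.2},
    {"expected_t_audio": 5.0, "gemini_t": None},
]
print(summarize(rows, "gemini_t"))
# {'count': 2, 'with_prediction': 1, 'without_prediction': 1, 'mae_sec': 0.2,
#  'median_like_sec': 0.2, 'within_0_3_sec': 1, 'within_0_5_sec': 1, 'within_1_0_sec': 1}
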
def main() -> None:
    args = parse_args()
    ensure_ffmpeg()

    gemini_key = os.getenv("GEMINI_API_KEY", "").strip()
    if not gemini_key:
        raise RuntimeError("GEMINI_API_KEY is required")

    source_rows = load_source_rows(args.source_csv, args.sample_size, args.seed)
    delay_index = index_by_filename(args.delay_dir)
    early_index = index_by_filename(args.early_dir)

    root = args.output_dir
    audio_dir = root / "audio"
    gemini_dir = root / "gemini"
    qwen2_dir = root / "qwen2"
    qwen3_dir = root / "qwen3"
    for d in [audio_dir, gemini_dir, qwen2_dir, qwen3_dir]:
        d.mkdir(parents=True, exist_ok=True)

    delay_out = root / "delay_results.jsonl"
    early_out = root / "early_results.jsonl"
    missing_out = root / "missing_or_errors.json"
    missing_live_out = root / "missing_or_errors.jsonl"
    summary_out = root / "summary.json"
    sampled_out = root / "sampled_10_from_gemini.csv"

    model_files = {
        "gemini_delay": gemini_dir / "delay_results.jsonl",
        "gemini_early": gemini_dir / "early_results.jsonl",
        "qwen2_delay": qwen2_dir / "delay_results.jsonl",
        "qwen2_early": qwen2_dir / "early_results.jsonl",
        "qwen3_delay": qwen3_dir / "delay_results.jsonl",
        "qwen3_early": qwen3_dir / "early_results.jsonl",
    }
    if args.overwrite_output:
        for p in [delay_out, early_out, missing_out, missing_live_out, summary_out, sampled_out, *model_files.values()]:
            if p.exists():
                p.unlink()

    qwen2_model = qwen2_proc = None
    qwen3_model = qwen3_proc = None
    qwen2_init_error: Optional[str] = None
    qwen3_init_error: Optional[str] = None
    if not args.disable_local_models:
        try:
            qwen2_model, qwen2_proc = load_qwen2(QWEN2_MODEL_ID)
            print(f"[init] loaded Qwen2: {QWEN2_MODEL_ID}")
        except Exception as exc:
            qwen2_model = qwen2_proc = None
            qwen2_init_error = str(exc)
            print(f"[init] failed to load Qwen2: {QWEN2_MODEL_ID} | {exc}")
        try:
            qwen3_model, qwen3_proc = load_qwen3(QWEN3_MODEL_ID)
            print(f"[init] loaded Qwen3: {QWEN3_MODEL_ID}")
        except Exception as exc:
            qwen3_model = qwen3_proc = None
            qwen3_init_error = str(exc)
            print(f"[init] failed to load Qwen3: {QWEN3_MODEL_ID} | {exc}")
    else:
        print("[init] local models disabled; running Gemini only")

    with sampled_out.open("w", encoding="utf-8", newline="") as f:
        w = csv.DictWriter(f, fieldnames=["video_id", "t_visual"])
        w.writeheader()
        for video_id, t_visual in source_rows:
            w.writerow({"video_id": video_id, "t_visual": round(t_visual, 3)})

    delay_rows: List[Dict[str, Any]] = []
    early_rows: List[Dict[str, Any]] = []
    missing: List[Dict[str, Any]] = []

    for video_id, t_visual in tqdm(source_rows, desc="Audio LLM Benchmark", unit="video"):
        for split, shift, idx, combined, combined_path in [
            ("delay", args.delay_shift, delay_index, delay_rows, delay_out),
            ("early", args.early_shift, early_index, early_rows, early_out),
        ]:
            shifted_name = shifted_filename(video_id, split)
            video_path = idx.get(shifted_name)
            if video_path is None:
                err_row = {"split": split, "video_id": video_id, "reason": "file_not_found"}
                missing.append(err_row)
                append_jsonl_line(missing_live_out, err_row)
                continue

            audio_path = audio_dir / split / f"{Path(video_id).stem}.{args.audio_format}"
            try:
                extract_audio_file(video_path, audio_path, args.audio_sr, args.audio_format)
            except Exception as exc:
                err_row = {"split": split, "video_id": video_id, "reason": f"audio_extract_failed: {exc}"}
                missing.append(err_row)
                append_jsonl_line(missing_live_out, err_row)
                continue

            gemini_res, gemini_err = safe_call(gemini_infer, audio_path, gemini_key)
            qwen2_res = qwen2_err = None
            qwen3_res = qwen3_err = None
            if qwen2_model is not None and qwen2_proc is not None:
                qwen2_res, qwen2_err = safe_call(qwen_infer, audio_path, qwen2_model, qwen2_proc, "Qwen2")
            else:
                qwen2_err = f"model_unavailable: {qwen2_init_error or 'load_failed_or_disabled'}"
            if qwen3_model is not None and qwen3_proc is not None:
                qwen3_res, qwen3_err = safe_call(qwen3_infer, audio_path, qwen3_model, qwen3_proc, "Qwen3")
            else:
                qwen3_err = f"model_unavailable: {qwen3_init_error or 'load_failed_or_disabled'}"

            if gemini_err:
                err_row = {"split": split, "video_id": video_id, "reason": f"gemini_error: {gemini_err}"}
                missing.append(err_row)
                append_jsonl_line(missing_live_out, err_row)
            if qwen2_err:
                err_row = {"split": split, "video_id": video_id, "reason": f"qwen2_error: {qwen2_err}"}
                missing.append(err_row)
                append_jsonl_line(missing_live_out, err_row)
            if qwen3_err:
                err_row = {"split": split, "video_id": video_id, "reason": f"qwen3_error: {qwen3_err}"}
                missing.append(err_row)
                append_jsonl_line(missing_live_out, err_row)

            expected_t = float(t_visual) + float(shift)
            gemini_abs_error = abs(gemini_res[0] - expected_t) if gemini_res else None
            qwen2_abs_error = abs(qwen2_res[0] - expected_t) if qwen2_res else None
            qwen3_abs_error = abs(qwen3_res[0] - expected_t) if qwen3_res else None
            row = {
                "video_path": str(video_path),
                "video_id": video_path.name,
                "source_video_id": video_id,
                "t_visual": float(t_visual),
                "shift_amount": float(shift),
                "expected_t_audio": expected_t,
                "gemini_t": round(gemini_res[0], 3) if gemini_res else None,
                "qwen2_t": round(qwen2_res[0], 3) if qwen2_res else None,
                "qwen3_t": round(qwen3_res[0], 3) if qwen3_res else None,
                "gemini_abs_error_sec": round(gemini_abs_error, 3) if gemini_abs_error is not None else None,
                "qwen2_abs_error_sec": round(qwen2_abs_error, 3) if qwen2_abs_error is not None else None,
                "qwen3_abs_error_sec": round(qwen3_abs_error, 3) if qwen3_abs_error is not None else None,
                "gemini_desc": gemini_res[1] if gemini_res else None,
                "qwen2_desc": qwen2_res[1] if qwen2_res else None,
                "qwen3_desc": qwen3_res[1] if qwen3_res else None,
                "gemini_error": gemini_err,
                "qwen2_error": qwen2_err,
                "qwen3_error": qwen3_err,
                "audio_path": str(audio_path),
            }
            append_jsonl(combined_path, row)
            combined.append(row)

            append_jsonl(
                model_files[f"gemini_{split}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t,
                    "detected_t_audio": round(gemini_res[0], 3) if gemini_res else None,
                    "abs_error_sec": round(gemini_abs_error, 3) if gemini_abs_error is not None else None,
                    "sound_description": gemini_res[1] if gemini_res else None,
                    "audio_path": str(audio_path),
                },
            )
            append_jsonl(
                model_files[f"qwen2_{split}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t,
                    "detected_t_audio": round(qwen2_res[0], 3) if qwen2_res else None,
                    "abs_error_sec": round(qwen2_abs_error, 3) if qwen2_abs_error is not None else None,
                    "sound_description": qwen2_res[1] if qwen2_res else None,
                    "audio_path": str(audio_path),
                },
            )
            append_jsonl(
                model_files[f"qwen3_{split}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t,
                    "detected_t_audio": round(qwen3_res[0], 3) if qwen3_res else None,
                    "abs_error_sec": round(qwen3_abs_error, 3) if qwen3_abs_error is not None else None,
                    "sound_description": qwen3_res[1] if qwen3_res else None,
                    "audio_path": str(audio_path),
                },
            )

    with missing_out.open("w", encoding="utf-8") as f:
        json.dump(missing, f, ensure_ascii=False, indent=2)

    summary = {
        "source_csv": str(args.source_csv),
        "sample_size": len(source_rows),
        "seed": args.seed,
        "delay_shift": args.delay_shift,
        "early_shift": args.early_shift,
        "delay_summary": {
            "count": len(delay_rows),
            "gemini": summarize(delay_rows, "gemini_t"),
            "qwen2": summarize(delay_rows, "qwen2_t"),
            "qwen3": summarize(delay_rows, "qwen3_t"),
        },
        "early_summary": {
            "count": len(early_rows),
            "gemini": summarize(early_rows, "gemini_t"),
            "qwen2": summarize(early_rows, "qwen2_t"),
            "qwen3": summarize(early_rows, "qwen3_t"),
        },
        "missing_or_errors_count": len(missing),
        "outputs": {
            "delay_jsonl": str(delay_out),
            "early_jsonl": str(early_out),
            "missing_or_errors_json": str(missing_out),
            "sampled_csv": str(sampled_out),
            "audio_dir": str(audio_dir),
            "gemini_dir": str(gemini_dir),
            "qwen2_dir": str(qwen2_dir),
            "qwen3_dir": str(qwen3_dir),
        },
    }
    with summary_out.open("w", encoding="utf-8") as f:
        json.dump(summary, f, ensure_ascii=False, indent=2)

    print("========== Audio LLM Benchmark Summary ==========")
    print(f"Sampled source videos: {len(source_rows)}")
    print(f"Delay evaluated rows: {len(delay_rows)}")
    print(f"Early evaluated rows: {len(early_rows)}")
    print(f"Missing/errors: {len(missing)}")
    print(f"Summary JSON: {summary_out}")
    print("================================================")


if __name__ == "__main__":
    main()
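Since every per-video result is appended to delay_results.jsonl / early_results.jsonl as it is produced, the summary metrics can also be recomputed offline from those files. A minimal sketch, assuming a hypothetical output directory out/ (substitute the directory the script wrote to):

import json
from pathlib import Path
from statistics import mean

rows = [json.loads(line) for line in Path("out/delay_results.jsonl").read_text().splitlines() if line.strip()]
errs = [r["gemini_abs_error_sec"] for r in rows if r.get("gemini_abs_error_sec") is not None]
print(f"Gemini delay MAE over {len(errs)} rows: {mean(errs):.3f}s" if errs else "no predictions")
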
sync_research/batch_shift_videos.py ADDED
@@ -0,0 +1,200 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
from pathlib import Path


VIDEO_EXTS = {".mp4", ".mov", ".mkv", ".avi", ".webm", ".m4v"}


def run_cmd(cmd):
    return subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)


def get_duration_seconds(video_path: Path) -> float:
    cmd = [
        "ffprobe",
        "-v",
        "error",
        "-show_entries",
        "format=duration",
        "-of",
        "default=noprint_wrappers=1:nokey=1",
        str(video_path),
    ]
    out = run_cmd(cmd).stdout.strip()
    return float(out)


def has_audio_stream(video_path: Path) -> bool:
    cmd = [
        "ffprobe",
        "-v",
        "error",
        "-select_streams",
        "a:0",
        "-show_entries",
        "stream=index",
        "-of",
        "csv=p=0",
        str(video_path),
    ]
    out = run_cmd(cmd).stdout.strip()
    return bool(out)


def make_delay_plus_2s(src: Path, dst: Path, duration: float):
    # Delay audio by 2s; keep final duration unchanged.
    cmd = [
        "ffmpeg",
        "-y",
        "-i",
        str(src),
        "-map",
        "0:v:0",
        "-map",
        "0:a:0",
        "-c:v",
        "copy",
        "-af",
        f"adelay=2000:all=1,atrim=0:{duration}",
        "-c:a",
        "aac",
        str(dst),
    ]
    run_cmd(cmd)


def make_advance_minus_2s(src: Path, dst: Path, duration: float):
    # Advance audio by 2s; the last 2s become silence.
    cmd = [
        "ffmpeg",
        "-y",
        "-i",
        str(src),
        "-map",
        "0:v:0",
        "-map",
        "0:a:0",
        "-c:v",
        "copy",
        "-af",
        f"atrim=start=2,asetpts=PTS-STARTPTS,apad=pad_dur=2,atrim=0:{duration}",
        "-c:a",
        "aac",
        str(dst),
    ]
    run_cmd(cmd)


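Both filters are duration-preserving by construction: adelay=2000:all=1 pads 2 s of leading silence on every channel and atrim crops back to the source length, while atrim=start=2,...,apad=pad_dur=2 drops the first 2 s and pads trailing silence. A quick self-check, a minimal sketch assuming a local test clip at the hypothetical path clips/example.mp4 (the loose tolerance allows for small container-level drift after AAC re-encoding):

# Hypothetical paths; verifies the shifted output keeps the source duration.
from pathlib import Path

src = Path("clips/example.mp4")
dst = Path("clips/example_delay2s.mp4")
duration = get_duration_seconds(src)
make_delay_plus_2s(src, dst, duration)
assert abs(get_duration_seconds(dst) - duration) < 0.1, "duration drifted"
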
def iter_videos(root: Path):
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            p = Path(dirpath) / name
            if p.suffix.lower() in VIDEO_EXTS:
                yield p


def purge_directory(root: Path):
    if not root.exists():
        return
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            (Path(dirpath) / name).unlink()


def main():
    parser = argparse.ArgumentParser(
        description="Generate +2s delay and -2s advance audio versions for all videos."
    )
    parser.add_argument(
        "--input-dir",
        default="/home/ubuntu/uag_oops_data/uag_oops",
        help="Source video directory.",
    )
    parser.add_argument(
        "--out-delay-dir",
        default="/home/ubuntu/uag_oops_data/delay",
        help="Output directory for +2s delay versions.",
    )
    parser.add_argument(
        "--out-advance-dir",
        default="/home/ubuntu/uag_oops_data/early",
        help="Output directory for -2s advance versions.",
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="Overwrite output if file exists.",
    )
    parser.add_argument(
        "--no-purge-output",
        action="store_true",
        help="Do not purge output directories before generation.",
    )
    args = parser.parse_args()

    input_dir = Path(args.input_dir)
    out_delay_dir = Path(args.out_delay_dir)
    out_advance_dir = Path(args.out_advance_dir)

    if not input_dir.exists():
        raise FileNotFoundError(f"Input directory not found: {input_dir}")

    videos = sorted(iter_videos(input_dir))
    if not videos:
        print(f"No videos found in: {input_dir}")
        return

    out_delay_dir.mkdir(parents=True, exist_ok=True)
    out_advance_dir.mkdir(parents=True, exist_ok=True)
    purge_output = not args.no_purge_output
    if purge_output:
        print("Purging output directories first...")
        purge_directory(out_delay_dir)
        purge_directory(out_advance_dir)

    print(f"Found {len(videos)} videos.")
    skipped_no_audio = 0
    processed = 0

    for idx, src in enumerate(videos, start=1):
        rel = src.relative_to(input_dir)
        delay_name = f"{rel.stem}_delay2s{rel.suffix}"
        advance_name = f"{rel.stem}_early2s{rel.suffix}"
        dst_delay = out_delay_dir / rel.parent / delay_name
        dst_advance = out_advance_dir / rel.parent / advance_name
        dst_delay.parent.mkdir(parents=True, exist_ok=True)
        dst_advance.parent.mkdir(parents=True, exist_ok=True)

        if (not args.overwrite) and dst_delay.exists() and dst_advance.exists():
            print(f"[{idx}/{len(videos)}] Skip existing: {rel}")
            continue

        if not has_audio_stream(src):
            skipped_no_audio += 1
            print(f"[{idx}/{len(videos)}] Skip no-audio: {rel}")
            continue

        duration = get_duration_seconds(src)
        print(f"[{idx}/{len(videos)}] Processing: {rel}")
        try:
            if args.overwrite or not dst_delay.exists():
                make_delay_plus_2s(src, dst_delay, duration)
            if args.overwrite or not dst_advance.exists():
                make_advance_minus_2s(src, dst_advance, duration)
            processed += 1
        except subprocess.CalledProcessError as exc:
            print(f"  Failed: {rel}")
            print(exc.stderr[-500:])

    print("\nDone.")
    print(f"Processed videos: {processed}")
    print(f"Skipped no-audio: {skipped_no_audio}")
    print(f"Delay output dir: {out_delay_dir}")
    print(f"Advance output dir: {out_advance_dir}")


if __name__ == "__main__":
    main()
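For each input, the script mirrors the source tree under both output roots and tags filenames with the shift direction, which is presumably the naming contract that the benchmark script's shifted_filename() lookup expects. A small illustration of the derived locations, with hypothetical paths:

# Hypothetical relative path; shows where the two shifted variants land.
from pathlib import Path

rel = Path("fails/clip_001.mp4")
out_delay = Path("/data/delay") / rel.parent / f"{rel.stem}_delay2s{rel.suffix}"
out_early = Path("/data/early") / rel.parent / f"{rel.stem}_early2s{rel.suffix}"
print(out_delay)  # /data/delay/fails/clip_001_delay2s.mp4
print(out_early)  # /data/early/fails/clip_001_early2s.mp4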