#!/usr/bin/env python3
"""
Step 1: Select FineVideo videos for AV-MCQA generation, then download
the missing ones from Hugging Face.

Selection rules:
  - source pool: filtered_candidates_all_passing.jsonl (4616 entries)
  - EXCLUDE videos already used in existing local cache (1000 in
    filtered_candidates.jsonl), so new generation gets *new* videos
  - mean audioVisualCorrelation per scene >= MIN_MEAN_AVC
  - duration in [MIN_DURATION, MAX_DURATION]
  - balanced across content_parent_category (max MAX_PER_CATEGORY each)
  - target ~TOTAL_TARGET videos

Download missing videos via streaming the FineVideo HF dataset, saving:
  - /opt/dlami/nvme/finevideo_av_mcqa/videos/finevideo_{idx:06d}.mp4
  - /opt/dlami/nvme/finevideo_av_mcqa/audio/finevideo_{idx:06d}.wav (extracted via ffmpeg)

Output: /opt/dlami/nvme/av_mcqa/01_selected_videos.jsonl (one selected video per line)
"""

import json
import os
import subprocess
from collections import defaultdict
from pathlib import Path

CANDIDATES_ALL = "/opt/dlami/nvme/finevideo/filtered_candidates_all_passing.jsonl"
CANDIDATES_LOCAL = "/opt/dlami/nvme/finevideo/filtered_candidates.jsonl"  # exclude these (already used)

NEW_VIDEO_DIR = "/opt/dlami/nvme/finevideo_av_mcqa/videos"
NEW_AUDIO_DIR = "/opt/dlami/nvme/finevideo_av_mcqa/audio"
OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/01_selected_videos.jsonl"

MIN_MEAN_AVC = 0.85
MIN_DURATION = 30
MAX_DURATION = 180
MAX_PER_CATEGORY = 100
TOTAL_TARGET = 800


def fname(idx: int) -> str:
    """Canonical zero-padded basename (no extension) for a FineVideo index."""
    return f"finevideo_{idx:06d}"


def select_targets() -> list[dict]:
    """Pick target videos by filtering metadata only (no downloads yet).

    Returns the balanced selection; each returned dict is annotated with a
    ``_mean_avc`` key holding its mean valid audioVisualCorrelation.
    """
    all_pool = []
    with open(CANDIDATES_ALL) as f:
        for line in f:
            all_pool.append(json.loads(line))

    already_local_idx = set()
    with open(CANDIDATES_LOCAL) as f:
        for line in f:
            already_local_idx.add(json.loads(line)["finevideo_idx"])

    print(f"Pool: {len(all_pool)} (all_passing), excluding {len(already_local_idx)} already-used local")

    fresh = [v for v in all_pool if v["finevideo_idx"] not in already_local_idx]
    print(f"Fresh candidates (not yet locally used): {len(fresh)}")

    selected = []
    for v in fresh:
        d = v["duration_seconds"]
        if d < MIN_DURATION or d > MAX_DURATION:
            continue
        # Only trust AVC scores that are present and inside the valid [0, 1] range.
        avcs = [s["audioVisualCorrelation"] for s in v["scenes"]
                if "audioVisualCorrelation" in s
                and 0.0 <= s["audioVisualCorrelation"] <= 1.0]
        if not avcs:
            continue
        m = sum(avcs) / len(avcs)
        if m < MIN_MEAN_AVC:
            continue
        v["_mean_avc"] = m
        selected.append(v)

    print(f"After AVC/duration filter: {len(selected)}")

    # Balance per category: keep the best-AVC videos within each category.
    by_cat = defaultdict(list)
    for v in selected:
        by_cat[v.get("content_parent_category", "Unknown")].append(v)
    for cat in by_cat:
        by_cat[cat].sort(key=lambda x: -x["_mean_avc"])

    print("\nPer-category counts (after AVC filter):")
    for cat in sorted(by_cat):
        print(f"  {cat:30s} {len(by_cat[cat])}")

    balanced = []
    for cat, vs in by_cat.items():
        balanced.extend(vs[:MAX_PER_CATEGORY])
    balanced.sort(key=lambda x: -x["_mean_avc"])
    balanced = balanced[:TOTAL_TARGET]

    print(f"\nFinal selection: {len(balanced)} videos")
    final_cat = defaultdict(int)
    for v in balanced:
        # FIX: use .get() like the grouping above -- a record missing
        # content_parent_category would otherwise raise KeyError here.
        final_cat[v.get("content_parent_category", "Unknown")] += 1
    for cat, n in sorted(final_cat.items(), key=lambda x: -x[1]):
        print(f"  {cat:30s} {n}")

    return balanced


def extract_audio(mp4_path: str, wav_path: str) -> bool:
    """ffmpeg: extract audio to mono 16k wav. Returns True on success."""
    try:
        subprocess.run(
            ["ffmpeg", "-y", "-loglevel", "error",
             "-i", mp4_path, "-vn",
             "-ac", "1", "-ar", "16000", wav_path],
            check=True,
        )
        return True
    except subprocess.CalledProcessError:
        # Typically: the mp4 has no decodable audio stream.
        return False


def download_videos(targets: list[dict]) -> list[dict]:
    """Stream the FineVideo HF dataset and save matching mp4 + extract wav.

    Annotates each ready record with _video_path / _audio_path / _filename.
    Resumable: targets whose mp4 AND wav already exist are skipped.
    """
    os.makedirs(NEW_VIDEO_DIR, exist_ok=True)
    os.makedirs(NEW_AUDIO_DIR, exist_ok=True)

    target_map = {v["finevideo_idx"]: v for v in targets}

    # Skip if already downloaded (both files must exist).
    remaining = {}
    ready = []
    for idx, v in target_map.items():
        mp4_path = os.path.join(NEW_VIDEO_DIR, f"{fname(idx)}.mp4")
        wav_path = os.path.join(NEW_AUDIO_DIR, f"{fname(idx)}.wav")
        if os.path.exists(mp4_path) and os.path.exists(wav_path):
            v["_video_path"] = mp4_path
            v["_audio_path"] = wav_path
            v["_filename"] = fname(idx)
            ready.append(v)
        else:
            remaining[idx] = v

    print(f"\nAlready downloaded: {len(ready)}")
    print(f"To download: {len(remaining)}")

    if not remaining:
        return ready

    # Stream the HF dataset; pick out matching indices.
    # NOTE: streaming through a multi-TB dataset to find sparse targets
    # can be slow. If you run this often, consider sharded downloads.
    from datasets import load_dataset

    ds = load_dataset("HuggingFaceFV/finevideo", split="train", streaming=True)

    saved = 0
    target_idx_set = set(remaining)
    for i, sample in enumerate(ds):
        if i not in target_idx_set:
            continue

        mp4_path = os.path.join(NEW_VIDEO_DIR, f"{fname(i)}.mp4")
        try:
            with open(mp4_path, "wb") as f:
                f.write(sample["mp4"])

            wav_path = os.path.join(NEW_AUDIO_DIR, f"{fname(i)}.wav")
            if not extract_audio(mp4_path, wav_path):
                # Video has no usable audio -> useless for AV-MCQA; drop it.
                os.remove(mp4_path)
                continue

            v = remaining[i]
            v["_video_path"] = mp4_path
            v["_audio_path"] = wav_path
            v["_filename"] = fname(i)
            ready.append(v)
            saved += 1
            target_idx_set.discard(i)

            if saved % 20 == 0:
                print(f"  downloaded {saved}/{len(remaining)} (last idx={i})")

            if not target_idx_set:
                break
        except Exception as e:
            print(f"  download failed idx={i}: {e}")
            # FIX: don't leave a truncated mp4 on disk after a mid-write
            # failure; it would linger as junk until the next overwrite.
            if os.path.exists(mp4_path):
                try:
                    os.remove(mp4_path)
                except OSError:
                    pass

    print(f"\nDownloaded {saved} new videos")
    if target_idx_set:
        print(f"WARN: {len(target_idx_set)} targets not found in stream "
              f"(dataset may be sharded differently)")
    return ready


def main():
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
    targets = select_targets()

    ready = download_videos(targets)
    print(f"\nTotal ready (downloaded + with audio): {len(ready)}")

    with open(OUTPUT_FILE, "w") as f:
        for v in ready:
            slim = {
                "finevideo_idx": v["finevideo_idx"],
                "filename": v["_filename"],
                "video_path": v["_video_path"],
                "audio_path": v["_audio_path"],
                "duration_seconds": v["duration_seconds"],
                "mean_avc": round(v["_mean_avc"], 3),
                # FIX: .get() with defaults, consistent with select_targets();
                # direct indexing could KeyError on records missing the field.
                "content_parent_category": v.get("content_parent_category", "Unknown"),
                "content_fine_category": v.get("content_fine_category", "Unknown"),
                "youtube_title": v.get("youtube_title", ""),
                "scenes": v["scenes"],
            }
            f.write(json.dumps(slim, ensure_ascii=False) + "\n")
    print(f"\nWrote {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Step 1b: Select the 1000 LOCAL FineVideo videos (already downloaded,
already used in finevideo_dpo) for additional AV-MCQA generation.

These videos already have long-description DPO pairs, but AV-MCQA is a
completely different format (short 4-choice questions requiring audio),
so reusing the same videos is fine.

Output: /opt/dlami/nvme/av_mcqa/01b_selected_local_videos.jsonl
"""

import json
import os

CANDIDATES_LOCAL = "/opt/dlami/nvme/finevideo/filtered_candidates.jsonl"
VIDEO_DIR = "/opt/dlami/nvme/finevideo/videos"
AUDIO_DIR = "/opt/dlami/nvme/finevideo/audio"
OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/01b_selected_local_videos.jsonl"


def fname(idx: int) -> str:
    """Zero-padded canonical basename for a FineVideo index."""
    return f"finevideo_{idx:06d}"


def _mean_valid_avc(scenes: list) -> float:
    """Mean of the in-range [0, 1] audioVisualCorrelation scores; 0.0 if none."""
    scores = [s["audioVisualCorrelation"] for s in scenes
              if "audioVisualCorrelation" in s
              and 0.0 <= s["audioVisualCorrelation"] <= 1.0]
    return sum(scores) / len(scores) if scores else 0.0


def _as_record(meta: dict, base: str, mp4: str, wav: str) -> dict:
    """Build the slim output record for one locally-cached candidate."""
    return {
        "finevideo_idx": meta["finevideo_idx"],
        "filename": base,
        "video_path": mp4,
        "audio_path": wav,
        "duration_seconds": meta["duration_seconds"],
        "mean_avc": round(_mean_valid_avc(meta["scenes"]), 3),
        "content_parent_category": meta.get("content_parent_category", "Unknown"),
        "content_fine_category": meta.get("content_fine_category", "Unknown"),
        "youtube_title": meta.get("youtube_title", ""),
        "scenes": meta["scenes"],
    }


def main():
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    with open(CANDIDATES_LOCAL) as src:
        candidates = [json.loads(row) for row in src]
    print(f"Local candidates: {len(candidates)}")

    ready = []
    for meta in candidates:
        base = fname(meta["finevideo_idx"])
        mp4 = os.path.join(VIDEO_DIR, f"{base}.mp4")
        wav = os.path.join(AUDIO_DIR, f"{base}.wav")
        # Keep only candidates whose video AND extracted audio are on disk.
        if not os.path.exists(mp4) or not os.path.exists(wav):
            continue
        ready.append(_as_record(meta, base, mp4, wav))

    print(f"Videos with mp4+wav: {len(ready)}")

    with open(OUTPUT_FILE, "w") as dst:
        for rec in ready:
            dst.write(json.dumps(rec, ensure_ascii=False) + "\n")
    print(f"Wrote {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
b/data_gen/av_mcqa_pipeline/02_gemini_generate_av_mcqa.py new file mode 100644 index 0000000000000000000000000000000000000000..196f10d6e62b2e24036a6ef4a5d4e6e95e4f4349 --- /dev/null +++ b/data_gen/av_mcqa_pipeline/02_gemini_generate_av_mcqa.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +""" +Step 2: For each selected FineVideo video, ask a video-capable model +(audio + visual) to generate Daily-Omni-style AV-MCQAs. + +Backends, in priority order: + 1. Gemini direct API (google-genai), with up to 3 keys rotating. + - GOOGLE_API_KEY_1 / GOOGLE_API_KEY_2 / GOOGLE_API_KEY_3 + - or fallback to GOOGLE_API_KEY (single) + 2. OpenRouter (openai-compatible), used when ALL Gemini keys hit + quota / rate-limit / 5xx. + - OPENROUTER_API_KEY + - default model: google/gemini-2.5-pro (overridable) + +Output JSONL per video. Resumable. + +Usage: + export GOOGLE_API_KEY_1=... + export GOOGLE_API_KEY_2=... + export GOOGLE_API_KEY_3=... + export OPENROUTER_API_KEY=... + python 02_gemini_generate_av_mcqa.py +""" + +import os +import json +import time +import base64 +import threading +from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path + +INPUT_FILE = os.environ.get("AV_MCQA_INPUT", "/opt/dlami/nvme/av_mcqa/01_selected_videos.jsonl") +OUTPUT_FILE = os.environ.get("AV_MCQA_OUTPUT", "/opt/dlami/nvme/av_mcqa/02_gemini_generated.jsonl") + +GEMINI_MODEL = os.environ.get("GEMINI_MODEL", "gemini-3.1-pro-preview") +OPENROUTER_MODEL = os.environ.get("OPENROUTER_MODEL", "google/gemini-3.1-pro-preview") + +MAX_WORKERS = 6 +QUESTIONS_PER_VIDEO = 3 + +GEN_PROMPT = """You are designing multiple-choice questions for a video benchmark that tests audio-visual reasoning. + +Watch and listen to the attached video carefully. Then generate exactly {N} multiple-choice questions of the following types (vary across types if possible): + +1. AV Event Alignment: "What visual element appeared when [audio event]?" or "Which audio event matches [visual event]?" +2. 
Event Sequence: chronological order of audio-visual events. +3. Context Understanding: what audio + visual elements coexist at a moment. +4. Inference: based on what you see AND hear, infer purpose, emotion, intent, or relationship. +5. Reasoning: causal reasoning combining audio and visual evidence. +6. Comparative: contrast two AV elements or moments. + +STRICT REQUIREMENTS for each question: +- Must REQUIRE both audio AND video to answer correctly. A model with only video frames (no audio) MUST NOT be able to answer. +- 4 plausible options (A/B/C/D). Distractors must be semantically close, not silly. +- Provide the correct letter and a brief evidence note ("audio: ..., visual: ..."). +- Avoid yes/no questions and trivia answerable from title alone. + +Return STRICT JSON, no prose: +{{ + "qa_pairs": [ + {{ + "type": "AV Event Alignment", + "question": "...", + "options": {{"A": "...", "B": "...", "C": "...", "D": "..."}}, + "answer": "B", + "audio_evidence": "around 0:12, the speaker says X", + "visual_evidence": "at the same moment, Y appears on screen" + }} + ] +}} +""" + + +# ============================================================================= +# Backend pool: round-robin over Gemini keys, fallback to OpenRouter +# ============================================================================= + +class GeminiBackend: + """One Gemini key wrapper. 
Marks itself disabled on quota error.""" + + def __init__(self, api_key: str, label: str): + self.label = label + self.api_key = api_key + self.disabled = False + self.disabled_reason = "" + self._client = None + self._lock = threading.Lock() + + def client(self): + if self._client is None: + from google import genai + self._client = genai.Client(api_key=self.api_key) + return self._client + + def call(self, video_path: str, prompt_text: str) -> dict: + from google.genai import types + c = self.client() + uploaded = c.files.upload(file=video_path) + for _ in range(60): + f = c.files.get(name=uploaded.name) + if f.state.name == "ACTIVE": + uploaded = f + break + time.sleep(1) + + try: + resp = c.models.generate_content( + model=GEMINI_MODEL, + contents=[uploaded, prompt_text], + config=types.GenerateContentConfig( + temperature=0.4, + response_mime_type="application/json", + ), + ) + return json.loads(resp.text) + finally: + try: + c.files.delete(name=uploaded.name) + except Exception: + pass + + +class OpenRouterBackend: + """OpenRouter Gemini fallback. 
Sends video as inline file (URL or base64).""" + + def __init__(self, api_key: str): + self.api_key = api_key + self._client = None + + def client(self): + if self._client is None: + from openai import OpenAI + self._client = OpenAI( + base_url="https://openrouter.ai/api/v1", + api_key=self.api_key, + default_headers={ + "HTTP-Referer": "https://localhost", + "X-Title": "av-mcqa-pipeline", + }, + ) + return self._client + + def call(self, video_path: str, prompt_text: str) -> dict: + c = self.client() + with open(video_path, "rb") as f: + data = f.read() + b64 = base64.b64encode(data).decode("utf-8") + + # OpenRouter Gemini supports file_data via inputs + # Format: input as "file" type with data URL + resp = c.chat.completions.create( + model=OPENROUTER_MODEL, + messages=[{ + "role": "user", + "content": [ + {"type": "file", + "file": {"file_data": f"data:video/mp4;base64,{b64}", + "filename": os.path.basename(video_path)}}, + {"type": "text", "text": prompt_text}, + ], + }], + response_format={"type": "json_object"}, + temperature=0.4, + ) + text = resp.choices[0].message.content or "{}" + return json.loads(text) + + +class BackendPool: + """Try Gemini keys round-robin; fall back to OpenRouter when all dead.""" + + QUOTA_KEYWORDS = ("quota", "rate", "exhaust", "429", "RESOURCE_EXHAUSTED", + "exceeded", "limit") + + def __init__(self): + self.gemini_backends: list[GeminiBackend] = [] + for i in (1, 2, 3): + k = os.environ.get(f"GOOGLE_API_KEY_{i}") + if k: + self.gemini_backends.append(GeminiBackend(k, f"gemini_{i}")) + if not self.gemini_backends: + k = os.environ.get("GOOGLE_API_KEY") + if k: + self.gemini_backends.append(GeminiBackend(k, "gemini_default")) + + or_key = os.environ.get("OPENROUTER_API_KEY") + self.openrouter = OpenRouterBackend(or_key) if or_key else None + + if not self.gemini_backends and not self.openrouter: + raise RuntimeError( + "No API keys found. Set GOOGLE_API_KEY_1/2/3 or GOOGLE_API_KEY, " + "and/or OPENROUTER_API_KEY." 
+ ) + + self._next = 0 + self._lock = threading.Lock() + print(f"Backends initialized: gemini={len(self.gemini_backends)}, " + f"openrouter={'yes' if self.openrouter else 'no'}") + + def _is_quota_error(self, err: Exception) -> bool: + s = str(err).lower() + return any(k.lower() in s for k in self.QUOTA_KEYWORDS) + + def _next_alive_gemini(self) -> GeminiBackend | None: + with self._lock: + n = len(self.gemini_backends) + for _ in range(n): + b = self.gemini_backends[self._next % n] + self._next += 1 + if not b.disabled: + return b + return None + + def call(self, video_path: str, prompt_text: str) -> dict: + # Try alive Gemini keys + last_err = None + for _ in range(len(self.gemini_backends) + 1): + b = self._next_alive_gemini() + if b is None: + break + try: + return b.call(video_path, prompt_text) + except Exception as e: + last_err = e + if self._is_quota_error(e): + b.disabled = True + b.disabled_reason = str(e)[:200] + print(f" [{b.label}] disabled (quota): {b.disabled_reason}") + continue + else: + # Non-quota error: try next backend, but don't disable this key + print(f" [{b.label}] transient error: {e}") + continue + + # All Gemini exhausted -> OpenRouter + if self.openrouter: + for attempt in range(3): + try: + return self.openrouter.call(video_path, prompt_text) + except Exception as e: + last_err = e + if attempt < 2: + time.sleep(2 ** (attempt + 1)) + + raise RuntimeError(f"All backends failed; last error: {last_err}") + + +# ============================================================================= +# Driver +# ============================================================================= + +def load_done() -> set: + done = set() + if os.path.exists(OUTPUT_FILE): + with open(OUTPUT_FILE) as f: + for line in f: + try: + done.add(json.loads(line)["finevideo_idx"]) + except Exception: + pass + return done + + +def process_one(pool: BackendPool, video_meta: dict) -> dict: + idx = video_meta["finevideo_idx"] + prompt = 
GEN_PROMPT.format(N=QUESTIONS_PER_VIDEO) + try: + data = pool.call(video_meta["video_path"], prompt) + qa_pairs = data.get("qa_pairs", []) + return { + "finevideo_idx": idx, + "filename": video_meta["filename"], + "video_path": video_meta["video_path"], + "audio_path": video_meta["audio_path"], + "duration_seconds": video_meta["duration_seconds"], + "mean_avc": video_meta["mean_avc"], + "content_parent_category": video_meta["content_parent_category"], + "content_fine_category": video_meta["content_fine_category"], + "qa_pairs": qa_pairs, + } + except Exception as e: + return {"finevideo_idx": idx, "error": str(e)} + + +def main(): + os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True) + pool = BackendPool() + + videos = [] + with open(INPUT_FILE) as f: + for line in f: + videos.append(json.loads(line)) + print(f"Input videos: {len(videos)}") + + done = load_done() + todo = [v for v in videos if v["finevideo_idx"] not in done] + print(f"Resuming: {len(done)} done, {len(todo)} todo") + if not todo: + return + + success = errors = 0 + with open(OUTPUT_FILE, "a") as out_f: + with ThreadPoolExecutor(max_workers=MAX_WORKERS) as ex: + futs = {ex.submit(process_one, pool, v): v for v in todo} + for i, fut in enumerate(as_completed(futs), 1): + rec = fut.result() + if "error" in rec: + errors += 1 + print(f" [{i}/{len(todo)}] ERROR idx={rec['finevideo_idx']}: {rec['error'][:200]}") + else: + success += 1 + out_f.write(json.dumps(rec, ensure_ascii=False) + "\n") + out_f.flush() + if i % 20 == 0: + print(f" [{i}/{len(todo)}] success={success} errors={errors}") + + print(f"\nDone. 
success={success} errors={errors}") + print(f"Output: {OUTPUT_FILE}") + + +if __name__ == "__main__": + main() diff --git a/data_gen/av_mcqa_pipeline/02b_openrouter_generate_local.py b/data_gen/av_mcqa_pipeline/02b_openrouter_generate_local.py new file mode 100644 index 0000000000000000000000000000000000000000..da71398cd6ed00f42fd8d00cff93230688b01df1 --- /dev/null +++ b/data_gen/av_mcqa_pipeline/02b_openrouter_generate_local.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +""" +Step 2b: Generate AV-MCQA for local 1000 FineVideo videos using +OpenRouter only (Gemini quota exhausted). + +Reads: /opt/dlami/nvme/av_mcqa/01b_selected_local_videos.jsonl +Writes: /opt/dlami/nvme/av_mcqa/02b_gemini_generated_local.jsonl + +Usage: + export OPENROUTER_API_KEY=... + python 02b_openrouter_generate_local.py +""" + +import os +import json +import time +import base64 +from concurrent.futures import ThreadPoolExecutor, as_completed + +OPENROUTER_MODEL = os.environ.get("OPENROUTER_MODEL", "google/gemini-3.1-pro-preview") + +INPUT_FILE = "/opt/dlami/nvme/av_mcqa/01b_selected_local_videos.jsonl" +OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/02b_gemini_generated_local.jsonl" + +MAX_WORKERS = 6 +QUESTIONS_PER_VIDEO = 3 + +GEN_PROMPT = """You are designing multiple-choice questions for a video benchmark that tests audio-visual reasoning. + +Watch and listen to the attached video carefully. Then generate exactly {N} multiple-choice questions of the following types (vary across types if possible): + +1. AV Event Alignment: "What visual element appeared when [audio event]?" or "Which audio event matches [visual event]?" +2. Event Sequence: chronological order of audio-visual events. +3. Context Understanding: what audio + visual elements coexist at a moment. +4. Inference: based on what you see AND hear, infer purpose, emotion, intent, or relationship. +5. Reasoning: causal reasoning combining audio and visual evidence. +6. Comparative: contrast two AV elements or moments. 
+ +STRICT REQUIREMENTS for each question: +- Must REQUIRE both audio AND video to answer correctly. A model with only video frames (no audio) MUST NOT be able to answer. +- 4 plausible options (A/B/C/D). Distractors must be semantically close, not silly. +- Provide the correct letter and a brief evidence note ("audio: ..., visual: ..."). +- Avoid yes/no questions and trivia answerable from title alone. + +Return STRICT JSON, no prose: +{{ + "qa_pairs": [ + {{ + "type": "AV Event Alignment", + "question": "...", + "options": {{"A": "...", "B": "...", "C": "...", "D": "..."}}, + "answer": "B", + "audio_evidence": "around 0:12, the speaker says X", + "visual_evidence": "at the same moment, Y appears on screen" + }} + ] +}} +""" + + +def load_done() -> set: + done = set() + if os.path.exists(OUTPUT_FILE): + with open(OUTPUT_FILE) as f: + for line in f: + try: + done.add(json.loads(line)["finevideo_idx"]) + except Exception: + pass + return done + + +def get_client(): + from openai import OpenAI + return OpenAI( + base_url="https://openrouter.ai/api/v1", + api_key=os.environ["OPENROUTER_API_KEY"], + default_headers={ + "HTTP-Referer": "https://localhost", + "X-Title": "av-mcqa-pipeline", + }, + ) + + +def call_openrouter(client, video_path: str, prompt_text: str) -> dict: + with open(video_path, "rb") as f: + b64 = base64.b64encode(f.read()).decode("utf-8") + + resp = client.chat.completions.create( + model=OPENROUTER_MODEL, + messages=[{ + "role": "user", + "content": [ + {"type": "file", + "file": {"file_data": f"data:video/mp4;base64,{b64}", + "filename": os.path.basename(video_path)}}, + {"type": "text", "text": prompt_text}, + ], + }], + response_format={"type": "json_object"}, + temperature=0.4, + ) + text = resp.choices[0].message.content or "{}" + return json.loads(text) + + +def process_one(client, video_meta: dict) -> dict: + idx = video_meta["finevideo_idx"] + prompt = GEN_PROMPT.format(N=QUESTIONS_PER_VIDEO) + for attempt in range(3): + try: + data = 
call_openrouter(client, video_meta["video_path"], prompt) + qa_pairs = data.get("qa_pairs", []) + return { + "finevideo_idx": idx, + "filename": video_meta["filename"], + "video_path": video_meta["video_path"], + "audio_path": video_meta["audio_path"], + "duration_seconds": video_meta["duration_seconds"], + "mean_avc": video_meta["mean_avc"], + "content_parent_category": video_meta["content_parent_category"], + "content_fine_category": video_meta["content_fine_category"], + "qa_pairs": qa_pairs, + } + except Exception as e: + if attempt < 2: + time.sleep(2 ** (attempt + 1)) + else: + return {"finevideo_idx": idx, "error": str(e)} + + +def main(): + os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True) + client = get_client() + + videos = [] + with open(INPUT_FILE) as f: + for line in f: + videos.append(json.loads(line)) + print(f"Input videos: {len(videos)}") + + done = load_done() + todo = [v for v in videos if v["finevideo_idx"] not in done] + print(f"Resuming: {len(done)} done, {len(todo)} todo") + if not todo: + print("All done.") + return + + success = errors = 0 + with open(OUTPUT_FILE, "a") as out_f: + with ThreadPoolExecutor(max_workers=MAX_WORKERS) as ex: + futs = {ex.submit(process_one, client, v): v for v in todo} + for i, fut in enumerate(as_completed(futs), 1): + rec = fut.result() + if "error" in rec: + errors += 1 + print(f" [{i}/{len(todo)}] ERROR idx={rec['finevideo_idx']}: {rec['error'][:200]}") + else: + success += 1 + out_f.write(json.dumps(rec, ensure_ascii=False) + "\n") + out_f.flush() + if i % 20 == 0: + print(f" [{i}/{len(todo)}] success={success} errors={errors}") + + print(f"\nDone. 
success={success} errors={errors}") + print(f"Output: {OUTPUT_FILE}") + + +if __name__ == "__main__": + main() diff --git a/data_gen/av_mcqa_pipeline/03_gpt5_video_only_filter.py b/data_gen/av_mcqa_pipeline/03_gpt5_video_only_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..f426b9cfb8ae45d7dceda4fa21176191254d3f26 --- /dev/null +++ b/data_gen/av_mcqa_pipeline/03_gpt5_video_only_filter.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +""" +Step 3: For each Gemini-generated MCQA, give GPT-5.4 the same question +plus VIDEO FRAMES ONLY (no audio). If GPT picks the correct answer with +high confidence, the question is answerable without audio -> drop it. + +Surviving questions are guaranteed to require audio + visual jointly. + +Usage: + export OPENAI_API_KEY="sk-..." + python 03_gpt5_video_only_filter.py +""" + +import json +import os +import re +import time +import base64 +import io +from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path + +import av +import numpy as np +from openai import OpenAI + +INPUT_FILE = "/opt/dlami/nvme/av_mcqa/02_gemini_generated.jsonl" +OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/03_filtered_must_audio.jsonl" + +MODEL = "gpt-5.4-2026-03-05" +MAX_WORKERS = 8 +NUM_FRAMES = 12 +ANSWER_RE = re.compile(r"\b([A-D])\b") + + +client = OpenAI() + + +def extract_frames(video_path: str, num_frames: int) -> list[bytes]: + container = av.open(video_path) + stream = next(s for s in container.streams if s.type == "video") + total = stream.frames or 300 + indices = set(np.linspace(0, total - 1, num_frames).astype(int).tolist()) + frames = [] + for i, frame in enumerate(container.decode(stream)): + if i in indices: + buf = io.BytesIO() + frame.to_image().save(buf, format="JPEG", quality=80) + frames.append(buf.getvalue()) + if len(frames) >= num_frames: + break + container.close() + return frames + + +def gpt_video_only_answer(video_path: str, question: str, options: dict) -> str: + """Send frames + 
question, get a single-letter answer.""" + frames = extract_frames(video_path, NUM_FRAMES) + image_msgs = [] + for f in frames: + b64 = base64.b64encode(f).decode("utf-8") + image_msgs.append({ + "type": "image_url", + "image_url": {"url": f"data:image/jpeg;base64,{b64}", "detail": "low"}, + }) + + options_text = "\n".join(f"{k}. {v}" for k, v in options.items()) + user_text = ( + f"You are watching a silent video (no audio). " + f"Below are evenly-spaced frames. Answer the multiple-choice question by responding with only the letter A, B, C, or D.\n\n" + f"Question: {question}\n\n" + f"Options:\n{options_text}\n\n" + f"Answer with a single letter:" + ) + + resp = client.chat.completions.create( + model=MODEL, + messages=[ + {"role": "system", "content": "You answer multiple-choice questions by responding with only one letter."}, + {"role": "user", "content": [{"type": "text", "text": user_text}, *image_msgs]}, + ], + max_completion_tokens=4, + temperature=0.0, + ) + raw = (resp.choices[0].message.content or "").strip().upper() + m = ANSWER_RE.search(raw) + return m.group(1) if m else "" + + +def process_video_record(rec: dict) -> dict: + """Filter qa_pairs of one video; keep only those GPT (video-only) gets WRONG.""" + survived = [] + for qa in rec.get("qa_pairs", []): + question = qa.get("question", "") + options = qa.get("options", {}) + correct = qa.get("answer", "") + if not (question and options and correct): + continue + + try: + for attempt in range(3): + try: + pred = gpt_video_only_answer(rec["video_path"], question, options) + break + except Exception as e: + if attempt < 2: + time.sleep(2 ** (attempt + 1)) + else: + raise + except Exception as e: + qa["_filter_error"] = str(e) + continue + + qa["_video_only_pred"] = pred + # KEEP only if GPT (video-only) is wrong -> question genuinely needs audio + if pred != correct: + survived.append(qa) + + rec["qa_pairs"] = survived + return rec + + +def main(): + os.makedirs(os.path.dirname(OUTPUT_FILE), 
exist_ok=True) + + records = [] + with open(INPUT_FILE) as f: + for line in f: + records.append(json.loads(line)) + print(f"Input video records: {len(records)}") + + # Resume + done_idx = set() + if os.path.exists(OUTPUT_FILE): + with open(OUTPUT_FILE) as f: + for line in f: + try: + done_idx.add(json.loads(line)["finevideo_idx"]) + except Exception: + pass + todo = [r for r in records if r["finevideo_idx"] not in done_idx] + print(f"Resuming: {len(done_idx)} done, {len(todo)} todo") + if not todo: + print("All done.") + return + + total_in = 0 + total_out = 0 + with open(OUTPUT_FILE, "a") as out_f: + with ThreadPoolExecutor(max_workers=MAX_WORKERS) as ex: + futures = {ex.submit(process_video_record, r): r for r in todo} + for i, fut in enumerate(as_completed(futures), 1): + rec = fut.result() + in_n = sum(1 for q in rec.get("qa_pairs", []) if "question" in q) + sum( + 1 for q in rec.get("qa_pairs", []) if q.get("_filter_error") + ) # not exact but indicative + out_n = len(rec["qa_pairs"]) + total_in += len(rec.get("qa_pairs", [])) if False else 0 # not used + total_out += out_n + out_f.write(json.dumps(rec, ensure_ascii=False) + "\n") + out_f.flush() + if i % 20 == 0: + print(f" [{i}/{len(todo)}] surviving qa total so far: {total_out}") + + print(f"\nDone. Surviving qa pairs: {total_out}") + print(f"Output: {OUTPUT_FILE}") + + +if __name__ == "__main__": + main() diff --git a/data_gen/av_mcqa_pipeline/03b_gpt5_filter_local.py b/data_gen/av_mcqa_pipeline/03b_gpt5_filter_local.py new file mode 100644 index 0000000000000000000000000000000000000000..232dde9fe639bdbffd2cb6b9daf943e5f526b3ec --- /dev/null +++ b/data_gen/av_mcqa_pipeline/03b_gpt5_filter_local.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 +""" +Step 3b: GPT-5.4 video-only filter for the local 1000 FineVideo batch. +Same logic as 03_gpt5_video_only_filter.py but reads/writes different files. 
+ +Reads: /opt/dlami/nvme/av_mcqa/02b_gemini_generated_local.jsonl +Writes: /opt/dlami/nvme/av_mcqa/03b_filtered_must_audio_local.jsonl +""" + +import json +import os +import re +import time +import base64 +import io +from concurrent.futures import ThreadPoolExecutor, as_completed + +import av +import numpy as np +from openai import OpenAI + +INPUT_FILE = "/opt/dlami/nvme/av_mcqa/02b_gemini_generated_local.jsonl" +OUTPUT_FILE = "/opt/dlami/nvme/av_mcqa/03b_filtered_must_audio_local.jsonl" + +MODEL = "gpt-5.4-2026-03-05" +MAX_WORKERS = 8 +NUM_FRAMES = 12 +ANSWER_RE = re.compile(r"\b([A-D])\b") + +client = OpenAI() + + +def extract_frames(video_path: str, num_frames: int) -> list[bytes]: + container = av.open(video_path) + stream = next(s for s in container.streams if s.type == "video") + total = stream.frames or 300 + indices = set(np.linspace(0, total - 1, num_frames).astype(int).tolist()) + frames = [] + for i, frame in enumerate(container.decode(stream)): + if i in indices: + buf = io.BytesIO() + frame.to_image().save(buf, format="JPEG", quality=80) + frames.append(buf.getvalue()) + if len(frames) >= num_frames: + break + container.close() + return frames + + +def gpt_video_only_answer(video_path: str, question: str, options: dict) -> str: + frames = extract_frames(video_path, NUM_FRAMES) + image_msgs = [] + for f in frames: + b64 = base64.b64encode(f).decode("utf-8") + image_msgs.append({ + "type": "image_url", + "image_url": {"url": f"data:image/jpeg;base64,{b64}", "detail": "low"}, + }) + + options_text = "\n".join(f"{k}. {v}" for k, v in options.items()) + user_text = ( + f"You are watching a silent video (no audio). " + f"Below are evenly-spaced frames. 
Answer the multiple-choice question by responding with only the letter A, B, C, or D.\n\n" + f"Question: {question}\n\n" + f"Options:\n{options_text}\n\n" + f"Answer with a single letter:" + ) + + resp = client.chat.completions.create( + model=MODEL, + messages=[ + {"role": "system", "content": "You answer multiple-choice questions by responding with only one letter."}, + {"role": "user", "content": [{"type": "text", "text": user_text}, *image_msgs]}, + ], + max_completion_tokens=4, + temperature=0.0, + ) + raw = (resp.choices[0].message.content or "").strip().upper() + m = ANSWER_RE.search(raw) + return m.group(1) if m else "" + + +def process_video_record(rec: dict) -> dict: + survived = [] + for qa in rec.get("qa_pairs", []): + question = qa.get("question", "") + options = qa.get("options", {}) + correct = qa.get("answer", "") + if not (question and options and correct): + continue + try: + for attempt in range(3): + try: + pred = gpt_video_only_answer(rec["video_path"], question, options) + break + except Exception as e: + if attempt < 2: + time.sleep(2 ** (attempt + 1)) + else: + raise + except Exception as e: + continue + qa["_video_only_pred"] = pred + if pred != correct: + survived.append(qa) + rec["qa_pairs"] = survived + return rec + + +def main(): + os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True) + + records = [] + with open(INPUT_FILE) as f: + for line in f: + records.append(json.loads(line)) + print(f"Input video records: {len(records)}") + + done_idx = set() + if os.path.exists(OUTPUT_FILE): + with open(OUTPUT_FILE) as f: + for line in f: + try: + done_idx.add(json.loads(line)["finevideo_idx"]) + except Exception: + pass + todo = [r for r in records if r["finevideo_idx"] not in done_idx] + print(f"Resuming: {len(done_idx)} done, {len(todo)} todo") + if not todo: + print("All done.") + return + + total_out = 0 + with open(OUTPUT_FILE, "a") as out_f: + with ThreadPoolExecutor(max_workers=MAX_WORKERS) as ex: + futures = 
{ex.submit(process_video_record, r): r for r in todo}
            # Consume in completion order; write + flush per record so a
            # crash loses at most the in-flight records (resume-safe).
            for i, fut in enumerate(as_completed(futures), 1):
                rec = fut.result()
                total_out += len(rec["qa_pairs"])
                out_f.write(json.dumps(rec, ensure_ascii=False) + "\n")
                out_f.flush()
                if i % 20 == 0:
                    print(f" [{i}/{len(todo)}] surviving qa total so far: {total_out}")

    print(f"\nDone. Surviving qa pairs: {total_out}")
    print(f"Output: {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
diff --git a/data_gen/av_mcqa_pipeline/04_assemble_dpo.py b/data_gen/av_mcqa_pipeline/04_assemble_dpo.py
new file mode 100644
index 0000000000000000000000000000000000000000..225c4f4f66ecf81957af0a0a0a11df1b67d6478e
--- /dev/null
+++ b/data_gen/av_mcqa_pipeline/04_assemble_dpo.py
@@ -0,0 +1,92 @@
#!/usr/bin/env python3
"""
Step 4: Convert filtered AV-MCQAs into LlamaFactory DPO format.

For each surviving QA:
  chosen = correct option (letter + full text)
  rejected = randomly picked wrong option (letter + full text)
"""

import json
import os
import random
from collections import Counter

# Fixed seed so the rejected-option choice is reproducible across runs.
random.seed(42)

INPUT_FILES = [
    "/opt/dlami/nvme/av_mcqa/03_filtered_must_audio.jsonl",  # batch 1: 767 new videos
    "/opt/dlami/nvme/av_mcqa/03b_filtered_must_audio_local.jsonl",  # batch 2: 1000 local videos
]
OUTPUT_FILE = "/home/ubuntu/LlamaFactory/data/dpo_finevideo_av_mcqa.json"


def make_text(letter: str, options: dict) -> str:
    # Render one option as "X. <option text>" for the chosen/rejected fields.
    return f"{letter}. 
{options.get(letter, '').strip()}" + + +def main(): + records = [] + for input_file in INPUT_FILES: + if not os.path.exists(input_file): + print(f"Skipping (not found): {input_file}") + continue + with open(input_file) as f: + for line in f: + records.append(json.loads(line)) + print(f"Loaded {input_file}") + + dpo = [] + type_counter = Counter() + cat_counter = Counter() + for rec in records: + v_path = rec["video_path"] + a_path = rec["audio_path"] + cat = rec.get("content_parent_category", "Unknown") + for qa in rec.get("qa_pairs", []): + q = qa.get("question", "").strip() + opts = qa.get("options", {}) + correct = qa.get("answer", "").strip() + if not (q and len(opts) >= 4 and correct in opts): + continue + wrong = [k for k in opts if k != correct] + if not wrong: + continue + rej = random.choice(wrong) + + options_text = "\n".join(f"{k}. {opts[k]}" for k in sorted(opts)) + user_content = ( + f"