| |
# Thin wrapper: delegate to the packaged implementation of the benchmark.
from audio_llm_benchmark_shifted import main




# Script entry point.
if __name__ == "__main__":
    main()
# NOTE(review): the triple quote below opens a string literal that comments out
# the legacy inline copy of the script that follows — confirm a matching
# closing ''' exists at end of file, otherwise this file is a syntax error.
'''
| """ |
| Audio-LLM benchmark on shifted delay/early sets (126 videos from 63 source ids). |
| |
| Outputs align with previous stress-test style: |
| - delay_results.jsonl |
| - early_results.jsonl |
| - summary.json |
| - missing_or_errors.json |
| |
| And organized into four subfolders under output root: |
| - audio/ (extracted audio files) |
| - gemini/ (Gemini-only per-split results) |
| - qwen2/ (Qwen2-only per-split results) |
| - qwen3/ (Qwen3-only per-split results) |
| """ |
| |
| from __future__ import annotations |
| |
| import argparse |
| import csv |
| import json |
| import os |
| import re |
| import subprocess |
| import time |
| from pathlib import Path |
| from typing import Any, Dict, List, Optional, Tuple |
| |
| import librosa |
| import numpy as np |
| import torch |
| from tqdm import tqdm |
| from transformers import AutoModelForCausalLM, AutoProcessor |
| |
# google-generativeai is optional at import time; Gemini calls raise a clear
# RuntimeError later when the SDK is absent (see gemini_audio_grounding).
try:
    import google.generativeai as genai
except Exception:  # pragma: no cover
    genai = None


# Shared instruction sent to every model: locate the main physical-impact
# sound and answer with a strict JSON payload that _extract_json_object parses.
UNIVERSAL_PROMPT = (
    "Listen to this audio clip carefully. You are an expert in acoustic forensics. "
    "Locate the EXACT timestamp (in seconds) of the loudest, most energetic PHYSICAL IMPACT sound "
    "(e.g., body hitting ground, object breaking). Actively ignore background music, laughter, and speech. "
    'Output strictly in JSON format: {"audio_timestamp": 2.5, "sound_description": "brief description"}'
)

# Default input/output locations; all overridable through the CLI flags.
DEFAULT_SOURCE_CSV = Path(
    "/home/ubuntu/video_source/result/gemini_video_oc_frame/gemini_validated_accept_lt1p0.csv"
)
DEFAULT_DELAY_DIR = Path("/home/ubuntu/video_source/shifted/delay")
DEFAULT_EARLY_DIR = Path("/home/ubuntu/video_source/shifted/early")
DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/video_source/result/audio_llm_benchmark")

# Audio extraction defaults: 16 kHz mono WAV.
DEFAULT_AUDIO_SR = 16000
DEFAULT_AUDIO_FORMAT = "wav"
DEFAULT_SEED = 42
DEFAULT_SAMPLE_SIZE = 0  # 0 = all rows

# Model identifiers under test.
GEMINI_MODEL = "gemini-3.1-pro-preview"
QWEN2_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
QWEN3_MODEL_ID = "Qwen/Qwen3-Omni-30B-A3B-Instruct"

# Gemini Files API readiness polling (see wait_for_gemini_file_active).
GEMINI_FILE_READY_TIMEOUT_SECONDS = 120
GEMINI_FILE_POLL_SECONDS = 2
| |
def parse_args() -> argparse.Namespace:
    """Build and parse the CLI for the shifted-audio grounding benchmark."""
    parser = argparse.ArgumentParser(description="Benchmark Gemini/Qwen2/Qwen3 on shifted audio grounding.")
    # Filesystem locations (defaults live at module level).
    for flag, default in [
        ("--source-csv", DEFAULT_SOURCE_CSV),
        ("--delay-dir", DEFAULT_DELAY_DIR),
        ("--early-dir", DEFAULT_EARLY_DIR),
        ("--output-dir", DEFAULT_OUTPUT_DIR),
    ]:
        parser.add_argument(flag, type=Path, default=default)
    parser.add_argument("--audio-sr", type=int, default=DEFAULT_AUDIO_SR)
    parser.add_argument("--audio-format", choices=["wav", "mp3"], default=DEFAULT_AUDIO_FORMAT)
    parser.add_argument("--sample-size", type=int, default=DEFAULT_SAMPLE_SIZE, help="0 means all rows in source CSV")
    parser.add_argument("--seed", type=int, default=DEFAULT_SEED)
    parser.add_argument("--delay-shift", type=float, default=2.0)
    parser.add_argument("--early-shift", type=float, default=-2.0)
    parser.add_argument("--disable-local-models", action="store_true")
    parser.add_argument("--overwrite-output", action="store_true")
    return parser.parse_args()
| |
| |
def ensure_ffmpeg() -> None:
    """Fail fast if ffmpeg is unavailable.

    Raises FileNotFoundError when the binary is missing and
    CalledProcessError when it exits non-zero.
    """
    subprocess.run(["ffmpeg", "-version"], check=True, capture_output=True, text=True)
| |
| |
| def _extract_json_object(text: str) -> Dict[str, Any]: |
| text = text.strip() |
| fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL) |
| if fence_match: |
| return json.loads(fence_match.group(1)) |
| try: |
| return json.loads(text) |
| except json.JSONDecodeError: |
| pass |
| start = text.find("{") |
| end = text.rfind("}") |
| if start != -1 and end != -1 and end > start: |
| return json.loads(text[start : end + 1]) |
| raise ValueError("No valid JSON object found in model output.") |
| |
| |
| def _coerce_audio_result(data: Dict[str, Any]) -> Tuple[float, str]: |
| if "audio_timestamp" not in data or "sound_description" not in data: |
| raise ValueError(f"Missing keys in JSON output: {data}") |
| ts = float(data["audio_timestamp"]) |
| if not np.isfinite(ts): |
| raise ValueError(f"audio_timestamp is not finite: {ts}") |
| desc = str(data["sound_description"]).strip() |
| if not desc: |
| raise ValueError("sound_description is empty.") |
| return ts, desc |
| |
| |
def append_jsonl(path: Path, payload: Dict[str, Any]) -> None:
    """Append *payload* as one UTF-8 JSON line, creating parent dirs as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    line = json.dumps(payload, ensure_ascii=False)
    with path.open("a", encoding="utf-8") as fh:
        fh.write(line + "\n")
| |
| |
def load_source_rows(path: Path, sample_size: int, seed: int) -> List[Tuple[str, float]]:
    """Read (video_id, t_visual) pairs from the validated source CSV.

    Rows with a blank video_id or a non-numeric t_gemini are skipped.
    When 0 < sample_size < row count, a seeded random subset is drawn
    without replacement. The result is always sorted by video_id.

    Raises FileNotFoundError for a missing file and ValueError for a
    bad header or an empty parse.
    """
    if not path.exists():
        raise FileNotFoundError(f"source CSV not found: {path}")
    parsed: List[Tuple[str, float]] = []
    with path.open("r", encoding="utf-8", newline="") as fh:
        reader = csv.DictReader(fh)
        header = reader.fieldnames or []
        if not {"video_id", "t_gemini"}.issubset(header):
            raise ValueError("source CSV must include columns: video_id, t_gemini")
        for record in reader:
            vid = str(record.get("video_id", "")).strip()
            if not vid:
                continue
            try:
                parsed.append((vid, float(record.get("t_gemini", ""))))
            except (TypeError, ValueError):
                continue
    if not parsed:
        raise ValueError("No valid rows parsed from source CSV.")

    if 0 < sample_size < len(parsed):
        rng = np.random.default_rng(seed)
        chosen = rng.choice(len(parsed), size=sample_size, replace=False)
        parsed = [parsed[int(i)] for i in chosen]
    parsed.sort(key=lambda item: item[0])
    return parsed
| |
| |
def index_by_filename(root: Path) -> Dict[str, Path]:
    """Map basename -> first seen path for every *.mp4 under *root* (recursive)."""
    if not root.exists():
        raise FileNotFoundError(f"directory not found: {root}")
    index: Dict[str, Path] = {}
    for candidate in root.rglob("*.mp4"):
        if candidate.name not in index:
            index[candidate.name] = candidate
    return index
| |
| |
def shifted_filename(video_id: str, split: str, shift_seconds: int = 2) -> str:
    """Derive the filename of the shifted copy of *video_id*.

    The shifted sets tag files as "<stem>_delay2s.mp4" / "<stem>_early2s.mp4".
    Previously the "2s" tag was hard-coded even though the CLI exposes
    --delay-shift/--early-shift; it is now a parameter with a
    backward-compatible default of 2.

    Args:
        video_id: Source id, with or without an extension (".mp4" assumed).
        split: Either "delay" or "early".
        shift_seconds: Magnitude used in the filename tag (default 2).

    Returns:
        The shifted-set filename.

    Raises:
        ValueError: If *split* is neither "delay" nor "early".
    """
    p = Path(video_id)
    suffix = p.suffix or ".mp4"
    stem = p.stem if p.suffix else p.name
    if split not in ("delay", "early"):
        raise ValueError(f"Unknown split: {split}")
    return f"{stem}_{split}{shift_seconds}s{suffix}"
| |
| |
def extract_audio_file(video_path: Path, dst_path: Path, sr: int, fmt: str) -> None:
    """Extract mono audio at *sr* Hz from *video_path* into *dst_path* via ffmpeg.

    fmt == "wav" writes 16-bit PCM; anything else writes LAME MP3 at -q:a 2.
    Raises CalledProcessError if ffmpeg fails.
    """
    dst_path.parent.mkdir(parents=True, exist_ok=True)
    codec = ["-c:a", "pcm_s16le"] if fmt == "wav" else ["-c:a", "libmp3lame", "-q:a", "2"]
    cmd = [
        "ffmpeg",
        "-y",                 # overwrite existing output
        "-i", str(video_path),
        "-vn",                # drop the video stream
        "-ac", "1",           # downmix to mono
        "-ar", str(sr),
        *codec,
        str(dst_path),
    ]
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
| |
| |
def wait_for_gemini_file_active(file_name: str) -> None:
    """Poll the Gemini Files API until *file_name* reaches the ACTIVE state.

    Raises RuntimeError on FAILED/CANCELLED and TimeoutError after
    GEMINI_FILE_READY_TIMEOUT_SECONDS without becoming ACTIVE.
    """
    deadline = time.time() + GEMINI_FILE_READY_TIMEOUT_SECONDS
    while time.time() < deadline:
        state = getattr(genai.get_file(file_name), "state", None)
        state_name = str(getattr(state, "name", state))
        if state_name == "ACTIVE":
            return
        if state_name in ("FAILED", "CANCELLED"):
            raise RuntimeError(f"Gemini file processing failed: state={state_name}")
        time.sleep(GEMINI_FILE_POLL_SECONDS)
    raise TimeoutError("Gemini file did not become ACTIVE in time.")
| |
| |
def gemini_audio_grounding(audio_path: Path, gemini_api_key: str) -> Tuple[float, str]:
    """Upload *audio_path* to Gemini and return (timestamp, description).

    The uploaded file is deleted afterwards on a best-effort basis.
    Raises RuntimeError when the SDK or API key is missing, and
    ValueError when the response is empty or unparseable.
    """
    if genai is None:
        raise RuntimeError("google-generativeai is not installed.")
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is missing.")
    genai.configure(api_key=gemini_api_key)
    model = genai.GenerativeModel(GEMINI_MODEL)
    uploaded = genai.upload_file(path=str(audio_path))
    try:
        wait_for_gemini_file_active(uploaded.name)
        response = model.generate_content([UNIVERSAL_PROMPT, uploaded])
        reply = getattr(response, "text", "") or ""
        if not reply:
            raise ValueError("Gemini response text is empty.")
        return _coerce_audio_result(_extract_json_object(reply))
    finally:
        # Best-effort cleanup; failures here must not mask the real result/error.
        try:
            genai.delete_file(uploaded.name)
        except Exception:
            pass
| |
| |
def load_qwen_model_and_processor(model_id: str) -> Tuple[Any, Any]:
    """Load a Qwen audio model + processor pair for inference.

    Uses bf16 + device_map="auto" on CUDA, fp32 on CPU.

    NOTE(review): AutoModelForCausalLM is used for both Qwen2-Audio and
    Qwen3-Omni — confirm these audio checkpoints actually resolve under this
    Auto class; recent transformers expose them via dedicated
    *ForConditionalGeneration classes instead.
    """
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        # bf16 halves GPU memory; fp32 on CPU where bf16 support is uneven.
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        trust_remote_code=True,
    )
    model.eval()  # inference mode: disables dropout etc.
    return model, processor
| |
| |
def _run_qwen_audio_chat(model: Any, processor: Any, audio_path: Path, prompt: str, label: str) -> str:
    """Run one audio+text chat turn through a Qwen processor/model pair.

    Returns the decoded generation (prompt tokens stripped) as plain text.
    Raises RuntimeError when the audio is empty or no processor-kwarg
    variant is accepted; *label* only tags error messages.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "audio", "audio": str(audio_path)},
                {"type": "text", "text": prompt},
            ],
        }
    ]
    # Some processors lack a chat template; fall back to the raw prompt text.
    try:
        text_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    except Exception:
        text_prompt = prompt

    # sr=None keeps the file's native sampling rate; it is forwarded below.
    audio_arr, sr = librosa.load(str(audio_path), sr=None, mono=True)
    if audio_arr.size == 0:
        raise RuntimeError(f"{label}: extracted audio is empty.")

    last_exc: Optional[Exception] = None
    inputs = None
    # Processor API differs across Qwen releases ("audio" vs "audios" kwarg);
    # try both and keep the last failure for the error message.
    for kw in [
        {"text": [text_prompt], "audio": [audio_arr], "sampling_rate": sr},
        {"text": [text_prompt], "audios": [audio_arr], "sampling_rate": sr},
    ]:
        try:
            inputs = processor(return_tensors="pt", padding=True, **kw)
            break
        except Exception as exc:
            last_exc = exc
    if inputs is None:
        raise RuntimeError(f"{label}: failed to build processor inputs: {last_exc}")

    # Move tensors to the model's device (device_map models expose .device).
    dev = model.device if hasattr(model, "device") else torch.device("cuda" if torch.cuda.is_available() else "cpu")
    for k, v in list(inputs.items()):
        if torch.is_tensor(v):
            inputs[k] = v.to(dev)

    with torch.no_grad():
        # Greedy decoding for reproducibility.
        output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)
    input_len = inputs["input_ids"].shape[1] if "input_ids" in inputs else 0
    # Strip the echoed prompt tokens before decoding, when present.
    gen_ids = output_ids[:, input_len:] if input_len > 0 else output_ids
    return processor.batch_decode(gen_ids, skip_special_tokens=True)[0].strip()
| |
| |
def qwen2_audio_grounding(audio_path: Path, model: Any, processor: Any) -> Tuple[float, str]:
    """Run Qwen2-Audio on *audio_path* and return (timestamp, description)."""
    raw = _run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen2-Audio")
    return _coerce_audio_result(_extract_json_object(raw))
| |
| |
def qwen3_audio_grounding(audio_path: Path, model: Any, processor: Any) -> Tuple[float, str]:
    """Run Qwen3-Omni on *audio_path* and return (timestamp, description)."""
    raw = _run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen3-Omni")
    return _coerce_audio_result(_extract_json_object(raw))
| |
| |
def safe_call(fn, *args, **kwargs) -> Tuple[Optional[Tuple[float, str]], Optional[str]]:
    """Invoke *fn*; return (result, None) on success or (None, error_message)."""
    try:
        result = fn(*args, **kwargs)
    except Exception as exc:
        return None, str(exc)
    return result, None
| |
| |
def summarize_predictions(rows: List[Dict[str, Any]], key: str) -> Dict[str, Any]:
    """Count how many rows carry a non-None prediction under *key*."""
    predicted = sum(1 for row in rows if row.get(key) is not None)
    return {
        "count": len(rows),
        "with_prediction": predicted,
        "without_prediction": len(rows) - predicted,
    }
| |
| |
def main() -> None:
    """Run the full benchmark: sample rows, extract audio, query models, write reports."""
    args = parse_args()
    ensure_ffmpeg()

    # Gemini is mandatory; local Qwen models are optional below.
    gemini_api_key = os.getenv("GEMINI_API_KEY", "").strip()
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is required.")

    source_rows = load_source_rows(args.source_csv, args.sample_size, args.seed)
    delay_index = index_by_filename(args.delay_dir)
    early_index = index_by_filename(args.early_dir)

    # Output layout: combined jsonl at the root, per-model jsonl in subfolders.
    out_root = args.output_dir
    audio_dir = out_root / "audio"
    gemini_dir = out_root / "gemini"
    qwen2_dir = out_root / "qwen2"
    qwen3_dir = out_root / "qwen3"
    for d in [audio_dir, gemini_dir, qwen2_dir, qwen3_dir]:
        d.mkdir(parents=True, exist_ok=True)

    delay_results_path = out_root / "delay_results.jsonl"
    early_results_path = out_root / "early_results.jsonl"
    missing_path = out_root / "missing_or_errors.json"
    summary_path = out_root / "summary.json"
    sampled_path = out_root / "sampled_10_from_gemini.csv"

    model_out = {
        "gemini_delay": gemini_dir / "delay_results.jsonl",
        "gemini_early": gemini_dir / "early_results.jsonl",
        "qwen2_delay": qwen2_dir / "delay_results.jsonl",
        "qwen2_early": qwen2_dir / "early_results.jsonl",
        "qwen3_delay": qwen3_dir / "delay_results.jsonl",
        "qwen3_early": qwen3_dir / "early_results.jsonl",
    }

    # jsonl outputs are opened in append mode, so stale files must be removed
    # explicitly when --overwrite-output is given.
    if args.overwrite_output:
        for p in [delay_results_path, early_results_path, missing_path, summary_path, sampled_path, *model_out.values()]:
            if p.exists():
                p.unlink()

    # Local models degrade gracefully: a failed load means that model is skipped.
    qwen2_model = qwen2_processor = None
    qwen3_model = qwen3_processor = None
    if not args.disable_local_models:
        try:
            qwen2_model, qwen2_processor = load_qwen_model_and_processor(QWEN2_MODEL_ID)
        except Exception:
            qwen2_model = qwen2_processor = None
        try:
            qwen3_model, qwen3_processor = load_qwen_model_and_processor(QWEN3_MODEL_ID)
        except Exception:
            qwen3_model = qwen3_processor = None

    # Record the exact sampled (video_id, t_visual) set for reproducibility.
    with sampled_path.open("w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["video_id", "t_visual"])
        writer.writeheader()
        for video_id, t_visual in source_rows:
            writer.writerow({"video_id": video_id, "t_visual": round(t_visual, 3)})

    delay_rows: List[Dict[str, Any]] = []
    early_rows: List[Dict[str, Any]] = []
    missing: List[Dict[str, Any]] = []

    for video_id, t_visual in tqdm(source_rows, desc="Audio LLM Benchmark", unit="video"):
        # Each source id is evaluated on both its delayed and advanced copy.
        for split, shift, idx, combined_list, combined_path in [
            ("delay", args.delay_shift, delay_index, delay_rows, delay_results_path),
            ("early", args.early_shift, early_index, early_rows, early_results_path),
        ]:
            shifted_name = shifted_filename(video_id, split)
            video_path = idx.get(shifted_name)
            if video_path is None:
                missing.append({"split": split, "video_id": video_id, "reason": "file_not_found"})
                continue

            audio_path = audio_dir / split / f"{Path(video_id).stem}.{args.audio_format}"
            try:
                extract_audio_file(video_path, audio_path, args.audio_sr, args.audio_format)
            except Exception as exc:
                missing.append({"split": split, "video_id": video_id, "reason": f"audio_extract_failed: {exc}"})
                continue

            # Per-model failures are logged to `missing` but never abort the run.
            gemini_res, gemini_err = safe_call(gemini_audio_grounding, audio_path, gemini_api_key)
            qwen2_res = qwen2_err = None
            qwen3_res = qwen3_err = None
            if qwen2_model is not None and qwen2_processor is not None:
                qwen2_res, qwen2_err = safe_call(qwen2_audio_grounding, audio_path, qwen2_model, qwen2_processor)
            if qwen3_model is not None and qwen3_processor is not None:
                qwen3_res, qwen3_err = safe_call(qwen3_audio_grounding, audio_path, qwen3_model, qwen3_processor)

            if gemini_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"gemini_error: {gemini_err}"})
            if qwen2_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"qwen2_error: {qwen2_err}"})
            if qwen3_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"qwen3_error: {qwen3_err}"})

            # Ground truth for the shifted copy: visual timestamp plus the shift.
            expected_t_audio = float(t_visual) + float(shift)
            combined_row = {
                "video_path": str(video_path),
                "video_id": video_path.name,
                "source_video_id": video_id,
                "t_visual": float(t_visual),
                "shift_amount": float(shift),
                "expected_t_audio": expected_t_audio,
                "gemini_t": round(gemini_res[0], 3) if gemini_res else None,
                "qwen2_t": round(qwen2_res[0], 3) if qwen2_res else None,
                "qwen3_t": round(qwen3_res[0], 3) if qwen3_res else None,
                "gemini_desc": gemini_res[1] if gemini_res else None,
                "qwen2_desc": qwen2_res[1] if qwen2_res else None,
                "qwen3_desc": qwen3_res[1] if qwen3_res else None,
                "audio_path": str(audio_path),
            }
            append_jsonl(combined_path, combined_row)
            combined_list.append(combined_row)

            # Also mirror each model's result into its own per-split jsonl.
            append_jsonl(
                model_out[f"gemini_{split}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(gemini_res[0], 3) if gemini_res else None,
                    "sound_description": gemini_res[1] if gemini_res else None,
                    "audio_path": str(audio_path),
                },
            )
            append_jsonl(
                model_out[f"qwen2_{split}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(qwen2_res[0], 3) if qwen2_res else None,
                    "sound_description": qwen2_res[1] if qwen2_res else None,
                    "audio_path": str(audio_path),
                },
            )
            append_jsonl(
                model_out[f"qwen3_{split}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(qwen3_res[0], 3) if qwen3_res else None,
                    "sound_description": qwen3_res[1] if qwen3_res else None,
                    "audio_path": str(audio_path),
                },
            )

    with missing_path.open("w", encoding="utf-8") as f:
        json.dump(missing, f, ensure_ascii=False, indent=2)

    # Aggregate counts only; per-row data lives in the jsonl outputs.
    summary = {
        "source_csv": str(args.source_csv),
        "sample_size": len(source_rows),
        "seed": args.seed,
        "delay_shift": args.delay_shift,
        "early_shift": args.early_shift,
        "delay_summary": {
            "count": len(delay_rows),
            "gemini": summarize_predictions(delay_rows, "gemini_t"),
            "qwen2": summarize_predictions(delay_rows, "qwen2_t"),
            "qwen3": summarize_predictions(delay_rows, "qwen3_t"),
        },
        "early_summary": {
            "count": len(early_rows),
            "gemini": summarize_predictions(early_rows, "gemini_t"),
            "qwen2": summarize_predictions(early_rows, "qwen2_t"),
            "qwen3": summarize_predictions(early_rows, "qwen3_t"),
        },
        "missing_or_errors_count": len(missing),
        "outputs": {
            "delay_jsonl": str(delay_results_path),
            "early_jsonl": str(early_results_path),
            "missing_or_errors_json": str(missing_path),
            "sampled_csv": str(sampled_path),
            "audio_dir": str(audio_dir),
            "gemini_dir": str(gemini_dir),
            "qwen2_dir": str(qwen2_dir),
            "qwen3_dir": str(qwen3_dir),
        },
    }
    with summary_path.open("w", encoding="utf-8") as f:
        json.dump(summary, f, ensure_ascii=False, indent=2)

    print("========== Audio LLM Benchmark Summary ==========")
    print(f"Sampled source videos: {len(source_rows)}")
    print(f"Delay evaluated rows: {len(delay_rows)}")
    print(f"Early evaluated rows: {len(early_rows)}")
    print(f"Missing/errors: {len(missing)}")
    print(f"Summary JSON: {summary_path}")
    print("================================================")
| |
| |
| if __name__ == "__main__": |
| main() |
| #!/usr/bin/env python3 |
| """ |
| Audio-LLM benchmark on shifted delay/early sets (126 videos from 63 source ids). |
| |
| Outputs are aligned with previous stress-test style: |
| - delay_results.jsonl |
| - early_results.jsonl |
| - summary.json |
| - missing_or_errors.json |
| |
| And results are organized under four subfolders in output root: |
| - audio/ (extracted wav files) |
| - gemini/ (model-only jsonl) |
| - qwen2/ (model-only jsonl) |
| - qwen3/ (model-only jsonl) |
| """ |
| |
| from __future__ import annotations |
| |
| import argparse |
| import csv |
| import json |
| import os |
| import re |
| import subprocess |
| import time |
| from pathlib import Path |
| from statistics import mean |
| from typing import Any, Dict, List, Optional, Tuple |
| |
| import librosa |
| import numpy as np |
| import torch |
| from tqdm import tqdm |
| from transformers import AutoModelForCausalLM, AutoProcessor |
| |
# google-generativeai is optional at import time; Gemini calls raise a clear
# RuntimeError later when the SDK is absent (see gemini_audio_grounding).
try:
    import google.generativeai as genai
except Exception:  # pragma: no cover
    genai = None


# Shared instruction sent to every model: locate the main physical-impact
# sound and answer with a strict JSON payload that _extract_json_object parses.
UNIVERSAL_PROMPT = (
    "Listen to this audio clip carefully. You are an expert in acoustic forensics. "
    "Locate the EXACT timestamp (in seconds) of the loudest, most energetic PHYSICAL IMPACT sound "
    "(e.g., body hitting ground, object breaking). Actively ignore background music, laughter, and speech. "
    'Output strictly in JSON format: {"audio_timestamp": 2.5, "sound_description": "brief description"}'
)

# Default input/output locations; all overridable through the CLI flags.
DEFAULT_SOURCE_CSV = Path(
    "/home/ubuntu/video_source/result/gemini_video_oc_frame/gemini_validated_accept_lt1p0.csv"
)
DEFAULT_DELAY_DIR = Path("/home/ubuntu/video_source/shifted/delay")
DEFAULT_EARLY_DIR = Path("/home/ubuntu/video_source/shifted/early")
DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/video_source/result/audio_llm_benchmark")

# Audio extraction defaults: 16 kHz mono WAV.
DEFAULT_AUDIO_SR = 16000
DEFAULT_AUDIO_FORMAT = "wav"
DEFAULT_SEED = 42
DEFAULT_SAMPLE_SIZE = 0  # 0 means all

# Model identifiers under test.
GEMINI_MODEL = "gemini-3.1-pro-preview"
QWEN2_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
QWEN3_MODEL_ID = "Qwen/Qwen3-Omni-30B-A3B-Instruct"

# Gemini Files API readiness polling (see wait_for_gemini_file_active).
GEMINI_FILE_READY_TIMEOUT_SECONDS = 120
GEMINI_FILE_POLL_SECONDS = 2
| |
| |
def parse_args() -> argparse.Namespace:
    """Build and parse the CLI for the shifted-audio grounding benchmark."""
    parser = argparse.ArgumentParser(description="Benchmark Gemini/Qwen2/Qwen3 on shifted audio grounding.")
    # Filesystem locations (defaults live at module level).
    for flag, default in [
        ("--source-csv", DEFAULT_SOURCE_CSV),
        ("--delay-dir", DEFAULT_DELAY_DIR),
        ("--early-dir", DEFAULT_EARLY_DIR),
        ("--output-dir", DEFAULT_OUTPUT_DIR),
    ]:
        parser.add_argument(flag, type=Path, default=default)
    parser.add_argument("--audio-sr", type=int, default=DEFAULT_AUDIO_SR)
    parser.add_argument("--audio-format", choices=["wav", "mp3"], default=DEFAULT_AUDIO_FORMAT)
    parser.add_argument("--sample-size", type=int, default=DEFAULT_SAMPLE_SIZE, help="0 means all rows in source-csv")
    parser.add_argument("--seed", type=int, default=DEFAULT_SEED)
    parser.add_argument("--delay-shift", type=float, default=2.0)
    parser.add_argument("--early-shift", type=float, default=-2.0)
    parser.add_argument("--disable-local-models", action="store_true")
    parser.add_argument("--overwrite-output", action="store_true")
    return parser.parse_args()
| |
| |
def ensure_ffmpeg() -> None:
    """Fail fast if ffmpeg is unavailable.

    Raises FileNotFoundError when the binary is missing and
    CalledProcessError when it exits non-zero.
    """
    subprocess.run(["ffmpeg", "-version"], check=True, capture_output=True, text=True)
| |
| |
| def _extract_json_object(text: str) -> Dict[str, Any]: |
| text = text.strip() |
| fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL) |
| if fence_match: |
| return json.loads(fence_match.group(1)) |
| try: |
| return json.loads(text) |
| except json.JSONDecodeError: |
| pass |
| start = text.find("{") |
| end = text.rfind("}") |
| if start != -1 and end != -1 and end > start: |
| return json.loads(text[start : end + 1]) |
| raise ValueError("No valid JSON object found in model output.") |
| |
| |
| def _coerce_audio_result(data: Dict[str, Any]) -> Tuple[float, str]: |
| if "audio_timestamp" not in data or "sound_description" not in data: |
| raise ValueError(f"Missing keys in JSON output: {data}") |
| ts = float(data["audio_timestamp"]) |
| if not np.isfinite(ts): |
| raise ValueError(f"audio_timestamp is not finite: {ts}") |
| desc = str(data["sound_description"]).strip() |
| if not desc: |
| raise ValueError("sound_description is empty.") |
| return ts, desc |
| |
| |
def append_jsonl(path: Path, payload: Dict[str, Any]) -> None:
    """Append *payload* as one UTF-8 JSON line, creating parent dirs as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    line = json.dumps(payload, ensure_ascii=False)
    with path.open("a", encoding="utf-8") as fh:
        fh.write(line + "\n")
| |
| |
def load_source_rows(path: Path, sample_size: int, seed: int) -> List[Tuple[str, float]]:
    """Read (video_id, t_visual) pairs from the validated source CSV.

    Rows with a blank video_id or a non-numeric t_gemini are skipped.
    When 0 < sample_size < row count, a seeded random subset is drawn
    without replacement. The result is always sorted by video_id.

    Raises FileNotFoundError for a missing file and ValueError for a
    bad header or an empty parse.
    """
    if not path.exists():
        raise FileNotFoundError(f"source CSV not found: {path}")
    parsed: List[Tuple[str, float]] = []
    with path.open("r", encoding="utf-8", newline="") as fh:
        reader = csv.DictReader(fh)
        header = reader.fieldnames or []
        if not {"video_id", "t_gemini"}.issubset(header):
            raise ValueError("source CSV must include columns: video_id, t_gemini")
        for record in reader:
            vid = str(record.get("video_id", "")).strip()
            if not vid:
                continue
            try:
                parsed.append((vid, float(record.get("t_gemini", ""))))
            except (TypeError, ValueError):
                continue
    if not parsed:
        raise ValueError("No valid rows parsed from source CSV.")

    if 0 < sample_size < len(parsed):
        rng = np.random.default_rng(seed)
        chosen = rng.choice(len(parsed), size=sample_size, replace=False)
        parsed = [parsed[int(i)] for i in chosen]
    parsed.sort(key=lambda item: item[0])
    return parsed
| |
| |
def index_by_filename(root: Path) -> Dict[str, Path]:
    """Map basename -> first seen path for every *.mp4 under *root* (recursive)."""
    if not root.exists():
        raise FileNotFoundError(f"directory not found: {root}")
    index: Dict[str, Path] = {}
    for candidate in root.rglob("*.mp4"):
        if candidate.name not in index:
            index[candidate.name] = candidate
    return index
| |
| |
def shifted_filename(video_id: str, split: str, shift_seconds: int = 2) -> str:
    """Derive the filename of the shifted copy of *video_id*.

    The shifted sets tag files as "<stem>_delay2s.mp4" / "<stem>_early2s.mp4".
    Previously the "2s" tag was hard-coded even though the CLI exposes
    --delay-shift/--early-shift; it is now a parameter with a
    backward-compatible default of 2.

    Args:
        video_id: Source id, with or without an extension (".mp4" assumed).
        split: Either "delay" or "early".
        shift_seconds: Magnitude used in the filename tag (default 2).

    Returns:
        The shifted-set filename.

    Raises:
        ValueError: If *split* is neither "delay" nor "early".
    """
    p = Path(video_id)
    suffix = p.suffix or ".mp4"
    stem = p.stem if p.suffix else p.name
    if split not in ("delay", "early"):
        raise ValueError(f"Unknown split: {split}")
    return f"{stem}_{split}{shift_seconds}s{suffix}"
| |
| |
def extract_audio_file(video_path: Path, dst_path: Path, sr: int, fmt: str) -> None:
    """Extract mono audio at *sr* Hz from *video_path* into *dst_path* via ffmpeg.

    fmt == "wav" writes 16-bit PCM; anything else writes LAME MP3 at -q:a 2.
    Raises CalledProcessError if ffmpeg fails.
    """
    dst_path.parent.mkdir(parents=True, exist_ok=True)
    codec = ["-c:a", "pcm_s16le"] if fmt == "wav" else ["-c:a", "libmp3lame", "-q:a", "2"]
    cmd = [
        "ffmpeg",
        "-y",                 # overwrite existing output
        "-i", str(video_path),
        "-vn",                # drop the video stream
        "-ac", "1",           # downmix to mono
        "-ar", str(sr),
        *codec,
        str(dst_path),
    ]
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
| |
| |
def wait_for_gemini_file_active(file_name: str) -> None:
    """Poll the Gemini Files API until *file_name* reaches the ACTIVE state.

    Raises RuntimeError on FAILED/CANCELLED and TimeoutError after
    GEMINI_FILE_READY_TIMEOUT_SECONDS without becoming ACTIVE.
    """
    deadline = time.time() + GEMINI_FILE_READY_TIMEOUT_SECONDS
    while time.time() < deadline:
        state = getattr(genai.get_file(file_name), "state", None)
        state_name = str(getattr(state, "name", state))
        if state_name == "ACTIVE":
            return
        if state_name in ("FAILED", "CANCELLED"):
            raise RuntimeError(f"Gemini file processing failed: state={state_name}")
        time.sleep(GEMINI_FILE_POLL_SECONDS)
    raise TimeoutError("Gemini file did not become ACTIVE in time.")
| |
| |
def gemini_audio_grounding(audio_path: Path, gemini_api_key: str) -> Tuple[float, str]:
    """Upload *audio_path* to Gemini and return (timestamp, description).

    The uploaded file is deleted afterwards on a best-effort basis.
    Raises RuntimeError when the SDK or API key is missing, and
    ValueError when the response is empty or unparseable.
    """
    if genai is None:
        raise RuntimeError("google-generativeai is not installed.")
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is missing.")
    genai.configure(api_key=gemini_api_key)
    model = genai.GenerativeModel(GEMINI_MODEL)
    uploaded = genai.upload_file(path=str(audio_path))
    try:
        wait_for_gemini_file_active(uploaded.name)
        response = model.generate_content([UNIVERSAL_PROMPT, uploaded])
        reply = getattr(response, "text", "") or ""
        if not reply:
            raise ValueError("Gemini response text is empty.")
        return _coerce_audio_result(_extract_json_object(reply))
    finally:
        # Best-effort cleanup; failures here must not mask the real result/error.
        try:
            genai.delete_file(uploaded.name)
        except Exception:
            pass
| |
| |
def load_qwen_model_and_processor(model_id: str) -> Tuple[Any, Any]:
    """Load a Qwen audio model + processor pair for inference.

    Uses bf16 + device_map="auto" on CUDA, fp32 on CPU.

    NOTE(review): AutoModelForCausalLM is used for both Qwen2-Audio and
    Qwen3-Omni — confirm these audio checkpoints actually resolve under this
    Auto class; recent transformers expose them via dedicated
    *ForConditionalGeneration classes instead.
    """
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        # bf16 halves GPU memory; fp32 on CPU where bf16 support is uneven.
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        trust_remote_code=True,
    )
    model.eval()  # inference mode: disables dropout etc.
    return model, processor
| |
| |
def _run_qwen_audio_chat(model: Any, processor: Any, audio_path: Path, prompt: str, label: str) -> str:
    """Run one audio+text chat turn through a Qwen processor/model pair.

    Returns the decoded generation (prompt tokens stripped) as plain text.
    Raises RuntimeError when the audio is empty or no processor-kwarg
    variant is accepted; *label* only tags error messages.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "audio", "audio": str(audio_path)},
                {"type": "text", "text": prompt},
            ],
        }
    ]
    # Some processors lack a chat template; fall back to the raw prompt text.
    try:
        text_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    except Exception:
        text_prompt = prompt

    # sr=None keeps the file's native sampling rate; it is forwarded below.
    audio_arr, sr = librosa.load(str(audio_path), sr=None, mono=True)
    if audio_arr.size == 0:
        raise RuntimeError(f"{label}: extracted audio is empty.")

    last_exc: Optional[Exception] = None
    # Processor API differs across Qwen releases ("audio" vs "audios" kwarg);
    # try both and keep the last failure for the error message.
    candidate_kwargs = [
        {"text": [text_prompt], "audio": [audio_arr], "sampling_rate": sr},
        {"text": [text_prompt], "audios": [audio_arr], "sampling_rate": sr},
    ]
    inputs = None
    for kw in candidate_kwargs:
        try:
            inputs = processor(return_tensors="pt", padding=True, **kw)
            break
        except Exception as exc:
            last_exc = exc
    if inputs is None:
        raise RuntimeError(f"{label}: failed to build processor inputs: {last_exc}")

    # Move tensors to the model's device (device_map models expose .device).
    dev = model.device if hasattr(model, "device") else torch.device("cuda" if torch.cuda.is_available() else "cpu")
    for k, v in list(inputs.items()):
        if torch.is_tensor(v):
            inputs[k] = v.to(dev)

    with torch.no_grad():
        # Greedy decoding for reproducibility.
        output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)
    input_len = inputs["input_ids"].shape[1] if "input_ids" in inputs else 0
    # Strip the echoed prompt tokens before decoding, when present.
    gen_ids = output_ids[:, input_len:] if input_len > 0 else output_ids
    text = processor.batch_decode(gen_ids, skip_special_tokens=True)[0].strip()
    return text
| |
| |
def qwen2_audio_grounding(audio_path: Path, model: Any, processor: Any) -> Tuple[float, str]:
    """Run Qwen2-Audio on *audio_path* and return (timestamp, description)."""
    return _coerce_audio_result(
        _extract_json_object(_run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen2-Audio"))
    )
| |
| |
def qwen3_audio_grounding(audio_path: Path, model: Any, processor: Any) -> Tuple[float, str]:
    """Run Qwen3-Omni on *audio_path* and return (timestamp, description)."""
    return _coerce_audio_result(
        _extract_json_object(_run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen3-Omni"))
    )
| |
| |
def safe_call(fn, *args, **kwargs) -> Tuple[Optional[Tuple[float, str]], Optional[str]]:
    """Invoke *fn*; return (result, None) on success or (None, error_message)."""
    try:
        result = fn(*args, **kwargs)
    except Exception as exc:
        return None, str(exc)
    return result, None
| |
| |
def summarize_errors(rows: List[Dict[str, Any]], model_key: str) -> Dict[str, Any]:
    """Count rows with and without a non-None prediction under *model_key*."""
    predicted = [float(row[model_key]) for row in rows if row.get(model_key) is not None]
    n_predicted = len(predicted)
    return {
        "count": len(rows),
        "with_prediction": n_predicted,
        "without_prediction": len(rows) - n_predicted,
    }
| |
| |
def main() -> None:
    """Run the shifted delay/early audio-grounding benchmark end to end.

    Pipeline: sample source rows, locate the shifted video for each split,
    extract audio, query Gemini/Qwen2/Qwen3, and write combined plus
    per-model JSONL results, a missing/errors report, and a summary.

    NOTE(review): this main reads args fields (source_csv, sample_size,
    delay_dir, early_dir, output_dir, delay_shift, early_shift) that the
    parse_args visible in this file does not define — confirm it pairs
    with the matching argument parser elsewhere.
    """
    args = parse_args()
    ensure_ffmpeg()

    # Fail fast: Gemini is mandatory for this benchmark.
    gemini_api_key = os.getenv("GEMINI_API_KEY", "").strip()
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is required.")

    # Sampled (video_id, t_visual) pairs plus filename->path indexes of
    # the pre-shifted delay/early video folders.
    rows = load_source_rows(args.source_csv, args.sample_size, args.seed)
    delay_index = index_by_filename(args.delay_dir)
    early_index = index_by_filename(args.early_dir)

    # Output layout: audio/ for extracted clips, one folder per model.
    out_root = args.output_dir
    audio_dir = out_root / "audio"
    gemini_dir = out_root / "gemini"
    qwen2_dir = out_root / "qwen2"
    qwen3_dir = out_root / "qwen3"
    for d in [audio_dir, gemini_dir, qwen2_dir, qwen3_dir]:
        d.mkdir(parents=True, exist_ok=True)

    delay_results_path = out_root / "delay_results.jsonl"
    early_results_path = out_root / "early_results.jsonl"
    missing_path = out_root / "missing_or_errors.json"
    summary_path = out_root / "summary.json"
    sampled_path = out_root / "sampled_10_from_gemini.csv"

    # Per-model, per-split JSONL targets keyed as "<model>_<split>".
    model_out = {
        "gemini_delay": gemini_dir / "delay_results.jsonl",
        "gemini_early": gemini_dir / "early_results.jsonl",
        "qwen2_delay": qwen2_dir / "delay_results.jsonl",
        "qwen2_early": qwen2_dir / "early_results.jsonl",
        "qwen3_delay": qwen3_dir / "delay_results.jsonl",
        "qwen3_early": qwen3_dir / "early_results.jsonl",
    }

    # JSONL outputs are append-only, so stale files must be removed
    # up front when a clean rerun is requested.
    if args.overwrite_output:
        for p in [delay_results_path, early_results_path, missing_path, summary_path, sampled_path, *model_out.values()]:
            if p.exists():
                p.unlink()

    # Local Qwen models are best-effort: a failed load disables that
    # model for the whole run instead of aborting.
    qwen2_model = qwen2_processor = None
    qwen3_model = qwen3_processor = None
    if not args.disable_local_models:
        try:
            qwen2_model, qwen2_processor = load_qwen_model_and_processor(QWEN2_MODEL_ID)
        except Exception:
            qwen2_model = qwen2_processor = None
        try:
            qwen3_model, qwen3_processor = load_qwen_model_and_processor(QWEN3_MODEL_ID)
        except Exception:
            qwen3_model = qwen3_processor = None

    delay_rows: List[Dict[str, Any]] = []
    early_rows: List[Dict[str, Any]] = []
    missing: List[Dict[str, Any]] = []

    # Record the sampled set for reproducibility/inspection.
    with sampled_path.open("w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["video_id", "t_visual"])
        writer.writeheader()
        for video_id, t_visual in rows:
            writer.writerow({"video_id": video_id, "t_visual": round(t_visual, 3)})

    for video_id, t_visual in tqdm(rows, desc="Audio LLM Benchmark", unit="video"):
        # Each source video is evaluated twice: once on the delayed-audio
        # copy and once on the early-audio copy.
        for split, shift, idx, out_path, combined_rows, model_suffix in [
            ("delay", args.delay_shift, delay_index, delay_results_path, delay_rows, "delay"),
            ("early", args.early_shift, early_index, early_results_path, early_rows, "early"),
        ]:
            shifted_name = shifted_filename(video_id, split)
            video_path = idx.get(shifted_name)
            if video_path is None:
                missing.append({"split": split, "video_id": video_id, "reason": "file_not_found"})
                continue

            # Extracted audio is kept under audio/<split>/ for inspection.
            audio_path = audio_dir / split / f"{Path(video_id).stem}.{args.audio_format}"
            try:
                extract_audio_file(video_path, audio_path, args.audio_sr, args.audio_format)
            except Exception as exc:
                missing.append({"split": split, "video_id": video_id, "reason": f"audio_extract_failed: {exc}"})
                continue

            # Query all available models; safe_call converts exceptions
            # into (None, error_message) so one failure never stops the run.
            gemini_res, gemini_err = safe_call(gemini_audio_grounding, audio_path, gemini_api_key)
            qwen2_res = qwen2_err = None
            qwen3_res = qwen3_err = None

            if qwen2_model is not None and qwen2_processor is not None:
                qwen2_res, qwen2_err = safe_call(qwen2_audio_grounding, audio_path, qwen2_model, qwen2_processor)
            if qwen3_model is not None and qwen3_processor is not None:
                qwen3_res, qwen3_err = safe_call(qwen3_audio_grounding, audio_path, qwen3_model, qwen3_processor)

            # Errors are logged to the missing/errors report but the row is
            # still emitted with None predictions for the failed models.
            if gemini_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"gemini_error: {gemini_err}"})
            if qwen2_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"qwen2_error: {qwen2_err}"})
            if qwen3_err:
                missing.append({"split": split, "video_id": video_id, "reason": f"qwen3_error: {qwen3_err}"})

            # Ground truth for this split: visual time plus the known shift.
            expected_t_audio = float(t_visual) + float(shift)
            row = {
                "video_path": str(video_path),
                "video_id": video_path.name,
                "source_video_id": video_id,
                "t_visual": float(t_visual),
                "shift_amount": float(shift),
                "expected_t_audio": expected_t_audio,
                "gemini_t": round(gemini_res[0], 3) if gemini_res else None,
                "qwen2_t": round(qwen2_res[0], 3) if qwen2_res else None,
                "qwen3_t": round(qwen3_res[0], 3) if qwen3_res else None,
                "gemini_desc": gemini_res[1] if gemini_res else None,
                "qwen2_desc": qwen2_res[1] if qwen2_res else None,
                "qwen3_desc": qwen3_res[1] if qwen3_res else None,
                "audio_path": str(audio_path),
            }
            append_jsonl(out_path, row)
            combined_rows.append(row)

            # Per-model jsonl outputs under dedicated subfolders.
            append_jsonl(
                model_out[f"gemini_{model_suffix}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(gemini_res[0], 3) if gemini_res else None,
                    "sound_description": gemini_res[1] if gemini_res else None,
                    "audio_path": str(audio_path),
                },
            )
            append_jsonl(
                model_out[f"qwen2_{model_suffix}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(qwen2_res[0], 3) if qwen2_res else None,
                    "sound_description": qwen2_res[1] if qwen2_res else None,
                    "audio_path": str(audio_path),
                },
            )
            append_jsonl(
                model_out[f"qwen3_{model_suffix}"],
                {
                    "video_id": video_path.name,
                    "source_video_id": video_id,
                    "t_visual": float(t_visual),
                    "shift_amount": float(shift),
                    "expected_t_audio": expected_t_audio,
                    "detected_t_audio": round(qwen3_res[0], 3) if qwen3_res else None,
                    "sound_description": qwen3_res[1] if qwen3_res else None,
                    "audio_path": str(audio_path),
                },
            )

    # Persist the full missing/errors report.
    with missing_path.open("w", encoding="utf-8") as f:
        json.dump(missing, f, ensure_ascii=False, indent=2)

    # Aggregate coverage stats and output locations.
    summary = {
        "source_csv": str(args.source_csv),
        "sample_size": len(rows),
        "seed": args.seed,
        "delay_shift": args.delay_shift,
        "early_shift": args.early_shift,
        "delay_summary": {
            "gemini": summarize_errors(delay_rows, "gemini_t"),
            "qwen2": summarize_errors(delay_rows, "qwen2_t"),
            "qwen3": summarize_errors(delay_rows, "qwen3_t"),
            "count": len(delay_rows),
        },
        "early_summary": {
            "gemini": summarize_errors(early_rows, "gemini_t"),
            "qwen2": summarize_errors(early_rows, "qwen2_t"),
            "qwen3": summarize_errors(early_rows, "qwen3_t"),
            "count": len(early_rows),
        },
        "missing_or_errors_count": len(missing),
        "outputs": {
            "delay_jsonl": str(delay_results_path),
            "early_jsonl": str(early_results_path),
            "missing_or_errors_json": str(missing_path),
            "sampled_csv": str(sampled_path),
            "audio_dir": str(audio_dir),
            "gemini_dir": str(gemini_dir),
            "qwen2_dir": str(qwen2_dir),
            "qwen3_dir": str(qwen3_dir),
        },
    }
    with summary_path.open("w", encoding="utf-8") as f:
        json.dump(summary, f, ensure_ascii=False, indent=2)

    print("========== Audio LLM Benchmark Summary ==========")
    print(f"Sampled source videos: {len(rows)}")
    print(f"Delay evaluated rows: {len(delay_rows)}")
    print(f"Early evaluated rows: {len(early_rows)}")
    print(f"Missing/errors: {len(missing)}")
    print(f"Summary: {summary_path}")
    print("================================================")
| |
| |
# Script entry point.
if __name__ == "__main__":
    main()
| #!/usr/bin/env python3 |
| """ |
| Audio-LLM benchmark for physical-impact timestamp grounding. |
| |
| Models: |
| - Gemini 3.1 Pro (google-generativeai, API) |
| - Qwen2-Audio-7B-Instruct (local transformers) |
| - Qwen3-Omni-30B-A3B-Instruct (local transformers) |
| """ |
| |
| from __future__ import annotations |
| |
| import argparse |
| import json |
| import os |
| import re |
| import subprocess |
| import tempfile |
| import time |
| from dataclasses import dataclass |
| from pathlib import Path |
| from typing import Any, Dict, List, Optional, Tuple |
| |
| import librosa |
| import numpy as np |
| import torch |
| from tqdm import tqdm |
| from transformers import AutoModelForCausalLM, AutoProcessor |
| |
| try: |
| import google.generativeai as genai |
| except Exception: # pragma: no cover |
| genai = None |
| |
| |
# Prompt shared verbatim by all three models so their answers are comparable;
# it demands a strict JSON reply with "audio_timestamp" and "sound_description".
UNIVERSAL_PROMPT = (
    "Listen to this audio clip carefully. You are an expert in acoustic forensics. "
    "Locate the EXACT timestamp (in seconds) of the loudest, most energetic PHYSICAL IMPACT sound "
    "(e.g., body hitting ground, object breaking). Actively ignore background music, laughter, and speech. "
    'Output strictly in JSON format: {"audio_timestamp": 2.5, "sound_description": "brief description"}'
)

# Default I/O locations and sampling/extraction knobs (all overridable via CLI).
DEFAULT_INPUT_DIR = Path("/home/ubuntu/video_source/shifted/delay")
DEFAULT_OUTPUT_JSONL = Path("/home/ubuntu/video_source/result/audio_llm_benchmark.jsonl")
DEFAULT_MAX_VIDEOS = 0  # 0 means "process every video found"
DEFAULT_AUDIO_SR = 16000
DEFAULT_AUDIO_FORMAT = "wav"
DEFAULT_SEED = 42

# Model identifiers: one hosted (Gemini) and two local Hugging Face models.
GEMINI_MODEL = "gemini-3.1-pro-preview"
QWEN2_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
QWEN3_MODEL_ID = "Qwen/Qwen3-Omni-30B-A3B-Instruct"

# Polling budget for the Gemini Files API to mark an upload ACTIVE.
GEMINI_FILE_READY_TIMEOUT_SECONDS = 120
GEMINI_FILE_POLL_SECONDS = 2
| |
| |
@dataclass
class AudioResult:
    """Validated model answer for one audio clip."""

    # Detected impact time, in seconds from the start of the clip.
    audio_timestamp: float
    # Model's brief free-text description of the detected sound.
    sound_description: str
| |
| |
def parse_args() -> argparse.Namespace:
    """Define and parse the command-line interface of the benchmark."""
    parser = argparse.ArgumentParser(description="Benchmark Gemini/Qwen2/Qwen3 on audio grounding.")
    add = parser.add_argument
    add("--input-dir", type=Path, default=DEFAULT_INPUT_DIR, help="Folder of videos (.mp4)")
    add("--output-jsonl", type=Path, default=DEFAULT_OUTPUT_JSONL, help="Benchmark output JSONL")
    add("--audio-sr", type=int, default=DEFAULT_AUDIO_SR, help="Extracted audio sample rate")
    add("--audio-format", choices=["wav", "mp3"], default=DEFAULT_AUDIO_FORMAT,
        help="Temporary extracted audio format")
    add("--max-videos", type=int, default=DEFAULT_MAX_VIDEOS, help="0 means all videos")
    add("--seed", type=int, default=DEFAULT_SEED, help="Random seed for selection")
    add("--overwrite-output", action="store_true", help="Delete existing output JSONL before run")
    add("--save-extracted-audio-dir", type=Path, default=None,
        help="If set, keep extracted audio files in this directory for inspection")
    add("--disable-local-models", action="store_true",
        help="Only run Gemini (skip Qwen2/Qwen3 loading)")
    return parser.parse_args()
| |
| |
def ensure_ffmpeg() -> None:
    """Probe `ffmpeg -version`; raises if the binary is missing or broken."""
    # capture_output=True is the modern spelling of stdout=PIPE, stderr=PIPE.
    subprocess.run(["ffmpeg", "-version"], check=True, capture_output=True, text=True)
| |
| |
| def _extract_json_object(text: str) -> Dict[str, Any]: |
| text = text.strip() |
| fence_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, flags=re.DOTALL) |
| if fence_match: |
| return json.loads(fence_match.group(1)) |
| try: |
| return json.loads(text) |
| except json.JSONDecodeError: |
| pass |
| start = text.find("{") |
| end = text.rfind("}") |
| if start != -1 and end != -1 and end > start: |
| return json.loads(text[start : end + 1]) |
| raise ValueError("No valid JSON object found in model output.") |
| |
| |
| def _coerce_audio_result(data: Dict[str, Any]) -> AudioResult: |
| if "audio_timestamp" not in data or "sound_description" not in data: |
| raise ValueError(f"Missing keys in JSON output: {data}") |
| ts = float(data["audio_timestamp"]) |
| if not np.isfinite(ts): |
| raise ValueError(f"audio_timestamp is not finite: {ts}") |
| desc = str(data["sound_description"]).strip() |
| if not desc: |
| raise ValueError("sound_description is empty.") |
| return AudioResult(audio_timestamp=ts, sound_description=desc) |
| |
| |
def append_jsonl(path: Path, payload: Dict[str, Any]) -> None:
    """Append *payload* as one JSON line to *path*, creating parent dirs as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    line = json.dumps(payload, ensure_ascii=False)
    with path.open("a", encoding="utf-8") as handle:
        handle.write(line + "\n")
| |
| |
def extract_audio_file(video_path: Path, sr: int, fmt: str, save_dir: Optional[Path]) -> Path:
    """Extract mono audio from *video_path* at *sr* Hz using ffmpeg.

    The result goes into *save_dir* (kept for inspection) when given,
    otherwise into a caller-owned temporary file. Returns the output path.
    """
    suffix = f".{fmt}"
    if save_dir is None:
        # delete=False: the caller decides when to remove the temp file.
        tmp = tempfile.NamedTemporaryFile(prefix="bench_audio_", suffix=suffix, delete=False)
        tmp.close()
        out_path = Path(tmp.name)
    else:
        save_dir.mkdir(parents=True, exist_ok=True)
        out_path = save_dir / f"{video_path.stem}.{fmt}"

    codec_args = (
        ["-c:a", "pcm_s16le"] if fmt == "wav" else ["-c:a", "libmp3lame", "-q:a", "2"]
    )
    cmd = [
        "ffmpeg",
        "-y",
        "-i",
        str(video_path),
        "-vn",
        "-ac",
        "1",
        "-ar",
        str(sr),
        *codec_args,
        str(out_path),
    ]
    subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    return out_path
| |
| |
def wait_for_gemini_file_active(file_name: str) -> None:
    """Poll the Gemini Files API until the uploaded file becomes ACTIVE.

    Raises RuntimeError on a FAILED/CANCELLED state and TimeoutError when
    the polling budget is exhausted.
    """
    deadline = time.time() + GEMINI_FILE_READY_TIMEOUT_SECONDS
    while time.time() < deadline:
        state = getattr(genai.get_file(file_name), "state", None)
        state_label = str(getattr(state, "name", state))
        if state_label == "ACTIVE":
            return
        if state_label in {"FAILED", "CANCELLED"}:
            raise RuntimeError(f"Gemini file processing failed: state={state_label}")
        time.sleep(GEMINI_FILE_POLL_SECONDS)
    raise TimeoutError("Gemini file did not become ACTIVE in time.")
| |
| |
def gemini_audio_grounding(audio_path: Path, gemini_api_key: str) -> AudioResult:
    """Upload the clip to Gemini, request the impact timestamp, parse the reply.

    The uploaded file is deleted best-effort whether or not the call succeeds.
    """
    if genai is None:
        raise RuntimeError("google-generativeai is not installed.")
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is missing.")
    genai.configure(api_key=gemini_api_key)
    client_model = genai.GenerativeModel(GEMINI_MODEL)
    uploaded = genai.upload_file(path=str(audio_path))
    try:
        wait_for_gemini_file_active(uploaded.name)
        response = client_model.generate_content([UNIVERSAL_PROMPT, uploaded])
        answer = getattr(response, "text", "") or ""
        if not answer:
            raise ValueError("Gemini response text is empty.")
        return _coerce_audio_result(_extract_json_object(answer))
    finally:
        try:
            genai.delete_file(uploaded.name)
        except Exception:
            pass
| |
| |
def load_qwen_model_and_processor(model_id: str) -> Tuple[Any, Any]:
    """
    Best-effort universal loader for Qwen audio models with remote code.

    Returns an eval-mode (model, processor) pair for *model_id*.

    NOTE(review): AutoModelForCausalLM may not resolve every Qwen audio
    architecture even with trust_remote_code=True — confirm against each
    model card; a load failure here is silently swallowed by the caller's
    try/except, disabling that model for the run.
    """
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    # bfloat16 + device_map="auto" only when CUDA is available; otherwise
    # fall back to float32 on CPU.
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        trust_remote_code=True,
    )
    model.eval()
    return model, processor
| |
| |
def _run_qwen_audio_chat(
    model: Any,
    processor: Any,
    audio_path: Path,
    prompt: str,
    model_label: str,
) -> str:
    """
    Generic chat-template path used by both Qwen2-Audio and Qwen3-Omni.

    Builds a one-turn multimodal message (audio + text prompt), renders it
    through the processor's chat template, runs greedy generation, and
    returns the decoded completion text.

    This intentionally keeps robust fallbacks because Qwen audio model
    templates differ across transformers/model revisions.

    Raises RuntimeError when the audio is empty or no processor signature
    accepts the inputs; *model_label* is only used in error messages.
    """
    # 1) Build multimodal chat message.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "audio", "audio": str(audio_path)},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    # 2) Render chat text by processor template (if available).
    # Fallback: use the raw prompt when the processor has no usable template.
    try:
        text_prompt = processor.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
    except Exception:
        text_prompt = prompt

    # 3) Load audio as waveform for processor input.
    # sr=None keeps the file's native sample rate; sampling_rate is then
    # forwarded to the processor below. NOTE(review): this assumes the
    # feature extractor resamples (or accepts) the native rate — confirm
    # for 16 kHz-only extractors.
    audio_arr, sr = librosa.load(str(audio_path), sr=None, mono=True)
    if audio_arr.size == 0:
        raise RuntimeError(f"{model_label}: extracted audio is empty.")

    # 4) Try common processor signatures across versions.
    # Some revisions take "audio", others "audios"; keep the last error for
    # the failure message.
    last_exc: Optional[Exception] = None
    candidate_kwargs = [
        {"text": [text_prompt], "audio": [audio_arr], "sampling_rate": sr},
        {"text": [text_prompt], "audios": [audio_arr], "sampling_rate": sr},
    ]

    for kw in candidate_kwargs:
        try:
            inputs = processor(return_tensors="pt", padding=True, **kw)
            break
        except Exception as exc:
            last_exc = exc
    else:
        raise RuntimeError(f"{model_label}: failed to build processor inputs: {last_exc}")

    # Move tensor inputs to model device when possible.
    if hasattr(model, "device"):
        dev = model.device
    else:
        dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    for k, v in list(inputs.items()):
        if torch.is_tensor(v):
            inputs[k] = v.to(dev)

    # Greedy decoding keeps the JSON answer deterministic.
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)

    # Decode only newly generated tokens when possible.
    input_len = inputs["input_ids"].shape[1] if "input_ids" in inputs else 0
    gen_ids = output_ids[:, input_len:] if input_len > 0 else output_ids
    text = processor.batch_decode(gen_ids, skip_special_tokens=True)[0].strip()
    return text
| |
| |
def qwen2_audio_grounding(audio_path: Path, model: Any, processor: Any) -> AudioResult:
    """Run Qwen2-Audio on the clip and parse its JSON answer into an AudioResult."""
    reply = _run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen2-Audio")
    return _coerce_audio_result(_extract_json_object(reply))
| |
| |
def qwen3_omni_grounding(audio_path: Path, model: Any, processor: Any) -> AudioResult:
    """Run Qwen3-Omni on the clip and parse its JSON answer into an AudioResult."""
    reply = _run_qwen_audio_chat(model, processor, audio_path, UNIVERSAL_PROMPT, "Qwen3-Omni")
    return _coerce_audio_result(_extract_json_object(reply))
| |
| |
def safe_call(fn, *args, **kwargs) -> Optional[AudioResult]:
    """Run *fn*; return its result, or None if any exception is raised."""
    try:
        result = fn(*args, **kwargs)
    except Exception:
        return None
    return result
| |
| |
def select_videos(input_dir: Path, max_videos: int, seed: int) -> List[Path]:
    """Collect .mp4 files under *input_dir*, optionally subsampling.

    When *max_videos* is positive and smaller than the total, a seeded
    uniform sample without replacement is drawn and re-sorted by filename.
    Raises FileNotFoundError when the folder is missing or holds no .mp4s.
    """
    if not input_dir.exists():
        raise FileNotFoundError(f"Input folder not found: {input_dir}")
    videos = sorted(input_dir.rglob("*.mp4"))
    if not videos:
        raise FileNotFoundError(f"No mp4 files found under: {input_dir}")
    if not (max_videos and len(videos) > max_videos):
        return videos
    chosen = np.random.default_rng(seed).choice(len(videos), size=max_videos, replace=False)
    picked = [videos[int(i)] for i in chosen]
    picked.sort(key=lambda p: p.name)
    return picked
| |
| |
def main() -> None:
    """Run the single-folder audio-grounding benchmark.

    Selects videos, extracts audio per video, queries Gemini (mandatory)
    plus any successfully loaded local Qwen model (best effort), and
    appends one JSON line per video to the output JSONL.
    """
    args = parse_args()
    ensure_ffmpeg()

    # Fail fast: Gemini is mandatory for this benchmark.
    gemini_api_key = os.getenv("GEMINI_API_KEY", "").strip()
    if not gemini_api_key:
        raise RuntimeError("GEMINI_API_KEY is required.")

    # The output is append-only, so a clean rerun must delete it first.
    if args.overwrite_output and args.output_jsonl.exists():
        args.output_jsonl.unlink()

    videos = select_videos(args.input_dir, args.max_videos, args.seed)

    # Local Qwen models are optional: a failed load disables that model
    # for the whole run instead of aborting.
    qwen2_model = qwen2_processor = None
    qwen3_model = qwen3_processor = None
    if not args.disable_local_models:
        try:
            qwen2_model, qwen2_processor = load_qwen_model_and_processor(QWEN2_MODEL_ID)
        except Exception:
            qwen2_model = qwen2_processor = None
        try:
            qwen3_model, qwen3_processor = load_qwen_model_and_processor(QWEN3_MODEL_ID)
        except Exception:
            qwen3_model = qwen3_processor = None

    for video_path in tqdm(videos, desc="Audio LLM Benchmark", unit="video"):
        audio_path = None
        try:
            audio_path = extract_audio_file(
                video_path,
                sr=args.audio_sr,
                fmt=args.audio_format,
                save_dir=args.save_extracted_audio_dir,
            )

            # safe_call maps any model failure to None, so one bad clip or
            # one flaky model never stops the run.
            gemini_res = safe_call(gemini_audio_grounding, audio_path, gemini_api_key)

            qwen2_res = None
            if qwen2_model is not None and qwen2_processor is not None:
                qwen2_res = safe_call(qwen2_audio_grounding, audio_path, qwen2_model, qwen2_processor)

            qwen3_res = None
            if qwen3_model is not None and qwen3_processor is not None:
                qwen3_res = safe_call(qwen3_omni_grounding, audio_path, qwen3_model, qwen3_processor)

            # One row per video; missing predictions are recorded as None.
            row = {
                "video": video_path.name,
                "gemini_t": round(gemini_res.audio_timestamp, 3) if gemini_res else None,
                "qwen2_t": round(qwen2_res.audio_timestamp, 3) if qwen2_res else None,
                "qwen3_t": round(qwen3_res.audio_timestamp, 3) if qwen3_res else None,
                "gemini_desc": gemini_res.sound_description if gemini_res else None,
                "qwen2_desc": qwen2_res.sound_description if qwen2_res else None,
                "qwen3_desc": qwen3_res.sound_description if qwen3_res else None,
            }
            append_jsonl(args.output_jsonl, row)
        finally:
            # Temporary audio is removed even on failure; files are kept
            # only when --save-extracted-audio-dir was requested.
            if (
                audio_path is not None
                and audio_path.exists()
                and args.save_extracted_audio_dir is None
            ):
                audio_path.unlink(missing_ok=True)

    print(f"Done. Results written to: {args.output_jsonl}")
| |
| |
# Script entry point.
if __name__ == "__main__":
    main()
| ''' |
|
|