"""
Evaluate Qwen3-Omni models on the LVBench benchmark.

LVBench: 1549 multiple-choice questions over 103 long videos.
Metrics: accuracy (overall, per video type, per question type).

Usage:
    python eval_lvbench.py \
        --base-model Qwen/Qwen3-Omni-30B-A3B-Instruct \
        --video-dir /opt/dlami/nvme/lvbench \
        --max-samples 30 --label vanilla_qwen3omni

    python eval_lvbench.py \
        --base-model Rakancorle11/qwen3omni_full_sft_revised_thinker_key \
        --adapter /opt/dlami/nvme/LlamaFactory/saves/.../adapter \
        --label dpo_v4_8632
"""
|
|
from __future__ import annotations

import argparse
import gc
import json
import os
import re
import shutil
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional

import torch
from tqdm import tqdm
|
|
DEFAULT_VIDEO_DIR = Path("/opt/dlami/nvme/lvbench")
DEFAULT_OUTPUT_DIR = Path("/home/ubuntu/eval_results/lvbench")

VIDEO_TYPES = ["cartoon", "documentary", "live", "selfmedia", "sport", "tv"]

MCQ_PROMPT = (
    "Select the best answer to the following multiple-choice question "
    "based on the video. Respond with only the letter (A, B, C, or D) "
    "of the correct option.\n"
)


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Evaluate on the LVBench benchmark.")
    p.add_argument("--base-model", type=str,
                   default="Qwen/Qwen3-Omni-30B-A3B-Instruct")
    p.add_argument("--adapter", type=str, default=None)
    p.add_argument("--video-dir", type=Path, default=DEFAULT_VIDEO_DIR)
    p.add_argument("--output-dir", type=Path, default=DEFAULT_OUTPUT_DIR)
    p.add_argument("--max-samples", type=int, default=-1)
    p.add_argument("--max-new-tokens", type=int, default=32)
    p.add_argument("--temperature", type=float, default=0.0)
    p.add_argument("--label", type=str, default=None)
    p.add_argument("--vllm", action="store_true", default=False,
                   help="Use vLLM offline inference instead of transformers (much faster).")
    p.add_argument("--tp", type=int, default=None,
                   help="Tensor parallel size for vLLM (default: all visible GPUs).")
    p.add_argument("--batch-size", type=int, default=16,
                   help="Batch size for vLLM generate() (currently unused; "
                        "requests are issued one at a time).")
    p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
    p.add_argument("--max-model-len", type=int, default=65536)
    return p.parse_args()


def load_model(base_model: str, adapter: Optional[str]):
    from transformers import (
        AutoConfig,
        AutoProcessor,
        Qwen3OmniMoeForConditionalGeneration,
        Qwen3OmniMoeThinkerConfig,
        Qwen3OmniMoeThinkerForConditionalGeneration,
    )

    print(f"[load] Base model: {base_model}")
    processor = AutoProcessor.from_pretrained(base_model, trust_remote_code=True)

    # Detect whether the checkpoint is the full Omni model or the thinker-only
    # variant by reading model_type from config.json (local path first, then hub).
    model_path = Path(base_model)
    cfg_path = model_path / "config.json" if model_path.exists() else None

    model_type = None
    if cfg_path and cfg_path.exists():
        with open(cfg_path, "r") as f:
            model_type = json.load(f).get("model_type")
    if not model_type:
        try:
            from huggingface_hub import hf_hub_download
            cached = hf_hub_download(base_model, "config.json")
            with open(cached, "r") as f:
                model_type = json.load(f).get("model_type")
        except Exception:
            pass

    print(f"[load] Detected model_type: {model_type}")

    if model_type == "qwen3_omni_moe_thinker":
        config = Qwen3OmniMoeThinkerConfig.from_pretrained(base_model)
        model = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )
    else:
        config = AutoConfig.from_pretrained(base_model, trust_remote_code=True)
        model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(
            base_model, config=config, torch_dtype=torch.bfloat16, device_map="auto",
        )

    if adapter:
        from peft import PeftModel
        from safetensors.torch import load_file, save_file

        print(f"[load] Loading LoRA adapter: {adapter}")
        adapter_cfg_path = Path(adapter) / "adapter_config.json"
        with open(adapter_cfg_path) as f:
            adapter_cfg = json.load(f)
        target_modules = adapter_cfg.get("target_modules", [])
        needs_remap = (
            any(t.startswith("model.layers.") for t in target_modules)
            and model_type != "qwen3_omni_moe_thinker"
        )

        if needs_remap:
            print("[load] Adapter trained on thinker-only; remapping keys...")
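            # Illustrative example of the rewrite performed below (layer index
            # hypothetical): a thinker-only adapter addresses
            #   base_model.model.model.layers.0.self_attn.q_proj
            # while the full Omni model nests the same weights under
            #   base_model.model.thinker.model.layers.0.self_attn.q_proj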
            tmp_dir = Path(tempfile.mkdtemp(prefix="adapter_remap_"))
            for fn in Path(adapter).iterdir():
                if fn.is_dir():
                    continue
                if fn.name == "adapter_config.json":
                    new_targets = []
                    for t in target_modules:
                        if t.startswith("model.layers."):
                            new_targets.append("thinker." + t)
                        elif t[0].isdigit():
                            new_targets.append("thinker.model.layers." + t)
                        else:
                            new_targets.append(t)
                    adapter_cfg["target_modules"] = new_targets
                    with open(tmp_dir / "adapter_config.json", "w") as f:
                        json.dump(adapter_cfg, f, indent=2)
                elif fn.suffix == ".safetensors" and "adapter" in fn.name:
                    tensors = load_file(str(fn))
                    remapped = {}
                    for k, v in tensors.items():
                        if ".model.layers." in k and ".thinker." not in k:
                            new_k = k.replace(
                                "base_model.model.model.layers.",
                                "base_model.model.thinker.model.layers.",
                            )
                            remapped[new_k] = v
                        else:
                            remapped[k] = v
                    save_file(remapped, str(tmp_dir / fn.name))
                else:
                    shutil.copy2(str(fn), str(tmp_dir / fn.name))
            adapter = str(tmp_dir)

        model = PeftModel.from_pretrained(model, adapter)
        model = model.merge_and_unload()

    if hasattr(model, "disable_talker"):
        model.disable_talker()
    model.eval()
    print(f"[load] Model ready ({type(model).__name__}). Device: {model.device}")
    return model, processor


def run_inference(model, processor, video_path: str, prompt: str,
                  max_new_tokens: int, temperature: float) -> str:
    from qwen_omni_utils import process_mm_info

    # Symlink the video under a generic name ("clip.mp4") in a temp dir,
    # presumably so the original filename cannot leak hints about the answer.
    tmp_dir = tempfile.mkdtemp(prefix="eval_lvb_")
    masked_video = os.path.join(tmp_dir, "clip.mp4")
    os.symlink(os.path.abspath(video_path), masked_video)

    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "video", "video": masked_video},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    text = processor.apply_chat_template(
        conversation, add_generation_prompt=True, tokenize=False,
    )
    audios, images, videos = process_mm_info(conversation, use_audio_in_video=False)
    inputs = processor(
        text=text, audio=audios, images=images, videos=videos,
        return_tensors="pt", padding=True, use_audio_in_video=False,
    )

    # Move tensors to the model's device; cast floating-point inputs
    # (e.g. pixel values) to the model dtype.
    model_dtype = next(model.parameters()).dtype
    converted = {}
    for k, v in inputs.items():
        if hasattr(v, "to"):
            v = v.to(model.device)
            if torch.is_floating_point(v):
                v = v.to(model_dtype)
        converted[k] = v
    inputs = converted

    from transformers import Qwen3OmniMoeThinkerForConditionalGeneration
    is_thinker = isinstance(model, Qwen3OmniMoeThinkerForConditionalGeneration)
    if is_thinker:
        gen_kwargs = {"max_new_tokens": max_new_tokens, "do_sample": temperature > 0}
    else:
        # The full Omni model routes generation through its thinker and can
        # also synthesize speech; keep it text-only here.
        gen_kwargs = {
            "thinker_max_new_tokens": max_new_tokens,
            "use_audio_in_video": False,
            "return_audio": False,
            "do_sample": temperature > 0,
        }
    if temperature > 0:
        gen_kwargs["temperature"] = temperature
        gen_kwargs["top_p"] = 0.9

    with torch.inference_mode():
        output_ids = model.generate(**inputs, **gen_kwargs)

    if isinstance(output_ids, tuple):
        output_ids = output_ids[0]

    # Decode only the newly generated tokens, not the echoed prompt.
    prompt_len = inputs["input_ids"].shape[1]
    response = processor.batch_decode(
        output_ids[:, prompt_len:], skip_special_tokens=True,
    )[0].strip()

    shutil.rmtree(tmp_dir, ignore_errors=True)
    return response


def preprocess_video_for_vllm(video_path: str):
    """Extract video frames using qwen_omni_utils; return a numpy array for vLLM.

    Caps at 128 frames to stay within the vLLM encoder cache (62720 tokens).
    """
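    # Rough budget implied by the cap above: 62720 / 128 ≈ 490 vision tokens
    # per frame. The per-frame token count is an assumption here; it depends
    # on the processor's spatial patching of each frame.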
    from qwen_omni_utils import process_mm_info

    messages = [{
        "role": "user",
        "content": [
            {"type": "video", "video": video_path, "nframes": 128},
            {"type": "text", "text": "placeholder"},
        ],
    }]
    audios, images, videos = process_mm_info(messages, use_audio_in_video=False)
    video_tensor = videos[0]
    # Frames arrive as floats in [0, 1] (hence the * 255); vLLM takes uint8.
    video_np = (video_tensor * 255).byte().numpy()
    return video_np


SYSTEM_PROMPT = (
    "You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
    "Group, capable of perceiving auditory and visual inputs, as well as "
    "generating text and speech."
)


def build_vllm_prompt(question: str) -> str:
    return (
        f"<|im_start|>system\n{SYSTEM_PROMPT}<|im_end|>\n"
        f"<|im_start|>user\n"
        f"<|vision_start|><|video_pad|><|vision_end|>"
        f"{question}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )
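

# The hand-rolled template above is assumed to mirror the processor's chat
# template for Qwen3-Omni (the transformers path in run_inference() uses
# processor.apply_chat_template instead). Rendered shape, illustratively:
#   <|im_start|>system
#   You are Qwen, a virtual human developed by the Qwen Team, ...<|im_end|>
#   <|im_start|>user
#   <|vision_start|><|video_pad|><|vision_end|><question + options><|im_end|>
#   <|im_start|>assistant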


def extract_answer(text: str) -> str:
    """Extract a single letter A/B/C/D from model output."""
    text = text.strip()
    # Strip common lead-ins so the option letter is the first A-D char left.
    prefixes = [
        "The best answer is", "The correct answer is",
        "The answer is", "The answer", "Best answer:", "Best option:",
    ]
    for prefix in prefixes:
        text = text.replace(prefix, "")

    # Long free-form replies with no option letter count as no prediction.
    if len(text.split()) > 10 and not re.search(r"[ABCD]", text):
        return ""
    m = re.search(r"[ABCD]", text)
    return m[0] if m else ""
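

# Hand-traced sanity checks for extract_answer (illustrative additions, not
# part of the original benchmark logic; they run once at import time):
assert extract_answer("The best answer is C") == "C"
assert extract_answer("(B) He unlocks the door.") == "B"
assert extract_answer("no option letter here") == ""
# Caveat of the first-match heuristic: the capital "A" in "Answer" wins.
assert extract_answer("Answer: B") == "A"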


def load_lvbench(video_dir: Path, max_samples: int) -> List[Dict[str, Any]]:
    from datasets import load_dataset

    ds = load_dataset("lmms-lab/LVBench", split="train")
    data = []
    skipped = 0
    for row in ds:
        vid = row["key"]
        video_path = video_dir / f"{vid}.mp4"
        if not video_path.exists():
            skipped += 1
            continue

        prompt = MCQ_PROMPT + row["question"] + "\nThe best answer is:"

        data.append({
            "uid": row["uid"],
            "video_id": vid,
            "video_path": str(video_path),
            "video_type": row["type"],
            "question_type": row["question_type"],
            "question": row["question"],
            "gt_answer": row["answer"],
            "time_reference": row.get("time_reference", ""),
            "prompt": prompt,
        })
    if skipped:
        print(f"[data] Skipped {skipped} questions (video not found)")
    if max_samples > 0:
        data = data[:max_samples]
    return data
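

# Shape of the assembled prompt for one LVBench item (illustrative; the
# question string comes straight from the dataset and is assumed to already
# carry its four A-D options):
#
#   Select the best answer to the following multiple-choice question based
#   on the video. Respond with only the letter (A, B, C, or D) of the
#   correct option.
#   <question text with options>
#   The best answer is: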


def compute_metrics(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    total = len(results)
    if total == 0:
        return {}

    correct = sum(1 for r in results if r["pred_answer"].upper() == r["gt_answer"].upper())
    overall_acc = correct / total

    def acc_for(items):
        if not items:
            return None
        c = sum(1 for r in items if r["pred_answer"].upper() == r["gt_answer"].upper())
        return round(c / len(items), 4)

    per_type = {}
    for vt in VIDEO_TYPES:
        subset = [r for r in results if r["video_type"] == vt]
        if subset:
            per_type[vt] = {"accuracy": acc_for(subset), "count": len(subset)}

    # question_type may be a single label or a list; a multi-label question
    # counts toward each of its labels.
    q_types = set()
    for r in results:
        if isinstance(r.get("question_type"), list):
            q_types.update(r["question_type"])
        elif r.get("question_type"):
            q_types.add(r["question_type"])

    per_qtype = {}
    for qt in sorted(q_types):
        subset = [r for r in results if qt in (r.get("question_type", [])
                  if isinstance(r.get("question_type"), list) else [r.get("question_type")])]
        if subset:
            per_qtype[qt] = {"accuracy": acc_for(subset), "count": len(subset)}

    return {
        "total_samples": total,
        "overall_accuracy": round(overall_acc, 4),
        "per_video_type": per_type,
        "per_question_type": per_qtype,
    }
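

# Example of the returned payload (hypothetical numbers):
#   {
#     "total_samples": 30,
#     "overall_accuracy": 0.4333,
#     "per_video_type": {"tv": {"accuracy": 0.5, "count": 10}, ...},
#     "per_question_type": {"<label>": {"accuracy": 0.4, "count": 5}, ...},
#   }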


def print_summary(metrics: Dict[str, Any], label: str) -> None:
    print()
    print("=" * 65)
    print(f"  LVBench Summary: {label}")
    print("=" * 65)
    print(f"  Total samples:    {metrics['total_samples']}")
    print(f"  Overall Accuracy: {metrics['overall_accuracy']:.1%}")

    print("  ─── Per Video Type ───")
    for vt in VIDEO_TYPES:
        if vt in metrics.get("per_video_type", {}):
            d = metrics["per_video_type"][vt]
            print(f"    {vt:15s}: {d['accuracy']:.1%} ({d['count']} questions)")

    print("  ─── Per Question Type ───")
    for qt, d in sorted(metrics.get("per_question_type", {}).items()):
        print(f"    {qt:30s}: {d['accuracy']:.1%} ({d['count']})")

    print("=" * 65)


def main() -> None:
    args = parse_args()
    label = args.label or (
        Path(args.adapter).name if args.adapter
        else Path(args.base_model).name
    )

    out_dir = args.output_dir / label
    out_dir.mkdir(parents=True, exist_ok=True)
    results_jsonl = out_dir / "eval_results.jsonl"
    metrics_json = out_dir / "metrics.json"
    summary_txt = out_dir / "summary.txt"

    print("[data] Loading LVBench dataset...")
    test_data = load_lvbench(args.video_dir, args.max_samples)
    print(f"[data] {len(test_data)} questions ready for evaluation")

    # Resume support: results already in the jsonl are keyed by question uid.
    processed = set()
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                obj = json.loads(line)
                processed.add(obj["uid"])
        print(f"[resume] {len(processed)} already processed, skipping")

    use_vllm = args.vllm
    model = processor = llm = None
    vllm_preprocess_stats: Optional[Dict[str, int]] = None

    if use_vllm:
        from vllm import LLM, SamplingParams

        tp = args.tp or torch.cuda.device_count()
        model_path = args.base_model
        print(f"[vllm] Loading {model_path} with tp={tp} ...")
        # Note: --adapter is not applied on the vLLM path (merge it into the
        # checkpoint first); it is only used by the transformers fallback.
        llm = LLM(
            model=model_path,
            tensor_parallel_size=tp,
            max_model_len=args.max_model_len,
            max_num_seqs=4,
            limit_mm_per_prompt={"video": 1},
            gpu_memory_utilization=args.gpu_memory_utilization,
            dtype="bfloat16",
            trust_remote_code=True,
        )
        # temperature == 0 means greedy decoding, so top_p is left neutral.
        sampling_params = SamplingParams(
            temperature=args.temperature if args.temperature > 0 else 0.0,
            top_p=0.9 if args.temperature > 0 else 1.0,
            max_tokens=args.max_new_tokens,
        )

        print("[vllm] Preprocessing videos ...")
        todo = [item for item in test_data if item["uid"] not in processed]
        preprocessed = {}
        preprocess_failed_paths: set[str] = set()

        for i, item in enumerate(todo):
            vp = item["video_path"]
            if vp in preprocessed or vp in preprocess_failed_paths:
                continue
            try:
                preprocessed[vp] = preprocess_video_for_vllm(vp)
            except Exception as e:
                preprocess_failed_paths.add(vp)
                print(f"  [skip] preprocess error: {Path(vp).name}: {e}")
            if (i + 1) % 50 == 0:
                print(f"  Preprocessed {i + 1}/{len(todo)} ...")

        n_pp_skip = sum(1 for item in todo if item["video_path"] in preprocess_failed_paths)
        if preprocess_failed_paths:
            print(
                f"[vllm] Preprocess failed for {len(preprocess_failed_paths)} video(s), "
                f"{n_pp_skip} question(s) will not use vLLM (run continues)."
            )
            vllm_preprocess_stats = {
                "preprocess_failed_videos": len(preprocess_failed_paths),
                "preprocess_skipped_questions": n_pp_skip,
            }

        vllm_todo = [item for item in todo if item["video_path"] in preprocessed]
        fallback_items = []
        print(f"[vllm] {len(vllm_todo)} questions ready, running inference ...")

        for i, item in enumerate(vllm_todo):
            if item["uid"] in processed:
                continue
            inp = {
                "prompt": build_vllm_prompt(item["prompt"]),
                "multi_modal_data": {"video": preprocessed[item["video_path"]]},
            }
            try:
                # Generate one request at a time; an over-length prompt raises
                # and is deferred to the transformers fallback below.
                outputs = llm.generate([inp], sampling_params=sampling_params)
                raw_output = outputs[0].outputs[0].text.strip()
                pred = extract_answer(raw_output)
                result = {
                    "uid": item["uid"],
                    "video_id": item["video_id"],
                    "video_type": item["video_type"],
                    "question_type": item["question_type"],
                    "gt_answer": item["gt_answer"],
                    "pred_answer": pred,
                    "correct": pred.upper() == item["gt_answer"].upper(),
                    "raw_output": raw_output,
                }
                with open(results_jsonl, "a", encoding="utf-8") as f:
                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
                processed.add(item["uid"])
            except (ValueError, RuntimeError) as exc:
                if "longer than the maximum model length" in str(exc):
                    print(f"  [too long] {item['uid']} -> fallback")
                    fallback_items.append(item)
                else:
                    raise

            if (i + 1) % 50 == 0:
                print(f"  [vllm] [{i + 1}/{len(vllm_todo)}] done, {len(fallback_items)} deferred")

        preprocessed.clear()

        vllm_results = []
        if results_jsonl.exists():
            with open(results_jsonl) as f:
                for line in f:
                    vllm_results.append(json.loads(line))
        if vllm_results:
            vllm_metrics = compute_metrics(vllm_results)
            vllm_metrics_path = out_dir / "metrics_vllm.json"
            with open(vllm_metrics_path, "w", encoding="utf-8") as f:
                json.dump(vllm_metrics, f, indent=2, ensure_ascii=False)
            print(f"[vllm] Intermediate metrics saved to {vllm_metrics_path}")
            print_summary(vllm_metrics, label + " (vllm only)")

        if fallback_items:
            print(f"[fallback] Running {len(fallback_items)} long-video questions with transformers ...")
            # Free vLLM's GPU memory before loading the transformers model.
            del llm
            gc.collect()
            torch.cuda.empty_cache()

            model, processor = load_model(args.base_model, args.adapter)
            for item in tqdm(fallback_items, desc="Fallback", unit="q"):
                if item["uid"] in processed:
                    continue
                try:
                    raw_output = run_inference(
                        model, processor, item["video_path"], item["prompt"],
                        args.max_new_tokens, args.temperature,
                    )
                except Exception as exc:
                    import traceback
                    print(f"  [error] {item['uid']}: {exc}")
                    traceback.print_exc()
                    raw_output = ""

                pred = extract_answer(raw_output)
                result = {
                    "uid": item["uid"],
                    "video_id": item["video_id"],
                    "video_type": item["video_type"],
                    "question_type": item["question_type"],
                    "gt_answer": item["gt_answer"],
                    "pred_answer": pred,
                    "correct": pred.upper() == item["gt_answer"].upper(),
                    "raw_output": raw_output,
                }
                with open(results_jsonl, "a", encoding="utf-8") as f:
                    f.write(json.dumps(result, ensure_ascii=False) + "\n")
                processed.add(item["uid"])
                gc.collect()
                torch.cuda.empty_cache()

    else:
        print("[model] Loading model...")
        model, processor = load_model(args.base_model, args.adapter)

        for item in tqdm(test_data, desc="LVBench", unit="q"):
            if item["uid"] in processed:
                continue

            try:
                raw_output = run_inference(
                    model, processor, item["video_path"], item["prompt"],
                    args.max_new_tokens, args.temperature,
                )
            except Exception as exc:
                import traceback
                print(f"  [error] {item['uid']}: {exc}")
                traceback.print_exc()
                raw_output = ""

            pred = extract_answer(raw_output)

            result = {
                "uid": item["uid"],
                "video_id": item["video_id"],
                "video_type": item["video_type"],
                "question_type": item["question_type"],
                "gt_answer": item["gt_answer"],
                "pred_answer": pred,
                "correct": pred.upper() == item["gt_answer"].upper(),
                "raw_output": raw_output,
            }

            with open(results_jsonl, "a", encoding="utf-8") as f:
                f.write(json.dumps(result, ensure_ascii=False) + "\n")

            processed.add(item["uid"])
            gc.collect()
            torch.cuda.empty_cache()

    all_results = []
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                all_results.append(json.loads(line))

    if not all_results:
        print("[warn] No results to compute metrics from.")
        return

    metrics = compute_metrics(all_results)
    metrics["eval_config"] = {
        "base_model": args.base_model,
        "adapter": args.adapter,
        "video_dir": str(args.video_dir),
        "max_new_tokens": args.max_new_tokens,
        "temperature": args.temperature,
    }
    if vllm_preprocess_stats is not None:
        metrics["eval_config"]["vllm_preprocess_skips"] = vllm_preprocess_stats

    with open(metrics_json, "w", encoding="utf-8") as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)

    print_summary(metrics, label)

    # Re-render the summary into a buffer and persist it alongside the metrics.
    import contextlib
    import io
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        print_summary(metrics, label)
    with open(summary_txt, "w", encoding="utf-8") as f:
        f.write(buf.getvalue())

    print(f"\n[output] Results: {results_jsonl}")
    print(f"[output] Metrics: {metrics_json}")
    print(f"[output] Summary: {summary_txt}")


if __name__ == "__main__":
    main()