"""Evaluate MiniCPM-o 4.5 on VGG-Sound Sync (out-of-domain sync).

Reuses the data loader, MCQ / freetext prompts, answer parsers, GPT judge,
and metrics from CleverHans-Evaluation's eval_vggsoundsync.py. Only the
inference path is replaced with MiniCPM-o.
"""
from __future__ import annotations

import _common  # exposes the shared CleverHans-Evaluation benchmark helpers (see docstring)

import argparse
import contextlib
import gc
import io
import json
import traceback
from pathlib import Path

import torch
from tqdm import tqdm
|
|
# Bind the shared VGG-Sound Sync helpers (prompts, parsers, GPT judge, metrics)
# from CleverHans-Evaluation under local names.
ch = _common.ch("vggsoundsync")
MCQ_PROMPT = ch.MCQ_PROMPT
FREETEXT_PROMPT = ch.FREETEXT_PROMPT
load_test_data = ch.load_test_data
extract_mcq_answer = ch.extract_mcq_answer
extract_freetext_prediction = ch.extract_freetext_prediction
gpt_extract_prediction = ch.gpt_extract_prediction
_get_openai_client = ch._get_openai_client
compute_metrics = ch.compute_metrics
print_summary = ch.print_summary
_build_result = ch._build_result
DEFAULT_OUTPUT_DIR = ch.DEFAULT_OUTPUT_DIR
|
|
from minicpmo_inference import load_model, run_inference
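# Interface assumed from the call sites below (a sketch, not a definitive spec):
#   load_model(model_id, attn_implementation=..., init_audio=True) -> (model, tokenizer)
#   run_inference(model, tokenizer, video_path=..., audio_path=..., prompt=...,
#                 max_new_tokens=..., temperature=..., max_frames=..., fps=...) -> str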
|
|
|
|
def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Evaluate MiniCPM-o on VGG-Sound Sync.")
    p.add_argument("--model-id", type=str, default="openbmb/MiniCPM-o-4_5")
    p.add_argument("--test-jsonl", type=Path, required=True,
                   help="test.jsonl from prepare_vggsoundsync.py")
    p.add_argument("--output-dir", type=Path,
                   default=Path("/home/ubuntu/eval_results/vggsoundsync_minicpmo"))
    p.add_argument("--mode", choices=["mcq", "freetext"], default="mcq")
    p.add_argument("--max-samples", type=int, default=-1)
    p.add_argument("--max-new-tokens", type=int, default=64)
    p.add_argument("--temperature", type=float, default=0.0)
    p.add_argument("--label", type=str, default="minicpmo_vggsync")
    p.add_argument("--max-frames", type=int, default=32)
    p.add_argument("--fps", type=float, default=2.0)
    p.add_argument("--attn", type=str, default="flash_attention_2",
                   choices=["sdpa", "flash_attention_2", "eager"])

    p.add_argument("--vllm", action="store_true", default=False,
                   help="No-op for MiniCPM-o 4.5; the script falls back to transformers.")
    p.add_argument("--tp", type=int, default=None)
    p.add_argument("--gpu-memory-utilization", type=float, default=0.90)
    p.add_argument("--max-model-len", type=int, default=65536)
    p.add_argument("--batch-size", type=int, default=16)
    p.add_argument("--gpt-judge", action="store_true", default=False)
    p.add_argument("--openai-api-key", type=str, default=None)
    p.add_argument("--gpt-model", type=str, default="gpt-5.4")

    p.add_argument("--shard", type=int, default=0)
    p.add_argument("--num-shards", type=int, default=1)
    return p.parse_args()
|
|
|
|
def _extract_pred(raw_output, mode, gpt_judge, api_key, gpt_model, answer_map=None):
    """Parse the raw model output into a prediction for the given eval mode."""
    if mode == "mcq":
        return extract_mcq_answer(raw_output, answer_map=answer_map)
    if gpt_judge and raw_output:
        gpt_pred = gpt_extract_prediction(raw_output, api_key=api_key, model=gpt_model)
        if gpt_pred is not None:
            return gpt_pred
    return extract_freetext_prediction(raw_output)
|
|
|
|
def main() -> None:
    args = parse_args()
    default_prompt = MCQ_PROMPT if args.mode == "mcq" else FREETEXT_PROMPT

    if args.vllm:
        print("[warn] --vllm requested but MiniCPM-o 4.5 multimodal vLLM is not "
              "supported upstream yet; falling back to transformers.")

    if args.gpt_judge and args.mode == "freetext":
        if _get_openai_client(args.openai_api_key) is None:
            print("[ERROR] --gpt-judge requires OPENAI_API_KEY or --openai-api-key.")
            raise SystemExit(1)
|
|
    out_dir = args.output_dir / args.label
    out_dir.mkdir(parents=True, exist_ok=True)
    shard_suffix = (f".shard{args.shard}of{args.num_shards}"
                    if args.num_shards > 1 else "")
    results_jsonl = out_dir / f"eval_results{shard_suffix}.jsonl"
    metrics_json = out_dir / "metrics.json"
    summary_txt = out_dir / "summary.txt"
|
|
    test_data = load_test_data(args.test_jsonl, args.max_samples)
    if args.num_shards > 1:
        test_data = [x for i, x in enumerate(test_data) if i % args.num_shards == args.shard]
        print(f"[shard] shard {args.shard}/{args.num_shards}: {len(test_data)} samples (mode={args.mode})")
    else:
        print(f"[data] {len(test_data)} samples loaded (mode={args.mode})")
|
|
    # Resume support: skip uids already present in the (possibly shard-specific) results file.
    processed: set = set()
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                obj = json.loads(line)
                processed.add(obj["uid"])
        print(f"[resume] {len(processed)} already processed")
|
|
    model, tokenizer = load_model(args.model_id, attn_implementation=args.attn,
                                  init_audio=True)
|
|
    for item in tqdm(test_data, desc="VGGSync", unit="sample"):
        if item["uid"] in processed:
            continue

        # Per-item MCQ prompt / answer-map override, falling back to the shared default.
        item_prompt = item.get("mcq_prompt", default_prompt) if args.mode == "mcq" else default_prompt
        item_answer_map = item.get("mcq_answer_map") if args.mode == "mcq" else None

        try:
            raw_output = run_inference(
                model, tokenizer,
                video_path=item["video_path"],
                audio_path=item["audio_path"],
                prompt=item_prompt,
                max_new_tokens=args.max_new_tokens,
                temperature=args.temperature,
                max_frames=args.max_frames,
                fps=args.fps,
            )
        except Exception as exc:
            print(f" [error] {item['uid']}: {exc}")
            traceback.print_exc()
            raw_output = ""

        pred = _extract_pred(raw_output, args.mode, args.gpt_judge,
                             args.openai_api_key, args.gpt_model,
                             answer_map=item_answer_map)
        result = _build_result(item, pred, raw_output, args.mode)

        # Append incrementally so interrupted runs can resume.
        with open(results_jsonl, "a", encoding="utf-8") as f:
            f.write(json.dumps(result, ensure_ascii=False) + "\n")

        processed.add(item["uid"])
        gc.collect()
        torch.cuda.empty_cache()
|
|
    if args.num_shards > 1:
        print(f"\n[shard {args.shard}/{args.num_shards}] Done. Results: {results_jsonl}")
        print(f"[shard] Run merge_shards.py --bench vggsoundsync --label-dir {out_dir}")
        return
|
|
    all_results = []
    if results_jsonl.exists():
        with open(results_jsonl) as f:
            for line in f:
                all_results.append(json.loads(line))

    metrics = compute_metrics(all_results)
    metrics["eval_config"] = {
        "model_id": args.model_id,
        "mode": args.mode,
        "test_jsonl": str(args.test_jsonl),
        "max_new_tokens": args.max_new_tokens,
        "temperature": args.temperature,
        "max_frames": args.max_frames,
        "fps": args.fps,
        "attn": args.attn,
        "gpt_judge": args.gpt_judge,
    }
|
|
    with open(metrics_json, "w", encoding="utf-8") as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)

    # Render the summary once, echo it to stdout, and persist a copy to summary.txt.
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        print_summary(metrics, args.label)
    summary_text = buf.getvalue()
    print(summary_text, end="")
    summary_txt.write_text(summary_text, encoding="utf-8")

    print(f"\n[output] Results: {results_jsonl}")
    print(f"[output] Metrics: {metrics_json}")
    print(f"[output] Summary: {summary_txt}")
|
|
|
|
if __name__ == "__main__":
    main()
|
|