import argparse
import json
import math
import os
import time
from pathlib import Path

import torch
from PIL import Image
from transformers import AutoTokenizer

from eval.vqa.run_shared_vision_guided_textvqa import (
    build_decode_model,
    build_guide_attention_question,
    configure_model,
    extract_shared_raw_visual_tokens,
    load_model,
    make_generation_config,
    make_guide_attention_generation_config,
    project_visual_tokens,
    resolve_hf_snapshot,
    run_decode_answer,
    run_guide_branch,
    run_guide_two_pass_explicit_branch,
)
from internvl.train.dataset import build_transform, dynamic_preprocess


def load_case_records(case_json_path: str, source_jsonl_path: str):
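    """Join curated cases with their full source records, keyed by question_id.

    Raises RuntimeError if any curated case has no matching source record.
    """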
    with open(case_json_path) as f:
        case_items = json.load(f)
    cases_by_qid = {item["question_id"]: item for item in case_items}

    source_by_qid = {}
    with open(source_jsonl_path) as f:
        for line in f:
            item = json.loads(line)
            if item["question_id"] in cases_by_qid:
                source_by_qid[item["question_id"]] = item

    missing = sorted(set(cases_by_qid) - set(source_by_qid))
    if missing:
        raise RuntimeError(f"Missing {len(missing)} source records, e.g. {missing[:5]}")

    # Overlay the curated ground-truth fields on each source record
    # (the dict-union operator requires Python 3.9+).
    return [
        source_by_qid[qid]
        | {
            "gt_answers": cases_by_qid[qid]["gt_answers"],
            "gt_answer_counts": cases_by_qid[qid]["gt_answer_counts"],
        }
        for qid in sorted(cases_by_qid)
    ]


def load_pixels(image_path: str, image_size: int, dynamic: bool, use_thumbnail: bool, max_num: int):
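    """Load an image and return a stacked (num_tiles, C, H, W) tensor of preprocessed tiles."""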
    transform = build_transform(is_train=False, input_size=image_size)
    image = Image.open(image_path).convert("RGB")
    if dynamic:
        # Dynamic tiling splits the image into at most `max_num` tiles at the model input size.
        images = dynamic_preprocess(image, image_size=image_size, use_thumbnail=use_thumbnail, max_num=max_num)
    else:
        images = [image]
    return torch.stack([transform(img) for img in images])


def vqa_score(prediction: str, gt_answers):
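    """Soft VQA accuracy: min(1, matches / 3), i.e. full credit once three annotators agree.

    This is the common simplified form; the official VQA metric additionally averages
    the score over all 10-choose-9 subsets of annotators.
    """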
    normalized_prediction = prediction.strip().lower()
    matches = sum(1 for answer in gt_answers if answer.strip().lower() == normalized_prediction)
    return min(1.0, matches / 3.0)


def main():
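    """Run the guide/decode case study over the selected records and write per-case
    results plus a summary JSON to --out-dir."""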
    parser = argparse.ArgumentParser()
    parser.add_argument("--case-json", required=True)
    parser.add_argument("--source-jsonl", required=True)
    parser.add_argument("--guide-checkpoint", required=True)
    parser.add_argument("--large-checkpoint", required=True)
    parser.add_argument("--out-dir", required=True)
    parser.add_argument("--run-name", required=True)
    parser.add_argument("--limit", type=int, default=None)
    parser.add_argument("--max-new-tokens", type=int, default=10)
    parser.add_argument("--num-beams", type=int, default=1)
    parser.add_argument("--temperature", type=float, default=0.0)
    parser.add_argument("--dynamic", action="store_true")
    parser.add_argument("--max-num", type=int, default=6)
    parser.add_argument("--log-every", type=int, default=10)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--large-model-prune-layer", type=float, default=0.0)
    parser.add_argument("--large-model-prune-ratio", type=float, default=0.4)
    parser.add_argument(
        "--large-model-prune-selection",
        type=str,
        choices=["topk", "random", "similarity_greedy"],
        default="topk",
    )
    parser.add_argument("--consistency-token-ratio", type=float, default=0.05)
    parser.add_argument(
        "--guide-reasoning-mode",
        type=str,
        choices=["none", "short_cot", "explicit_cot", "two_pass_explicit"],
        default="none",
    )
    parser.add_argument("--guide-reasoning-max-new-tokens", type=int, default=1024)
    parser.add_argument("--guide-reasoning-temperature", type=float, default=0.0)
    parser.add_argument(
        "--guide-attention-source",
        type=str,
        choices=["default", "reasoning", "answer", "combined"],
        default="default",
    )
    parser.add_argument("--guide-question-attention-weight", type=float, default=1.0)
    parser.add_argument("--guide-reasoning-attention-weight", type=float, default=1.0)
    parser.add_argument("--guide-answer-attention-weight", type=float, default=1.0)
    parser.add_argument("--auto", action="store_true")
    parser.add_argument("--load-in-8bit", action="store_true")
    parser.add_argument("--load-in-4bit", action="store_true")
    parser.add_argument("--use-flash-attn", action="store_true")
    args = parser.parse_args()

    if not torch.cuda.is_available():
        raise RuntimeError("CUDA is required.")

    torch.manual_seed(args.seed)
    records = load_case_records(args.case_json, args.source_jsonl)
    if args.limit is not None:
        records = records[: args.limit]

    guide_checkpoint = resolve_hf_snapshot(args.guide_checkpoint)
    large_checkpoint = resolve_hf_snapshot(args.large_checkpoint)
    guide_tokenizer = AutoTokenizer.from_pretrained(guide_checkpoint, trust_remote_code=True, use_fast=False)
    guide_config = configure_model(guide_checkpoint, use_flash_attn=args.use_flash_attn)
    guide_model = load_model(
        guide_checkpoint,
        guide_config,
        auto=args.auto,
        load_in_8bit=args.load_in_8bit,
        load_in_4bit=args.load_in_4bit,
    )
    decode_model, large_tokenizer = build_decode_model(
        guide_model,
        large_checkpoint,
        use_flash_attn=args.use_flash_attn,
        auto=args.auto,
        load_in_8bit=args.load_in_8bit,
        load_in_4bit=args.load_in_4bit,
    )

    image_size = guide_model.config.force_image_size or guide_model.config.vision_config.image_size
    generation_config = make_generation_config(args)
    guide_attention_generation_config = make_guide_attention_generation_config(generation_config, args)

    os.makedirs(args.out_dir, exist_ok=True)
    results = []

    for idx, record in enumerate(records):
        question = record["question"] + " Answer the question using a single word or phrase."
        pixel_values = load_pixels(
            image_path=record["image"],
            image_size=image_size,
            dynamic=args.dynamic,
            use_thumbnail=guide_model.config.use_thumbnail,
            max_num=args.max_num,
        )
        guide_attention_question = build_guide_attention_question(question, args)

        torch.cuda.synchronize()
        start = time.time()
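        # The vision encoder is shared: raw visual tokens are extracted once and then
        # projected separately into each language model's embedding space.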
        raw_visual_tokens = extract_shared_raw_visual_tokens(guide_model, pixel_values)
        guide_visual_tokens = project_visual_tokens(guide_model, raw_visual_tokens)
        guide_attention_token_counts = {}
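        # Dispatch on reasoning mode: "two_pass_explicit" runs its own two-pass branch;
        # all other modes go through the single-pass guide branch.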
        if args.guide_reasoning_mode == "two_pass_explicit":
            (
                guide_answer,
                guide_scores,
                consistency_score,
                visual_token_importance,
                _guide_reasoning,
                _guide_reasoning_filter_debug,
                guide_attention_token_counts,
            ) = run_guide_two_pass_explicit_branch(
                guide_model,
                guide_tokenizer,
                guide_visual_tokens,
                question,
                guide_attention_generation_config,
                generation_config,
                args.consistency_token_ratio,
                args,
            )
        else:
            (
                guide_answer,
                guide_scores,
                consistency_score,
                visual_token_importance,
                _question_visual_token_importance,
                _answer_visual_token_importance,
                guide_attention_token_counts,
            ) = run_guide_branch(
                guide_model,
                guide_tokenizer,
                guide_visual_tokens,
                guide_attention_question,
                guide_attention_generation_config,
                args.consistency_token_ratio,
                args,
            )
        torch.cuda.synchronize()
        small_model_time = time.time() - start

        # Guide confidence: geometric mean of the per-step max token probabilities.
        scores = torch.cat(guide_scores, dim=0)
        scores, _ = scores.softmax(dim=-1).max(dim=-1)
        original_confidence = math.pow(torch.prod(scores).item(), 1 / len(scores))

        torch.cuda.synchronize()
        start = time.time()
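        # Reuse the shared raw visual tokens for the large model; run_decode_answer
        # receives the guide's visual_token_importance together with the pruning args,
        # so the decode pass can prune tokens according to --large-model-prune-*.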
        large_visual_tokens = project_visual_tokens(decode_model, raw_visual_tokens)
        large_answer = run_decode_answer(
            decode_model,
            large_tokenizer,
            large_visual_tokens,
            question,
            generation_config,
            visual_token_importance,
            args,
        )
        torch.cuda.synchronize()
        large_model_time = time.time() - start

        score = vqa_score(large_answer, record["gt_answers"])
        results.append(
            {
                "question_id": record["question_id"],
                "question": record["question"],
                "image": record["image"],
                "gt_answers": record["gt_answers"],
                "gt_answer_counts": record["gt_answer_counts"],
                "small_answer": guide_answer,
                "large_answer": large_answer,
                "vqa_score": score,
                "small_model_time": small_model_time,
                "large_model_time": large_model_time,
                "original_confidence": original_confidence,
                "consistency_score": consistency_score.item(),
                "visual_token_count": int(visual_token_importance.shape[0]),
                # large_model_prune_ratio is the fraction of visual tokens kept (always at least one).
                "kept_visual_token_count": max(1, int(visual_token_importance.shape[0] * args.large_model_prune_ratio)),
                "guide_attention_token_counts": guide_attention_token_counts,
            }
        )
        if (idx + 1) % args.log_every == 0 or idx + 1 == len(records):
            print(
                f"[{idx + 1}/{len(records)}] qid={record['question_id']} "
                f"score={score:.3f} small={guide_answer[:80]!r} large={large_answer!r}"
            )

    accuracy = sum(item["vqa_score"] for item in results) / max(len(results), 1)
    result_path = Path(args.out_dir) / f"{args.run_name}.json"
    summary_path = Path(args.out_dir) / f"{args.run_name}.summary.json"
    with open(result_path, "w") as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    summary = {
        "count": len(results),
        "accuracy": accuracy,
        "guide_checkpoint": guide_checkpoint,
        "large_checkpoint": large_checkpoint,
        "case_json": args.case_json,
        "source_jsonl": args.source_jsonl,
        "guide_reasoning_mode": args.guide_reasoning_mode,
        "guide_attention_source": args.guide_attention_source,
        "guide_question_attention_weight": args.guide_question_attention_weight,
        "guide_reasoning_attention_weight": args.guide_reasoning_attention_weight,
        "guide_answer_attention_weight": args.guide_answer_attention_weight,
        "large_model_prune_ratio": args.large_model_prune_ratio,
        "results_file": str(result_path),
    }
    with open(summary_path, "w") as f:
        json.dump(summary, f, ensure_ascii=False, indent=2)

    print(f"accuracy: {accuracy:.6f}")
    print(f"results_file: {result_path}")
    print(f"summary_file: {summary_path}")


if __name__ == "__main__":
    main()
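

# Example invocation (a sketch: the file name and data paths are illustrative,
# checkpoint arguments are placeholders; the flags match the parser above):
#
#   python run_vision_guided_case_study.py \
#       --case-json cases/textvqa_cases.json \
#       --source-jsonl data/textvqa_val.jsonl \
#       --guide-checkpoint <small-model-path-or-hf-id> \
#       --large-checkpoint <large-model-path-or-hf-id> \
#       --out-dir outputs/case_study \
#       --run-name baseline \
#       --dynamic --use-flash-attn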