sgl / eval /vqa /run_shared_vision_guided_textvqa.py
xiaohaoWillX's picture
Add files using upload-large-folder tool
5316f3e verified
import argparse
import inspect
import json
import math
import os
import random
import re
import sys
import time
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import torch
from PIL import Image
from transformers import AutoTokenizer
from transformers.generation.logits_process import LogitsProcessorList
# Locate the repo root and make the upstream SGL checkout importable.
REPO_ROOT = Path(__file__).resolve().parents[2]
DEFAULT_UPSTREAM_SGL_ROOT = Path(os.environ.get("UPSTREAM_SGL_ROOT", "/home/yf/snap/SGL"))
if str(DEFAULT_UPSTREAM_SGL_ROOT) not in sys.path:
    sys.path.insert(0, str(DEFAULT_UPSTREAM_SGL_ROOT))
# eval/vqa must also be on sys.path — presumably where the flat
# `textvqa_eval` module below resolves from (TODO confirm layout).
eval_vqa_path = DEFAULT_UPSTREAM_SGL_ROOT / "eval" / "vqa"
if str(eval_vqa_path) not in sys.path:
    sys.path.insert(0, str(eval_vqa_path))
from internvl.conversation import get_conv_template
from internvl.model.internvl_chat import InternVLChatModel
from internvl.model.internvl_chat.configuration_internvl_chat import InternVLChatConfig
from internvl.train.dataset import build_transform, dynamic_preprocess
from textvqa_eval import TextVQAAccuracyEvaluator
# --- Prompt templates -------------------------------------------------------
# Standard TextVQA short-answer prompt (appended to every question).
BASE_PROMPT = "Answer the question using a single word or phrase."
BASE_PROMPT_SUFFIX = " " + BASE_PROMPT
# Instruction variants controlling whether the guide reasons silently or aloud.
HIDDEN_REASONING_INSTRUCTION = (
    "Think through the relevant visual evidence and any text in the image step by step internally before answering."
)
EXPLICIT_REASONING_INSTRUCTION = (
    "Explain your reasoning step by step using the relevant visual evidence and any text in the image."
)
DEFAULT_FINAL_ANSWER_INSTRUCTION = "Provide the final answer only."
# Chain-of-thought prompt eliciting five numbered steps plus a final answer.
GUIDE_ATTENTION_COT_PROMPT_TEMPLATE = """You are solving a TextVQA task.
Read the image carefully, especially visible text.
Reason through the answer in at least 5 explicit steps.
Do not skip the reasoning.
Question: {question}
1.
2.
3.
4.
5.
Final answer:"""
# Reasoning-only prompt: exactly five numbered steps, explicitly no answer.
GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE = """You are solving a TextVQA task.
Read the image carefully, especially all visible text.
Reason using only evidence from the image and OCR text.
You must output exactly 5 numbered reasoning steps.
Each step must be a short sentence.
Do not provide the final answer.
Do not provide a summary.
Do not output any text other than the 5 numbered steps.
Question: {question}
1. Identify the most relevant visible text or object.
2. Explain how that evidence relates to the question.
3. Check for another supporting clue in the image.
4. Resolve any ambiguity using the strongest evidence.
5. State the final reasoning conclusion without giving the final answer."""
GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION = (
    "First reason step by step using the relevant visual evidence and OCR text. "
    "Then end with a new line in the exact format: Answer: <short answer>."
)
GUIDE_TEXT_HINT_INSTRUCTION = (
    "Give a very short guide hint grounded in the image and OCR text. Use a short phrase, not a full sentence."
)
GUIDED_DECODE_INSTRUCTION = (
    "Use the guide hint only if it matches the image. Answer the question using a single word or phrase."
)
# --- Reasoning-token filter vocabularies ------------------------------------
# Function words dropped by the reasoning-token filter.
REASONING_FILTER_STOPWORDS = {
    "a", "an", "and", "are", "as", "at", "be", "because", "but", "by", "for", "from", "has",
    "have", "if", "in", "into", "is", "it", "its", "of", "on", "or", "that", "the", "their",
    "there", "this", "those", "to", "was", "were", "with",
}
# Words originating from the prompt templates above rather than the image.
REASONING_FILTER_TEMPLATE_WORDS = {
    "answer", "conclusion", "directly", "evidence", "final", "identify", "indicating",
    "question", "reason", "reasoning", "relates", "relevant", "resolve", "shows", "state",
    "strongest", "supporting", "supports", "using", "visible",
}
# Spatial and color words are always kept by the heuristic filter.
REASONING_FILTER_POSITION_WORDS = {
    "left", "right", "top", "bottom", "middle", "center", "centre", "upper", "lower",
}
REASONING_FILTER_COLOR_WORDS = {
    "black", "blue", "brown", "gold", "gray", "green", "grey", "orange", "pink",
    "purple", "red", "silver", "white", "yellow",
}
# Parts of speech retained by the spaCy-based filter.
REASONING_FILTER_KEEP_POS = {"NOUN", "PROPN", "ADJ"}
# Lazy spaCy pipeline cache; see get_reasoning_spacy_nlp().
SPACY_REASONING_NLP = None
SPACY_REASONING_LOAD_ATTEMPTED = False
SPACY_REASONING_FALLBACK_WARNED = False
def resolve_hf_snapshot(path: str) -> str:
    """Resolve *path* to the directory that directly contains ``config.json``.

    Accepts either a plain checkpoint directory or a HuggingFace hub cache
    layout, where ``refs/main`` stores the revision hash of the snapshot
    under ``snapshots/<revision>``.

    Raises:
        FileNotFoundError: if no config.json can be located under *path*.
    """
    path = os.path.abspath(path)
    config_path = os.path.join(path, "config.json")
    if os.path.isfile(config_path):
        return path
    refs_main = os.path.join(path, "refs", "main")
    if os.path.isfile(refs_main):
        # Fix: read the revision file with an explicit encoding instead of the
        # platform-dependent default.
        with open(refs_main, encoding="utf-8") as f:
            revision = f.read().strip()
        snapshot_path = os.path.join(path, "snapshots", revision)
        if os.path.isfile(os.path.join(snapshot_path, "config.json")):
            return snapshot_path
    raise FileNotFoundError(f"Could not resolve checkpoint snapshot from: {path}")
def configure_model(checkpoint_path: str, use_flash_attn: bool) -> InternVLChatConfig:
    """Load the checkpoint config and force eager attention on the LLM side."""
    snapshot = resolve_hf_snapshot(checkpoint_path)
    config = InternVLChatConfig.from_json_file(os.path.join(snapshot, "config.json"))
    # InternLM2 reads the public attribute name; other architectures use the
    # private transformers attribute.
    if config.llm_config.architectures[0] == "InternLM2ForCausalLM":
        attn_attr = "attn_implementation"
    else:
        attn_attr = "_attn_implementation"
    setattr(config.llm_config, attn_attr, "eager")
    config.vision_config.use_flash_attn = use_flash_attn
    return config
def patch_internlm2_sample_signature(model: InternVLChatModel) -> None:
    """Wrap a custom ``_sample`` override so it works on newer transformers.

    Older InternLM2 modeling code defines ``_sample(..., logits_warper, ...)``
    with ``logits_warper`` as a required parameter; transformers>=4.49 no
    longer passes it. The wrapper supplies an empty ``LogitsProcessorList``
    when the argument is missing. Idempotent: a marker attribute on the
    wrapper prevents double-patching.
    """
    language_model_cls = model.language_model.__class__
    sample_fn = getattr(language_model_cls, "_sample", None)
    # Nothing to patch, or already patched.
    if sample_fn is None or getattr(sample_fn, "_sgl_logits_warper_compat", False):
        return
    signature = inspect.signature(sample_fn)
    logits_warper_param = signature.parameters.get("logits_warper")
    # Only patch when logits_warper exists AND is required (has no default).
    if logits_warper_param is None or logits_warper_param.default is not inspect._empty:
        return

    @wraps(sample_fn)
    def compat_sample(
        self,
        input_ids: torch.LongTensor,
        logits_processor,
        stopping_criteria,
        generation_config,
        synced_gpus: bool,
        streamer=None,
        logits_warper=None,
        **model_kwargs,
    ):
        # transformers>=4.49 folds samplers into logits_processor and no longer
        # passes logits_warper to custom _sample overrides.
        if logits_warper is None:
            logits_warper = LogitsProcessorList()
        return sample_fn(
            self,
            input_ids=input_ids,
            logits_processor=logits_processor,
            stopping_criteria=stopping_criteria,
            generation_config=generation_config,
            synced_gpus=synced_gpus,
            streamer=streamer,
            logits_warper=logits_warper,
            **model_kwargs,
        )

    compat_sample._sgl_logits_warper_compat = True
    language_model_cls._sample = compat_sample
def load_model(
    checkpoint_path: str,
    config: InternVLChatConfig,
    auto: bool,
    load_in_8bit: bool,
    load_in_4bit: bool,
) -> InternVLChatModel:
    """Instantiate an InternVL chat model from a resolved checkpoint in eval mode."""
    snapshot = resolve_hf_snapshot(checkpoint_path)
    extra_kwargs = {}
    if auto:
        extra_kwargs["device_map"] = "auto"
    model = InternVLChatModel.from_pretrained(
        snapshot,
        config=config,
        low_cpu_mem_usage=True,
        torch_dtype=torch.bfloat16,
        load_in_8bit=load_in_8bit,
        load_in_4bit=load_in_4bit,
        **extra_kwargs,
    ).eval()
    # Move to GPU only when placement was not delegated to accelerate and the
    # weights are not quantized in place.
    if not (auto or load_in_8bit or load_in_4bit):
        model = model.cuda()
    patch_internlm2_sample_signature(model)
    return model
def build_decode_model(
    guide_model: InternVLChatModel,
    large_checkpoint: str,
    use_flash_attn: bool,
    auto: bool,
    load_in_8bit: bool,
    load_in_4bit: bool,
) -> Tuple[InternVLChatModel, AutoTokenizer]:
    """Assemble the decode model: the guide's vision tower wired to the large LLM.

    Returns the assembled model (in eval mode, with the _sample compat patch
    applied) together with the large checkpoint's tokenizer.
    """
    large_checkpoint = resolve_hf_snapshot(large_checkpoint)
    large_config = configure_model(large_checkpoint, use_flash_attn=use_flash_attn)
    large_source = load_model(
        large_checkpoint,
        large_config,
        auto=auto,
        load_in_8bit=load_in_8bit,
        load_in_4bit=load_in_4bit,
    )
    decode_model = InternVLChatModel(
        large_config,
        vision_model=guide_model.vision_model,
        language_model=large_source.language_model,
    )
    # Vision-side configuration follows the shared guide vision tower.
    decode_model.config.vision_config = guide_model.config.vision_config
    decode_model.vision_model.config = guide_model.config.vision_config
    for attr in ("ps_version", "select_layer", "downsample_ratio"):
        setattr(decode_model, attr, getattr(guide_model, attr))
    # Projector and chat-template settings follow the large model.
    for attr in ("mlp1", "template", "system_message", "num_image_token", "img_context_token_id"):
        setattr(decode_model, attr, getattr(large_source, attr))
    decode_model.eval()
    patch_internlm2_sample_signature(decode_model)
    large_tokenizer = AutoTokenizer.from_pretrained(
        large_checkpoint,
        trust_remote_code=True,
        use_fast=False,
    )
    return decode_model, large_tokenizer
def model_text_device(model: InternVLChatModel) -> torch.device:
    """Device hosting the language model's input embedding table."""
    embedding = model.language_model.get_input_embeddings()
    return next(embedding.parameters()).device
def model_vision_device(model: InternVLChatModel) -> torch.device:
    """Device hosting the vision tower's parameters."""
    first_param = next(model.vision_model.parameters())
    return first_param.device
def resolve_image_path(image_path: str, data_root: str, jsonl_dir: str) -> str:
    """Return the first existing location for *image_path* among likely roots.

    Raises:
        FileNotFoundError: if none of the candidate locations exist.
    """
    candidates: List[str] = []
    if os.path.isabs(image_path):
        candidates.append(image_path)
    candidates.append(os.path.join(data_root, image_path))
    if image_path.startswith("data/"):
        # Some jsonl files embed a leading "data/" that data_root already has.
        candidates.append(os.path.join(data_root, image_path[len("data/"):]))
    candidates.extend(
        (
            os.path.join(jsonl_dir, image_path),
            os.path.join(jsonl_dir, os.path.basename(image_path)),
        )
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    raise FileNotFoundError(f"Could not resolve image path: {image_path}")
class TextVQADataset:
    """TextVQA jsonl-backed dataset yielding preprocessed pixel tensors per item."""

    def __init__(self, jsonl_path: str, data_root: str, image_size: int, dynamic: bool, use_thumbnail: bool, max_num: int):
        with open(jsonl_path) as f:
            self.items = [json.loads(line) for line in f if line.strip()]
        self.jsonl_dir = os.path.dirname(jsonl_path)
        self.data_root = data_root
        self.image_size = image_size
        self.dynamic = dynamic
        self.use_thumbnail = use_thumbnail
        self.max_num = max_num
        self.transform = build_transform(is_train=False, input_size=image_size)

    def __len__(self) -> int:
        return len(self.items)

    def __getitem__(self, idx: int) -> Dict[str, object]:
        record = self.items[idx]
        resolved_path = resolve_image_path(record["image"], self.data_root, self.jsonl_dir)
        image = Image.open(resolved_path).convert("RGB")
        # Dynamic tiling splits the image into several crops; otherwise a
        # single full-image crop is used.
        if self.dynamic:
            tiles = dynamic_preprocess(
                image,
                image_size=self.image_size,
                use_thumbnail=self.use_thumbnail,
                max_num=self.max_num,
            )
        else:
            tiles = [image]
        pixel_values = torch.stack([self.transform(tile) for tile in tiles])
        return {
            "question_id": record["question_id"],
            "question": record["question"],
            "pixel_values": pixel_values,
            "annotation": record.get("answer", ""),
        }
def load_annotations(annotation_file: str) -> Dict[int, List[str]]:
    """Map each question_id to its list of reference answer strings."""
    with open(annotation_file) as f:
        payload = json.load(f)
    mapping: Dict[int, List[str]] = {}
    for entry in payload["annotations"]:
        mapping[entry["question_id"]] = [answer["answer"] for answer in entry["answers"]]
    return mapping
def build_query(model: InternVLChatModel, tokenizer, question: str, num_patches: int):
    """Render the chat prompt with the image placeholder expanded to context tokens.

    Returns the fully-expanded prompt string and the conversation template.
    Side effect: sets ``model.img_context_token_id`` from the tokenizer.
    """
    if "<image>" not in question:
        question = "<image>\n" + question
    model.img_context_token_id = tokenizer.convert_tokens_to_ids("<IMG_CONTEXT>")
    template = get_conv_template(model.template)
    template.system_message = model.system_message
    template.append_message(template.roles[0], question)
    template.append_message(template.roles[1], None)
    query = template.get_prompt()
    # One <IMG_CONTEXT> per visual token, wrapped in <img>...</img>.
    placeholder = "<img>" + "<IMG_CONTEXT>" * model.num_image_token * num_patches + "</img>"
    return query.replace("<image>", placeholder, 1), template
@torch.inference_mode()
def extract_shared_raw_visual_tokens(model: InternVLChatModel, pixel_values: torch.Tensor) -> torch.Tensor:
    """Run the shared ViT and pixel-shuffle patch features (pre-projector)."""
    device = model_vision_device(model)
    pixel_values = pixel_values.to(device=device, dtype=torch.bfloat16)
    # select_layer == -1 means "use the final layer"; otherwise an
    # intermediate hidden state is requested.
    needs_hidden_states = model.select_layer != -1
    vit_output = model.vision_model(
        pixel_values=pixel_values,
        output_hidden_states=needs_hidden_states,
        return_dict=True,
    )
    if needs_hidden_states:
        vit_embeds = vit_output.hidden_states[model.select_layer]
    else:
        vit_embeds = vit_output.last_hidden_state
    vit_embeds = vit_embeds[:, 1:, :]  # drop the leading CLS token
    side = int(vit_embeds.shape[1] ** 0.5)
    vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], side, side, -1)
    vit_embeds = model.pixel_shuffle(vit_embeds, scale_factor=model.downsample_ratio)
    return vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
@torch.inference_mode()
def project_visual_tokens(model: InternVLChatModel, raw_visual_tokens: torch.Tensor) -> torch.Tensor:
    """Project raw ViT tokens into LLM embedding space via the mlp1 projector."""
    projector_device = next(model.mlp1.parameters()).device
    tokens = raw_visual_tokens.to(device=projector_device, dtype=torch.bfloat16)
    return model.mlp1(tokens)
@torch.inference_mode()
def build_input_embeds_from_visual_features(
    model: InternVLChatModel,
    input_ids: torch.Tensor,
    visual_features: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Splice projected visual features into token embeddings at <IMG_CONTEXT> slots.

    Returns the spliced embeddings (batch, seq, hidden) and the flattened ids.
    Raises ValueError when the prompt contains no image-context tokens.
    """
    token_embeds = model.language_model.get_input_embeddings()(input_ids)
    batch, seq_len, hidden = token_embeds.shape
    flat_embeds = token_embeds.reshape(batch * seq_len, hidden)
    flat_ids = input_ids.reshape(batch * seq_len)
    image_slots = flat_ids == model.img_context_token_id
    if not image_slots.any():
        raise ValueError("No image context tokens found in input_ids.")
    flat_embeds[image_slots] = visual_features.reshape(-1, hidden).to(flat_embeds.device)
    return flat_embeds.reshape(batch, seq_len, hidden), flat_ids
@torch.inference_mode()
def run_guide_generation(
    model: InternVLChatModel,
    tokenizer,
    projected_visual_tokens: torch.Tensor,
    question: str,
    generation_config: dict,
) -> Dict[str, object]:
    """Generate the guide model's answer plus tensors for attention analysis.

    Returns a dict with the decoded response, the raw generate() outputs
    (sequences/scores/attentions), the spliced input embeddings, the flat
    input ids, the attention mask, and the (start, end) visual-token span.
    """
    query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
    model_inputs = tokenizer(query, return_tensors="pt")
    input_device = model_text_device(model)
    input_ids = model_inputs["input_ids"].to(input_device)
    attention_mask = model_inputs["attention_mask"].to(input_device)
    # The conversation separator doubles as the EOS token for this template.
    eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
    input_embeds, flat_input_ids = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
    # First/last <IMG_CONTEXT> positions delimit the visual-token span.
    visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero()
    visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1]
    run_config = dict(generation_config)
    run_config["eos_token_id"] = eos_token_id
    # NOTE(review): `visual_token_index` is presumably consumed by a patched
    # generate() in the upstream SGL fork; stock transformers would not accept
    # it — confirm against the upstream language model.
    outputs = model.language_model.generate(
        inputs_embeds=input_embeds,
        attention_mask=attention_mask,
        generation_config=None,
        output_hidden_states=None,
        return_dict=None,
        use_cache=True,
        visual_token_index=(visual_start_index, visual_end_index),
        **run_config,
    )
    response = tokenizer.batch_decode(outputs["sequences"], skip_special_tokens=True)[0]
    # Truncate anything after the conversation separator.
    response = response.split(template.sep)[0].strip()
    return {
        "response": response,
        "outputs": outputs,
        "input_embeds": input_embeds,
        "flat_input_ids": flat_input_ids,
        "attention_mask": attention_mask,
        "visual_token_index": (visual_start_index, visual_end_index),
    }
def aggregate_attention_from_step(attentions, visual_token_index: Tuple[int, int]) -> torch.Tensor:
    """Sum the attention mass each visual token receives during one decode step.

    ``attentions`` is the per-layer list for a single generation step; entries
    may be None. Raises RuntimeError when every layer is missing.
    """
    start, end = visual_token_index
    num_visual = end - start + 1
    importance = None
    for layer_attention in attentions:
        if layer_attention is None:
            continue
        if importance is None:
            importance = torch.zeros(
                num_visual,
                device=layer_attention.device,
                dtype=torch.float32,
            )
        # Merge heads of the first (only) batch element.
        heads_merged = layer_attention[0].sum(dim=0)
        if layer_attention.shape[2] == 1:
            # Incremental decode: a single query row over the full cache.
            importance += heads_merged[0:1, start : end + 1].sum(dim=0)
        else:
            # Prefill: only rows after the image span count as text queries.
            importance += heads_merged[end + 1 :, start : end + 1].sum(dim=0)
    if importance is None:
        raise RuntimeError("Guide model did not return layer attentions for the current decoding step.")
    return importance
def count_attention_query_tokens_from_step(attentions, visual_token_index: Tuple[int, int]) -> int:
    """Number of text query tokens contributing attention in one step."""
    _, visual_end_index = visual_token_index
    for layer_attention in attentions:
        if layer_attention is None:
            continue
        query_length = int(layer_attention.shape[2])
        if query_length == 1:
            return 1  # incremental decode step: exactly one new token
        # Prefill: only rows after the image span are text queries.
        return max(query_length - int(visual_end_index) - 1, 0)
    return 0
def count_generated_tokens(outputs) -> int:
    """Length of the generated sequence tensor, or 0 when unavailable."""
    sequences = getattr(outputs, "sequences", None)
    if sequences is None and isinstance(outputs, dict):
        sequences = outputs.get("sequences")
    if sequences is None or sequences.ndim == 0:
        return 0
    return int(sequences.shape[-1])
def count_attention_query_tokens_from_generation_outputs(
    outputs,
    visual_token_index: Tuple[int, int],
    step_mask: Optional[List[bool]] = None,
) -> int:
    """Total query-token count across all generation steps, honoring step_mask."""
    attentions = getattr(outputs, "attentions", None)
    if not attentions:
        return 0
    total = 0
    for step_idx, step_attentions in enumerate(attentions):
        masked_out = step_mask is not None and (step_idx >= len(step_mask) or not step_mask[step_idx])
        if masked_out:
            continue
        total += count_attention_query_tokens_from_step(step_attentions, visual_token_index)
    if total == 0 and step_mask is not None:
        # The mask filtered out every step; fall back to the unmasked count.
        return count_attention_query_tokens_from_generation_outputs(outputs, visual_token_index, step_mask=None)
    return total
def count_question_and_answer_attention_query_tokens(
    outputs,
    visual_token_index: Tuple[int, int],
) -> Tuple[int, int]:
    """Split query-token counts into prefill (step 0) vs decode (later) steps."""
    attentions = getattr(outputs, "attentions", None)
    if not attentions:
        return 0, 0
    question_total = 0
    answer_total = 0
    for step_idx, step_attentions in enumerate(attentions):
        step_count = count_attention_query_tokens_from_step(step_attentions, visual_token_index)
        if step_idx == 0:
            question_total += step_count
        else:
            answer_total += step_count
    return question_total, answer_total
def get_reasoning_spacy_nlp():
    """Lazily load (at most once) the spaCy pipeline for reasoning-token filtering.

    Returns the cached pipeline, or None when spaCy or its model is
    unavailable; the load is attempted only on the first call.
    """
    global SPACY_REASONING_NLP, SPACY_REASONING_LOAD_ATTEMPTED
    if SPACY_REASONING_LOAD_ATTEMPTED:
        return SPACY_REASONING_NLP
    SPACY_REASONING_LOAD_ATTEMPTED = True
    try:
        import spacy

        # Parser is unused by the filter, so it is disabled for speed.
        # NOTE(review): the lemmatizer is disabled here, yet token.lemma_ is
        # read downstream in analyze_reasoning_filter — lemmas may come out
        # empty/identity; confirm this is intended.
        SPACY_REASONING_NLP = spacy.load("en_core_web_sm", disable=["parser", "lemmatizer"])
    except Exception:
        # Any failure (missing package or model) selects the heuristic fallback.
        SPACY_REASONING_NLP = None
    return SPACY_REASONING_NLP
def should_keep_reasoning_heuristic_token(token_text: str) -> bool:
    """Heuristic fallback filter: keep tokens likely to carry visual evidence."""
    word = token_text.strip()
    if not word:
        return False
    lowered = word.lower()
    # Bare list numbering such as "1." or "2)" is template scaffolding.
    if re.fullmatch(r"\d+[.)]?", word):
        return False
    if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS:
        return False
    # Spatial/color vocabulary is always evidence-bearing.
    if lowered in REASONING_FILTER_POSITION_WORDS or lowered in REASONING_FILTER_COLOR_WORDS:
        return True
    has_digit = any(ch.isdigit() for ch in word)
    has_upper = any(ch.isupper() for ch in word)
    has_symbol = any(ch in ".:/-@&" for ch in word)
    if has_digit or has_upper or has_symbol:
        return True
    # Otherwise keep only reasonably long alphabetic words.
    return sum(ch.isalpha() for ch in word) >= 4
def should_keep_reasoning_doc_token(token) -> bool:
    """spaCy-based filter: keep content-bearing NOUN/PROPN/ADJ tokens."""
    word = token.text.strip()
    if not word:
        return False
    if token.is_punct or token.is_space:
        return False
    lowered = word.lower()
    if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS:
        return False
    return token.pos_ in REASONING_FILTER_KEEP_POS
def build_generated_token_spans(tokenizer, generated_ids: torch.Tensor) -> Tuple[str, List[Tuple[int, int]]]:
    """Decode ids one at a time, recording each token's character span.

    Returns the concatenated decoded text and a (start, end) span per token.
    """
    pieces: List[str] = [
        tokenizer.decode([int(token_id)], skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for token_id in generated_ids.detach().cpu().tolist()
    ]
    spans: List[Tuple[int, int]] = []
    cursor = 0
    for piece in pieces:
        spans.append((cursor, cursor + len(piece)))
        cursor += len(piece)
    return "".join(pieces), spans
def analyze_reasoning_filter(text: str, args) -> Tuple[List[Tuple[int, int]], str, List[Dict[str, object]]]:
    """Find character intervals of reasoning tokens to keep, plus debug info.

    Returns (kept intervals, backend name, per-token analysis records).
    """
    mode = args.guide_reasoning_filter_mode
    if mode == "none":
        return [], "none", []
    if mode == "pos_ner":
        nlp = get_reasoning_spacy_nlp()
        if nlp is not None:
            doc = nlp(text)
            intervals: List[Tuple[int, int]] = []
            token_analysis: List[Dict[str, object]] = []
            for token in doc:
                keep = should_keep_reasoning_doc_token(token)
                if keep:
                    intervals.append((token.idx, token.idx + len(token)))
                token_analysis.append(
                    {
                        "text": token.text,
                        "lemma": token.lemma_,
                        "pos": token.pos_,
                        "tag": token.tag_,
                        "dep": token.dep_,
                        "ent_type": token.ent_type_,
                        "like_num": bool(getattr(token, "like_num", False)),
                        "like_url": bool(getattr(token, "like_url", False)),
                        "is_stop": bool(token.is_stop),
                        "keep": keep,
                    }
                )
            return intervals, "spacy_pos_ner", token_analysis
    # Heuristic fallback: also used when spaCy is unavailable.
    intervals = []
    token_analysis = []
    for match in re.finditer(r"\S+", text):
        token_text = match.group(0)
        keep = should_keep_reasoning_heuristic_token(token_text)
        if keep:
            intervals.append((match.start(), match.end()))
        token_analysis.append(
            {
                "text": token_text,
                "lemma": token_text.lower(),
                "pos": "",
                "tag": "",
                "dep": "",
                "ent_type": "",
                "like_num": any(ch.isdigit() for ch in token_text),
                "like_url": "http" in token_text.lower() or "www." in token_text.lower(),
                "is_stop": token_text.lower() in REASONING_FILTER_STOPWORDS,
                "keep": keep,
            }
        )
    return intervals, "heuristic_fallback", token_analysis
def build_reasoning_attention_step_mask_and_debug(tokenizer, outputs, args) -> Tuple[Optional[List[bool]], Dict[str, object]]:
    """Build a per-generation-step keep mask from the reasoning-token filter.

    Returns (step_mask, debug_info). step_mask is None when filtering is
    disabled, nothing matched, or every step would be dropped — callers then
    use all steps.
    """
    if args.guide_reasoning_filter_mode == "none":
        return None, {"backend": "none", "kept_tokens": [], "token_analysis": []}
    sequences = outputs["sequences"][0]
    decoded_text, token_spans = build_generated_token_spans(tokenizer, sequences)
    intervals, backend, token_analysis = analyze_reasoning_filter(decoded_text, args)
    global SPACY_REASONING_FALLBACK_WARNED
    # Warn only once per process when spaCy is unavailable.
    if backend == "heuristic_fallback" and not SPACY_REASONING_FALLBACK_WARNED:
        print("Warning: spaCy POS/NER model unavailable; guide reasoning filter is using heuristic fallback.")
        SPACY_REASONING_FALLBACK_WARNED = True
    debug_info = {
        "backend": backend,
        "token_analysis": token_analysis,
        "kept_tokens": [token["text"] for token in token_analysis if token.get("keep")],
    }
    if not intervals:
        return None, debug_info
    step_mask = []
    for start, end in token_spans:
        if start == end:
            # Zero-width decoded piece (e.g. suppressed special token).
            step_mask.append(False)
            continue
        # Keep the step when its character span overlaps any kept interval.
        keep = any(start < interval_end and end > interval_start for interval_start, interval_end in intervals)
        step_mask.append(keep)
    debug_info["step_mask"] = step_mask
    if not any(step_mask):
        return None, debug_info
    return step_mask, debug_info
def aggregate_attention_from_generation_outputs(
    outputs,
    visual_token_index: Tuple[int, int],
    step_mask: Optional[List[bool]] = None,
) -> torch.Tensor:
    """Sum per-step visual-token attention across the whole generation."""
    # Fast path: a patched generate() may expose a pre-aggregated tensor
    # (the "viusal" spelling matches the upstream attribute name — keep it).
    precomputed = getattr(outputs, "aggregated_viusal_token_attention", None)
    if precomputed is not None and step_mask is None:
        return precomputed.detach().float()
    attentions = getattr(outputs, "attentions", None)
    if not attentions:
        raise RuntimeError("Guide generation did not return attentions; enable output_attentions.")
    total = None
    for step_idx, step_attentions in enumerate(attentions):
        if step_mask is not None and (step_idx >= len(step_mask) or not step_mask[step_idx]):
            continue
        step_importance = aggregate_attention_from_step(step_attentions, visual_token_index)
        total = step_importance if total is None else total + step_importance
    if total is None:
        if step_mask is not None:
            # The mask removed every step; retry without it.
            return aggregate_attention_from_generation_outputs(outputs, visual_token_index, step_mask=None)
        raise RuntimeError("Guide generation returned no attention steps.")
    return total
def aggregate_question_and_answer_attention_from_generation_outputs(
    outputs,
    visual_token_index: Tuple[int, int],
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Aggregate visual attention separately for prefill (question) and decode (answer) steps."""
    attentions = getattr(outputs, "attentions", None)
    if not attentions:
        raise RuntimeError("Guide generation did not return attentions; enable output_attentions.")
    question_importance = None
    answer_importance = None
    for step_idx, step_attentions in enumerate(attentions):
        step_importance = aggregate_attention_from_step(step_attentions, visual_token_index)
        if step_idx == 0:
            question_importance = (
                step_importance if question_importance is None else question_importance + step_importance
            )
        else:
            answer_importance = (
                step_importance if answer_importance is None else answer_importance + step_importance
            )
    if question_importance is None and answer_importance is None:
        raise RuntimeError("Guide generation returned no attention steps.")
    # Zero-fill the missing side so both tensors are always returned.
    if question_importance is None:
        question_importance = torch.zeros_like(answer_importance)
    if answer_importance is None:
        answer_importance = torch.zeros_like(question_importance)
    return question_importance, answer_importance
@torch.inference_mode()
def compute_consistency_score(
    model: InternVLChatModel,
    input_embeds: torch.Tensor,
    flat_input_ids: torch.Tensor,
    attention_mask: torch.Tensor,
    generated_ids: torch.Tensor,
    visual_token_importance: torch.Tensor,
    visual_token_index: Tuple[int, int],
    consistency_token_ratio: float,
    large_model_prune_selection: str,
) -> torch.Tensor:
    """Score how likely the guide's answer remains under visual-token pruning.

    Runs one forward pass over [prompt + generated answer] while the (patched)
    language model prunes visual tokens down to ``consistency_token_ratio``,
    then multiplies the per-token probabilities of the generated answer.
    Returns a scalar tensor.

    Fix: removed four redundant re-assignments of kwargs that were already set
    identically when the dict was built.
    """
    visual_start_index, visual_end_index = visual_token_index
    generated = generated_ids
    new_token_num = generated.shape[-1]
    # Append the generated answer's embeddings after the prompt embeddings.
    new_input_embedding = torch.concatenate(
        (input_embeds, model.language_model.get_input_embeddings()(generated).unsqueeze(0)),
        dim=1,
    )
    new_attention_mask = torch.concatenate(
        (
            attention_mask,
            torch.ones((1, generated.shape[0]), device=attention_mask.device, dtype=attention_mask.dtype),
        ),
        dim=-1,
    )
    new_input_ids = torch.concatenate((flat_input_ids, generated), dim=-1)
    consistency_generate_kwargs = {
        # NOTE(review): these prune kwargs are presumably consumed by a patched
        # prepare_inputs_for_generation/forward in the upstream SGL fork.
        "large_model_prune_layer": 0.0,
        "large_model_prune_ratio": consistency_token_ratio,
        "large_model_prune_selection": large_model_prune_selection,
        "visual_token_index": (visual_start_index, visual_end_index),
        "visual_token_importance": visual_token_importance,
        "inputs_embeds": new_input_embedding,
        "attention_mask": new_attention_mask,
        "output_scores": False,
        "output_attentions": False,
        "return_dict_in_generate": False,
        "use_cache": True,
    }
    consistency_generate_kwargs = model.language_model._get_initial_cache_position(new_input_ids, consistency_generate_kwargs)
    model_inputs = model.language_model.prepare_inputs_for_generation(new_input_ids, **consistency_generate_kwargs)
    consistency_output = model.language_model.forward(**model_inputs, return_dict=True)
    # Logits at position i predict token i+1, hence the shifted
    # [-new_token_num-1 : -1] window; gather p(token_t | prefix) per token.
    consistency_score = torch.gather(
        consistency_output["logits"][:, -new_token_num - 1 : -1, :].softmax(dim=-1),
        index=generated[None, :, None],
        dim=-1,
    )
    return torch.prod(consistency_score)
@torch.inference_mode()
def run_guide_branch(
    model: InternVLChatModel,
    tokenizer,
    projected_visual_tokens: torch.Tensor,
    question: str,
    generation_config: dict,
    consistency_token_ratio: float,
    args,
) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Dict[str, int]]:
    """Run the guide model once and derive visual-token importance.

    Returns (response, per-step scores, consistency_score, combined
    importance, question-phase importance, answer-phase importance, and a
    dict of token-count statistics).
    """
    generation_result = run_guide_generation(
        model,
        tokenizer,
        projected_visual_tokens,
        question,
        generation_config,
    )
    outputs = generation_result["outputs"]
    # Split attention mass by phase: prefill (question) vs decode (answer).
    question_visual_token_importance, answer_visual_token_importance = (
        aggregate_question_and_answer_attention_from_generation_outputs(
            outputs,
            generation_result["visual_token_index"],
        )
    )
    question_attention_token_count, answer_attention_token_count = count_question_and_answer_attention_query_tokens(
        outputs,
        generation_result["visual_token_index"],
    )
    visual_token_importance = combine_question_and_answer_attention(
        question_visual_token_importance,
        answer_visual_token_importance,
        args,
    )
    if args.large_model_prune_selection == "similarity_greedy":
        # similarity_greedy skips the consistency pass; use a neutral 1.0.
        consistency_score = torch.tensor(1.0, device=visual_token_importance.device)
    else:
        consistency_score = compute_consistency_score(
            model,
            generation_result["input_embeds"],
            generation_result["flat_input_ids"],
            generation_result["attention_mask"],
            outputs["sequences"][0],
            visual_token_importance,
            generation_result["visual_token_index"],
            consistency_token_ratio,
            args.large_model_prune_selection,
        )
    return (
        generation_result["response"],
        outputs.scores,
        consistency_score,
        visual_token_importance,
        question_visual_token_importance,
        answer_visual_token_importance,
        {
            "question_attention_token_count": question_attention_token_count,
            "answer_attention_token_count": answer_attention_token_count,
            # No reasoning pass happens in this branch; counts stay zero.
            "reasoning_attention_token_count": 0,
            "guide_answer_generated_token_count": count_generated_tokens(outputs),
            "guide_reasoning_generated_token_count": 0,
        },
    )
@torch.inference_mode()
def run_decode_branch(
    model: InternVLChatModel,
    tokenizer,
    projected_visual_tokens: torch.Tensor,
    question: str,
    generation_config: dict,
    visual_token_importance: torch.Tensor,
    large_model_prune_layer: float,
    large_model_prune_ratio: float,
    large_model_prune_selection: str,
) -> str:
    """Generate the final answer with the large model under visual-token pruning.

    NOTE(review): the prune/importance kwargs passed through generate() are
    presumably consumed by a patched generate() in the upstream SGL fork;
    stock transformers would reject them — confirm upstream.
    """
    query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
    model_inputs = tokenizer(query, return_tensors="pt")
    input_device = model_text_device(model)
    input_ids = model_inputs["input_ids"].to(input_device)
    attention_mask = model_inputs["attention_mask"].to(input_device)
    # Conversation separator doubles as the EOS token for this template.
    eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
    input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
    # First/last <IMG_CONTEXT> positions delimit the visual-token span.
    visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero()
    visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1]
    run_config = dict(generation_config)
    run_config["eos_token_id"] = eos_token_id
    # Decode branch needs only the token ids, not scores or attentions.
    run_config["return_dict_in_generate"] = False
    run_config["output_scores"] = False
    run_config["output_attentions"] = False
    run_config["large_model_prune_layer"] = large_model_prune_layer
    run_config["large_model_prune_ratio"] = large_model_prune_ratio
    run_config["large_model_prune_selection"] = large_model_prune_selection
    run_config["visual_token_importance"] = visual_token_importance
    run_config["visual_token_index"] = (visual_start_index, visual_end_index)
    output_ids = model.language_model.generate(
        inputs_embeds=input_embeds,
        attention_mask=attention_mask,
        generation_config=None,
        output_hidden_states=None,
        return_dict=None,
        use_cache=True,
        **run_config,
    )
    response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
    # Drop anything after the conversation separator.
    return response.split(template.sep)[0].strip()
def make_generation_config(args) -> dict:
    """Base generation kwargs for the guide model, with scores/attentions on."""
    sampling = args.temperature > 0
    config = {
        "num_beams": args.num_beams,
        "max_new_tokens": args.max_new_tokens,
        "min_new_tokens": 1,
        "do_sample": sampling,
        "return_dict_in_generate": True,
        "output_scores": True,
        "output_attentions": True,
    }
    # Temperature only makes sense when sampling is enabled.
    if sampling:
        config["temperature"] = args.temperature
    return config
def append_instruction(question: str, instruction: str) -> str:
    """Append a non-empty instruction on its own line after the question."""
    trimmed = instruction.strip()
    if trimmed:
        return f"{question.rstrip()}\n{trimmed}"
    return question
def make_reasoning_generation_config(base_generation_config: dict, args) -> dict:
    """Derive the guide's reasoning-pass config from the base generation config."""
    config = dict(base_generation_config)
    config.update(
        max_new_tokens=args.reasoning_max_new_tokens,
        return_dict_in_generate=True,
        output_scores=True,
        output_attentions=True,
    )
    temperature = args.reasoning_temperature
    config["do_sample"] = temperature > 0
    if temperature > 0:
        config["temperature"] = temperature
    else:
        # Greedy reasoning: drop any inherited temperature.
        config.pop("temperature", None)
    return config
def make_custom_generation_config(
    base_generation_config: dict,
    max_new_tokens: int,
    temperature: float,
    return_dict_in_generate: bool,
    output_scores: bool,
    output_attentions: bool,
) -> dict:
    """Copy the base config with explicit overrides for one generation call."""
    config = dict(base_generation_config)
    config["max_new_tokens"] = max_new_tokens
    config["return_dict_in_generate"] = return_dict_in_generate
    config["output_scores"] = output_scores
    config["output_attentions"] = output_attentions
    sampling = temperature > 0
    config["do_sample"] = sampling
    if sampling:
        config["temperature"] = temperature
    else:
        # Greedy decoding: remove any inherited temperature.
        config.pop("temperature", None)
    return config
def normalize_generated_text(text: str) -> str:
    """Collapse every whitespace run to a single space and trim both ends."""
    return " ".join(text.split())
def strip_base_prompt(question: str) -> str:
    """Remove the trailing canonical base-prompt suffix from a question, if present."""
    if not question.endswith(BASE_PROMPT_SUFFIX):
        return question
    return question[: -len(BASE_PROMPT_SUFFIX)].rstrip()
def summarize_visual_token_importance(visual_token_importance: torch.Tensor, topk: int) -> Dict[str, object]:
    """Summarize a visual-token importance vector for logging/analysis.

    Args:
        visual_token_importance: 1-D (or flattenable) non-negative importance
            scores, one per visual token.
        topk: number of highest-weight tokens to report (clamped to the
            tensor size).

    Returns:
        Dict with the raw sum, entropy of the normalized distribution, max
        weight, top-k indices/weights, and the full normalized weight list.
    """
    values = visual_token_importance.detach().float().view(-1).cpu()
    total = values.sum().item()
    if total > 0:
        normalized = values / total
    else:
        # Degenerate attention (all-zero): fall back to a uniform distribution.
        normalized = torch.full_like(values, 1.0 / max(values.numel(), 1))
    topk = min(topk, normalized.numel())
    top_values, top_indices = torch.topk(normalized, k=topk)
    # Clamp before log to avoid -inf on exact zeros.
    entropy = -(normalized * torch.clamp(normalized, min=1e-12).log()).sum().item()
    # Guard the empty-tensor case: Tensor.max() raises on zero elements,
    # even though the uniform fallback above already tolerates numel()==0.
    max_weight = normalized.max().item() if normalized.numel() > 0 else 0.0
    return {
        "raw_sum": total,
        "entropy": entropy,
        "max_weight": max_weight,
        "top_indices": top_indices.tolist(),
        "top_weights": top_values.tolist(),
        "weights": normalized.tolist(),
    }
def normalize_visual_token_importance(visual_token_importance: torch.Tensor) -> torch.Tensor:
    """Scale importance so it sums to 1; fall back to uniform when all-zero."""
    weights = visual_token_importance.detach().float()
    total = weights.sum()
    if total.item() <= 0:
        return torch.full_like(weights, 1.0 / max(weights.numel(), 1))
    return weights / total
def prepare_decode_visual_token_importance(
    visual_token_importance: torch.Tensor,
    selection_mode: str,
) -> torch.Tensor:
    """Return the score tensor that drives visual-token pruning.

    `topk` / `similarity_greedy` use the raw guide-attention scores; the
    `random` baseline replaces them with uniform noise of the same shape.

    Raises:
        ValueError: for an unrecognized selection mode.
    """
    scores = visual_token_importance.detach().float()
    if selection_mode == "random":
        return torch.rand_like(scores)
    if selection_mode not in ("topk", "similarity_greedy"):
        raise ValueError(f"Unsupported large model prune selection mode: {selection_mode}")
    return scores
def maybe_normalize_visual_token_importance(visual_token_importance: torch.Tensor, args) -> torch.Tensor:
    """Normalize the importance vector only in 'normalized' aggregation mode."""
    if args.guide_attention_aggregation_mode != "normalized":
        return visual_token_importance.detach().float()
    return normalize_visual_token_importance(visual_token_importance)
def combine_question_and_answer_attention(
    question_visual_token_importance: torch.Tensor,
    answer_visual_token_importance: torch.Tensor,
    args,
) -> torch.Tensor:
    """Weighted sum of question- and answer-conditioned importance maps.

    Raises:
        ValueError: when both configured weights are zero.
    """
    q_weight = args.guide_question_attention_weight
    a_weight = args.guide_answer_attention_weight
    if q_weight == 0 and a_weight == 0:
        raise ValueError("At least one guide question/answer attention weight must be > 0.")
    q_term = maybe_normalize_visual_token_importance(question_visual_token_importance, args)
    a_term = maybe_normalize_visual_token_importance(answer_visual_token_importance, args)
    return q_weight * q_term + a_weight * a_term
def resolve_guide_attention_source(args) -> str:
    """Resolve 'default' attention source to a concrete one per reasoning mode.

    Explicit choices pass through; 'default' maps to 'combined' for
    two_pass_explicit mode and 'answer' otherwise.
    """
    source = args.guide_attention_source
    if source != "default":
        return source
    return "combined" if args.guide_reasoning_mode == "two_pass_explicit" else "answer"
def combine_reasoning_and_answer_attention(
    reasoning_visual_token_importance: torch.Tensor,
    answer_visual_token_importance: torch.Tensor,
    args,
) -> torch.Tensor:
    """Blend reasoning- and answer-pass importance maps per the resolved source.

    A single-source selection returns that map scaled by its weight; the
    'combined' source returns the weighted sum of both.

    Raises:
        ValueError: when the combined source is selected but both weights are 0.
    """
    source = resolve_guide_attention_source(args)
    reasoning_weight = args.guide_reasoning_attention_weight
    answer_weight = args.guide_answer_attention_weight
    if source == "reasoning":
        return reasoning_weight * maybe_normalize_visual_token_importance(
            reasoning_visual_token_importance,
            args,
        )
    if source == "answer":
        return answer_weight * maybe_normalize_visual_token_importance(
            answer_visual_token_importance,
            args,
        )
    if reasoning_weight == 0 and answer_weight == 0:
        raise ValueError("At least one guide attention weight must be > 0.")
    reasoning_term = maybe_normalize_visual_token_importance(reasoning_visual_token_importance, args)
    answer_term = maybe_normalize_visual_token_importance(answer_visual_token_importance, args)
    return reasoning_weight * reasoning_term + answer_weight * answer_term
def combine_question_reasoning_and_answer_attention(
    question_visual_token_importance: torch.Tensor,
    reasoning_visual_token_importance: torch.Tensor,
    answer_visual_token_importance: torch.Tensor,
    args,
) -> torch.Tensor:
    """Combine question/reasoning/answer importance maps per the resolved source.

    'reasoning' uses only the reasoning map; 'answer' uses the weighted
    question+answer blend; 'combined' adds the weighted raw reasoning map on
    top of that blend.
    """
    source = resolve_guide_attention_source(args)
    if source == "reasoning":
        reasoning_term = maybe_normalize_visual_token_importance(reasoning_visual_token_importance, args)
        return args.guide_reasoning_attention_weight * reasoning_term
    question_answer_term = combine_question_and_answer_attention(
        question_visual_token_importance,
        answer_visual_token_importance,
        args,
    )
    if source == "answer":
        return question_answer_term
    # NOTE(review): in the 'combined' path the reasoning term deliberately
    # bypasses maybe_normalize_visual_token_importance (raw attention), unlike
    # the question/answer terms — confirm this asymmetry is intended.
    raw_reasoning = reasoning_visual_token_importance.detach().float()
    return question_answer_term + args.guide_reasoning_attention_weight * raw_reasoning
def build_guide_attention_question(question: str, args) -> str:
    """Rewrite the question for the guide attention pass per reasoning mode.

    CoT modes strip the base prompt first; other modes pass the question
    through unchanged.
    """
    mode = args.guide_reasoning_mode
    if mode == "short_cot":
        return GUIDE_ATTENTION_COT_PROMPT_TEMPLATE.replace("{question}", strip_base_prompt(question))
    if mode == "explicit_cot":
        return append_instruction(strip_base_prompt(question), GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION)
    return question
def build_guide_reasoning_question(question: str) -> str:
    """Fill the reasoning-only prompt template with the bare (base-prompt-free) question."""
    bare_question = strip_base_prompt(question)
    return GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE.replace("{question}", bare_question)
def build_guide_text_question(question: str) -> str:
    """Append the guide text-hint instruction to the question."""
    return append_instruction(question, GUIDE_TEXT_HINT_INSTRUCTION)
def build_decode_question(question: str, guide_text_hint: Optional[str]) -> str:
    """Inject the guide text hint (when present) plus the guided-decode instruction."""
    if guide_text_hint:
        hint_block = f"Guide hint: {guide_text_hint}\n{GUIDED_DECODE_INSTRUCTION}"
        return append_instruction(question, hint_block)
    return question
def make_guide_attention_generation_config(base_generation_config: dict, args) -> dict:
    """Build the guide-pass config; CoT-style modes need scores and attentions.

    Non-CoT modes get an unmodified copy of the base config.
    """
    cot_modes = ("short_cot", "explicit_cot", "two_pass_explicit")
    if args.guide_reasoning_mode not in cot_modes:
        return dict(base_generation_config)
    return make_custom_generation_config(
        base_generation_config,
        max_new_tokens=args.guide_reasoning_max_new_tokens,
        temperature=args.guide_reasoning_temperature,
        return_dict_in_generate=True,
        output_scores=True,
        output_attentions=True,
    )
def make_guide_text_generation_config(base_generation_config: dict, args) -> dict:
    """Config for the guide text-hint pass: plain token IDs, no scores/attentions."""
    return make_custom_generation_config(
        base_generation_config,
        max_new_tokens=args.guide_text_max_new_tokens,
        temperature=args.guide_text_temperature,
        return_dict_in_generate=False,
        output_scores=False,
        output_attentions=False,
    )
@torch.inference_mode()
def run_text_generation_branch(
    model: InternVLChatModel,
    tokenizer,
    projected_visual_tokens: torch.Tensor,
    question: str,
    generation_config: dict,
) -> str:
    """Generate a plain-text response for `question` using shared visual tokens.

    Builds the chat query for the conversation template, splices the
    projected visual features into the token embeddings, and decodes with
    the model's underlying language model. Returns the decoded text
    truncated at the template separator.
    """
    query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
    model_inputs = tokenizer(query, return_tensors="pt")
    input_device = model_text_device(model)
    input_ids = model_inputs["input_ids"].to(input_device)
    attention_mask = model_inputs["attention_mask"].to(input_device)
    # Stop generation at the conversation separator token.
    eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
    input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
    run_config = dict(generation_config)
    run_config["eos_token_id"] = eos_token_id
    output_ids = model.language_model.generate(
        inputs_embeds=input_embeds,
        attention_mask=attention_mask,
        generation_config=None,
        output_hidden_states=None,
        return_dict=None,
        use_cache=True,
        **run_config,
    )
    response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
    return response.split(template.sep)[0].strip()
def run_decode_answer(
    model: InternVLChatModel,
    tokenizer,
    projected_visual_tokens: torch.Tensor,
    question: str,
    generation_config: dict,
    visual_token_importance: torch.Tensor,
    args,
) -> str:
    """Run the large decode model with guide-driven visual-token pruning.

    Thin wrapper over `run_decode_branch`: first converts the guide-model
    attention importance into pruning scores for the configured selection
    mode ('topk'/'similarity_greedy' keep the raw scores; 'random' swaps in
    uniform noise as a baseline), then forwards the prune layer/ratio/mode.
    """
    return run_decode_branch(
        model,
        tokenizer,
        projected_visual_tokens,
        question,
        generation_config,
        prepare_decode_visual_token_importance(
            visual_token_importance,
            args.large_model_prune_selection,
        ),
        args.large_model_prune_layer,
        args.large_model_prune_ratio,
        args.large_model_prune_selection,
    )
@torch.inference_mode()
def run_guide_two_pass_explicit_branch(
    model: InternVLChatModel,
    tokenizer,
    projected_visual_tokens: torch.Tensor,
    question: str,
    reasoning_generation_config: dict,
    answer_generation_config: dict,
    consistency_token_ratio: float,
    args,
) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, str, Dict[str, object], Dict[str, int]]:
    """Guide-model branch for `guide_reasoning_mode='two_pass_explicit'`.

    Runs two guide generations: a direct answer pass and an explicit
    reasoning pass (reasoning-only prompt). Visual-token attention is
    aggregated from both passes and combined into a single importance map
    that later drives decode-side visual-token pruning.

    Returns a 7-tuple of: answer text, per-step answer scores, consistency
    score, combined visual-token importance, reasoning text, reasoning
    filter debug info, and per-pass token-count stats.
    """
    # Pass 1: direct answer — supplies the answer text, its scores, and
    # question/answer-conditioned visual attention.
    answer_result = run_guide_generation(
        model,
        tokenizer,
        projected_visual_tokens,
        question,
        answer_generation_config,
    )
    # Pass 2: explicit reasoning from the reasoning-only prompt.
    reasoning_result = run_guide_generation(
        model,
        tokenizer,
        projected_visual_tokens,
        build_guide_reasoning_question(question),
        reasoning_generation_config,
    )
    reasoning = reasoning_result["response"]
    # Optionally mask out reasoning steps (e.g. POS/NER filtering) before
    # aggregating their attention.
    reasoning_step_mask, reasoning_filter_debug = build_reasoning_attention_step_mask_and_debug(
        tokenizer,
        reasoning_result["outputs"],
        args,
    )
    reasoning_visual_token_importance = aggregate_attention_from_generation_outputs(
        reasoning_result["outputs"],
        reasoning_result["visual_token_index"],
        reasoning_step_mask,
    )
    reasoning_attention_token_count = count_attention_query_tokens_from_generation_outputs(
        reasoning_result["outputs"],
        reasoning_result["visual_token_index"],
        reasoning_step_mask,
    )
    question_visual_token_importance, answer_visual_token_importance = (
        aggregate_question_and_answer_attention_from_generation_outputs(
            answer_result["outputs"],
            answer_result["visual_token_index"],
        )
    )
    question_attention_token_count, answer_attention_token_count = count_question_and_answer_attention_query_tokens(
        answer_result["outputs"],
        answer_result["visual_token_index"],
    )
    # Merge the three attention sources per the configured weights/source.
    visual_token_importance = combine_question_reasoning_and_answer_attention(
        question_visual_token_importance,
        reasoning_visual_token_importance,
        answer_visual_token_importance,
        args,
    )
    # similarity_greedy does its own token selection, so the consistency
    # check is skipped (fixed score of 1.0).
    if args.large_model_prune_selection == "similarity_greedy":
        consistency_score = torch.tensor(1.0, device=visual_token_importance.device)
    else:
        consistency_score = compute_consistency_score(
            model,
            answer_result["input_embeds"],
            answer_result["flat_input_ids"],
            answer_result["attention_mask"],
            answer_result["outputs"]["sequences"][0],
            visual_token_importance,
            answer_result["visual_token_index"],
            consistency_token_ratio,
            args.large_model_prune_selection,
        )
    return (
        answer_result["response"],
        answer_result["outputs"].scores,
        consistency_score,
        visual_token_importance,
        reasoning,
        reasoning_filter_debug,
        {
            "question_attention_token_count": question_attention_token_count,
            "answer_attention_token_count": answer_attention_token_count,
            "reasoning_attention_token_count": reasoning_attention_token_count,
            "guide_answer_generated_token_count": count_generated_tokens(answer_result["outputs"]),
            "guide_reasoning_generated_token_count": count_generated_tokens(reasoning_result["outputs"]),
        },
    )
def generate_with_reasoning(
    guide_model: InternVLChatModel,
    guide_tokenizer,
    decode_model: InternVLChatModel,
    large_tokenizer,
    projected_visual_tokens: torch.Tensor,
    question: str,
    generation_config: dict,
    reasoning_generation_config: dict,
    visual_token_importance: torch.Tensor,
    args,
) -> Tuple[str, str]:
    """Two-pass decode: generate explicit reasoning, then the final answer.

    Pass 1 asks the decode model to explain its reasoning; pass 2 feeds that
    reasoning back with the final-answer instruction. Both passes apply the
    same visual-token pruning. Returns `(answer, reasoning)`.

    NOTE(review): `guide_model` and `guide_tokenizer` are accepted but never
    used in this body; they appear to be kept only for call-site
    compatibility — confirm before removing.
    """
    reasoning_question = append_instruction(question, EXPLICIT_REASONING_INSTRUCTION)
    reasoning = run_decode_answer(
        decode_model,
        large_tokenizer,
        projected_visual_tokens,
        reasoning_question,
        reasoning_generation_config,
        visual_token_importance,
        args,
    )
    final_question = append_instruction(
        question,
        f"Reasoning:\n{reasoning}\n{DEFAULT_FINAL_ANSWER_INSTRUCTION}",
    )
    answer = run_decode_answer(
        decode_model,
        large_tokenizer,
        projected_visual_tokens,
        final_question,
        generation_config,
        visual_token_importance,
        args,
    )
    return answer, reasoning
def evaluate(args):
    """Run the shared-vision guided TextVQA evaluation end to end.

    Loads the guide (small) and decode (large) InternVL models, shares the
    raw vision-encoder tokens between them, derives visual-token importance
    from the guide model's attention, prunes the decode model's visual
    tokens accordingly, scores predictions with the TextVQA evaluator, and
    writes per-sample results, filter-debug info, and a summary JSON to
    `args.out_dir`.
    """
    # --- Model setup -------------------------------------------------------
    guide_checkpoint = resolve_hf_snapshot(args.guide_checkpoint)
    large_checkpoint = resolve_hf_snapshot(args.large_checkpoint)
    guide_tokenizer = AutoTokenizer.from_pretrained(guide_checkpoint, trust_remote_code=True, use_fast=False)
    guide_config = configure_model(guide_checkpoint, use_flash_attn=args.use_flash_attn)
    guide_model = load_model(
        guide_checkpoint,
        guide_config,
        auto=args.auto,
        load_in_8bit=args.load_in_8bit,
        load_in_4bit=args.load_in_4bit,
    )
    decode_model, large_tokenizer = build_decode_model(
        guide_model,
        large_checkpoint,
        use_flash_attn=args.use_flash_attn,
        auto=args.auto,
        load_in_8bit=args.load_in_8bit,
        load_in_4bit=args.load_in_4bit,
    )
    # Shared raw vision tokens require both models to agree on image size
    # and per-image token count.
    guide_image_size = guide_model.config.force_image_size or guide_model.config.vision_config.image_size
    large_image_size = decode_model.config.force_image_size or decode_model.config.vision_config.image_size
    if guide_image_size != large_image_size:
        raise ValueError(f"Guide and decode image size mismatch: {guide_image_size} vs {large_image_size}")
    if guide_model.num_image_token != decode_model.num_image_token:
        raise ValueError(
            f"Guide and decode image token count mismatch: {guide_model.num_image_token} vs {decode_model.num_image_token}"
        )
    # --- Data & generation configs ----------------------------------------
    data_root = os.path.abspath(args.data_root)
    textvqa_root = os.path.abspath(args.textvqa_root) if args.textvqa_root else os.path.join(data_root, "data", "textvqa")
    dataset = TextVQADataset(
        jsonl_path=os.path.join(textvqa_root, "textvqa_val.jsonl"),
        data_root=data_root,
        image_size=guide_image_size,
        dynamic=args.dynamic,
        use_thumbnail=guide_model.config.use_thumbnail,
        max_num=args.max_num,
    )
    question_id_to_answers = load_annotations(os.path.join(textvqa_root, "textvqa_val_annotations.json"))
    generation_config = make_generation_config(args)
    guide_attention_generation_config = make_guide_attention_generation_config(generation_config, args)
    guide_text_generation_config = None
    if args.guide_text_mode != "none":
        guide_text_generation_config = make_guide_text_generation_config(generation_config, args)
    reasoning_generation_config = None
    if args.reasoning_mode == "two_pass":
        reasoning_generation_config = make_reasoning_generation_config(generation_config, args)
    num_items = len(dataset) if args.limit is None else min(len(dataset), args.limit)
    results = []
    filter_debug_results = []
    # --- Per-sample loop ---------------------------------------------------
    for idx in range(num_items):
        sample = dataset[idx]
        question = sample["question"] + " " + BASE_PROMPT
        pixel_values = sample["pixel_values"]
        guide_attention_question = build_guide_attention_question(question, args)
        # Time the guide (small-model) stage; synchronize for accurate GPU timing.
        torch.cuda.synchronize()
        start = time.time()
        # Vision encoder runs once; projections are per-model.
        raw_visual_tokens = extract_shared_raw_visual_tokens(guide_model, pixel_values)
        guide_visual_tokens = project_visual_tokens(guide_model, raw_visual_tokens)
        guide_reasoning = None
        guide_reasoning_filter_debug = {"backend": "none", "kept_tokens": [], "token_analysis": []}
        guide_attention_token_counts = {
            "question_attention_token_count": 0,
            "answer_attention_token_count": 0,
            "reasoning_attention_token_count": 0,
            "guide_answer_generated_token_count": 0,
            "guide_reasoning_generated_token_count": 0,
        }
        question_visual_token_importance = None
        answer_visual_token_importance = None
        if args.guide_reasoning_mode == "two_pass_explicit":
            (
                guide_answer,
                guide_scores,
                consistency_score,
                visual_token_importance,
                guide_reasoning,
                guide_reasoning_filter_debug,
                guide_attention_token_counts,
            ) = (
                run_guide_two_pass_explicit_branch(
                    guide_model,
                    guide_tokenizer,
                    guide_visual_tokens,
                    question,
                    guide_attention_generation_config,
                    generation_config,
                    args.consistency_token_ratio,
                    args,
                )
            )
        else:
            (
                guide_answer,
                guide_scores,
                consistency_score,
                visual_token_importance,
                question_visual_token_importance,
                answer_visual_token_importance,
                guide_attention_token_counts,
            ) = run_guide_branch(
                guide_model,
                guide_tokenizer,
                guide_visual_tokens,
                guide_attention_question,
                guide_attention_generation_config,
                args.consistency_token_ratio,
                args,
            )
        # Optional extra guide pass that produces a short textual hint for
        # the decode model.
        guide_text_hint = None
        if args.guide_text_mode != "none":
            if guide_text_generation_config is None:
                raise ValueError("guide_text_generation_config is required when guide_text_mode is enabled.")
            guide_text_hint = normalize_generated_text(
                run_text_generation_branch(
                    guide_model,
                    guide_tokenizer,
                    guide_visual_tokens,
                    build_guide_text_question(question),
                    guide_text_generation_config,
                )
            )
        torch.cuda.synchronize()
        end = time.time()
        small_model_time = end - start
        # Geometric mean of per-step max token probabilities as the guide's
        # answer confidence.
        # NOTE(review): torch.prod of many probabilities can underflow to 0
        # for long sequences; a log-space mean would be more stable — confirm
        # sequence lengths stay short enough here.
        scores = torch.concatenate(guide_scores, dim=0)
        scores, _ = scores.softmax(dim=-1).max(dim=-1)
        original_confidence = math.pow(torch.prod(scores).item(), 1 / len(scores))
        # Time the decode (large-model) stage.
        torch.cuda.synchronize()
        start = time.time()
        large_visual_tokens = project_visual_tokens(decode_model, raw_visual_tokens)
        decode_question = build_decode_question(question, guide_text_hint)
        reasoning = None
        if args.reasoning_mode == "none":
            large_answer = run_decode_answer(
                decode_model,
                large_tokenizer,
                large_visual_tokens,
                decode_question,
                generation_config,
                visual_token_importance,
                args,
            )
        elif args.reasoning_mode == "prompt":
            # Single pass with a hidden-reasoning instruction appended.
            prompted_question = append_instruction(decode_question, HIDDEN_REASONING_INSTRUCTION)
            large_answer = run_decode_answer(
                decode_model,
                large_tokenizer,
                large_visual_tokens,
                prompted_question,
                generation_config,
                visual_token_importance,
                args,
            )
        else:
            # reasoning_mode == "two_pass": explicit reasoning pass first.
            if reasoning_generation_config is None:
                raise ValueError("reasoning_generation_config is required when reasoning_mode='two_pass'.")
            large_answer, reasoning = generate_with_reasoning(
                guide_model,
                guide_tokenizer,
                decode_model,
                large_tokenizer,
                large_visual_tokens,
                decode_question,
                generation_config,
                reasoning_generation_config,
                visual_token_importance,
                args,
            )
        torch.cuda.synchronize()
        end = time.time()
        large_model_time = end - start
        # --- Assemble the per-sample record --------------------------------
        visual_token_count = visual_token_importance.shape[0]
        kept_visual_token_count = max(1, int(visual_token_count * args.large_model_prune_ratio))
        result_item = {
            "question_id": sample["question_id"],
            "question": sample["question"],
            "answer": large_answer,
            "pred_answer": large_answer,
            "gt_answers": question_id_to_answers[sample["question_id"]],
            "small_answer": guide_answer,
            "guide_attention_output": guide_answer,
            "large_answer": large_answer,
            "small_model_time": small_model_time,
            "large_model_time": large_model_time,
            "original_confidence": original_confidence,
            "consistency_score": consistency_score.item(),
            "visual_token_count": visual_token_count,
            "kept_visual_token_count": kept_visual_token_count,
            "guide_attention_token_counts": guide_attention_token_counts,
        }
        if args.save_visual_token_importance:
            result_item["visual_token_importance_stats"] = summarize_visual_token_importance(
                visual_token_importance,
                topk=args.visual_token_importance_topk,
            )
            if question_visual_token_importance is not None:
                result_item["question_visual_token_importance_stats"] = summarize_visual_token_importance(
                    question_visual_token_importance,
                    topk=args.visual_token_importance_topk,
                )
            if answer_visual_token_importance is not None:
                result_item["answer_visual_token_importance_stats"] = summarize_visual_token_importance(
                    answer_visual_token_importance,
                    topk=args.visual_token_importance_topk,
                )
        if guide_text_hint is not None:
            result_item["guide_text_hint"] = guide_text_hint
        if args.save_reasoning and guide_reasoning is not None:
            result_item["guide_reasoning"] = guide_reasoning
        if args.save_reasoning and reasoning is not None:
            result_item["large_reasoning"] = reasoning
        results.append(result_item)
        filter_debug_results.append(
            {
                "question_id": sample["question_id"],
                "question": sample["question"],
                "small_answer": guide_answer,
                "large_answer": large_answer,
                "guide_reasoning": guide_reasoning,
                "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode,
                "guide_reasoning_filter_backend": guide_reasoning_filter_debug.get("backend", "none"),
                "kept_tokens": guide_reasoning_filter_debug.get("kept_tokens", []),
                "token_analysis": guide_reasoning_filter_debug.get("token_analysis", []),
            }
        )
        # Periodic progress logging (always logs the final sample).
        if (idx + 1) % args.log_every == 0 or idx + 1 == num_items:
            status = (
                f"[{idx + 1}/{num_items}] question_id={sample['question_id']} "
                f"small={guide_answer} large={large_answer} kept={kept_visual_token_count}/{visual_token_count}"
            )
            if guide_text_hint is not None:
                status += f" hint={guide_text_hint}"
            print(status)
            sys.stdout.flush()
    # --- Scoring & output --------------------------------------------------
    evaluator = TextVQAAccuracyEvaluator()
    accuracy = evaluator.eval_pred_list(results)
    os.makedirs(args.out_dir, exist_ok=True)
    run_name = args.run_name or "textvqa_shared_vision_2bguide_8btext"
    result_path = os.path.join(args.out_dir, f"{run_name}.json")
    summary_path = os.path.join(args.out_dir, f"{run_name}.summary.json")
    filter_debug_path = os.path.join(args.out_dir, f"{run_name}.filter_debug.json")
    with open(result_path, "w") as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    with open(filter_debug_path, "w") as f:
        json.dump(filter_debug_results, f, ensure_ascii=False, indent=2)
    token_count_keys = [
        "question_attention_token_count",
        "answer_attention_token_count",
        "reasoning_attention_token_count",
        "guide_answer_generated_token_count",
        "guide_reasoning_generated_token_count",
    ]
    avg_guide_attention_token_counts = {
        key: (
            sum(item.get("guide_attention_token_counts", {}).get(key, 0) for item in results)
            / max(len(results), 1)
        )
        for key in token_count_keys
    }
    summary = {
        "mode": "shared_vision_guided",
        "guide_checkpoint": guide_checkpoint,
        "large_checkpoint": large_checkpoint,
        "count": num_items,
        "accuracy": accuracy,
        "large_model_prune_layer": args.large_model_prune_layer,
        "large_model_prune_ratio": args.large_model_prune_ratio,
        "large_model_prune_selection": args.large_model_prune_selection,
        "consistency_token_ratio": args.consistency_token_ratio,
        "guide_reasoning_mode": args.guide_reasoning_mode,
        "guide_reasoning_max_new_tokens": args.guide_reasoning_max_new_tokens,
        "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode,
        "guide_attention_aggregation_mode": args.guide_attention_aggregation_mode,
        "guide_attention_source": resolve_guide_attention_source(args),
        "guide_reasoning_attention_weight": args.guide_reasoning_attention_weight,
        "guide_answer_attention_weight": args.guide_answer_attention_weight,
        "guide_question_attention_weight": args.guide_question_attention_weight,
        "guide_text_mode": args.guide_text_mode,
        "guide_text_max_new_tokens": args.guide_text_max_new_tokens,
        "avg_guide_attention_token_counts": avg_guide_attention_token_counts,
        "avg_small_model_time": sum(item["small_model_time"] for item in results) / max(len(results), 1),
        "avg_large_model_time": sum(item["large_model_time"] for item in results) / max(len(results), 1),
        "results_file": result_path,
        "filter_debug_file": filter_debug_path,
    }
    with open(summary_path, "w") as f:
        json.dump(summary, f, ensure_ascii=False, indent=2)
    print(f"accuracy: {accuracy:.6f}")
    print(f"results_file: {result_path}")
    print(f"summary_file: {summary_path}")
def main():
    """Parse CLI arguments, validate them, seed RNGs, and run the evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--guide-checkpoint", type=str, required=True)
    parser.add_argument("--large-checkpoint", type=str, required=True)
    parser.add_argument("--data-root", type=str, default=str(REPO_ROOT))
    parser.add_argument("--textvqa-root", type=str, default="")
    parser.add_argument("--out-dir", type=str, default=str(REPO_ROOT / "outputs" / "shared_vision_guided"))
    parser.add_argument("--run-name", type=str, default="")
    parser.add_argument("--limit", type=int, default=None)
    parser.add_argument("--max-new-tokens", type=int, default=10)
    parser.add_argument("--num-beams", type=int, default=1)
    parser.add_argument("--temperature", type=float, default=0.0)
    # Decode-side reasoning options.
    parser.add_argument("--reasoning-mode", type=str, choices=["none", "prompt", "two_pass"], default="none")
    parser.add_argument("--reasoning-max-new-tokens", type=int, default=64)
    parser.add_argument("--reasoning-temperature", type=float, default=0.0)
    parser.add_argument("--save-reasoning", action="store_true")
    # Guide-side reasoning / attention options.
    parser.add_argument(
        "--guide-reasoning-mode",
        type=str,
        choices=["none", "short_cot", "explicit_cot", "two_pass_explicit"],
        default="none",
    )
    parser.add_argument("--guide-reasoning-max-new-tokens", type=int, default=1024)
    parser.add_argument("--guide-reasoning-temperature", type=float, default=0.0)
    parser.add_argument(
        "--guide-reasoning-filter-mode",
        type=str,
        choices=["none", "pos_ner"],
        default="none",
    )
    parser.add_argument(
        "--guide-attention-source",
        type=str,
        choices=["default", "reasoning", "answer", "combined"],
        default="default",
    )
    parser.add_argument(
        "--guide-attention-aggregation-mode",
        type=str,
        choices=["raw", "normalized"],
        default="raw",
    )
    parser.add_argument("--guide-question-attention-weight", type=float, default=1.0)
    parser.add_argument("--guide-reasoning-attention-weight", type=float, default=1.0)
    parser.add_argument("--guide-answer-attention-weight", type=float, default=1.0)
    # Guide text-hint options.
    parser.add_argument("--guide-text-mode", type=str, choices=["none", "short_rationale"], default="none")
    parser.add_argument("--guide-text-max-new-tokens", type=int, default=12)
    parser.add_argument("--guide-text-temperature", type=float, default=0.0)
    parser.add_argument("--save-visual-token-importance", action="store_true")
    parser.add_argument("--visual-token-importance-topk", type=int, default=16)
    parser.add_argument("--dynamic", action="store_true")
    parser.add_argument("--max-num", type=int, default=6)
    parser.add_argument("--log-every", type=int, default=20)
    parser.add_argument("--seed", type=int, default=0)
    # Decode-side visual-token pruning options.
    parser.add_argument("--large-model-prune-layer", type=float, default=0.0)
    parser.add_argument("--large-model-prune-ratio", type=float, default=0.4)
    parser.add_argument(
        "--large-model-prune-selection",
        type=str,
        choices=["topk", "random", "similarity_greedy"],
        default="topk",
    )
    parser.add_argument("--consistency-token-ratio", type=float, default=0.05)
    # Model loading options.
    parser.add_argument("--auto", action="store_true")
    parser.add_argument("--load-in-8bit", action="store_true")
    parser.add_argument("--load-in-4bit", action="store_true")
    parser.add_argument("--use-flash-attn", action="store_true")
    args = parser.parse_args()
    # --- Validation --------------------------------------------------------
    if not torch.cuda.is_available():
        raise RuntimeError("CUDA is required for shared-vision guided evaluation.")
    if args.large_model_prune_ratio <= 0 or args.large_model_prune_ratio > 1:
        raise ValueError("large-model-prune-ratio must be in (0, 1].")
    if args.consistency_token_ratio <= 0 or args.consistency_token_ratio > 1:
        raise ValueError("consistency-token-ratio must be in (0, 1].")
    if args.guide_reasoning_attention_weight < 0 or args.guide_answer_attention_weight < 0:
        raise ValueError("guide reasoning/answer attention weights must be >= 0.")
    if args.guide_question_attention_weight < 0:
        raise ValueError("guide question attention weight must be >= 0.")
    # Per-source weight checks mirror the combination logic used at runtime.
    if args.guide_reasoning_mode == "two_pass_explicit":
        attention_source = resolve_guide_attention_source(args)
        if attention_source == "reasoning" and args.guide_reasoning_attention_weight == 0:
            raise ValueError("guide_reasoning_attention_weight must be > 0 when guide-attention-source=reasoning.")
        if (
            attention_source == "answer"
            and args.guide_question_attention_weight == 0
            and args.guide_answer_attention_weight == 0
        ):
            raise ValueError(
                "At least one of guide_question_attention_weight or guide_answer_attention_weight "
                "must be > 0 when guide-attention-source=answer."
            )
        if (
            attention_source == "combined"
            and args.guide_question_attention_weight == 0
            and args.guide_reasoning_attention_weight == 0
            and args.guide_answer_attention_weight == 0
        ):
            raise ValueError("At least one guide attention weight must be > 0 for two_pass_explicit.")
    if (
        args.guide_reasoning_mode != "two_pass_explicit"
        and args.guide_question_attention_weight == 0
        and args.guide_answer_attention_weight == 0
    ):
        raise ValueError("At least one guide question/answer attention weight must be > 0.")
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    evaluate(args)


if __name__ == "__main__":
    main()