diff --git a/eval/vqa/run_full_textvqa_native.sh b/eval/vqa/run_full_textvqa_native.sh new file mode 100644 index 0000000000000000000000000000000000000000..bcde85b58d3a130d228f7fc56a6caa256f01d188 --- /dev/null +++ b/eval/vqa/run_full_textvqa_native.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -euo pipefail + +if [[ $# -ne 1 ]]; then + echo "Usage: $0 RUN_ROOT" >&2 + exit 1 +fi + +RUN_ROOT="$1" +mkdir -p "$RUN_ROOT" + +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." && pwd)" + +export CUDA_VISIBLE_DEVICES=0 +export PYTHONPATH="${REPO_ROOT}:${PYTHONPATH:-}" + +PY=${PYTHON_BIN:-python} +SCRIPT="${REPO_ROOT}/eval/vqa/run_single_model_native.py" +CHECKPOINT_ROOT=${CHECKPOINT_ROOT:-"${REPO_ROOT}/checkpoints"} + +"$PY" "$SCRIPT" \ + --checkpoint "${CHECKPOINT_ROOT}/models--OpenGVLab--InternVL2-2B" \ + --mode textvqa_eval \ + --dataset textvqa_val \ + --run-name native_textvqa_2b_full \ + --out-dir "$RUN_ROOT" | tee "$RUN_ROOT/native_textvqa_2b_full.log" + +"$PY" "$SCRIPT" \ + --checkpoint "${CHECKPOINT_ROOT}/models--OpenGVLab--InternVL2-8B" \ + --mode textvqa_eval \ + --dataset textvqa_val \ + --run-name native_textvqa_8b_full \ + --out-dir "$RUN_ROOT" | tee "$RUN_ROOT/native_textvqa_8b_full.log" diff --git a/eval/vqa/run_shared_vision_guided_textvqa.py b/eval/vqa/run_shared_vision_guided_textvqa.py new file mode 100644 index 0000000000000000000000000000000000000000..e39d79370a76c3ee8d36b639a0b7e1995c63e5c6 --- /dev/null +++ b/eval/vqa/run_shared_vision_guided_textvqa.py @@ -0,0 +1,1751 @@ +import argparse +import inspect +import json +import math +import os +import random +import re +import sys +import time +from functools import wraps +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import torch +from PIL import Image +from transformers import AutoTokenizer +from transformers.generation.logits_process import LogitsProcessorList + + +REPO_ROOT = Path(__file__).resolve().parents[2] +DEFAULT_UPSTREAM_SGL_ROOT = Path(os.environ.get("UPSTREAM_SGL_ROOT", "/home/yf/snap/SGL")) +if str(DEFAULT_UPSTREAM_SGL_ROOT) not in sys.path: + sys.path.insert(0, str(DEFAULT_UPSTREAM_SGL_ROOT)) +eval_vqa_path = DEFAULT_UPSTREAM_SGL_ROOT / "eval" / "vqa" +if str(eval_vqa_path) not in sys.path: + sys.path.insert(0, str(eval_vqa_path)) + +from internvl.conversation import get_conv_template +from internvl.model.internvl_chat import InternVLChatModel +from internvl.model.internvl_chat.configuration_internvl_chat import InternVLChatConfig +from internvl.train.dataset import build_transform, dynamic_preprocess +from textvqa_eval import TextVQAAccuracyEvaluator + + +BASE_PROMPT = "Answer the question using a single word or phrase." +BASE_PROMPT_SUFFIX = " " + BASE_PROMPT +HIDDEN_REASONING_INSTRUCTION = ( + "Think through the relevant visual evidence and any text in the image step by step internally before answering." +) +EXPLICIT_REASONING_INSTRUCTION = ( + "Explain your reasoning step by step using the relevant visual evidence and any text in the image." +) +DEFAULT_FINAL_ANSWER_INSTRUCTION = "Provide the final answer only." +GUIDE_ATTENTION_COT_PROMPT_TEMPLATE = """You are solving a TextVQA task. +Read the image carefully, especially visible text. +Reason through the answer in at least 5 explicit steps. +Do not skip the reasoning. +Question: {question} +1. +2. +3. +4. +5. +Final answer:""" +GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE = """You are solving a TextVQA task. + +Read the image carefully, especially all visible text. 
+Reason using only evidence from the image and OCR text. +You must output exactly 5 numbered reasoning steps. +Each step must be a short sentence. +Do not provide the final answer. +Do not provide a summary. +Do not output any text other than the 5 numbered steps. + +Question: {question} + +1. Identify the most relevant visible text or object. +2. Explain how that evidence relates to the question. +3. Check for another supporting clue in the image. +4. Resolve any ambiguity using the strongest evidence. +5. State the final reasoning conclusion without giving the final answer.""" +GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION = ( + "First reason step by step using the relevant visual evidence and OCR text. " + "Then end with a new line in the exact format: Answer: ." +) +GUIDE_TEXT_HINT_INSTRUCTION = ( + "Give a very short guide hint grounded in the image and OCR text. Use a short phrase, not a full sentence." +) +GUIDED_DECODE_INSTRUCTION = ( + "Use the guide hint only if it matches the image. Answer the question using a single word or phrase." +) + +REASONING_FILTER_STOPWORDS = { + "a", "an", "and", "are", "as", "at", "be", "because", "but", "by", "for", "from", "has", + "have", "if", "in", "into", "is", "it", "its", "of", "on", "or", "that", "the", "their", + "there", "this", "those", "to", "was", "were", "with", +} +REASONING_FILTER_TEMPLATE_WORDS = { + "answer", "conclusion", "directly", "evidence", "final", "identify", "indicating", + "question", "reason", "reasoning", "relates", "relevant", "resolve", "shows", "state", + "strongest", "supporting", "supports", "using", "visible", +} +REASONING_FILTER_POSITION_WORDS = { + "left", "right", "top", "bottom", "middle", "center", "centre", "upper", "lower", +} +REASONING_FILTER_COLOR_WORDS = { + "black", "blue", "brown", "gold", "gray", "green", "grey", "orange", "pink", + "purple", "red", "silver", "white", "yellow", +} +REASONING_FILTER_KEEP_POS = {"NOUN", "PROPN", "ADJ"} +SPACY_REASONING_NLP = None +SPACY_REASONING_LOAD_ATTEMPTED = False +SPACY_REASONING_FALLBACK_WARNED = False + + +def resolve_hf_snapshot(path: str) -> str: + path = os.path.abspath(path) + config_path = os.path.join(path, "config.json") + if os.path.isfile(config_path): + return path + + refs_main = os.path.join(path, "refs", "main") + if os.path.isfile(refs_main): + with open(refs_main) as f: + revision = f.read().strip() + snapshot_path = os.path.join(path, "snapshots", revision) + if os.path.isfile(os.path.join(snapshot_path, "config.json")): + return snapshot_path + + raise FileNotFoundError(f"Could not resolve checkpoint snapshot from: {path}") + + +def configure_model(checkpoint_path: str, use_flash_attn: bool) -> InternVLChatConfig: + checkpoint_path = resolve_hf_snapshot(checkpoint_path) + config = InternVLChatConfig.from_json_file(os.path.join(checkpoint_path, "config.json")) + llm_arch = config.llm_config.architectures[0] + if llm_arch == "InternLM2ForCausalLM": + config.llm_config.attn_implementation = "eager" + else: + config.llm_config._attn_implementation = "eager" + config.vision_config.use_flash_attn = use_flash_attn + return config + + +def patch_internlm2_sample_signature(model: InternVLChatModel) -> None: + language_model_cls = model.language_model.__class__ + sample_fn = getattr(language_model_cls, "_sample", None) + if sample_fn is None or getattr(sample_fn, "_sgl_logits_warper_compat", False): + return + + signature = inspect.signature(sample_fn) + logits_warper_param = signature.parameters.get("logits_warper") + if logits_warper_param is None or 
logits_warper_param.default is not inspect._empty: + return + + @wraps(sample_fn) + def compat_sample( + self, + input_ids: torch.LongTensor, + logits_processor, + stopping_criteria, + generation_config, + synced_gpus: bool, + streamer=None, + logits_warper=None, + **model_kwargs, + ): + # transformers>=4.49 folds samplers into logits_processor and no longer + # passes logits_warper to custom _sample overrides. + if logits_warper is None: + logits_warper = LogitsProcessorList() + return sample_fn( + self, + input_ids=input_ids, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + generation_config=generation_config, + synced_gpus=synced_gpus, + streamer=streamer, + logits_warper=logits_warper, + **model_kwargs, + ) + + compat_sample._sgl_logits_warper_compat = True + language_model_cls._sample = compat_sample + + +def load_model( + checkpoint_path: str, + config: InternVLChatConfig, + auto: bool, + load_in_8bit: bool, + load_in_4bit: bool, +) -> InternVLChatModel: + checkpoint_path = resolve_hf_snapshot(checkpoint_path) + kwargs = {"device_map": "auto"} if auto else {} + model = InternVLChatModel.from_pretrained( + checkpoint_path, + config=config, + low_cpu_mem_usage=True, + torch_dtype=torch.bfloat16, + load_in_8bit=load_in_8bit, + load_in_4bit=load_in_4bit, + **kwargs, + ).eval() + if not auto and not load_in_8bit and not load_in_4bit: + model = model.cuda() + patch_internlm2_sample_signature(model) + return model + + +def build_decode_model( + guide_model: InternVLChatModel, + large_checkpoint: str, + use_flash_attn: bool, + auto: bool, + load_in_8bit: bool, + load_in_4bit: bool, +) -> Tuple[InternVLChatModel, AutoTokenizer]: + large_checkpoint = resolve_hf_snapshot(large_checkpoint) + large_config = configure_model(large_checkpoint, use_flash_attn=use_flash_attn) + large_source = load_model( + large_checkpoint, + large_config, + auto=auto, + load_in_8bit=load_in_8bit, + load_in_4bit=load_in_4bit, + ) + + decode_model = InternVLChatModel( + large_config, + vision_model=guide_model.vision_model, + language_model=large_source.language_model, + ) + decode_model.config.vision_config = guide_model.config.vision_config + decode_model.vision_model.config = guide_model.config.vision_config + decode_model.mlp1 = large_source.mlp1 + decode_model.template = large_source.template + decode_model.system_message = large_source.system_message + decode_model.num_image_token = large_source.num_image_token + decode_model.ps_version = guide_model.ps_version + decode_model.select_layer = guide_model.select_layer + decode_model.downsample_ratio = guide_model.downsample_ratio + decode_model.img_context_token_id = large_source.img_context_token_id + decode_model.eval() + patch_internlm2_sample_signature(decode_model) + + large_tokenizer = AutoTokenizer.from_pretrained( + large_checkpoint, + trust_remote_code=True, + use_fast=False, + ) + return decode_model, large_tokenizer + + +def model_text_device(model: InternVLChatModel) -> torch.device: + return next(model.language_model.get_input_embeddings().parameters()).device + + +def model_vision_device(model: InternVLChatModel) -> torch.device: + return next(model.vision_model.parameters()).device + + +def resolve_image_path(image_path: str, data_root: str, jsonl_dir: str) -> str: + candidates = [] + if os.path.isabs(image_path): + candidates.append(image_path) + candidates.append(os.path.join(data_root, image_path)) + if image_path.startswith("data/"): + candidates.append(os.path.join(data_root, image_path[len("data/"):])) + 
+    candidates.append(os.path.join(jsonl_dir, image_path))
+    candidates.append(os.path.join(jsonl_dir, os.path.basename(image_path)))
+
+    for candidate in candidates:
+        if os.path.exists(candidate):
+            return candidate
+    raise FileNotFoundError(f"Could not resolve image path: {image_path}")
+
+
+class TextVQADataset:
+    def __init__(self, jsonl_path: str, data_root: str, image_size: int, dynamic: bool, use_thumbnail: bool, max_num: int):
+        with open(jsonl_path) as f:
+            self.items = [json.loads(line) for line in f if line.strip()]
+        self.jsonl_dir = os.path.dirname(jsonl_path)
+        self.data_root = data_root
+        self.image_size = image_size
+        self.dynamic = dynamic
+        self.use_thumbnail = use_thumbnail
+        self.max_num = max_num
+        self.transform = build_transform(is_train=False, input_size=image_size)
+
+    def __len__(self) -> int:
+        return len(self.items)
+
+    def __getitem__(self, idx: int) -> Dict[str, object]:
+        item = self.items[idx]
+        image_path = resolve_image_path(item["image"], self.data_root, self.jsonl_dir)
+        image = Image.open(image_path).convert("RGB")
+        if self.dynamic:
+            images = dynamic_preprocess(
+                image,
+                image_size=self.image_size,
+                use_thumbnail=self.use_thumbnail,
+                max_num=self.max_num,
+            )
+        else:
+            images = [image]
+        pixel_values = torch.stack([self.transform(img) for img in images])
+        return {
+            "question_id": item["question_id"],
+            "question": item["question"],
+            "pixel_values": pixel_values,
+            "annotation": item.get("answer", ""),
+        }
+
+
+def load_annotations(annotation_file: str) -> Dict[int, List[str]]:
+    with open(annotation_file) as f:
+        annotations = json.load(f)["annotations"]
+    return {
+        item["question_id"]: [answer["answer"] for answer in item["answers"]]
+        for item in annotations
+    }
+
+
+def build_query(model: InternVLChatModel, tokenizer, question: str, num_patches: int):
+    # Standard InternVL-Chat image tokens: <img>...</img> wrap the repeated
+    # <IMG_CONTEXT> placeholders that are later overwritten with projected
+    # visual features, and <image> marks where they are spliced into the turn.
+    img_context_token = "<IMG_CONTEXT>"
+    img_start_token = "<img>"
+    img_end_token = "</img>"
+
+    if "<image>" not in question:
+        question = "<image>\n" + question
+
+    model.img_context_token_id = tokenizer.convert_tokens_to_ids(img_context_token)
+
+    template = get_conv_template(model.template)
+    template.system_message = model.system_message
+    template.append_message(template.roles[0], question)
+    template.append_message(template.roles[1], None)
+    query = template.get_prompt()
+
+    image_tokens = img_start_token + img_context_token * model.num_image_token * num_patches + img_end_token
+    query = query.replace("<image>", image_tokens, 1)
+    return query, template
+
+
+@torch.inference_mode()
+def extract_shared_raw_visual_tokens(model: InternVLChatModel, pixel_values: torch.Tensor) -> torch.Tensor:
+    vision_device = model_vision_device(model)
+    pixel_values = pixel_values.to(device=vision_device, dtype=torch.bfloat16)
+    if model.select_layer == -1:
+        vit_embeds = model.vision_model(
+            pixel_values=pixel_values,
+            output_hidden_states=False,
+            return_dict=True,
+        ).last_hidden_state
+    else:
+        vit_embeds = model.vision_model(
+            pixel_values=pixel_values,
+            output_hidden_states=True,
+            return_dict=True,
+        ).hidden_states[model.select_layer]
+    vit_embeds = vit_embeds[:, 1:, :]
+    h = w = int(vit_embeds.shape[1] ** 0.5)
+    vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
+    vit_embeds = model.pixel_shuffle(vit_embeds, scale_factor=model.downsample_ratio)
+    return vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
+
+
+@torch.inference_mode()
+def project_visual_tokens(model: InternVLChatModel, raw_visual_tokens: torch.Tensor) -> torch.Tensor:
+    mlp_device = next(model.mlp1.parameters()).device
+    raw_visual_tokens = 
raw_visual_tokens.to(device=mlp_device, dtype=torch.bfloat16) + return model.mlp1(raw_visual_tokens) + + +@torch.inference_mode() +def build_input_embeds_from_visual_features( + model: InternVLChatModel, + input_ids: torch.Tensor, + visual_features: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + input_embeds = model.language_model.get_input_embeddings()(input_ids) + batch_size, seq_len, hidden_size = input_embeds.shape + flat_input_embeds = input_embeds.reshape(batch_size * seq_len, hidden_size) + flat_input_ids = input_ids.reshape(batch_size * seq_len) + selected = flat_input_ids == model.img_context_token_id + if selected.sum().item() == 0: + raise ValueError("No image context tokens found in input_ids.") + flat_input_embeds[selected] = visual_features.reshape(-1, hidden_size).to(flat_input_embeds.device) + return flat_input_embeds.reshape(batch_size, seq_len, hidden_size), flat_input_ids + + +@torch.inference_mode() +def run_guide_generation( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, +) -> Dict[str, object]: + query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0]) + model_inputs = tokenizer(query, return_tensors="pt") + input_device = model_text_device(model) + input_ids = model_inputs["input_ids"].to(input_device) + attention_mask = model_inputs["attention_mask"].to(input_device) + eos_token_id = tokenizer.convert_tokens_to_ids(template.sep) + input_embeds, flat_input_ids = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens) + + visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero() + visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1] + + run_config = dict(generation_config) + run_config["eos_token_id"] = eos_token_id + + outputs = model.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=None, + output_hidden_states=None, + return_dict=None, + use_cache=True, + visual_token_index=(visual_start_index, visual_end_index), + **run_config, + ) + response = tokenizer.batch_decode(outputs["sequences"], skip_special_tokens=True)[0] + response = response.split(template.sep)[0].strip() + return { + "response": response, + "outputs": outputs, + "input_embeds": input_embeds, + "flat_input_ids": flat_input_ids, + "attention_mask": attention_mask, + "visual_token_index": (visual_start_index, visual_end_index), + } + + +def aggregate_attention_from_step(attentions, visual_token_index: Tuple[int, int]) -> torch.Tensor: + visual_start_index, visual_end_index = visual_token_index + visual_token_num = visual_end_index - visual_start_index + 1 + visual_token_importance = None + + for attention in attentions: + if attention is None: + continue + if visual_token_importance is None: + visual_token_importance = torch.zeros( + visual_token_num, + device=attention.device, + dtype=torch.float32, + ) + + merged_attention = attention[0].sum(dim=0) + if attention.shape[2] != 1: + visual_token_importance += merged_attention[ + visual_end_index + 1 :, + visual_start_index : visual_end_index + 1, + ].sum(dim=0) + else: + visual_token_importance += merged_attention[ + 0:1, + visual_start_index : visual_end_index + 1, + ].sum(dim=0) + + if visual_token_importance is None: + raise RuntimeError("Guide model did not return layer attentions for the current decoding step.") + return visual_token_importance + + +def 
count_attention_query_tokens_from_step(attentions, visual_token_index: Tuple[int, int]) -> int: + _, visual_end_index = visual_token_index + for attention in attentions: + if attention is None: + continue + query_length = int(attention.shape[2]) + if query_length != 1: + return max(query_length - int(visual_end_index) - 1, 0) + return 1 + return 0 + + +def count_generated_tokens(outputs) -> int: + sequences = getattr(outputs, "sequences", None) + if sequences is None and isinstance(outputs, dict): + sequences = outputs.get("sequences") + if sequences is None: + return 0 + if sequences.ndim == 0: + return 0 + return int(sequences.shape[-1]) + + +def count_attention_query_tokens_from_generation_outputs( + outputs, + visual_token_index: Tuple[int, int], + step_mask: Optional[List[bool]] = None, +) -> int: + attentions = getattr(outputs, "attentions", None) + if not attentions: + return 0 + + token_count = 0 + for step_idx, step_attentions in enumerate(attentions): + if step_mask is not None and (step_idx >= len(step_mask) or not step_mask[step_idx]): + continue + token_count += count_attention_query_tokens_from_step(step_attentions, visual_token_index) + if token_count == 0 and step_mask is not None: + return count_attention_query_tokens_from_generation_outputs(outputs, visual_token_index, step_mask=None) + return token_count + + +def count_question_and_answer_attention_query_tokens( + outputs, + visual_token_index: Tuple[int, int], +) -> Tuple[int, int]: + attentions = getattr(outputs, "attentions", None) + if not attentions: + return 0, 0 + + question_token_count = 0 + answer_token_count = 0 + for step_idx, step_attentions in enumerate(attentions): + step_token_count = count_attention_query_tokens_from_step(step_attentions, visual_token_index) + if step_idx == 0: + question_token_count += step_token_count + else: + answer_token_count += step_token_count + return question_token_count, answer_token_count + + +def get_reasoning_spacy_nlp(): + global SPACY_REASONING_NLP, SPACY_REASONING_LOAD_ATTEMPTED + if SPACY_REASONING_LOAD_ATTEMPTED: + return SPACY_REASONING_NLP + + SPACY_REASONING_LOAD_ATTEMPTED = True + try: + import spacy + + SPACY_REASONING_NLP = spacy.load("en_core_web_sm", disable=["parser", "lemmatizer"]) + except Exception: + SPACY_REASONING_NLP = None + return SPACY_REASONING_NLP + + +def should_keep_reasoning_heuristic_token(token_text: str) -> bool: + stripped = token_text.strip() + if not stripped: + return False + + lowered = stripped.lower() + if re.fullmatch(r"\d+[.)]?", stripped): + return False + if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS: + return False + if lowered in REASONING_FILTER_POSITION_WORDS or lowered in REASONING_FILTER_COLOR_WORDS: + return True + if any(ch.isdigit() for ch in stripped): + return True + if any(ch.isupper() for ch in stripped): + return True + if any(ch in ".:/-@&" for ch in stripped): + return True + alpha_count = sum(ch.isalpha() for ch in stripped) + return alpha_count >= 4 + + +def should_keep_reasoning_doc_token(token) -> bool: + stripped = token.text.strip() + if not stripped: + return False + + lowered = stripped.lower() + if token.is_punct or token.is_space: + return False + if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS: + return False + if token.pos_ in REASONING_FILTER_KEEP_POS: + return True + return False + + +def build_generated_token_spans(tokenizer, generated_ids: torch.Tensor) -> Tuple[str, List[Tuple[int, int]]]: + decoded_text = "" + 
token_spans: List[Tuple[int, int]] = [] + for token_id in generated_ids.detach().cpu().tolist(): + piece = tokenizer.decode([int(token_id)], skip_special_tokens=True, clean_up_tokenization_spaces=False) + start = len(decoded_text) + decoded_text += piece + token_spans.append((start, len(decoded_text))) + return decoded_text, token_spans + + +def analyze_reasoning_filter(text: str, args) -> Tuple[List[Tuple[int, int]], str, List[Dict[str, object]]]: + if args.guide_reasoning_filter_mode == "none": + return [], "none", [] + + if args.guide_reasoning_filter_mode == "pos_ner": + nlp = get_reasoning_spacy_nlp() + if nlp is not None: + doc = nlp(text) + token_analysis = [] + intervals = [ + (token.idx, token.idx + len(token)) + for token in doc + if should_keep_reasoning_doc_token(token) + ] + for token in doc: + token_analysis.append( + { + "text": token.text, + "lemma": token.lemma_, + "pos": token.pos_, + "tag": token.tag_, + "dep": token.dep_, + "ent_type": token.ent_type_, + "like_num": bool(getattr(token, "like_num", False)), + "like_url": bool(getattr(token, "like_url", False)), + "is_stop": bool(token.is_stop), + "keep": should_keep_reasoning_doc_token(token), + } + ) + return intervals, "spacy_pos_ner", token_analysis + + token_analysis = [] + intervals = [ + (match.start(), match.end()) + for match in re.finditer(r"\S+", text) + if should_keep_reasoning_heuristic_token(match.group(0)) + ] + for match in re.finditer(r"\S+", text): + token_text = match.group(0) + token_analysis.append( + { + "text": token_text, + "lemma": token_text.lower(), + "pos": "", + "tag": "", + "dep": "", + "ent_type": "", + "like_num": any(ch.isdigit() for ch in token_text), + "like_url": "http" in token_text.lower() or "www." in token_text.lower(), + "is_stop": token_text.lower() in REASONING_FILTER_STOPWORDS, + "keep": should_keep_reasoning_heuristic_token(token_text), + } + ) + return intervals, "heuristic_fallback", token_analysis + + +def build_reasoning_attention_step_mask_and_debug(tokenizer, outputs, args) -> Tuple[Optional[List[bool]], Dict[str, object]]: + if args.guide_reasoning_filter_mode == "none": + return None, {"backend": "none", "kept_tokens": [], "token_analysis": []} + + sequences = outputs["sequences"][0] + decoded_text, token_spans = build_generated_token_spans(tokenizer, sequences) + intervals, backend, token_analysis = analyze_reasoning_filter(decoded_text, args) + + global SPACY_REASONING_FALLBACK_WARNED + if backend == "heuristic_fallback" and not SPACY_REASONING_FALLBACK_WARNED: + print("Warning: spaCy POS/NER model unavailable; guide reasoning filter is using heuristic fallback.") + SPACY_REASONING_FALLBACK_WARNED = True + + debug_info = { + "backend": backend, + "token_analysis": token_analysis, + "kept_tokens": [token["text"] for token in token_analysis if token.get("keep")], + } + if not intervals: + return None, debug_info + + step_mask = [] + for start, end in token_spans: + if start == end: + step_mask.append(False) + continue + keep = any(start < interval_end and end > interval_start for interval_start, interval_end in intervals) + step_mask.append(keep) + + debug_info["step_mask"] = step_mask + if not any(step_mask): + return None, debug_info + return step_mask, debug_info + + +def aggregate_attention_from_generation_outputs( + outputs, + visual_token_index: Tuple[int, int], + step_mask: Optional[List[bool]] = None, +) -> torch.Tensor: + aggregated = getattr(outputs, "aggregated_viusal_token_attention", None) + if aggregated is not None and step_mask is None: + return 
aggregated.detach().float() + + attentions = getattr(outputs, "attentions", None) + if not attentions: + raise RuntimeError("Guide generation did not return attentions; enable output_attentions.") + + visual_token_importance = None + for step_idx, step_attentions in enumerate(attentions): + if step_mask is not None and (step_idx >= len(step_mask) or not step_mask[step_idx]): + continue + step_importance = aggregate_attention_from_step(step_attentions, visual_token_index) + if visual_token_importance is None: + visual_token_importance = step_importance + else: + visual_token_importance = visual_token_importance + step_importance + + if visual_token_importance is None: + if step_mask is not None: + return aggregate_attention_from_generation_outputs(outputs, visual_token_index, step_mask=None) + raise RuntimeError("Guide generation returned no attention steps.") + return visual_token_importance + + +def aggregate_question_and_answer_attention_from_generation_outputs( + outputs, + visual_token_index: Tuple[int, int], +) -> Tuple[torch.Tensor, torch.Tensor]: + attentions = getattr(outputs, "attentions", None) + if not attentions: + raise RuntimeError("Guide generation did not return attentions; enable output_attentions.") + + question_visual_token_importance = None + answer_visual_token_importance = None + for step_idx, step_attentions in enumerate(attentions): + step_importance = aggregate_attention_from_step(step_attentions, visual_token_index) + if step_idx == 0: + if question_visual_token_importance is None: + question_visual_token_importance = step_importance + else: + question_visual_token_importance = question_visual_token_importance + step_importance + else: + if answer_visual_token_importance is None: + answer_visual_token_importance = step_importance + else: + answer_visual_token_importance = answer_visual_token_importance + step_importance + + if question_visual_token_importance is None and answer_visual_token_importance is None: + raise RuntimeError("Guide generation returned no attention steps.") + if question_visual_token_importance is None: + question_visual_token_importance = torch.zeros_like(answer_visual_token_importance) + if answer_visual_token_importance is None: + answer_visual_token_importance = torch.zeros_like(question_visual_token_importance) + return question_visual_token_importance, answer_visual_token_importance + + +@torch.inference_mode() +def compute_consistency_score( + model: InternVLChatModel, + input_embeds: torch.Tensor, + flat_input_ids: torch.Tensor, + attention_mask: torch.Tensor, + generated_ids: torch.Tensor, + visual_token_importance: torch.Tensor, + visual_token_index: Tuple[int, int], + consistency_token_ratio: float, + large_model_prune_selection: str, +) -> torch.Tensor: + visual_start_index, visual_end_index = visual_token_index + new_input_ids_ = generated_ids + new_token_num = new_input_ids_.shape[-1] + new_input_embedding = torch.concatenate( + (input_embeds, model.language_model.get_input_embeddings()(new_input_ids_).unsqueeze(0)), + dim=1, + ) + new_attention_mask = torch.concatenate( + ( + attention_mask, + torch.ones((1, new_input_ids_.shape[0]), device=attention_mask.device, dtype=attention_mask.dtype), + ), + dim=-1, + ) + new_input_ids = torch.concatenate((flat_input_ids, new_input_ids_), dim=-1) + consistency_generate_kwargs = { + "large_model_prune_layer": 0.0, + "large_model_prune_ratio": consistency_token_ratio, + "large_model_prune_selection": large_model_prune_selection, + "visual_token_index": (visual_start_index, 
visual_end_index), + "visual_token_importance": visual_token_importance, + "inputs_embeds": new_input_embedding, + "attention_mask": new_attention_mask, + "output_scores": False, + "output_attentions": False, + "return_dict_in_generate": False, + "use_cache": True, + } + consistency_generate_kwargs["inputs_embeds"] = new_input_embedding + consistency_generate_kwargs["attention_mask"] = new_attention_mask + consistency_generate_kwargs["output_scores"] = False + consistency_generate_kwargs["output_attentions"] = False + consistency_generate_kwargs = model.language_model._get_initial_cache_position(new_input_ids, consistency_generate_kwargs) + model_inputs = model.language_model.prepare_inputs_for_generation(new_input_ids, **consistency_generate_kwargs) + consistency_output = model.language_model.forward(**model_inputs, return_dict=True) + consistency_score = torch.gather( + consistency_output["logits"][:, -new_token_num - 1 : -1, :].softmax(dim=-1), + index=new_input_ids_[None, :, None], + dim=-1, + ) + return torch.prod(consistency_score) + + +@torch.inference_mode() +def run_guide_branch( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, + consistency_token_ratio: float, + args, +) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Dict[str, int]]: + generation_result = run_guide_generation( + model, + tokenizer, + projected_visual_tokens, + question, + generation_config, + ) + outputs = generation_result["outputs"] + question_visual_token_importance, answer_visual_token_importance = ( + aggregate_question_and_answer_attention_from_generation_outputs( + outputs, + generation_result["visual_token_index"], + ) + ) + question_attention_token_count, answer_attention_token_count = count_question_and_answer_attention_query_tokens( + outputs, + generation_result["visual_token_index"], + ) + visual_token_importance = combine_question_and_answer_attention( + question_visual_token_importance, + answer_visual_token_importance, + args, + ) + if args.large_model_prune_selection == "similarity_greedy": + consistency_score = torch.tensor(1.0, device=visual_token_importance.device) + else: + consistency_score = compute_consistency_score( + model, + generation_result["input_embeds"], + generation_result["flat_input_ids"], + generation_result["attention_mask"], + outputs["sequences"][0], + visual_token_importance, + generation_result["visual_token_index"], + consistency_token_ratio, + args.large_model_prune_selection, + ) + return ( + generation_result["response"], + outputs.scores, + consistency_score, + visual_token_importance, + question_visual_token_importance, + answer_visual_token_importance, + { + "question_attention_token_count": question_attention_token_count, + "answer_attention_token_count": answer_attention_token_count, + "reasoning_attention_token_count": 0, + "guide_answer_generated_token_count": count_generated_tokens(outputs), + "guide_reasoning_generated_token_count": 0, + }, + ) + + +@torch.inference_mode() +def run_decode_branch( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, + visual_token_importance: torch.Tensor, + large_model_prune_layer: float, + large_model_prune_ratio: float, + large_model_prune_selection: str, +) -> str: + query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0]) + model_inputs = tokenizer(query, return_tensors="pt") + input_device = 
model_text_device(model) + input_ids = model_inputs["input_ids"].to(input_device) + attention_mask = model_inputs["attention_mask"].to(input_device) + eos_token_id = tokenizer.convert_tokens_to_ids(template.sep) + input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens) + + visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero() + visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1] + + run_config = dict(generation_config) + run_config["eos_token_id"] = eos_token_id + run_config["return_dict_in_generate"] = False + run_config["output_scores"] = False + run_config["output_attentions"] = False + run_config["large_model_prune_layer"] = large_model_prune_layer + run_config["large_model_prune_ratio"] = large_model_prune_ratio + run_config["large_model_prune_selection"] = large_model_prune_selection + run_config["visual_token_importance"] = visual_token_importance + run_config["visual_token_index"] = (visual_start_index, visual_end_index) + + output_ids = model.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=None, + output_hidden_states=None, + return_dict=None, + use_cache=True, + **run_config, + ) + response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] + return response.split(template.sep)[0].strip() + + +def make_generation_config(args) -> dict: + generation_config = { + "num_beams": args.num_beams, + "max_new_tokens": args.max_new_tokens, + "min_new_tokens": 1, + "do_sample": args.temperature > 0, + "return_dict_in_generate": True, + "output_scores": True, + "output_attentions": True, + } + if args.temperature > 0: + generation_config["temperature"] = args.temperature + return generation_config + + +def append_instruction(question: str, instruction: str) -> str: + instruction = instruction.strip() + if not instruction: + return question + return f"{question.rstrip()}\n{instruction}" + + +def make_reasoning_generation_config(base_generation_config: dict, args) -> dict: + generation_config = dict(base_generation_config) + generation_config["max_new_tokens"] = args.reasoning_max_new_tokens + generation_config["return_dict_in_generate"] = True + generation_config["output_scores"] = True + generation_config["output_attentions"] = True + temperature = args.reasoning_temperature + generation_config["do_sample"] = temperature > 0 + if temperature > 0: + generation_config["temperature"] = temperature + else: + generation_config.pop("temperature", None) + return generation_config + + +def make_custom_generation_config( + base_generation_config: dict, + max_new_tokens: int, + temperature: float, + return_dict_in_generate: bool, + output_scores: bool, + output_attentions: bool, +) -> dict: + generation_config = dict(base_generation_config) + generation_config["max_new_tokens"] = max_new_tokens + generation_config["return_dict_in_generate"] = return_dict_in_generate + generation_config["output_scores"] = output_scores + generation_config["output_attentions"] = output_attentions + generation_config["do_sample"] = temperature > 0 + if temperature > 0: + generation_config["temperature"] = temperature + else: + generation_config.pop("temperature", None) + return generation_config + + +def normalize_generated_text(text: str) -> str: + return " ".join(text.strip().split()) + + +def strip_base_prompt(question: str) -> str: + if question.endswith(BASE_PROMPT_SUFFIX): + return question[: -len(BASE_PROMPT_SUFFIX)].rstrip() + return 
question + + +def summarize_visual_token_importance(visual_token_importance: torch.Tensor, topk: int) -> Dict[str, object]: + values = visual_token_importance.detach().float().view(-1).cpu() + total = values.sum().item() + if total > 0: + normalized = values / total + else: + normalized = torch.full_like(values, 1.0 / max(values.numel(), 1)) + + topk = min(topk, normalized.numel()) + top_values, top_indices = torch.topk(normalized, k=topk) + entropy = -(normalized * torch.clamp(normalized, min=1e-12).log()).sum().item() + return { + "raw_sum": total, + "entropy": entropy, + "max_weight": normalized.max().item(), + "top_indices": top_indices.tolist(), + "top_weights": top_values.tolist(), + "weights": normalized.tolist(), + } + + +def normalize_visual_token_importance(visual_token_importance: torch.Tensor) -> torch.Tensor: + visual_token_importance = visual_token_importance.detach().float() + total = visual_token_importance.sum() + if total.item() > 0: + return visual_token_importance / total + return torch.full_like(visual_token_importance, 1.0 / max(visual_token_importance.numel(), 1)) + + +def prepare_decode_visual_token_importance( + visual_token_importance: torch.Tensor, + selection_mode: str, +) -> torch.Tensor: + raw_importance = visual_token_importance.detach().float() + if selection_mode in {"topk", "similarity_greedy"}: + return raw_importance + if selection_mode == "random": + return torch.rand_like(raw_importance) + raise ValueError(f"Unsupported large model prune selection mode: {selection_mode}") + + +def maybe_normalize_visual_token_importance(visual_token_importance: torch.Tensor, args) -> torch.Tensor: + if args.guide_attention_aggregation_mode == "normalized": + return normalize_visual_token_importance(visual_token_importance) + return visual_token_importance.detach().float() + + +def combine_question_and_answer_attention( + question_visual_token_importance: torch.Tensor, + answer_visual_token_importance: torch.Tensor, + args, +) -> torch.Tensor: + question_weight = args.guide_question_attention_weight + answer_weight = args.guide_answer_attention_weight + if question_weight == 0 and answer_weight == 0: + raise ValueError("At least one guide question/answer attention weight must be > 0.") + + return ( + question_weight * maybe_normalize_visual_token_importance(question_visual_token_importance, args) + + answer_weight * maybe_normalize_visual_token_importance(answer_visual_token_importance, args) + ) + + +def resolve_guide_attention_source(args) -> str: + if args.guide_attention_source != "default": + return args.guide_attention_source + if args.guide_reasoning_mode == "two_pass_explicit": + return "combined" + return "answer" + + +def combine_reasoning_and_answer_attention( + reasoning_visual_token_importance: torch.Tensor, + answer_visual_token_importance: torch.Tensor, + args, +) -> torch.Tensor: + attention_source = resolve_guide_attention_source(args) + if attention_source == "reasoning": + return args.guide_reasoning_attention_weight * maybe_normalize_visual_token_importance( + reasoning_visual_token_importance, + args, + ) + if attention_source == "answer": + return args.guide_answer_attention_weight * maybe_normalize_visual_token_importance( + answer_visual_token_importance, + args, + ) + + reasoning_weight = args.guide_reasoning_attention_weight + answer_weight = args.guide_answer_attention_weight + if reasoning_weight == 0 and answer_weight == 0: + raise ValueError("At least one guide attention weight must be > 0.") + + return ( + reasoning_weight * 
maybe_normalize_visual_token_importance(reasoning_visual_token_importance, args) + + answer_weight * maybe_normalize_visual_token_importance(answer_visual_token_importance, args) + ) + + +def combine_question_reasoning_and_answer_attention( + question_visual_token_importance: torch.Tensor, + reasoning_visual_token_importance: torch.Tensor, + answer_visual_token_importance: torch.Tensor, + args, +) -> torch.Tensor: + attention_source = resolve_guide_attention_source(args) + if attention_source == "reasoning": + return args.guide_reasoning_attention_weight * maybe_normalize_visual_token_importance( + reasoning_visual_token_importance, + args, + ) + if attention_source == "answer": + return combine_question_and_answer_attention( + question_visual_token_importance, + answer_visual_token_importance, + args, + ) + + return combine_question_and_answer_attention( + question_visual_token_importance, + answer_visual_token_importance, + args, + ) + args.guide_reasoning_attention_weight * reasoning_visual_token_importance.detach().float() + + +def build_guide_attention_question(question: str, args) -> str: + if args.guide_reasoning_mode == "short_cot": + return GUIDE_ATTENTION_COT_PROMPT_TEMPLATE.replace("{question}", strip_base_prompt(question)) + if args.guide_reasoning_mode == "explicit_cot": + return append_instruction(strip_base_prompt(question), GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION) + return question + + +def build_guide_reasoning_question(question: str) -> str: + return GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE.replace( + "{question}", + strip_base_prompt(question), + ) + + +def build_guide_text_question(question: str) -> str: + return append_instruction(question, GUIDE_TEXT_HINT_INSTRUCTION) + + +def build_decode_question(question: str, guide_text_hint: Optional[str]) -> str: + if not guide_text_hint: + return question + return append_instruction( + question, + f"Guide hint: {guide_text_hint}\n{GUIDED_DECODE_INSTRUCTION}", + ) + + +def make_guide_attention_generation_config(base_generation_config: dict, args) -> dict: + if args.guide_reasoning_mode in {"short_cot", "explicit_cot", "two_pass_explicit"}: + return make_custom_generation_config( + base_generation_config, + max_new_tokens=args.guide_reasoning_max_new_tokens, + temperature=args.guide_reasoning_temperature, + return_dict_in_generate=True, + output_scores=True, + output_attentions=True, + ) + return dict(base_generation_config) + + +def make_guide_text_generation_config(base_generation_config: dict, args) -> dict: + return make_custom_generation_config( + base_generation_config, + max_new_tokens=args.guide_text_max_new_tokens, + temperature=args.guide_text_temperature, + return_dict_in_generate=False, + output_scores=False, + output_attentions=False, + ) + + +@torch.inference_mode() +def run_text_generation_branch( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, +) -> str: + query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0]) + model_inputs = tokenizer(query, return_tensors="pt") + input_device = model_text_device(model) + input_ids = model_inputs["input_ids"].to(input_device) + attention_mask = model_inputs["attention_mask"].to(input_device) + eos_token_id = tokenizer.convert_tokens_to_ids(template.sep) + input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens) + + run_config = dict(generation_config) + run_config["eos_token_id"] = eos_token_id + output_ids = 
model.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=None, + output_hidden_states=None, + return_dict=None, + use_cache=True, + **run_config, + ) + response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] + return response.split(template.sep)[0].strip() + + +def run_decode_answer( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, + visual_token_importance: torch.Tensor, + args, +) -> str: + return run_decode_branch( + model, + tokenizer, + projected_visual_tokens, + question, + generation_config, + prepare_decode_visual_token_importance( + visual_token_importance, + args.large_model_prune_selection, + ), + args.large_model_prune_layer, + args.large_model_prune_ratio, + args.large_model_prune_selection, + ) + + +@torch.inference_mode() +def run_guide_two_pass_explicit_branch( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + reasoning_generation_config: dict, + answer_generation_config: dict, + consistency_token_ratio: float, + args, +) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, str, Dict[str, object], Dict[str, int]]: + answer_result = run_guide_generation( + model, + tokenizer, + projected_visual_tokens, + question, + answer_generation_config, + ) + reasoning_result = run_guide_generation( + model, + tokenizer, + projected_visual_tokens, + build_guide_reasoning_question(question), + reasoning_generation_config, + ) + reasoning = reasoning_result["response"] + + reasoning_step_mask, reasoning_filter_debug = build_reasoning_attention_step_mask_and_debug( + tokenizer, + reasoning_result["outputs"], + args, + ) + reasoning_visual_token_importance = aggregate_attention_from_generation_outputs( + reasoning_result["outputs"], + reasoning_result["visual_token_index"], + reasoning_step_mask, + ) + reasoning_attention_token_count = count_attention_query_tokens_from_generation_outputs( + reasoning_result["outputs"], + reasoning_result["visual_token_index"], + reasoning_step_mask, + ) + question_visual_token_importance, answer_visual_token_importance = ( + aggregate_question_and_answer_attention_from_generation_outputs( + answer_result["outputs"], + answer_result["visual_token_index"], + ) + ) + question_attention_token_count, answer_attention_token_count = count_question_and_answer_attention_query_tokens( + answer_result["outputs"], + answer_result["visual_token_index"], + ) + visual_token_importance = combine_question_reasoning_and_answer_attention( + question_visual_token_importance, + reasoning_visual_token_importance, + answer_visual_token_importance, + args, + ) + if args.large_model_prune_selection == "similarity_greedy": + consistency_score = torch.tensor(1.0, device=visual_token_importance.device) + else: + consistency_score = compute_consistency_score( + model, + answer_result["input_embeds"], + answer_result["flat_input_ids"], + answer_result["attention_mask"], + answer_result["outputs"]["sequences"][0], + visual_token_importance, + answer_result["visual_token_index"], + consistency_token_ratio, + args.large_model_prune_selection, + ) + return ( + answer_result["response"], + answer_result["outputs"].scores, + consistency_score, + visual_token_importance, + reasoning, + reasoning_filter_debug, + { + "question_attention_token_count": question_attention_token_count, + "answer_attention_token_count": answer_attention_token_count, + "reasoning_attention_token_count": 
reasoning_attention_token_count, + "guide_answer_generated_token_count": count_generated_tokens(answer_result["outputs"]), + "guide_reasoning_generated_token_count": count_generated_tokens(reasoning_result["outputs"]), + }, + ) + + +def generate_with_reasoning( + guide_model: InternVLChatModel, + guide_tokenizer, + decode_model: InternVLChatModel, + large_tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, + reasoning_generation_config: dict, + visual_token_importance: torch.Tensor, + args, +) -> Tuple[str, str]: + reasoning_question = append_instruction(question, EXPLICIT_REASONING_INSTRUCTION) + reasoning = run_decode_answer( + decode_model, + large_tokenizer, + projected_visual_tokens, + reasoning_question, + reasoning_generation_config, + visual_token_importance, + args, + ) + final_question = append_instruction( + question, + f"Reasoning:\n{reasoning}\n{DEFAULT_FINAL_ANSWER_INSTRUCTION}", + ) + answer = run_decode_answer( + decode_model, + large_tokenizer, + projected_visual_tokens, + final_question, + generation_config, + visual_token_importance, + args, + ) + return answer, reasoning + + +def evaluate(args): + guide_checkpoint = resolve_hf_snapshot(args.guide_checkpoint) + large_checkpoint = resolve_hf_snapshot(args.large_checkpoint) + + guide_tokenizer = AutoTokenizer.from_pretrained(guide_checkpoint, trust_remote_code=True, use_fast=False) + guide_config = configure_model(guide_checkpoint, use_flash_attn=args.use_flash_attn) + guide_model = load_model( + guide_checkpoint, + guide_config, + auto=args.auto, + load_in_8bit=args.load_in_8bit, + load_in_4bit=args.load_in_4bit, + ) + decode_model, large_tokenizer = build_decode_model( + guide_model, + large_checkpoint, + use_flash_attn=args.use_flash_attn, + auto=args.auto, + load_in_8bit=args.load_in_8bit, + load_in_4bit=args.load_in_4bit, + ) + + guide_image_size = guide_model.config.force_image_size or guide_model.config.vision_config.image_size + large_image_size = decode_model.config.force_image_size or decode_model.config.vision_config.image_size + if guide_image_size != large_image_size: + raise ValueError(f"Guide and decode image size mismatch: {guide_image_size} vs {large_image_size}") + if guide_model.num_image_token != decode_model.num_image_token: + raise ValueError( + f"Guide and decode image token count mismatch: {guide_model.num_image_token} vs {decode_model.num_image_token}" + ) + + data_root = os.path.abspath(args.data_root) + textvqa_root = os.path.abspath(args.textvqa_root) if args.textvqa_root else os.path.join(data_root, "data", "textvqa") + dataset = TextVQADataset( + jsonl_path=os.path.join(textvqa_root, "textvqa_val.jsonl"), + data_root=data_root, + image_size=guide_image_size, + dynamic=args.dynamic, + use_thumbnail=guide_model.config.use_thumbnail, + max_num=args.max_num, + ) + question_id_to_answers = load_annotations(os.path.join(textvqa_root, "textvqa_val_annotations.json")) + generation_config = make_generation_config(args) + guide_attention_generation_config = make_guide_attention_generation_config(generation_config, args) + guide_text_generation_config = None + if args.guide_text_mode != "none": + guide_text_generation_config = make_guide_text_generation_config(generation_config, args) + reasoning_generation_config = None + if args.reasoning_mode == "two_pass": + reasoning_generation_config = make_reasoning_generation_config(generation_config, args) + + num_items = len(dataset) if args.limit is None else min(len(dataset), args.limit) + results = [] + 
filter_debug_results = [] + + for idx in range(num_items): + sample = dataset[idx] + question = sample["question"] + " " + BASE_PROMPT + pixel_values = sample["pixel_values"] + guide_attention_question = build_guide_attention_question(question, args) + + torch.cuda.synchronize() + start = time.time() + raw_visual_tokens = extract_shared_raw_visual_tokens(guide_model, pixel_values) + guide_visual_tokens = project_visual_tokens(guide_model, raw_visual_tokens) + guide_reasoning = None + guide_reasoning_filter_debug = {"backend": "none", "kept_tokens": [], "token_analysis": []} + guide_attention_token_counts = { + "question_attention_token_count": 0, + "answer_attention_token_count": 0, + "reasoning_attention_token_count": 0, + "guide_answer_generated_token_count": 0, + "guide_reasoning_generated_token_count": 0, + } + question_visual_token_importance = None + answer_visual_token_importance = None + if args.guide_reasoning_mode == "two_pass_explicit": + ( + guide_answer, + guide_scores, + consistency_score, + visual_token_importance, + guide_reasoning, + guide_reasoning_filter_debug, + guide_attention_token_counts, + ) = ( + run_guide_two_pass_explicit_branch( + guide_model, + guide_tokenizer, + guide_visual_tokens, + question, + guide_attention_generation_config, + generation_config, + args.consistency_token_ratio, + args, + ) + ) + else: + ( + guide_answer, + guide_scores, + consistency_score, + visual_token_importance, + question_visual_token_importance, + answer_visual_token_importance, + guide_attention_token_counts, + ) = run_guide_branch( + guide_model, + guide_tokenizer, + guide_visual_tokens, + guide_attention_question, + guide_attention_generation_config, + args.consistency_token_ratio, + args, + ) + guide_text_hint = None + if args.guide_text_mode != "none": + if guide_text_generation_config is None: + raise ValueError("guide_text_generation_config is required when guide_text_mode is enabled.") + guide_text_hint = normalize_generated_text( + run_text_generation_branch( + guide_model, + guide_tokenizer, + guide_visual_tokens, + build_guide_text_question(question), + guide_text_generation_config, + ) + ) + torch.cuda.synchronize() + end = time.time() + small_model_time = end - start + + scores = torch.concatenate(guide_scores, dim=0) + scores, _ = scores.softmax(dim=-1).max(dim=-1) + original_confidence = math.pow(torch.prod(scores).item(), 1 / len(scores)) + + torch.cuda.synchronize() + start = time.time() + large_visual_tokens = project_visual_tokens(decode_model, raw_visual_tokens) + decode_question = build_decode_question(question, guide_text_hint) + reasoning = None + if args.reasoning_mode == "none": + large_answer = run_decode_answer( + decode_model, + large_tokenizer, + large_visual_tokens, + decode_question, + generation_config, + visual_token_importance, + args, + ) + elif args.reasoning_mode == "prompt": + prompted_question = append_instruction(decode_question, HIDDEN_REASONING_INSTRUCTION) + large_answer = run_decode_answer( + decode_model, + large_tokenizer, + large_visual_tokens, + prompted_question, + generation_config, + visual_token_importance, + args, + ) + else: + if reasoning_generation_config is None: + raise ValueError("reasoning_generation_config is required when reasoning_mode='two_pass'.") + large_answer, reasoning = generate_with_reasoning( + guide_model, + guide_tokenizer, + decode_model, + large_tokenizer, + large_visual_tokens, + decode_question, + generation_config, + reasoning_generation_config, + visual_token_importance, + args, + ) + 
torch.cuda.synchronize() + end = time.time() + large_model_time = end - start + + visual_token_count = visual_token_importance.shape[0] + kept_visual_token_count = max(1, int(visual_token_count * args.large_model_prune_ratio)) + result_item = { + "question_id": sample["question_id"], + "question": sample["question"], + "answer": large_answer, + "pred_answer": large_answer, + "gt_answers": question_id_to_answers[sample["question_id"]], + "small_answer": guide_answer, + "guide_attention_output": guide_answer, + "large_answer": large_answer, + "small_model_time": small_model_time, + "large_model_time": large_model_time, + "original_confidence": original_confidence, + "consistency_score": consistency_score.item(), + "visual_token_count": visual_token_count, + "kept_visual_token_count": kept_visual_token_count, + "guide_attention_token_counts": guide_attention_token_counts, + } + if args.save_visual_token_importance: + result_item["visual_token_importance_stats"] = summarize_visual_token_importance( + visual_token_importance, + topk=args.visual_token_importance_topk, + ) + if question_visual_token_importance is not None: + result_item["question_visual_token_importance_stats"] = summarize_visual_token_importance( + question_visual_token_importance, + topk=args.visual_token_importance_topk, + ) + if answer_visual_token_importance is not None: + result_item["answer_visual_token_importance_stats"] = summarize_visual_token_importance( + answer_visual_token_importance, + topk=args.visual_token_importance_topk, + ) + if guide_text_hint is not None: + result_item["guide_text_hint"] = guide_text_hint + if args.save_reasoning and guide_reasoning is not None: + result_item["guide_reasoning"] = guide_reasoning + if args.save_reasoning and reasoning is not None: + result_item["large_reasoning"] = reasoning + results.append(result_item) + filter_debug_results.append( + { + "question_id": sample["question_id"], + "question": sample["question"], + "small_answer": guide_answer, + "large_answer": large_answer, + "guide_reasoning": guide_reasoning, + "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode, + "guide_reasoning_filter_backend": guide_reasoning_filter_debug.get("backend", "none"), + "kept_tokens": guide_reasoning_filter_debug.get("kept_tokens", []), + "token_analysis": guide_reasoning_filter_debug.get("token_analysis", []), + } + ) + if (idx + 1) % args.log_every == 0 or idx + 1 == num_items: + status = ( + f"[{idx + 1}/{num_items}] question_id={sample['question_id']} " + f"small={guide_answer} large={large_answer} kept={kept_visual_token_count}/{visual_token_count}" + ) + if guide_text_hint is not None: + status += f" hint={guide_text_hint}" + print(status) + sys.stdout.flush() + + evaluator = TextVQAAccuracyEvaluator() + accuracy = evaluator.eval_pred_list(results) + + os.makedirs(args.out_dir, exist_ok=True) + run_name = args.run_name or "textvqa_shared_vision_2bguide_8btext" + result_path = os.path.join(args.out_dir, f"{run_name}.json") + summary_path = os.path.join(args.out_dir, f"{run_name}.summary.json") + filter_debug_path = os.path.join(args.out_dir, f"{run_name}.filter_debug.json") + + with open(result_path, "w") as f: + json.dump(results, f, ensure_ascii=False, indent=2) + with open(filter_debug_path, "w") as f: + json.dump(filter_debug_results, f, ensure_ascii=False, indent=2) + + token_count_keys = [ + "question_attention_token_count", + "answer_attention_token_count", + "reasoning_attention_token_count", + "guide_answer_generated_token_count", + 
"guide_reasoning_generated_token_count", + ] + avg_guide_attention_token_counts = { + key: ( + sum(item.get("guide_attention_token_counts", {}).get(key, 0) for item in results) + / max(len(results), 1) + ) + for key in token_count_keys + } + + summary = { + "mode": "shared_vision_guided", + "guide_checkpoint": guide_checkpoint, + "large_checkpoint": large_checkpoint, + "count": num_items, + "accuracy": accuracy, + "large_model_prune_layer": args.large_model_prune_layer, + "large_model_prune_ratio": args.large_model_prune_ratio, + "large_model_prune_selection": args.large_model_prune_selection, + "consistency_token_ratio": args.consistency_token_ratio, + "guide_reasoning_mode": args.guide_reasoning_mode, + "guide_reasoning_max_new_tokens": args.guide_reasoning_max_new_tokens, + "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode, + "guide_attention_aggregation_mode": args.guide_attention_aggregation_mode, + "guide_attention_source": resolve_guide_attention_source(args), + "guide_reasoning_attention_weight": args.guide_reasoning_attention_weight, + "guide_answer_attention_weight": args.guide_answer_attention_weight, + "guide_question_attention_weight": args.guide_question_attention_weight, + "guide_text_mode": args.guide_text_mode, + "guide_text_max_new_tokens": args.guide_text_max_new_tokens, + "avg_guide_attention_token_counts": avg_guide_attention_token_counts, + "avg_small_model_time": sum(item["small_model_time"] for item in results) / max(len(results), 1), + "avg_large_model_time": sum(item["large_model_time"] for item in results) / max(len(results), 1), + "results_file": result_path, + "filter_debug_file": filter_debug_path, + } + with open(summary_path, "w") as f: + json.dump(summary, f, ensure_ascii=False, indent=2) + + print(f"accuracy: {accuracy:.6f}") + print(f"results_file: {result_path}") + print(f"summary_file: {summary_path}") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--guide-checkpoint", type=str, required=True) + parser.add_argument("--large-checkpoint", type=str, required=True) + parser.add_argument("--data-root", type=str, default=str(REPO_ROOT)) + parser.add_argument("--textvqa-root", type=str, default="") + parser.add_argument("--out-dir", type=str, default=str(REPO_ROOT / "outputs" / "shared_vision_guided")) + parser.add_argument("--run-name", type=str, default="") + parser.add_argument("--limit", type=int, default=None) + parser.add_argument("--max-new-tokens", type=int, default=10) + parser.add_argument("--num-beams", type=int, default=1) + parser.add_argument("--temperature", type=float, default=0.0) + parser.add_argument("--reasoning-mode", type=str, choices=["none", "prompt", "two_pass"], default="none") + parser.add_argument("--reasoning-max-new-tokens", type=int, default=64) + parser.add_argument("--reasoning-temperature", type=float, default=0.0) + parser.add_argument("--save-reasoning", action="store_true") + parser.add_argument( + "--guide-reasoning-mode", + type=str, + choices=["none", "short_cot", "explicit_cot", "two_pass_explicit"], + default="none", + ) + parser.add_argument("--guide-reasoning-max-new-tokens", type=int, default=1024) + parser.add_argument("--guide-reasoning-temperature", type=float, default=0.0) + parser.add_argument( + "--guide-reasoning-filter-mode", + type=str, + choices=["none", "pos_ner"], + default="none", + ) + parser.add_argument( + "--guide-attention-source", + type=str, + choices=["default", "reasoning", "answer", "combined"], + default="default", + ) + parser.add_argument( + 
"--guide-attention-aggregation-mode", + type=str, + choices=["raw", "normalized"], + default="raw", + ) + parser.add_argument("--guide-question-attention-weight", type=float, default=1.0) + parser.add_argument("--guide-reasoning-attention-weight", type=float, default=1.0) + parser.add_argument("--guide-answer-attention-weight", type=float, default=1.0) + parser.add_argument("--guide-text-mode", type=str, choices=["none", "short_rationale"], default="none") + parser.add_argument("--guide-text-max-new-tokens", type=int, default=12) + parser.add_argument("--guide-text-temperature", type=float, default=0.0) + parser.add_argument("--save-visual-token-importance", action="store_true") + parser.add_argument("--visual-token-importance-topk", type=int, default=16) + parser.add_argument("--dynamic", action="store_true") + parser.add_argument("--max-num", type=int, default=6) + parser.add_argument("--log-every", type=int, default=20) + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--large-model-prune-layer", type=float, default=0.0) + parser.add_argument("--large-model-prune-ratio", type=float, default=0.4) + parser.add_argument( + "--large-model-prune-selection", + type=str, + choices=["topk", "random", "similarity_greedy"], + default="topk", + ) + parser.add_argument("--consistency-token-ratio", type=float, default=0.05) + parser.add_argument("--auto", action="store_true") + parser.add_argument("--load-in-8bit", action="store_true") + parser.add_argument("--load-in-4bit", action="store_true") + parser.add_argument("--use-flash-attn", action="store_true") + args = parser.parse_args() + + if not torch.cuda.is_available(): + raise RuntimeError("CUDA is required for shared-vision guided evaluation.") + if args.large_model_prune_ratio <= 0 or args.large_model_prune_ratio > 1: + raise ValueError("large-model-prune-ratio must be in (0, 1].") + if args.consistency_token_ratio <= 0 or args.consistency_token_ratio > 1: + raise ValueError("consistency-token-ratio must be in (0, 1].") + if args.guide_reasoning_attention_weight < 0 or args.guide_answer_attention_weight < 0: + raise ValueError("guide reasoning/answer attention weights must be >= 0.") + if args.guide_question_attention_weight < 0: + raise ValueError("guide question attention weight must be >= 0.") + if args.guide_reasoning_mode == "two_pass_explicit": + attention_source = resolve_guide_attention_source(args) + if attention_source == "reasoning" and args.guide_reasoning_attention_weight == 0: + raise ValueError("guide_reasoning_attention_weight must be > 0 when guide-attention-source=reasoning.") + if ( + attention_source == "answer" + and args.guide_question_attention_weight == 0 + and args.guide_answer_attention_weight == 0 + ): + raise ValueError( + "At least one of guide_question_attention_weight or guide_answer_attention_weight " + "must be > 0 when guide-attention-source=answer." 
+ ) + if ( + attention_source == "combined" + and args.guide_question_attention_weight == 0 + and args.guide_reasoning_attention_weight == 0 + and args.guide_answer_attention_weight == 0 + ): + raise ValueError("At least one guide attention weight must be > 0 for two_pass_explicit.") + if ( + args.guide_reasoning_mode != "two_pass_explicit" + and args.guide_question_attention_weight == 0 + and args.guide_answer_attention_weight == 0 + ): + raise ValueError("At least one guide question/answer attention weight must be > 0.") + + random.seed(args.seed) + torch.manual_seed(args.seed) + evaluate(args) + + +if __name__ == "__main__": + main() diff --git a/eval/vqa/run_single_model_native.py b/eval/vqa/run_single_model_native.py new file mode 100644 index 0000000000000000000000000000000000000000..412fcebf8d55fb87558c07cc4ba57ab76a3c7ed1 --- /dev/null +++ b/eval/vqa/run_single_model_native.py @@ -0,0 +1,603 @@ +import argparse +import json +import math +import os +import random +import re +import sys +from pathlib import Path +from typing import Optional, Tuple + +import torch +from PIL import Image +from transformers import AutoTokenizer + +from internvl.conversation import get_conv_template +from internvl.conversation import register_conv_template +from internvl.conversation import Conversation +from internvl.conversation import SeparatorStyle +from internvl.model.internvl_chat import InternVLChatModel +from internvl.model.internvl_chat.configuration_internvl_chat import InternVLChatConfig +from internvl.train.dataset import build_transform, dynamic_preprocess + +from evaluate_vqa import VQADataset, ds_collections +from textvqa_eval import TextVQAAccuracyEvaluator + + +BASE_PROMPT = "Answer the question using a single word or phrase." +VIZWIZ_PROMPT = "When the provided information is insufficient, respond with 'Unanswerable'. " +INFOGRAPHICSVQA_PROMPT = "Answer the question using a single word or phrase." +AI2D_PROMPT = "" +HIDDEN_REASONING_INSTRUCTION = ( + "Think through the relevant visual evidence and any text in the image step by step internally before answering." +) +EXPLICIT_REASONING_INSTRUCTION = ( + "Explain your reasoning step by step using the relevant visual evidence and any text in the image." +) +DEFAULT_FINAL_ANSWER_INSTRUCTION = "Provide the final answer only." 
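+# HIDDEN_REASONING_INSTRUCTION backs --reasoning-mode=prompt; EXPLICIT_REASONING_INSTRUCTION and DEFAULT_FINAL_ANSWER_INSTRUCTION drive the two passes of --reasoning-mode=two_pass.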
+REPO_ROOT = Path(__file__).resolve().parents[2] + + +def ensure_internvl2_5_template() -> None: + try: + get_conv_template("internvl2_5") + return + except KeyError: + pass + + register_conv_template( + Conversation( + name="internvl2_5", + system_template="<|im_start|>system\n{system_message}", + system_message="你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。", + roles=("<|im_start|>user\n", "<|im_start|>assistant\n"), + sep_style=SeparatorStyle.MPT, + sep="<|im_end|>\n", + ) + ) + + +def configure_model(checkpoint_path: str) -> InternVLChatConfig: + config = InternVLChatConfig.from_json_file(os.path.join(checkpoint_path, "config.json")) + match = re.search(r"InternVL2-(\d+B)", checkpoint_path) + model_size = match.group(1) if match else checkpoint_path.split("-")[-1] + if model_size in ["1B", "40B"]: + config.llm_config._attn_implementation = "eager" + else: + config.llm_config.attn_implementation = "eager" + config.vision_config.use_flash_attn = True + return config + + +def split_model(num_layers: int, gpus_per_model: int) -> dict: + if gpus_per_model < 1: + raise ValueError("gpus_per_model must be >= 1") + + device_map = {} + if gpus_per_model == 1: + for layer_idx in range(num_layers): + device_map[f"language_model.model.layers.{layer_idx}"] = 0 + else: + # Keep the vision tower and embeddings on GPU 0 and spread decoder layers. + num_layers_per_gpu = math.ceil(num_layers / (gpus_per_model - 0.5)) + num_layers_per_gpu = [num_layers_per_gpu] * gpus_per_model + num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5) + + layer_cnt = 0 + for gpu_idx, layer_count in enumerate(num_layers_per_gpu): + for _ in range(layer_count): + if layer_cnt >= num_layers: + break + device_map[f"language_model.model.layers.{layer_cnt}"] = gpu_idx + layer_cnt += 1 + + if layer_cnt < num_layers: + for layer_idx in range(layer_cnt, num_layers): + device_map[f"language_model.model.layers.{layer_idx}"] = gpus_per_model - 1 + + device_map["vision_model"] = 0 + device_map["mlp1"] = 0 + device_map["language_model.model.tok_embeddings"] = 0 + device_map["language_model.model.rotary_emb"] = 0 + device_map["language_model.model.embed_tokens"] = 0 + device_map["language_model.output"] = 0 + device_map["language_model.model.norm"] = 0 + device_map["language_model.lm_head"] = 0 + if num_layers > 1 and gpus_per_model > 1: + device_map[f"language_model.model.layers.{num_layers - 1}"] = 1 + return device_map + + +def load_model(checkpoint_path: str, config: InternVLChatConfig, args) -> InternVLChatModel: + ensure_internvl2_5_template() + kwargs = {"device_map": "auto"} if args.auto else {} + if args.gpus_per_model > 1 and not args.auto: + if args.gpus_per_model > torch.cuda.device_count(): + raise ValueError( + f"gpus_per_model={args.gpus_per_model} exceeds visible CUDA devices={torch.cuda.device_count()}" + ) + kwargs["device_map"] = split_model(config.llm_config.num_hidden_layers, args.gpus_per_model) + + model = InternVLChatModel.from_pretrained( + checkpoint_path, + config=config, + low_cpu_mem_usage=True, + torch_dtype=torch.bfloat16, + load_in_8bit=args.load_in_8bit, + load_in_4bit=args.load_in_4bit, + **kwargs, + ).eval() + + if args.gpus_per_model == 1 and not args.auto and not args.load_in_8bit and not args.load_in_4bit: + model = model.cuda() + return model + + +def dataset_prompt(dataset_name: str) -> str: + if "vizwiz" in dataset_name: + return VIZWIZ_PROMPT + BASE_PROMPT + if "ai2d" in dataset_name: + return AI2D_PROMPT + if "infographicsvqa" in dataset_name: + return INFOGRAPHICSVQA_PROMPT 
+ return BASE_PROMPT + + +def resolve_dataset_path(data_root: str, path: str) -> str: + if os.path.isabs(path): + return path + return os.path.join(data_root, path) + + +def resolve_image_path(image_path: str, data_root: str, jsonl_path: str = "") -> str: + candidates = [] + if os.path.isabs(image_path): + candidates.append(image_path) + + jsonl_dir = os.path.dirname(jsonl_path) if jsonl_path else "" + candidates.append(os.path.join(data_root, image_path)) + if image_path.startswith("data/"): + candidates.append(os.path.join(data_root, image_path[len("data/"):])) + if jsonl_dir: + candidates.append(os.path.join(jsonl_dir, image_path)) + candidates.append(os.path.join(jsonl_dir, os.path.basename(image_path))) + + for candidate in candidates: + if candidate and os.path.exists(candidate): + return candidate + raise FileNotFoundError(f"Could not resolve image path: {image_path}") + + +def load_textvqa_sample(jsonl_path: str, sample_index: int) -> Tuple[str, str, Optional[int], Optional[str]]: + with open(jsonl_path) as f: + for idx, line in enumerate(f): + if idx == sample_index: + item = json.loads(line) + return item["image"], item["question"], item.get("question_id"), item.get("answer") + raise IndexError(f"sample_index {sample_index} is out of range for {jsonl_path}") + + +def build_pixel_values( + image_path: str, + image_size: int, + dynamic: bool, + use_thumbnail: bool, + max_num: int, +) -> torch.Tensor: + transform = build_transform(is_train=False, input_size=image_size) + image = Image.open(image_path).convert("RGB") + if dynamic: + images = dynamic_preprocess( + image, + image_size=image_size, + use_thumbnail=use_thumbnail, + max_num=max_num, + ) + else: + images = [image] + return torch.stack([transform(img) for img in images]) + + +def build_query(model: InternVLChatModel, tokenizer, question: str, num_patches: int): + # InternVL image placeholder tokens expected by the chat template. + img_context_token = "<IMG_CONTEXT>" + img_start_token = "<img>" + img_end_token = "</img>" + + if "<image>" not in question: + question = "<image>\n" + question + + model.img_context_token_id = tokenizer.convert_tokens_to_ids(img_context_token) + + template = get_conv_template(model.template) + template.system_message = model.system_message + template.append_message(template.roles[0], question) + template.append_message(template.roles[1], None) + query = template.get_prompt() + + image_tokens = img_start_token + img_context_token * model.num_image_token * num_patches + img_end_token + query = query.replace("<image>", image_tokens, 1) + return query, template + + +def model_input_device(model: InternVLChatModel) -> torch.device: + return next(model.vision_model.parameters()).device + + +@torch.inference_mode() +def generate_answer( + model: InternVLChatModel, + tokenizer, + pixel_values: torch.Tensor, + question: str, + generation_config: dict, +) -> str: + query, template = build_query(model, tokenizer, question, pixel_values.shape[0]) + model_inputs = tokenizer(query, return_tensors="pt") + + device = model_input_device(model) + input_ids = model_inputs["input_ids"].to(device) + attention_mask = model_inputs["attention_mask"].to(device) + eos_token_id = tokenizer.convert_tokens_to_ids(template.sep) + + output_ids = model.generate( + pixel_values=pixel_values.to(device=device, dtype=torch.bfloat16), + input_ids=input_ids, + attention_mask=attention_mask, + large_model=True, + eos_token_id=eos_token_id, + **generation_config, + ) + response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] + return response.split(template.sep)[0].strip() + + +def build_eval_entries(result_items, annotation_file:
str): + evaluator = TextVQAAccuracyEvaluator() + with open(annotation_file) as f: + annotations = json.load(f)["annotations"] + question_id_to_answers = { + item["question_id"]: [answer["answer"] for answer in item["answers"]] + for item in annotations + } + eval_entries = [ + { + "question_id": item["question_id"], + "answer": item["answer"], + "pred_answer": item["answer"], + "gt_answers": question_id_to_answers[item["question_id"]], + } + for item in result_items + ] + return evaluator, eval_entries + + +def make_generation_config(num_beams: int, max_new_tokens: int, temperature: float) -> dict: + generation_config = { + "num_beams": num_beams, + "max_new_tokens": max_new_tokens, + "min_new_tokens": 1, + "do_sample": temperature > 0, + } + if temperature > 0: + generation_config["temperature"] = temperature + return generation_config + + +def append_instruction(question: str, instruction: str) -> str: + instruction = instruction.strip() + if not instruction: + return question + return f"{question.rstrip()}\n{instruction}" + + +def render_custom_prompt(question: str, prompt_template: str) -> str: + prompt_template = prompt_template.strip() + if not prompt_template: + raise ValueError("custom_prompt_template must be non-empty when reasoning_mode='custom_prompt'.") + if "{question}" in prompt_template: + return prompt_template.replace("{question}", question) + if "Question:" in prompt_template or "Question:" in prompt_template: + return f"{prompt_template.rstrip()} {question}" + return f"{prompt_template.rstrip()}\nQuestion: {question}" + + +def extract_final_answer(response: str, final_answer_prefix: str) -> str: + final_answer_prefix = final_answer_prefix.strip() + if not final_answer_prefix: + return response.strip() + + pattern = re.compile(rf"(?im)^{re.escape(final_answer_prefix)}\s*(.*)$") + match = pattern.search(response) + if not match: + return response.strip() + + inline_answer = match.group(1).strip() + if inline_answer: + return inline_answer + + trailing_lines = response[match.end():].splitlines() + for line in trailing_lines: + stripped = line.strip() + if stripped: + return stripped + return "" + + +def make_reasoning_generation_config(base_generation_config: dict, args) -> dict: + generation_config = dict(base_generation_config) + generation_config["max_new_tokens"] = args.reasoning_max_new_tokens + temperature = args.reasoning_temperature + generation_config["do_sample"] = temperature > 0 + if temperature > 0: + generation_config["temperature"] = temperature + else: + generation_config.pop("temperature", None) + return generation_config + + +def generate_answer_with_reasoning( + model: InternVLChatModel, + tokenizer, + pixel_values: torch.Tensor, + question: str, + generation_config: dict, + reasoning_mode: str, + reasoning_generation_config: Optional[dict] = None, + final_answer_instruction: str = "", +) -> Tuple[str, Optional[str]]: + if reasoning_mode == "none": + return generate_answer(model, tokenizer, pixel_values, question, generation_config), None + + if reasoning_mode == "prompt": + prompted_question = append_instruction(question, HIDDEN_REASONING_INSTRUCTION) + return generate_answer(model, tokenizer, pixel_values, prompted_question, generation_config), None + + if reasoning_generation_config is None: + raise ValueError("reasoning_generation_config is required when reasoning_mode='two_pass'.") + + reasoning_question = append_instruction(question, EXPLICIT_REASONING_INSTRUCTION) + reasoning = generate_answer(model, tokenizer, pixel_values, reasoning_question, 
reasoning_generation_config) + final_instruction = final_answer_instruction or DEFAULT_FINAL_ANSWER_INSTRUCTION + final_question = append_instruction( + question, + f"Reasoning:\n{reasoning}\n{final_instruction}", + ) + answer = generate_answer(model, tokenizer, pixel_values, final_question, generation_config) + return answer, reasoning + + +def run_single(args): + tokenizer = AutoTokenizer.from_pretrained( + args.checkpoint, + trust_remote_code=True, + use_fast=False, + ) + config = configure_model(args.checkpoint) + model = load_model(args.checkpoint, config, args) + + if args.textvqa_jsonl: + image_path, prompt, question_id, answer = load_textvqa_sample(args.textvqa_jsonl, args.sample_index) + image_path = resolve_image_path(image_path, args.data_root, args.textvqa_jsonl) + else: + image_path = args.image_path + prompt = args.prompt + question_id = None + answer = None + + if not image_path or not prompt: + raise ValueError("Provide either --image-path and --prompt, or --textvqa-jsonl.") + if not os.path.exists(image_path): + raise FileNotFoundError(f"image not found: {image_path}") + + image_size = config.force_image_size or config.vision_config.image_size + pixel_values = build_pixel_values( + image_path=image_path, + image_size=image_size, + dynamic=args.dynamic, + use_thumbnail=config.use_thumbnail, + max_num=args.max_num, + ) + + generation_config = make_generation_config( + num_beams=args.num_beams, + max_new_tokens=args.max_new_tokens, + temperature=args.temperature, + ) + reasoning_generation_config = None + if args.reasoning_mode == "two_pass": + reasoning_generation_config = make_reasoning_generation_config(generation_config, args) + raw_prediction = None + if args.reasoning_mode == "custom_prompt": + raw_prediction = generate_answer( + model, + tokenizer, + pixel_values, + render_custom_prompt(prompt, args.custom_prompt_template), + generation_config, + ) + prediction = ( + extract_final_answer(raw_prediction, args.final_answer_prefix) + if args.extract_final_answer + else raw_prediction + ) + reasoning = None + else: + prediction, reasoning = generate_answer_with_reasoning( + model=model, + tokenizer=tokenizer, + pixel_values=pixel_values, + question=prompt, + generation_config=generation_config, + reasoning_mode=args.reasoning_mode, + reasoning_generation_config=reasoning_generation_config, + final_answer_instruction=args.answer_format_prompt, + ) + + print(f"checkpoint: {args.checkpoint}") + print(f"image_path: {image_path}") + if question_id is not None: + print(f"question_id: {question_id}") + if answer is not None: + print(f"reference_answer: {answer}") + print(f"prompt: {prompt}") + if reasoning is not None: + print(f"reasoning: {reasoning}") + if raw_prediction is not None: + print(f"raw_prediction: {raw_prediction}") + print(f"prediction: {prediction}") + + +def run_textvqa_eval(args): + if args.dataset not in ds_collections: + raise KeyError(f"unknown dataset: {args.dataset}") + + ds_cfg = ds_collections[args.dataset] + test_file = args.test_file or resolve_dataset_path(args.data_root, ds_cfg["test"]) + train_file = args.train_file or resolve_dataset_path(args.data_root, ds_cfg["train"]) + annotation_file = args.annotation_file or resolve_dataset_path(args.data_root, ds_cfg["annotation"]) + + tokenizer = AutoTokenizer.from_pretrained( + args.checkpoint, + trust_remote_code=True, + use_fast=False, + ) + config = configure_model(args.checkpoint) + model = load_model(args.checkpoint, config, args) + + image_size = config.force_image_size or 
config.vision_config.image_size + prompt = args.prompt or dataset_prompt(args.dataset) + dataset = VQADataset( + train=train_file, + test=test_file, + prompt=prompt, + few_shot=0, + input_size=image_size, + dynamic_image_size=args.dynamic, + use_thumbnail=config.use_thumbnail, + max_num=args.max_num, + ) + + num_items = len(dataset) if args.limit is None else min(len(dataset), args.limit) + result_items = [] + generation_config = make_generation_config( + num_beams=args.num_beams, + max_new_tokens=args.max_new_tokens or ds_cfg["max_new_tokens"], + temperature=args.temperature, + ) + reasoning_generation_config = None + if args.reasoning_mode == "two_pass": + reasoning_generation_config = make_reasoning_generation_config(generation_config, args) + + for idx in range(num_items): + sample = dataset[idx] + raw_prediction = None + if args.reasoning_mode == "custom_prompt": + raw_prediction = generate_answer( + model, + tokenizer, + sample["pixel_values"], + render_custom_prompt(sample["question"], args.custom_prompt_template), + generation_config, + ) + prediction = ( + extract_final_answer(raw_prediction, args.final_answer_prefix) + if args.extract_final_answer + else raw_prediction + ) + reasoning = None + else: + prediction, reasoning = generate_answer_with_reasoning( + model=model, + tokenizer=tokenizer, + pixel_values=sample["pixel_values"], + question=sample["question"], + generation_config=generation_config, + reasoning_mode=args.reasoning_mode, + reasoning_generation_config=reasoning_generation_config, + ) + result_item = { + "question": sample["question"], + "question_id": sample["question_id"], + "answer": prediction, + "annotation": sample["annotation"], + } + if raw_prediction is not None: + result_item["raw_answer"] = raw_prediction + if args.save_reasoning and reasoning is not None: + result_item["reasoning"] = reasoning + result_items.append(result_item) + if (idx + 1) % args.log_every == 0 or idx + 1 == num_items: + print(f"[{idx + 1}/{num_items}] question_id={sample['question_id']} prediction={prediction}") + sys.stdout.flush() + + os.makedirs(args.out_dir, exist_ok=True) + run_name = args.run_name or f"{args.dataset}_{os.path.basename(args.checkpoint)}" + output_file = os.path.join(args.out_dir, f"{run_name}.json") + with open(output_file, "w") as f: + json.dump(result_items, f, ensure_ascii=False, indent=2) + + evaluator, eval_entries = build_eval_entries(result_items, annotation_file) + accuracy = evaluator.eval_pred_list(eval_entries) + print(f"dataset: {args.dataset}") + print(f"checkpoint: {args.checkpoint}") + print(f"count: {num_items}") + print(f"accuracy: {accuracy:.6f}") + print(f"results_file: {output_file}") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--checkpoint", type=str, required=True) + parser.add_argument("--mode", type=str, choices=["single", "textvqa_eval"], default="single") + parser.add_argument("--image-path", type=str, default="") + parser.add_argument("--prompt", type=str, default="") + parser.add_argument("--textvqa-jsonl", type=str, default="") + parser.add_argument("--sample-index", type=int, default=0) + parser.add_argument("--dataset", type=str, default="textvqa_val") + parser.add_argument("--data-root", type=str, default=str(REPO_ROOT)) + parser.add_argument("--test-file", type=str, default="") + parser.add_argument("--train-file", type=str, default="") + parser.add_argument("--annotation-file", type=str, default="") + parser.add_argument("--out-dir", type=str, default=str(REPO_ROOT / "outputs" / 
"native_single")) + parser.add_argument("--run-name", type=str, default="") + parser.add_argument("--limit", type=int, default=None) + parser.add_argument("--max-new-tokens", type=int, default=0) + parser.add_argument("--num-beams", type=int, default=1) + parser.add_argument("--temperature", type=float, default=0.0) + parser.add_argument("--reasoning-mode", type=str, choices=["none", "prompt", "two_pass", "custom_prompt"], default="none") + parser.add_argument("--reasoning-max-new-tokens", type=int, default=64) + parser.add_argument("--reasoning-temperature", type=float, default=0.0) + parser.add_argument("--save-reasoning", action="store_true") + parser.add_argument("--answer-format-prompt", type=str, default="") + parser.add_argument("--custom-prompt-template", type=str, default="") + parser.add_argument("--extract-final-answer", action="store_true") + parser.add_argument("--final-answer-prefix", type=str, default="Final answer:") + parser.add_argument("--dynamic", action="store_true") + parser.add_argument("--max-num", type=int, default=6) + parser.add_argument("--log-every", type=int, default=20) + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--gpus-per-model", type=int, default=1) + parser.add_argument("--auto", action="store_true") + parser.add_argument("--load-in-8bit", action="store_true") + parser.add_argument("--load-in-4bit", action="store_true") + args = parser.parse_args() + + if not torch.cuda.is_available(): + raise RuntimeError("CUDA is required for native InternVL inference.") + + random.seed(args.seed) + torch.manual_seed(args.seed) + + if args.mode == "single": + if args.max_new_tokens == 0: + args.max_new_tokens = 32 + run_single(args) + return + + if args.max_new_tokens == 0: + args.max_new_tokens = None + run_textvqa_eval(args) + + +if __name__ == "__main__": + main() diff --git a/isolated/sim_greedy/eval/vqa/run_shared_vision_guided_textvqa.py b/isolated/sim_greedy/eval/vqa/run_shared_vision_guided_textvqa.py new file mode 100644 index 0000000000000000000000000000000000000000..49da1fe63f0ae413ba4a8de25c6bb225b2eb0f8f --- /dev/null +++ b/isolated/sim_greedy/eval/vqa/run_shared_vision_guided_textvqa.py @@ -0,0 +1,1742 @@ +import argparse +import inspect +import json +import math +import os +import random +import re +import sys +import time +from functools import wraps +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import torch +from PIL import Image +from transformers import AutoTokenizer +from transformers.generation.logits_process import LogitsProcessorList + + +REPO_ROOT = Path(__file__).resolve().parents[2] +DEFAULT_UPSTREAM_SGL_ROOT = Path(os.environ.get("UPSTREAM_SGL_ROOT", str(REPO_ROOT / "upstream_sgl"))) +if str(DEFAULT_UPSTREAM_SGL_ROOT) not in sys.path: + sys.path.insert(0, str(DEFAULT_UPSTREAM_SGL_ROOT)) +eval_vqa_path = DEFAULT_UPSTREAM_SGL_ROOT / "eval" / "vqa" +if str(eval_vqa_path) not in sys.path: + sys.path.insert(0, str(eval_vqa_path)) + +from internvl.conversation import get_conv_template +from internvl.model.internvl_chat import InternVLChatModel +from internvl.model.internvl_chat.configuration_internvl_chat import InternVLChatConfig +from internvl.train.dataset import build_transform, dynamic_preprocess +from textvqa_eval import TextVQAAccuracyEvaluator + + +BASE_PROMPT = "Answer the question using a single word or phrase." 
+BASE_PROMPT_SUFFIX = " " + BASE_PROMPT +HIDDEN_REASONING_INSTRUCTION = ( + "Think through the relevant visual evidence and any text in the image step by step internally before answering." +) +EXPLICIT_REASONING_INSTRUCTION = ( + "Explain your reasoning step by step using the relevant visual evidence and any text in the image." +) +DEFAULT_FINAL_ANSWER_INSTRUCTION = "Provide the final answer only." +GUIDE_ATTENTION_COT_PROMPT_TEMPLATE = """You are solving a TextVQA task. +Read the image carefully, especially visible text. +Reason through the answer in at least 5 explicit steps. +Do not skip the reasoning. +Question: {question} +1. +2. +3. +4. +5. +Final answer:""" +GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE = """You are solving a TextVQA task. + +Read the image carefully, especially all visible text. +Reason using only evidence from the image and OCR text. +You must output exactly 5 numbered reasoning steps. +Each step must be a short sentence. +Do not provide the final answer. +Do not provide a summary. +Do not output any text other than the 5 numbered steps. + +Question: {question} + +1. Identify the most relevant visible text or object. +2. Explain how that evidence relates to the question. +3. Check for another supporting clue in the image. +4. Resolve any ambiguity using the strongest evidence. +5. State the final reasoning conclusion without giving the final answer.""" +GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION = ( + "First reason step by step using the relevant visual evidence and OCR text. " + "Then end with a new line in the exact format: Answer: ." +) +GUIDE_TEXT_HINT_INSTRUCTION = ( + "Give a very short guide hint grounded in the image and OCR text. Use a short phrase, not a full sentence." +) +GUIDED_DECODE_INSTRUCTION = ( + "Use the guide hint only if it matches the image. Answer the question using a single word or phrase." 
+) + +REASONING_FILTER_STOPWORDS = { + "a", "an", "and", "are", "as", "at", "be", "because", "but", "by", "for", "from", "has", + "have", "if", "in", "into", "is", "it", "its", "of", "on", "or", "that", "the", "their", + "there", "this", "those", "to", "was", "were", "with", +} +REASONING_FILTER_TEMPLATE_WORDS = { + "answer", "conclusion", "directly", "evidence", "final", "identify", "indicating", + "question", "reason", "reasoning", "relates", "relevant", "resolve", "shows", "state", + "strongest", "supporting", "supports", "using", "visible", +} +REASONING_FILTER_POSITION_WORDS = { + "left", "right", "top", "bottom", "middle", "center", "centre", "upper", "lower", +} +REASONING_FILTER_COLOR_WORDS = { + "black", "blue", "brown", "gold", "gray", "green", "grey", "orange", "pink", + "purple", "red", "silver", "white", "yellow", +} +REASONING_FILTER_KEEP_POS = {"NOUN", "PROPN", "ADJ"} +SPACY_REASONING_NLP = None +SPACY_REASONING_LOAD_ATTEMPTED = False +SPACY_REASONING_FALLBACK_WARNED = False + + +def resolve_hf_snapshot(path: str) -> str: + path = os.path.abspath(path) + config_path = os.path.join(path, "config.json") + if os.path.isfile(config_path): + return path + + refs_main = os.path.join(path, "refs", "main") + if os.path.isfile(refs_main): + with open(refs_main) as f: + revision = f.read().strip() + snapshot_path = os.path.join(path, "snapshots", revision) + if os.path.isfile(os.path.join(snapshot_path, "config.json")): + return snapshot_path + + raise FileNotFoundError(f"Could not resolve checkpoint snapshot from: {path}") + + +def configure_model(checkpoint_path: str, use_flash_attn: bool) -> InternVLChatConfig: + checkpoint_path = resolve_hf_snapshot(checkpoint_path) + config = InternVLChatConfig.from_json_file(os.path.join(checkpoint_path, "config.json")) + llm_arch = config.llm_config.architectures[0] + if llm_arch == "InternLM2ForCausalLM": + config.llm_config.attn_implementation = "eager" + else: + config.llm_config._attn_implementation = "eager" + config.vision_config.use_flash_attn = use_flash_attn + return config + + +def patch_internlm2_sample_signature(model: InternVLChatModel) -> None: + language_model_cls = model.language_model.__class__ + sample_fn = getattr(language_model_cls, "_sample", None) + if sample_fn is None or getattr(sample_fn, "_sgl_logits_warper_compat", False): + return + + signature = inspect.signature(sample_fn) + logits_warper_param = signature.parameters.get("logits_warper") + if logits_warper_param is None or logits_warper_param.default is not inspect._empty: + return + + @wraps(sample_fn) + def compat_sample( + self, + input_ids: torch.LongTensor, + logits_processor, + stopping_criteria, + generation_config, + synced_gpus: bool, + streamer=None, + logits_warper=None, + **model_kwargs, + ): + # transformers>=4.49 folds samplers into logits_processor and no longer + # passes logits_warper to custom _sample overrides. 
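+ # When the newer code path omits logits_warper, fall back to an empty LogitsProcessorList so the original InternLM2 _sample override (which still requires that argument) remains callable.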
+ if logits_warper is None: + logits_warper = LogitsProcessorList() + return sample_fn( + self, + input_ids=input_ids, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + generation_config=generation_config, + synced_gpus=synced_gpus, + streamer=streamer, + logits_warper=logits_warper, + **model_kwargs, + ) + + compat_sample._sgl_logits_warper_compat = True + language_model_cls._sample = compat_sample + + +def load_model( + checkpoint_path: str, + config: InternVLChatConfig, + auto: bool, + load_in_8bit: bool, + load_in_4bit: bool, +) -> InternVLChatModel: + checkpoint_path = resolve_hf_snapshot(checkpoint_path) + kwargs = {"device_map": "auto"} if auto else {} + model = InternVLChatModel.from_pretrained( + checkpoint_path, + config=config, + low_cpu_mem_usage=True, + torch_dtype=torch.bfloat16, + load_in_8bit=load_in_8bit, + load_in_4bit=load_in_4bit, + **kwargs, + ).eval() + if not auto and not load_in_8bit and not load_in_4bit: + model = model.cuda() + patch_internlm2_sample_signature(model) + return model + + +def build_decode_model( + guide_model: InternVLChatModel, + large_checkpoint: str, + use_flash_attn: bool, + auto: bool, + load_in_8bit: bool, + load_in_4bit: bool, +) -> Tuple[InternVLChatModel, AutoTokenizer]: + large_checkpoint = resolve_hf_snapshot(large_checkpoint) + large_config = configure_model(large_checkpoint, use_flash_attn=use_flash_attn) + large_source = load_model( + large_checkpoint, + large_config, + auto=auto, + load_in_8bit=load_in_8bit, + load_in_4bit=load_in_4bit, + ) + + decode_model = InternVLChatModel( + large_config, + vision_model=guide_model.vision_model, + language_model=large_source.language_model, + ) + decode_model.config.vision_config = guide_model.config.vision_config + decode_model.vision_model.config = guide_model.config.vision_config + decode_model.mlp1 = large_source.mlp1 + decode_model.template = large_source.template + decode_model.system_message = large_source.system_message + decode_model.num_image_token = large_source.num_image_token + decode_model.ps_version = guide_model.ps_version + decode_model.select_layer = guide_model.select_layer + decode_model.downsample_ratio = guide_model.downsample_ratio + decode_model.img_context_token_id = large_source.img_context_token_id + decode_model.eval() + patch_internlm2_sample_signature(decode_model) + + large_tokenizer = AutoTokenizer.from_pretrained( + large_checkpoint, + trust_remote_code=True, + use_fast=False, + ) + return decode_model, large_tokenizer + + +def model_text_device(model: InternVLChatModel) -> torch.device: + return next(model.language_model.get_input_embeddings().parameters()).device + + +def model_vision_device(model: InternVLChatModel) -> torch.device: + return next(model.vision_model.parameters()).device + + +def resolve_image_path(image_path: str, data_root: str, jsonl_dir: str) -> str: + candidates = [] + if os.path.isabs(image_path): + candidates.append(image_path) + candidates.append(os.path.join(data_root, image_path)) + if image_path.startswith("data/"): + candidates.append(os.path.join(data_root, image_path[len("data/"):])) + candidates.append(os.path.join(jsonl_dir, image_path)) + candidates.append(os.path.join(jsonl_dir, os.path.basename(image_path))) + + for candidate in candidates: + if os.path.exists(candidate): + return candidate + raise FileNotFoundError(f"Could not resolve image path: {image_path}") + + +class TextVQADataset: + def __init__(self, jsonl_path: str, data_root: str, image_size: int, dynamic: bool, use_thumbnail: bool, 
max_num: int): + with open(jsonl_path) as f: + self.items = [json.loads(line) for line in f if line.strip()] + self.jsonl_dir = os.path.dirname(jsonl_path) + self.data_root = data_root + self.image_size = image_size + self.dynamic = dynamic + self.use_thumbnail = use_thumbnail + self.max_num = max_num + self.transform = build_transform(is_train=False, input_size=image_size) + + def __len__(self) -> int: + return len(self.items) + + def __getitem__(self, idx: int) -> Dict[str, object]: + item = self.items[idx] + image_path = resolve_image_path(item["image"], self.data_root, self.jsonl_dir) + image = Image.open(image_path).convert("RGB") + if self.dynamic: + images = dynamic_preprocess( + image, + image_size=self.image_size, + use_thumbnail=self.use_thumbnail, + max_num=self.max_num, + ) + else: + images = [image] + pixel_values = torch.stack([self.transform(img) for img in images]) + return { + "question_id": item["question_id"], + "question": item["question"], + "pixel_values": pixel_values, + "annotation": item.get("answer", ""), + } + + +def load_annotations(annotation_file: str) -> Dict[int, List[str]]: + with open(annotation_file) as f: + annotations = json.load(f)["annotations"] + return { + item["question_id"]: [answer["answer"] for answer in item["answers"]] + for item in annotations + } + + +def build_query(model: InternVLChatModel, tokenizer, question: str, num_patches: int): + # InternVL image placeholder tokens expected by the chat template. + img_context_token = "<IMG_CONTEXT>" + img_start_token = "<img>" + img_end_token = "</img>" + + if "<image>" not in question: + question = "<image>\n" + question + + model.img_context_token_id = tokenizer.convert_tokens_to_ids(img_context_token) + + template = get_conv_template(model.template) + template.system_message = model.system_message + template.append_message(template.roles[0], question) + template.append_message(template.roles[1], None) + query = template.get_prompt() + + image_tokens = img_start_token + img_context_token * model.num_image_token * num_patches + img_end_token + query = query.replace("<image>", image_tokens, 1) + return query, template + + +@torch.inference_mode() +def extract_shared_raw_visual_tokens(model: InternVLChatModel, pixel_values: torch.Tensor) -> torch.Tensor: + vision_device = model_vision_device(model) + pixel_values = pixel_values.to(device=vision_device, dtype=torch.bfloat16) + if model.select_layer == -1: + vit_embeds = model.vision_model( + pixel_values=pixel_values, + output_hidden_states=False, + return_dict=True, + ).last_hidden_state + else: + vit_embeds = model.vision_model( + pixel_values=pixel_values, + output_hidden_states=True, + return_dict=True, + ).hidden_states[model.select_layer] + vit_embeds = vit_embeds[:, 1:, :] + h = w = int(vit_embeds.shape[1] ** 0.5) + vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1) + vit_embeds = model.pixel_shuffle(vit_embeds, scale_factor=model.downsample_ratio) + return vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1]) + + +@torch.inference_mode() +def project_visual_tokens(model: InternVLChatModel, raw_visual_tokens: torch.Tensor) -> torch.Tensor: + mlp_device = next(model.mlp1.parameters()).device + raw_visual_tokens = raw_visual_tokens.to(device=mlp_device, dtype=torch.bfloat16) + return model.mlp1(raw_visual_tokens) + + +@torch.inference_mode() +def build_input_embeds_from_visual_features( + model: InternVLChatModel, + input_ids: torch.Tensor, + visual_features: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + input_embeds = model.language_model.get_input_embeddings()(input_ids) + batch_size, seq_len, hidden_size =
input_embeds.shape + flat_input_embeds = input_embeds.reshape(batch_size * seq_len, hidden_size) + flat_input_ids = input_ids.reshape(batch_size * seq_len) + selected = flat_input_ids == model.img_context_token_id + if selected.sum().item() == 0: + raise ValueError("No image context tokens found in input_ids.") + flat_input_embeds[selected] = visual_features.reshape(-1, hidden_size).to(flat_input_embeds.device) + return flat_input_embeds.reshape(batch_size, seq_len, hidden_size), flat_input_ids + + +@torch.inference_mode() +def run_guide_generation( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, +) -> Dict[str, object]: + query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0]) + model_inputs = tokenizer(query, return_tensors="pt") + input_device = model_text_device(model) + input_ids = model_inputs["input_ids"].to(input_device) + attention_mask = model_inputs["attention_mask"].to(input_device) + eos_token_id = tokenizer.convert_tokens_to_ids(template.sep) + input_embeds, flat_input_ids = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens) + + visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero() + visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1] + + run_config = dict(generation_config) + run_config["eos_token_id"] = eos_token_id + + outputs = model.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=None, + output_hidden_states=None, + return_dict=None, + use_cache=True, + visual_token_index=(visual_start_index, visual_end_index), + **run_config, + ) + response = tokenizer.batch_decode(outputs["sequences"], skip_special_tokens=True)[0] + response = response.split(template.sep)[0].strip() + return { + "response": response, + "outputs": outputs, + "input_embeds": input_embeds, + "flat_input_ids": flat_input_ids, + "attention_mask": attention_mask, + "visual_token_index": (visual_start_index, visual_end_index), + } + + +def aggregate_attention_from_step(attentions, visual_token_index: Tuple[int, int]) -> torch.Tensor: + visual_start_index, visual_end_index = visual_token_index + visual_token_num = visual_end_index - visual_start_index + 1 + visual_token_importance = None + + for attention in attentions: + if attention is None: + continue + if visual_token_importance is None: + visual_token_importance = torch.zeros( + visual_token_num, + device=attention.device, + dtype=torch.float32, + ) + + merged_attention = attention[0].sum(dim=0) + if attention.shape[2] != 1: + visual_token_importance += merged_attention[ + visual_end_index + 1 :, + visual_start_index : visual_end_index + 1, + ].sum(dim=0) + else: + visual_token_importance += merged_attention[ + 0:1, + visual_start_index : visual_end_index + 1, + ].sum(dim=0) + + if visual_token_importance is None: + raise RuntimeError("Guide model did not return layer attentions for the current decoding step.") + return visual_token_importance + + +def get_reasoning_spacy_nlp(): + global SPACY_REASONING_NLP, SPACY_REASONING_LOAD_ATTEMPTED + if SPACY_REASONING_LOAD_ATTEMPTED: + return SPACY_REASONING_NLP + + SPACY_REASONING_LOAD_ATTEMPTED = True + try: + import spacy + + SPACY_REASONING_NLP = spacy.load("en_core_web_sm", disable=["parser", "lemmatizer"]) + except Exception: + SPACY_REASONING_NLP = None + return SPACY_REASONING_NLP + + +def should_keep_reasoning_heuristic_token(token_text: str) 
-> bool: + stripped = token_text.strip() + if not stripped: + return False + + lowered = stripped.lower() + if re.fullmatch(r"\d+[.)]?", stripped): + return False + if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS: + return False + if lowered in REASONING_FILTER_POSITION_WORDS or lowered in REASONING_FILTER_COLOR_WORDS: + return True + if any(ch.isdigit() for ch in stripped): + return True + if any(ch.isupper() for ch in stripped): + return True + if any(ch in ".:/-@&" for ch in stripped): + return True + alpha_count = sum(ch.isalpha() for ch in stripped) + return alpha_count >= 4 + + +def should_keep_reasoning_doc_token(token) -> bool: + stripped = token.text.strip() + if not stripped: + return False + + lowered = stripped.lower() + if token.is_punct or token.is_space: + return False + if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS: + return False + if token.pos_ in REASONING_FILTER_KEEP_POS: + return True + return False + + +def build_generated_token_spans(tokenizer, generated_ids: torch.Tensor) -> Tuple[str, List[Tuple[int, int]]]: + decoded_text = "" + token_spans: List[Tuple[int, int]] = [] + for token_id in generated_ids.detach().cpu().tolist(): + piece = tokenizer.decode([int(token_id)], skip_special_tokens=True, clean_up_tokenization_spaces=False) + start = len(decoded_text) + decoded_text += piece + token_spans.append((start, len(decoded_text))) + return decoded_text, token_spans + + +def analyze_reasoning_filter(text: str, args) -> Tuple[List[Tuple[int, int]], str, List[Dict[str, object]]]: + if args.guide_reasoning_filter_mode == "none": + return [], "none", [] + + if args.guide_reasoning_filter_mode == "pos_ner": + nlp = get_reasoning_spacy_nlp() + if nlp is not None: + doc = nlp(text) + token_analysis = [] + intervals = [ + (token.idx, token.idx + len(token)) + for token in doc + if should_keep_reasoning_doc_token(token) + ] + for token in doc: + token_analysis.append( + { + "text": token.text, + "lemma": token.lemma_, + "pos": token.pos_, + "tag": token.tag_, + "dep": token.dep_, + "ent_type": token.ent_type_, + "like_num": bool(getattr(token, "like_num", False)), + "like_url": bool(getattr(token, "like_url", False)), + "is_stop": bool(token.is_stop), + "keep": should_keep_reasoning_doc_token(token), + } + ) + return intervals, "spacy_pos_ner", token_analysis + + token_analysis = [] + intervals = [ + (match.start(), match.end()) + for match in re.finditer(r"\S+", text) + if should_keep_reasoning_heuristic_token(match.group(0)) + ] + for match in re.finditer(r"\S+", text): + token_text = match.group(0) + token_analysis.append( + { + "text": token_text, + "lemma": token_text.lower(), + "pos": "", + "tag": "", + "dep": "", + "ent_type": "", + "like_num": any(ch.isdigit() for ch in token_text), + "like_url": "http" in token_text.lower() or "www." 
in token_text.lower(), + "is_stop": token_text.lower() in REASONING_FILTER_STOPWORDS, + "keep": should_keep_reasoning_heuristic_token(token_text), + } + ) + return intervals, "heuristic_fallback", token_analysis + + +def build_reasoning_attention_step_mask_and_debug(tokenizer, outputs, args) -> Tuple[Optional[List[bool]], Dict[str, object]]: + if args.guide_reasoning_filter_mode == "none": + return None, {"backend": "none", "kept_tokens": [], "token_analysis": []} + + sequences = outputs["sequences"][0] + decoded_text, token_spans = build_generated_token_spans(tokenizer, sequences) + intervals, backend, token_analysis = analyze_reasoning_filter(decoded_text, args) + + global SPACY_REASONING_FALLBACK_WARNED + if backend == "heuristic_fallback" and not SPACY_REASONING_FALLBACK_WARNED: + print("Warning: spaCy POS/NER model unavailable; guide reasoning filter is using heuristic fallback.") + SPACY_REASONING_FALLBACK_WARNED = True + + debug_info = { + "backend": backend, + "token_analysis": token_analysis, + "kept_tokens": [token["text"] for token in token_analysis if token.get("keep")], + } + if not intervals: + return None, debug_info + + step_mask = [] + for start, end in token_spans: + if start == end: + step_mask.append(False) + continue + keep = any(start < interval_end and end > interval_start for interval_start, interval_end in intervals) + step_mask.append(keep) + + debug_info["step_mask"] = step_mask + if not any(step_mask): + return None, debug_info + return step_mask, debug_info + + +def aggregate_attention_from_generation_outputs( + outputs, + visual_token_index: Tuple[int, int], + step_mask: Optional[List[bool]] = None, +) -> torch.Tensor: + aggregated = getattr(outputs, "aggregated_viusal_token_attention", None) + if aggregated is not None and step_mask is None: + return aggregated.detach().float() + + attentions = getattr(outputs, "attentions", None) + if not attentions: + raise RuntimeError("Guide generation did not return attentions; enable output_attentions.") + + visual_token_importance = None + for step_idx, step_attentions in enumerate(attentions): + if step_mask is not None and (step_idx >= len(step_mask) or not step_mask[step_idx]): + continue + step_importance = aggregate_attention_from_step(step_attentions, visual_token_index) + if visual_token_importance is None: + visual_token_importance = step_importance + else: + visual_token_importance = visual_token_importance + step_importance + + if visual_token_importance is None: + if step_mask is not None: + return aggregate_attention_from_generation_outputs(outputs, visual_token_index, step_mask=None) + raise RuntimeError("Guide generation returned no attention steps.") + return visual_token_importance + + +def aggregate_question_and_answer_attention_from_generation_outputs( + outputs, + visual_token_index: Tuple[int, int], +) -> Tuple[torch.Tensor, torch.Tensor]: + attentions = getattr(outputs, "attentions", None) + if not attentions: + raise RuntimeError("Guide generation did not return attentions; enable output_attentions.") + + question_visual_token_importance = None + answer_visual_token_importance = None + for step_idx, step_attentions in enumerate(attentions): + step_importance = aggregate_attention_from_step(step_attentions, visual_token_index) + if step_idx == 0: + if question_visual_token_importance is None: + question_visual_token_importance = step_importance + else: + question_visual_token_importance = question_visual_token_importance + step_importance + else: + if answer_visual_token_importance is None: + 
answer_visual_token_importance = step_importance + else: + answer_visual_token_importance = answer_visual_token_importance + step_importance + + if question_visual_token_importance is None and answer_visual_token_importance is None: + raise RuntimeError("Guide generation returned no attention steps.") + if question_visual_token_importance is None: + question_visual_token_importance = torch.zeros_like(answer_visual_token_importance) + if answer_visual_token_importance is None: + answer_visual_token_importance = torch.zeros_like(question_visual_token_importance) + return question_visual_token_importance, answer_visual_token_importance + + +@torch.inference_mode() +def compute_consistency_score( + model: InternVLChatModel, + input_embeds: torch.Tensor, + flat_input_ids: torch.Tensor, + attention_mask: torch.Tensor, + generated_ids: torch.Tensor, + visual_token_importance: torch.Tensor, + visual_token_index: Tuple[int, int], + consistency_token_ratio: float, + large_model_prune_selection: str, +) -> torch.Tensor: + visual_start_index, visual_end_index = visual_token_index + new_input_ids_ = generated_ids + new_token_num = new_input_ids_.shape[-1] + new_input_embedding = torch.concatenate( + (input_embeds, model.language_model.get_input_embeddings()(new_input_ids_).unsqueeze(0)), + dim=1, + ) + new_attention_mask = torch.concatenate( + ( + attention_mask, + torch.ones((1, new_input_ids_.shape[0]), device=attention_mask.device, dtype=attention_mask.dtype), + ), + dim=-1, + ) + new_input_ids = torch.concatenate((flat_input_ids, new_input_ids_), dim=-1) + consistency_generate_kwargs = { + "large_model_prune_layer": 0.0, + "large_model_prune_ratio": consistency_token_ratio, + "large_model_prune_selection": large_model_prune_selection, + "visual_token_index": (visual_start_index, visual_end_index), + "visual_token_importance": visual_token_importance, + "inputs_embeds": new_input_embedding, + "attention_mask": new_attention_mask, + "output_scores": False, + "output_attentions": False, + "return_dict_in_generate": False, + "use_cache": True, + } + consistency_generate_kwargs["inputs_embeds"] = new_input_embedding + consistency_generate_kwargs["attention_mask"] = new_attention_mask + consistency_generate_kwargs["output_scores"] = False + consistency_generate_kwargs["output_attentions"] = False + consistency_generate_kwargs = model.language_model._get_initial_cache_position(new_input_ids, consistency_generate_kwargs) + model_inputs = model.language_model.prepare_inputs_for_generation(new_input_ids, **consistency_generate_kwargs) + consistency_output = model.language_model.forward(**model_inputs, return_dict=True) + consistency_score = torch.gather( + consistency_output["logits"][:, -new_token_num - 1 : -1, :].softmax(dim=-1), + index=new_input_ids_[None, :, None], + dim=-1, + ) + return torch.prod(consistency_score) + + +@torch.inference_mode() +def run_guide_branch( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, + consistency_token_ratio: float, + args, +) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + generation_result = run_guide_generation( + model, + tokenizer, + projected_visual_tokens, + question, + generation_config, + ) + outputs = generation_result["outputs"] + question_visual_token_importance, answer_visual_token_importance = ( + aggregate_question_and_answer_attention_from_generation_outputs( + outputs, + generation_result["visual_token_index"], + ) + ) + 
visual_token_importance = combine_question_and_answer_attention( + question_visual_token_importance, + answer_visual_token_importance, + args, + ) + if args.large_model_prune_selection in {"similarity_greedy", "similarity_cover_greedy"}: + consistency_score = torch.tensor(1.0, device=visual_token_importance.device) + else: + consistency_score = compute_consistency_score( + model, + generation_result["input_embeds"], + generation_result["flat_input_ids"], + generation_result["attention_mask"], + outputs["sequences"][0], + visual_token_importance, + generation_result["visual_token_index"], + consistency_token_ratio, + args.large_model_prune_selection, + ) + return ( + generation_result["response"], + outputs.scores, + consistency_score, + visual_token_importance, + question_visual_token_importance, + answer_visual_token_importance, + ) + + +@torch.inference_mode() +def run_decode_branch( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, + visual_token_importance: torch.Tensor, + large_model_prune_layer: float, + large_model_prune_ratio: float, + large_model_prune_keep_count: int, + large_model_prune_selection: str, + large_model_similarity_target_coverage: float, + large_model_similarity_min_gain: float, + large_model_similarity_min_keep: int, + large_model_similarity_max_keep_ratio: float, +) -> str: + query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0]) + model_inputs = tokenizer(query, return_tensors="pt") + input_device = model_text_device(model) + input_ids = model_inputs["input_ids"].to(input_device) + attention_mask = model_inputs["attention_mask"].to(input_device) + eos_token_id = tokenizer.convert_tokens_to_ids(template.sep) + input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens) + + visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero() + visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1] + + run_config = dict(generation_config) + run_config["eos_token_id"] = eos_token_id + run_config["return_dict_in_generate"] = False + run_config["output_scores"] = False + run_config["output_attentions"] = False + run_config["large_model_prune_layer"] = large_model_prune_layer + run_config["large_model_prune_ratio"] = large_model_prune_ratio + run_config["large_model_prune_keep_count"] = large_model_prune_keep_count + run_config["large_model_prune_selection"] = large_model_prune_selection + run_config["large_model_similarity_target_coverage"] = large_model_similarity_target_coverage + run_config["large_model_similarity_min_gain"] = large_model_similarity_min_gain + run_config["large_model_similarity_min_keep"] = large_model_similarity_min_keep + run_config["large_model_similarity_max_keep_ratio"] = large_model_similarity_max_keep_ratio + run_config["visual_token_importance"] = visual_token_importance + run_config["visual_token_index"] = (visual_start_index, visual_end_index) + + output_ids = model.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=None, + output_hidden_states=None, + return_dict=None, + use_cache=True, + **run_config, + ) + response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] + return response.split(template.sep)[0].strip() + + +def make_generation_config(args) -> dict: + generation_config = { + "num_beams": args.num_beams, + "max_new_tokens": args.max_new_tokens, + "min_new_tokens": 
1, + "do_sample": args.temperature > 0, + "return_dict_in_generate": True, + "output_scores": True, + "output_attentions": True, + } + if args.temperature > 0: + generation_config["temperature"] = args.temperature + return generation_config + + +def append_instruction(question: str, instruction: str) -> str: + instruction = instruction.strip() + if not instruction: + return question + return f"{question.rstrip()}\n{instruction}" + + +def make_reasoning_generation_config(base_generation_config: dict, args) -> dict: + generation_config = dict(base_generation_config) + generation_config["max_new_tokens"] = args.reasoning_max_new_tokens + generation_config["return_dict_in_generate"] = True + generation_config["output_scores"] = True + generation_config["output_attentions"] = True + temperature = args.reasoning_temperature + generation_config["do_sample"] = temperature > 0 + if temperature > 0: + generation_config["temperature"] = temperature + else: + generation_config.pop("temperature", None) + return generation_config + + +def make_custom_generation_config( + base_generation_config: dict, + max_new_tokens: int, + temperature: float, + return_dict_in_generate: bool, + output_scores: bool, + output_attentions: bool, +) -> dict: + generation_config = dict(base_generation_config) + generation_config["max_new_tokens"] = max_new_tokens + generation_config["return_dict_in_generate"] = return_dict_in_generate + generation_config["output_scores"] = output_scores + generation_config["output_attentions"] = output_attentions + generation_config["do_sample"] = temperature > 0 + if temperature > 0: + generation_config["temperature"] = temperature + else: + generation_config.pop("temperature", None) + return generation_config + + +def normalize_generated_text(text: str) -> str: + return " ".join(text.strip().split()) + + +def strip_base_prompt(question: str) -> str: + if question.endswith(BASE_PROMPT_SUFFIX): + return question[: -len(BASE_PROMPT_SUFFIX)].rstrip() + return question + + +def summarize_visual_token_importance(visual_token_importance: torch.Tensor, topk: int) -> Dict[str, object]: + values = visual_token_importance.detach().float().view(-1).cpu() + total = values.sum().item() + if total > 0: + normalized = values / total + else: + normalized = torch.full_like(values, 1.0 / max(values.numel(), 1)) + + topk = min(topk, normalized.numel()) + top_values, top_indices = torch.topk(normalized, k=topk) + entropy = -(normalized * torch.clamp(normalized, min=1e-12).log()).sum().item() + return { + "raw_sum": total, + "entropy": entropy, + "max_weight": normalized.max().item(), + "top_indices": top_indices.tolist(), + "top_weights": top_values.tolist(), + "weights": normalized.tolist(), + } + + +def normalize_visual_token_importance(visual_token_importance: torch.Tensor) -> torch.Tensor: + visual_token_importance = visual_token_importance.detach().float() + total = visual_token_importance.sum() + if total.item() > 0: + return visual_token_importance / total + return torch.full_like(visual_token_importance, 1.0 / max(visual_token_importance.numel(), 1)) + + +def prepare_decode_visual_token_importance( + visual_token_importance: torch.Tensor, + selection_mode: str, +) -> torch.Tensor: + raw_importance = visual_token_importance.detach().float() + if selection_mode in {"topk", "similarity_greedy", "similarity_cover_greedy"}: + return raw_importance + if selection_mode == "random": + return torch.rand_like(raw_importance) + raise ValueError(f"Unsupported large model prune selection mode: {selection_mode}") + 
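+# Note on resolve_decode_prune_plan below: for similarity_cover_greedy it seeds at the highest-attention visual token, greedily adds tokens by their minimum cosine distance to the already-selected set (lightly biased by 0.05 * attention weight), and hands the chosen tokens to the decoder as a boosted top-k importance mask; every other mode keeps the configured prune ratio and selection unchanged.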
+ +def resolve_decode_prune_plan( + selection_visual_tokens: torch.Tensor, + visual_token_importance: torch.Tensor, + args, +) -> Tuple[torch.Tensor, float, str, int]: + raw_importance = visual_token_importance.detach().float().view(-1) + visual_token_count = raw_importance.numel() + if args.large_model_prune_selection != "similarity_cover_greedy": + prepared = prepare_decode_visual_token_importance(raw_importance, args.large_model_prune_selection) + kept_count = max(1, int(visual_token_count * args.large_model_prune_ratio)) + return prepared, args.large_model_prune_ratio, args.large_model_prune_selection, kept_count + + weights = normalize_visual_token_importance(raw_importance) + features = selection_visual_tokens.detach().float() + if features.dim() == 3: + features = features.reshape(-1, features.shape[-1]) + features = torch.nn.functional.normalize(features, dim=-1) + similarity = (features @ features.T).clamp_min(0.0) + distance = 1.0 - similarity + keep_count = max(1, min(visual_token_count, int(math.ceil(visual_token_count * args.large_model_prune_ratio)))) + seed_index = int(torch.argmax(weights).item()) + selected_list = [seed_index] + selected_mask = torch.zeros(visual_token_count, device=features.device, dtype=torch.bool) + selected_mask[seed_index] = True + min_distance_to_selected = distance[:, seed_index].clone() + + while len(selected_list) < keep_count: + candidate_scores = min_distance_to_selected + 0.05 * weights.to(features.device) + candidate_scores = candidate_scores.masked_fill(selected_mask, float("-inf")) + next_index = int(torch.argmax(candidate_scores).item()) + selected_list.append(next_index) + selected_mask[next_index] = True + min_distance_to_selected = torch.minimum(min_distance_to_selected, distance[:, next_index]) + + selected_indices = torch.tensor(selected_list, device=raw_importance.device, dtype=torch.long) + prepared = torch.zeros_like(raw_importance) + prepared[selected_indices] = 1.0 + weights[selected_indices].to(prepared.device) + kept_count = int(selected_indices.numel()) + keep_ratio = min(1.0, (kept_count + 1e-6) / max(visual_token_count, 1)) + return prepared, keep_ratio, "topk", kept_count + + +def maybe_normalize_visual_token_importance(visual_token_importance: torch.Tensor, args) -> torch.Tensor: + if args.guide_attention_aggregation_mode == "normalized": + return normalize_visual_token_importance(visual_token_importance) + return visual_token_importance.detach().float() + + +def combine_question_and_answer_attention( + question_visual_token_importance: torch.Tensor, + answer_visual_token_importance: torch.Tensor, + args, +) -> torch.Tensor: + question_weight = args.guide_question_attention_weight + answer_weight = args.guide_answer_attention_weight + if question_weight == 0 and answer_weight == 0: + raise ValueError("At least one guide question/answer attention weight must be > 0.") + + return ( + question_weight * maybe_normalize_visual_token_importance(question_visual_token_importance, args) + + answer_weight * maybe_normalize_visual_token_importance(answer_visual_token_importance, args) + ) + + +def resolve_guide_attention_source(args) -> str: + if args.guide_attention_source != "default": + return args.guide_attention_source + if args.guide_reasoning_mode == "two_pass_explicit": + return "combined" + return "answer" + + +def combine_reasoning_and_answer_attention( + reasoning_visual_token_importance: torch.Tensor, + answer_visual_token_importance: torch.Tensor, + args, +) -> torch.Tensor: + attention_source = 
resolve_guide_attention_source(args) + if attention_source == "reasoning": + return args.guide_reasoning_attention_weight * maybe_normalize_visual_token_importance( + reasoning_visual_token_importance, + args, + ) + if attention_source == "answer": + return args.guide_answer_attention_weight * maybe_normalize_visual_token_importance( + answer_visual_token_importance, + args, + ) + + reasoning_weight = args.guide_reasoning_attention_weight + answer_weight = args.guide_answer_attention_weight + if reasoning_weight == 0 and answer_weight == 0: + raise ValueError("At least one guide attention weight must be > 0.") + + return ( + reasoning_weight * maybe_normalize_visual_token_importance(reasoning_visual_token_importance, args) + + answer_weight * maybe_normalize_visual_token_importance(answer_visual_token_importance, args) + ) + + +def combine_question_reasoning_and_answer_attention( + question_visual_token_importance: torch.Tensor, + reasoning_visual_token_importance: torch.Tensor, + answer_visual_token_importance: torch.Tensor, + args, +) -> torch.Tensor: + attention_source = resolve_guide_attention_source(args) + if attention_source == "reasoning": + return args.guide_reasoning_attention_weight * maybe_normalize_visual_token_importance( + reasoning_visual_token_importance, + args, + ) + if attention_source == "answer": + return combine_question_and_answer_attention( + question_visual_token_importance, + answer_visual_token_importance, + args, + ) + + return combine_question_and_answer_attention( + question_visual_token_importance, + answer_visual_token_importance, + args, + ) + args.guide_reasoning_attention_weight * reasoning_visual_token_importance.detach().float() + + +def build_guide_attention_question(question: str, args) -> str: + if args.guide_reasoning_mode == "short_cot": + return GUIDE_ATTENTION_COT_PROMPT_TEMPLATE.replace("{question}", strip_base_prompt(question)) + if args.guide_reasoning_mode == "explicit_cot": + return append_instruction(strip_base_prompt(question), GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION) + return question + + +def build_guide_reasoning_question(question: str) -> str: + return GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE.replace( + "{question}", + strip_base_prompt(question), + ) + + +def build_guide_text_question(question: str) -> str: + return append_instruction(question, GUIDE_TEXT_HINT_INSTRUCTION) + + +def build_decode_question(question: str, guide_text_hint: Optional[str]) -> str: + if not guide_text_hint: + return question + return append_instruction( + question, + f"Guide hint: {guide_text_hint}\n{GUIDED_DECODE_INSTRUCTION}", + ) + + +def make_guide_attention_generation_config(base_generation_config: dict, args) -> dict: + if args.guide_reasoning_mode in {"short_cot", "explicit_cot", "two_pass_explicit"}: + return make_custom_generation_config( + base_generation_config, + max_new_tokens=args.guide_reasoning_max_new_tokens, + temperature=args.guide_reasoning_temperature, + return_dict_in_generate=True, + output_scores=True, + output_attentions=True, + ) + return dict(base_generation_config) + + +def make_guide_text_generation_config(base_generation_config: dict, args) -> dict: + return make_custom_generation_config( + base_generation_config, + max_new_tokens=args.guide_text_max_new_tokens, + temperature=args.guide_text_temperature, + return_dict_in_generate=False, + output_scores=False, + output_attentions=False, + ) + + +@torch.inference_mode() +def run_text_generation_branch( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: 
torch.Tensor, + question: str, + generation_config: dict, +) -> str: + query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0]) + model_inputs = tokenizer(query, return_tensors="pt") + input_device = model_text_device(model) + input_ids = model_inputs["input_ids"].to(input_device) + attention_mask = model_inputs["attention_mask"].to(input_device) + eos_token_id = tokenizer.convert_tokens_to_ids(template.sep) + input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens) + + run_config = dict(generation_config) + run_config["eos_token_id"] = eos_token_id + output_ids = model.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=None, + output_hidden_states=None, + return_dict=None, + use_cache=True, + **run_config, + ) + response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] + return response.split(template.sep)[0].strip() + + +def run_decode_answer( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, + decode_visual_token_importance: torch.Tensor, + decode_prune_ratio: float, + decode_prune_keep_count: int, + decode_prune_selection: str, + args, +) -> str: + return run_decode_branch( + model, + tokenizer, + projected_visual_tokens, + question, + generation_config, + decode_visual_token_importance, + args.large_model_prune_layer, + decode_prune_ratio, + decode_prune_keep_count, + decode_prune_selection, + args.large_model_similarity_target_coverage, + args.large_model_similarity_min_gain, + args.large_model_similarity_min_keep, + args.large_model_similarity_max_keep_ratio, + ) + + +@torch.inference_mode() +def run_guide_two_pass_explicit_branch( + model: InternVLChatModel, + tokenizer, + projected_visual_tokens: torch.Tensor, + question: str, + reasoning_generation_config: dict, + answer_generation_config: dict, + consistency_token_ratio: float, + args, +) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, str, Dict[str, object]]: + answer_result = run_guide_generation( + model, + tokenizer, + projected_visual_tokens, + question, + answer_generation_config, + ) + reasoning_result = run_guide_generation( + model, + tokenizer, + projected_visual_tokens, + build_guide_reasoning_question(question), + reasoning_generation_config, + ) + reasoning = reasoning_result["response"] + + reasoning_step_mask, reasoning_filter_debug = build_reasoning_attention_step_mask_and_debug( + tokenizer, + reasoning_result["outputs"], + args, + ) + reasoning_visual_token_importance = aggregate_attention_from_generation_outputs( + reasoning_result["outputs"], + reasoning_result["visual_token_index"], + reasoning_step_mask, + ) + question_visual_token_importance, answer_visual_token_importance = ( + aggregate_question_and_answer_attention_from_generation_outputs( + answer_result["outputs"], + answer_result["visual_token_index"], + ) + ) + visual_token_importance = combine_question_reasoning_and_answer_attention( + question_visual_token_importance, + reasoning_visual_token_importance, + answer_visual_token_importance, + args, + ) + if args.large_model_prune_selection in {"similarity_greedy", "similarity_cover_greedy"}: + consistency_score = torch.tensor(1.0, device=visual_token_importance.device) + else: + consistency_score = compute_consistency_score( + model, + answer_result["input_embeds"], + answer_result["flat_input_ids"], + answer_result["attention_mask"], + 
answer_result["outputs"]["sequences"][0], + visual_token_importance, + answer_result["visual_token_index"], + consistency_token_ratio, + args.large_model_prune_selection, + ) + return ( + answer_result["response"], + answer_result["outputs"].scores, + consistency_score, + visual_token_importance, + reasoning, + reasoning_filter_debug, + ) + + +def generate_with_reasoning( + guide_model: InternVLChatModel, + guide_tokenizer, + decode_model: InternVLChatModel, + large_tokenizer, + selection_visual_tokens: torch.Tensor, + projected_visual_tokens: torch.Tensor, + question: str, + generation_config: dict, + reasoning_generation_config: dict, + visual_token_importance: torch.Tensor, + args, +) -> Tuple[str, str]: + reasoning_question = append_instruction(question, EXPLICIT_REASONING_INSTRUCTION) + decode_visual_token_importance, decode_prune_ratio, decode_prune_selection, decode_prune_keep_count = resolve_decode_prune_plan( + selection_visual_tokens, + visual_token_importance, + args, + ) + reasoning = run_decode_answer( + decode_model, + large_tokenizer, + projected_visual_tokens, + reasoning_question, + reasoning_generation_config, + decode_visual_token_importance, + decode_prune_ratio, + decode_prune_keep_count, + decode_prune_selection, + args, + ) + final_question = append_instruction( + question, + f"Reasoning:\n{reasoning}\n{DEFAULT_FINAL_ANSWER_INSTRUCTION}", + ) + answer = run_decode_answer( + decode_model, + large_tokenizer, + projected_visual_tokens, + final_question, + generation_config, + decode_visual_token_importance, + decode_prune_ratio, + decode_prune_keep_count, + decode_prune_selection, + args, + ) + return answer, reasoning + + +def evaluate(args): + guide_checkpoint = resolve_hf_snapshot(args.guide_checkpoint) + large_checkpoint = resolve_hf_snapshot(args.large_checkpoint) + + guide_tokenizer = AutoTokenizer.from_pretrained(guide_checkpoint, trust_remote_code=True, use_fast=False) + guide_config = configure_model(guide_checkpoint, use_flash_attn=args.use_flash_attn) + guide_model = load_model( + guide_checkpoint, + guide_config, + auto=args.auto, + load_in_8bit=args.load_in_8bit, + load_in_4bit=args.load_in_4bit, + ) + decode_model, large_tokenizer = build_decode_model( + guide_model, + large_checkpoint, + use_flash_attn=args.use_flash_attn, + auto=args.auto, + load_in_8bit=args.load_in_8bit, + load_in_4bit=args.load_in_4bit, + ) + + guide_image_size = guide_model.config.force_image_size or guide_model.config.vision_config.image_size + large_image_size = decode_model.config.force_image_size or decode_model.config.vision_config.image_size + if guide_image_size != large_image_size: + raise ValueError(f"Guide and decode image size mismatch: {guide_image_size} vs {large_image_size}") + if guide_model.num_image_token != decode_model.num_image_token: + raise ValueError( + f"Guide and decode image token count mismatch: {guide_model.num_image_token} vs {decode_model.num_image_token}" + ) + + data_root = os.path.abspath(args.data_root) + textvqa_root = os.path.abspath(args.textvqa_root) if args.textvqa_root else os.path.join(data_root, "data", "textvqa") + dataset = TextVQADataset( + jsonl_path=os.path.join(textvqa_root, "textvqa_val.jsonl"), + data_root=data_root, + image_size=guide_image_size, + dynamic=args.dynamic, + use_thumbnail=guide_model.config.use_thumbnail, + max_num=args.max_num, + ) + question_id_to_answers = load_annotations(os.path.join(textvqa_root, "textvqa_val_annotations.json")) + generation_config = make_generation_config(args) + guide_attention_generation_config 
= make_guide_attention_generation_config(generation_config, args) + guide_text_generation_config = None + if args.guide_text_mode != "none": + guide_text_generation_config = make_guide_text_generation_config(generation_config, args) + reasoning_generation_config = None + if args.reasoning_mode == "two_pass": + reasoning_generation_config = make_reasoning_generation_config(generation_config, args) + + num_items = len(dataset) if args.limit is None else min(len(dataset), args.limit) + results = [] + filter_debug_results = [] + + for idx in range(num_items): + sample = dataset[idx] + question = sample["question"] + " " + BASE_PROMPT + pixel_values = sample["pixel_values"] + guide_attention_question = build_guide_attention_question(question, args) + + torch.cuda.synchronize() + start = time.time() + raw_visual_tokens = extract_shared_raw_visual_tokens(guide_model, pixel_values) + guide_visual_tokens = project_visual_tokens(guide_model, raw_visual_tokens) + guide_reasoning = None + guide_reasoning_filter_debug = {"backend": "none", "kept_tokens": [], "token_analysis": []} + question_visual_token_importance = None + answer_visual_token_importance = None + if args.guide_reasoning_mode == "two_pass_explicit": + ( + guide_answer, + guide_scores, + consistency_score, + visual_token_importance, + guide_reasoning, + guide_reasoning_filter_debug, + ) = ( + run_guide_two_pass_explicit_branch( + guide_model, + guide_tokenizer, + guide_visual_tokens, + question, + guide_attention_generation_config, + generation_config, + args.consistency_token_ratio, + args, + ) + ) + else: + ( + guide_answer, + guide_scores, + consistency_score, + visual_token_importance, + question_visual_token_importance, + answer_visual_token_importance, + ) = run_guide_branch( + guide_model, + guide_tokenizer, + guide_visual_tokens, + guide_attention_question, + guide_attention_generation_config, + args.consistency_token_ratio, + args, + ) + guide_text_hint = None + if args.guide_text_mode != "none": + if guide_text_generation_config is None: + raise ValueError("guide_text_generation_config is required when guide_text_mode is enabled.") + guide_text_hint = normalize_generated_text( + run_text_generation_branch( + guide_model, + guide_tokenizer, + guide_visual_tokens, + build_guide_text_question(question), + guide_text_generation_config, + ) + ) + torch.cuda.synchronize() + end = time.time() + small_model_time = end - start + + scores = torch.concatenate(guide_scores, dim=0) + scores, _ = scores.softmax(dim=-1).max(dim=-1) + original_confidence = math.pow(torch.prod(scores).item(), 1 / len(scores)) + + torch.cuda.synchronize() + start = time.time() + large_visual_tokens = project_visual_tokens(decode_model, raw_visual_tokens) + ( + decode_visual_token_importance, + decode_prune_ratio, + decode_prune_selection, + kept_visual_token_count, + ) = resolve_decode_prune_plan( + raw_visual_tokens, + visual_token_importance, + args, + ) + decode_question = build_decode_question(question, guide_text_hint) + reasoning = None + if args.reasoning_mode == "none": + large_answer = run_decode_answer( + decode_model, + large_tokenizer, + large_visual_tokens, + decode_question, + generation_config, + decode_visual_token_importance, + decode_prune_ratio, + kept_visual_token_count, + decode_prune_selection, + args, + ) + elif args.reasoning_mode == "prompt": + prompted_question = append_instruction(decode_question, HIDDEN_REASONING_INSTRUCTION) + large_answer = run_decode_answer( + decode_model, + large_tokenizer, + large_visual_tokens, + 
prompted_question, + generation_config, + decode_visual_token_importance, + decode_prune_ratio, + kept_visual_token_count, + decode_prune_selection, + args, + ) + else: + if reasoning_generation_config is None: + raise ValueError("reasoning_generation_config is required when reasoning_mode='two_pass'.") + large_answer, reasoning = generate_with_reasoning( + guide_model, + guide_tokenizer, + decode_model, + large_tokenizer, + raw_visual_tokens, + large_visual_tokens, + decode_question, + generation_config, + reasoning_generation_config, + visual_token_importance, + args, + ) + torch.cuda.synchronize() + end = time.time() + large_model_time = end - start + + visual_token_count = visual_token_importance.shape[0] + result_item = { + "question_id": sample["question_id"], + "question": sample["question"], + "answer": large_answer, + "pred_answer": large_answer, + "gt_answers": question_id_to_answers[sample["question_id"]], + "small_answer": guide_answer, + "guide_attention_output": guide_answer, + "large_answer": large_answer, + "small_model_time": small_model_time, + "large_model_time": large_model_time, + "original_confidence": original_confidence, + "consistency_score": consistency_score.item(), + "visual_token_count": visual_token_count, + "kept_visual_token_count": kept_visual_token_count, + } + if args.save_visual_token_importance: + result_item["visual_token_importance_stats"] = summarize_visual_token_importance( + visual_token_importance, + topk=args.visual_token_importance_topk, + ) + if question_visual_token_importance is not None: + result_item["question_visual_token_importance_stats"] = summarize_visual_token_importance( + question_visual_token_importance, + topk=args.visual_token_importance_topk, + ) + if answer_visual_token_importance is not None: + result_item["answer_visual_token_importance_stats"] = summarize_visual_token_importance( + answer_visual_token_importance, + topk=args.visual_token_importance_topk, + ) + if guide_text_hint is not None: + result_item["guide_text_hint"] = guide_text_hint + if args.save_reasoning and guide_reasoning is not None: + result_item["guide_reasoning"] = guide_reasoning + if args.save_reasoning and reasoning is not None: + result_item["large_reasoning"] = reasoning + results.append(result_item) + filter_debug_results.append( + { + "question_id": sample["question_id"], + "question": sample["question"], + "small_answer": guide_answer, + "large_answer": large_answer, + "guide_reasoning": guide_reasoning, + "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode, + "guide_reasoning_filter_backend": guide_reasoning_filter_debug.get("backend", "none"), + "kept_tokens": guide_reasoning_filter_debug.get("kept_tokens", []), + "token_analysis": guide_reasoning_filter_debug.get("token_analysis", []), + } + ) + if (idx + 1) % args.log_every == 0 or idx + 1 == num_items: + status = ( + f"[{idx + 1}/{num_items}] question_id={sample['question_id']} " + f"small={guide_answer} large={large_answer} kept={kept_visual_token_count}/{visual_token_count}" + ) + if guide_text_hint is not None: + status += f" hint={guide_text_hint}" + print(status) + sys.stdout.flush() + + evaluator = TextVQAAccuracyEvaluator() + accuracy = evaluator.eval_pred_list(results) + + os.makedirs(args.out_dir, exist_ok=True) + run_name = args.run_name or "textvqa_shared_vision_2bguide_8btext" + result_path = os.path.join(args.out_dir, f"{run_name}.json") + summary_path = os.path.join(args.out_dir, f"{run_name}.summary.json") + filter_debug_path = os.path.join(args.out_dir, 
f"{run_name}.filter_debug.json") + + with open(result_path, "w") as f: + json.dump(results, f, ensure_ascii=False, indent=2) + with open(filter_debug_path, "w") as f: + json.dump(filter_debug_results, f, ensure_ascii=False, indent=2) + + avg_kept_visual_token_count = sum(item["kept_visual_token_count"] for item in results) / max(len(results), 1) + avg_visual_token_count = sum(item["visual_token_count"] for item in results) / max(len(results), 1) + avg_kept_visual_token_ratio = sum( + item["kept_visual_token_count"] / max(item["visual_token_count"], 1) + for item in results + ) / max(len(results), 1) + + summary = { + "mode": "shared_vision_guided", + "guide_checkpoint": guide_checkpoint, + "large_checkpoint": large_checkpoint, + "count": num_items, + "accuracy": accuracy, + "large_model_prune_layer": args.large_model_prune_layer, + "large_model_prune_ratio": args.large_model_prune_ratio, + "large_model_prune_selection": args.large_model_prune_selection, + "large_model_similarity_target_coverage": args.large_model_similarity_target_coverage, + "large_model_similarity_min_gain": args.large_model_similarity_min_gain, + "large_model_similarity_min_keep": args.large_model_similarity_min_keep, + "large_model_similarity_max_keep_ratio": args.large_model_similarity_max_keep_ratio, + "consistency_token_ratio": args.consistency_token_ratio, + "guide_reasoning_mode": args.guide_reasoning_mode, + "guide_reasoning_max_new_tokens": args.guide_reasoning_max_new_tokens, + "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode, + "guide_attention_aggregation_mode": args.guide_attention_aggregation_mode, + "guide_attention_source": resolve_guide_attention_source(args), + "guide_reasoning_attention_weight": args.guide_reasoning_attention_weight, + "guide_answer_attention_weight": args.guide_answer_attention_weight, + "guide_question_attention_weight": args.guide_question_attention_weight, + "guide_text_mode": args.guide_text_mode, + "guide_text_max_new_tokens": args.guide_text_max_new_tokens, + "avg_visual_token_count": avg_visual_token_count, + "avg_kept_visual_token_count": avg_kept_visual_token_count, + "avg_kept_visual_token_ratio": avg_kept_visual_token_ratio, + "avg_small_model_time": sum(item["small_model_time"] for item in results) / max(len(results), 1), + "avg_large_model_time": sum(item["large_model_time"] for item in results) / max(len(results), 1), + "results_file": result_path, + "filter_debug_file": filter_debug_path, + } + with open(summary_path, "w") as f: + json.dump(summary, f, ensure_ascii=False, indent=2) + + print(f"accuracy: {accuracy:.6f}") + print(f"avg_kept_visual_token_ratio: {avg_kept_visual_token_ratio:.6f}") + print(f"avg_kept_visual_token_count: {avg_kept_visual_token_count:.2f}") + print(f"results_file: {result_path}") + print(f"summary_file: {summary_path}") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--guide-checkpoint", type=str, required=True) + parser.add_argument("--large-checkpoint", type=str, required=True) + parser.add_argument("--data-root", type=str, default=str(REPO_ROOT)) + parser.add_argument("--textvqa-root", type=str, default="") + parser.add_argument("--out-dir", type=str, default=str(REPO_ROOT / "outputs" / "shared_vision_guided")) + parser.add_argument("--run-name", type=str, default="") + parser.add_argument("--limit", type=int, default=None) + parser.add_argument("--max-new-tokens", type=int, default=10) + parser.add_argument("--num-beams", type=int, default=1) + parser.add_argument("--temperature", type=float, 
default=0.0) + parser.add_argument("--reasoning-mode", type=str, choices=["none", "prompt", "two_pass"], default="none") + parser.add_argument("--reasoning-max-new-tokens", type=int, default=64) + parser.add_argument("--reasoning-temperature", type=float, default=0.0) + parser.add_argument("--save-reasoning", action="store_true") + parser.add_argument( + "--guide-reasoning-mode", + type=str, + choices=["none", "short_cot", "explicit_cot", "two_pass_explicit"], + default="none", + ) + parser.add_argument("--guide-reasoning-max-new-tokens", type=int, default=1024) + parser.add_argument("--guide-reasoning-temperature", type=float, default=0.0) + parser.add_argument( + "--guide-reasoning-filter-mode", + type=str, + choices=["none", "pos_ner"], + default="none", + ) + parser.add_argument( + "--guide-attention-source", + type=str, + choices=["default", "reasoning", "answer", "combined"], + default="default", + ) + parser.add_argument( + "--guide-attention-aggregation-mode", + type=str, + choices=["raw", "normalized"], + default="raw", + ) + parser.add_argument("--guide-question-attention-weight", type=float, default=1.0) + parser.add_argument("--guide-reasoning-attention-weight", type=float, default=1.0) + parser.add_argument("--guide-answer-attention-weight", type=float, default=1.0) + parser.add_argument("--guide-text-mode", type=str, choices=["none", "short_rationale"], default="none") + parser.add_argument("--guide-text-max-new-tokens", type=int, default=12) + parser.add_argument("--guide-text-temperature", type=float, default=0.0) + parser.add_argument("--save-visual-token-importance", action="store_true") + parser.add_argument("--visual-token-importance-topk", type=int, default=16) + parser.add_argument("--dynamic", action="store_true") + parser.add_argument("--max-num", type=int, default=6) + parser.add_argument("--log-every", type=int, default=20) + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--large-model-prune-layer", type=float, default=0.0) + parser.add_argument("--large-model-prune-ratio", type=float, default=0.4) + parser.add_argument( + "--large-model-prune-selection", + type=str, + choices=["topk", "random", "similarity_greedy", "similarity_cover_greedy"], + default="topk", + ) + parser.add_argument("--large-model-similarity-target-coverage", type=float, default=0.9) + parser.add_argument("--large-model-similarity-min-gain", type=float, default=0.0) + parser.add_argument("--large-model-similarity-min-keep", type=int, default=1) + parser.add_argument("--large-model-similarity-max-keep-ratio", type=float, default=1.0) + parser.add_argument("--consistency-token-ratio", type=float, default=0.05) + parser.add_argument("--auto", action="store_true") + parser.add_argument("--load-in-8bit", action="store_true") + parser.add_argument("--load-in-4bit", action="store_true") + parser.add_argument("--use-flash-attn", action="store_true") + args = parser.parse_args() + + if not torch.cuda.is_available(): + raise RuntimeError("CUDA is required for shared-vision guided evaluation.") + if args.large_model_prune_ratio <= 0 or args.large_model_prune_ratio > 1: + raise ValueError("large-model-prune-ratio must be in (0, 1].") + if args.consistency_token_ratio <= 0 or args.consistency_token_ratio > 1: + raise ValueError("consistency-token-ratio must be in (0, 1].") + if args.guide_reasoning_attention_weight < 0 or args.guide_answer_attention_weight < 0: + raise ValueError("guide reasoning/answer attention weights must be >= 0.") + if args.guide_question_attention_weight < 
0: + raise ValueError("guide question attention weight must be >= 0.") + if args.guide_reasoning_mode == "two_pass_explicit": + attention_source = resolve_guide_attention_source(args) + if attention_source == "reasoning" and args.guide_reasoning_attention_weight == 0: + raise ValueError("guide_reasoning_attention_weight must be > 0 when guide-attention-source=reasoning.") + if ( + attention_source == "answer" + and args.guide_question_attention_weight == 0 + and args.guide_answer_attention_weight == 0 + ): + raise ValueError( + "At least one of guide_question_attention_weight or guide_answer_attention_weight " + "must be > 0 when guide-attention-source=answer." + ) + if ( + attention_source == "combined" + and args.guide_question_attention_weight == 0 + and args.guide_reasoning_attention_weight == 0 + and args.guide_answer_attention_weight == 0 + ): + raise ValueError("At least one guide attention weight must be > 0 for two_pass_explicit.") + if ( + args.guide_reasoning_mode != "two_pass_explicit" + and args.guide_question_attention_weight == 0 + and args.guide_answer_attention_weight == 0 + ): + raise ValueError("At least one guide question/answer attention weight must be > 0.") + + random.seed(args.seed) + torch.manual_seed(args.seed) + evaluate(args) + + +if __name__ == "__main__": + main() diff --git a/isolated/sim_greedy/upstream_sgl/eval/vqa/textvqa_eval.py b/isolated/sim_greedy/upstream_sgl/eval/vqa/textvqa_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..8839eea8b34bb76e8bba0e238d72175fcb14fcc3 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/eval/vqa/textvqa_eval.py @@ -0,0 +1,345 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# copied from https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/m4c_evaluator.py +import re + +from tqdm import tqdm + + +class EvalAIAnswerProcessor: + """ + Processes an answer similar to Eval AI + copied from + https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897 + """ + + CONTRACTIONS = { + 'aint': "ain't", + 'arent': "aren't", + 'cant': "can't", + 'couldve': "could've", + 'couldnt': "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + 'didnt': "didn't", + 'doesnt': "doesn't", + 'dont': "don't", + 'hadnt': "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + 'hasnt': "hasn't", + 'havent': "haven't", + 'hed': "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + 'hes': "he's", + 'howd': "how'd", + 'howll': "how'll", + 'hows': "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + 'Im': "I'm", + 'Ive': "I've", + 'isnt': "isn't", + 'itd': "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + 'itll': "it'll", + "let's": "let's", + 'maam': "ma'am", + 'mightnt': "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + 'mightve': "might've", + 'mustnt': "mustn't", + 'mustve': "must've", + 'neednt': "needn't", + 'notve': "not've", + 'oclock': "o'clock", + 'oughtnt': "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + 'shant': "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + 'shouldve': "should've", + 'shouldnt': "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": 'somebodyd', + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + 'somebodyll': "somebody'll", + 'somebodys': "somebody's", + 'someoned': "someone'd", + "someoned've": "someone'd've", + "someone'dve": 
"someone'd've", + 'someonell': "someone'll", + 'someones': "someone's", + 'somethingd': "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + 'somethingll': "something'll", + 'thats': "that's", + 'thered': "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + 'therere': "there're", + 'theres': "there's", + 'theyd': "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + 'theyll': "they'll", + 'theyre': "they're", + 'theyve': "they've", + 'twas': "'twas", + 'wasnt': "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + 'weve': "we've", + 'werent': "weren't", + 'whatll': "what'll", + 'whatre': "what're", + 'whats': "what's", + 'whatve': "what've", + 'whens': "when's", + 'whered': "where'd", + 'wheres': "where's", + 'whereve': "where've", + 'whod': "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + 'wholl': "who'll", + 'whos': "who's", + 'whove': "who've", + 'whyll': "why'll", + 'whyre': "why're", + 'whys': "why's", + 'wont': "won't", + 'wouldve': "would've", + 'wouldnt': "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + 'yall': "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + 'youd': "you'd", + "youd've": "you'd've", + "you'dve": "you'd've", + 'youll': "you'll", + 'youre': "you're", + 'youve': "you've", + } + + NUMBER_MAP = { + 'none': '0', + 'zero': '0', + 'one': '1', + 'two': '2', + 'three': '3', + 'four': '4', + 'five': '5', + 'six': '6', + 'seven': '7', + 'eight': '8', + 'nine': '9', + 'ten': '10', + } + ARTICLES = ['a', 'an', 'the'] + PERIOD_STRIP = re.compile(r'(?!<=\d)(\.)(?!\d)') + COMMA_STRIP = re.compile(r'(?<=\d)(\,)+(?=\d)') + PUNCTUATIONS = [ + ';', + r'/', + '[', + ']', + '"', + '{', + '}', + '(', + ')', + '=', + '+', + '\\', + '_', + '-', + '>', + '<', + '@', + '`', + ',', + '?', + '!', + ] + + def __init__(self, *args, **kwargs): + pass + + def word_tokenize(self, word): + word = word.lower() + word = word.replace(',', '').replace('?', '').replace("'s", " 's") + return word.strip() + + def process_punctuation(self, in_text): + out_text = in_text + for p in self.PUNCTUATIONS: + if (p + ' ' in in_text or ' ' + p in in_text) or ( + re.search(self.COMMA_STRIP, in_text) is not None + ): + out_text = out_text.replace(p, '') + else: + out_text = out_text.replace(p, ' ') + out_text = self.PERIOD_STRIP.sub('', out_text, re.UNICODE) + return out_text + + def process_digit_article(self, in_text): + out_text = [] + temp_text = in_text.lower().split() + for word in temp_text: + word = self.NUMBER_MAP.setdefault(word, word) + if word not in self.ARTICLES: + out_text.append(word) + else: + pass + for word_id, word in enumerate(out_text): + if word in self.CONTRACTIONS: + out_text[word_id] = self.CONTRACTIONS[word] + out_text = ' '.join(out_text) + return out_text + + def __call__(self, item): + item = self.word_tokenize(item) + item = item.replace('\n', ' ').replace('\t', ' ').strip() + item = self.process_punctuation(item) + item = self.process_digit_article(item) + return item + + +class TextVQAAccuracyEvaluator: + def __init__(self): + self.answer_processor = EvalAIAnswerProcessor() + + def _compute_answer_scores(self, raw_answers): + """ + compute the accuracy (soft score) of human answers + """ + answers = [self.answer_processor(a) for a in raw_answers] + assert len(answers) == 10 + gt_answers = list(enumerate(answers)) + unique_answers = set(answers) + unique_answer_scores = {} + + for 
unique_answer in unique_answers: + accs = [] + for gt_answer in gt_answers: + other_answers = [item for item in gt_answers if item != gt_answer] + matching_answers = [ + item for item in other_answers if item[1] == unique_answer + ] + acc = min(1, float(len(matching_answers)) / 3) + accs.append(acc) + unique_answer_scores[unique_answer] = sum(accs) / len(accs) + + return unique_answer_scores + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in tqdm(pred_list): + pred_answer = self.answer_processor(entry['pred_answer']) + unique_answer_scores = self._compute_answer_scores(entry['gt_answers']) + score = unique_answer_scores.get(pred_answer, 0.0) + pred_scores.append(score) + + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + def eval_pred_scores(self, pred_list): + pred_scores = [] + for entry in tqdm(pred_list): + pred_answer = self.answer_processor(entry['pred_answer']) + unique_answer_scores = self._compute_answer_scores(entry['gt_answers']) + score = unique_answer_scores.get(pred_answer, 0.0) + pred_scores.append(score) + + return pred_scores + + +class STVQAAccuracyEvaluator: + def __init__(self): + self.answer_processor = EvalAIAnswerProcessor() + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in pred_list: + pred_answer = self.answer_processor(entry['pred_answer']) + gts = [self.answer_processor(a) for a in entry['gt_answers']] + score = 1.0 if pred_answer in gts else 0.0 + pred_scores.append(score) + + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + +class STVQAANLSEvaluator: + def __init__(self): + import editdistance # install with `pip install editdistance` + + self.get_edit_distance = editdistance.eval + + def get_anls(self, s1, s2): + s1 = s1.lower().strip() + s2 = s2.lower().strip() + iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2)) + anls = iou if iou >= 0.5 else 0.0 + return anls + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in pred_list: + anls = max( + self.get_anls(entry['pred_answer'], gt) for gt in entry['gt_answers'] + ) + pred_scores.append(anls) + + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + +class TextCapsBleu4Evaluator: + def __init__(self): + # The following script requires Java 1.8.0 and pycocotools installed. + # The pycocoevalcap can be installed with pip as + # pip install git+https://github.com/ronghanghu/coco-caption.git@python23 + # Original pycocoevalcap code is at https://github.com/tylin/coco-caption + # but has no python3 support yet. + try: + from pycocoevalcap.bleu.bleu import Bleu + from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer + except ModuleNotFoundError: + print( + 'Please install pycocoevalcap module using ' + 'pip install git+https://github.com/ronghanghu/coco-caption.git@python23' # noqa + ) + raise + + self.tokenizer = PTBTokenizer() + self.scorer = Bleu(4) + + def eval_pred_list(self, pred_list): + # Create reference and hypotheses captions. 
+ gts = {} + res = {} + for idx, entry in enumerate(pred_list): + gts[idx] = [{'caption': a} for a in entry['gt_answers']] + res[idx] = [{'caption': entry['pred_answer']}] + + gts = self.tokenizer.tokenize(gts) + res = self.tokenizer.tokenize(res) + score, _ = self.scorer.compute_score(gts, res) + + bleu4 = score[3] # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4) + return bleu4 diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/__init__.py b/isolated/sim_greedy/upstream_sgl/internvl/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/configuration_internlm2.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/configuration_internlm2.py new file mode 100644 index 0000000000000000000000000000000000000000..282b13b1e2066ecc074ecae87b35a19d251f0ed7 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/configuration_internlm2.py @@ -0,0 +1,150 @@ +# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved. +# +# This code is based on transformers/src/transformers/models/llama/configuration_llama.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" InternLM2 model configuration""" + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + +INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {} + + +# Modified from transformers.model.llama.configuration_llama.LlamaConfig +class InternLM2Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate + an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the InternLM2-7B. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 32000): + Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`InternLM2Model`] + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 11008): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + num_key_value_heads (`int`, *optional*): + This is the number of key_value heads that should be used to implement Grouped Query Attention. 
If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to + `num_attention_heads`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + tie_word_embeddings(`bool`, *optional*, defaults to `False`): + Whether to tie weight embeddings + Example: + + """ + model_type = 'internlm2' + _auto_class = 'AutoConfig' + + def __init__( # pylint: disable=W0102 + self, + vocab_size=103168, + hidden_size=4096, + intermediate_size=11008, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=None, + hidden_act='silu', + max_position_embeddings=2048, + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + tie_word_embeddings=False, + bias=True, + rope_theta=10000, + rope_scaling=None, + attn_implementation='eager', + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.bias = bias + + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads + + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self._rope_scaling_validation() + + self.attn_implementation = attn_implementation + if self.attn_implementation is None: + self.attn_implementation = 'eager' + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + def _rope_scaling_validation(self): + """ + Validate the `rope_scaling` configuration. 
+ """ + if self.rope_scaling is None: + return + + if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: + raise ValueError( + '`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, ' + f'got {self.rope_scaling}' + ) + rope_scaling_type = self.rope_scaling.get('type', None) + rope_scaling_factor = self.rope_scaling.get('factor', None) + if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']: + raise ValueError( + f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" + ) + if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0: + raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}") diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/modeling_internlm2.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/modeling_internlm2.py new file mode 100644 index 0000000000000000000000000000000000000000..94baff697d25f640ff95afa87da3267c022cb0e2 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/modeling_internlm2.py @@ -0,0 +1,1709 @@ +# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved. +# +# This code is based on transformers/src/transformers/models/llama/modeling_llama.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch InternLM2 model.""" +import math +import queue +import threading +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from einops import rearrange +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from transformers.activations import ACT2FN +from transformers.modeling_outputs import (BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import (add_start_docstrings, + add_start_docstrings_to_model_forward, logging, + replace_return_docstrings) + +from transformers import LogitsProcessorList, StoppingCriteriaList, GenerationConfig +from transformers.generation.utils import GenerateNonBeamOutput, GenerateDecoderOnlyOutput + +try: + from transformers.generation.streamers import BaseStreamer +except: # noqa # pylint: disable=bare-except + BaseStreamer = None + +from .configuration_internlm2 import InternLM2Config +from ..token_pruning import select_visual_token_indices + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = 'InternLM2Config' + +flash_attn_func, flash_attn_varlen_func = None, None +pad_input, index_first_axis, unpad_input = None, None, None +try: + from flash_attn import flash_attn_func as _flash_attn_func + from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis as _index_first_axis + from flash_attn.bert_padding import pad_input as _pad_input + from flash_attn.bert_padding import unpad_input as _unpad_input + + flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func + pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input + has_flash_attn = True +except: + has_flash_attn = False + + +def _import_flash_attn(): + global flash_attn_func, flash_attn_varlen_func + global pad_input, index_first_axis, unpad_input + try: + from flash_attn import flash_attn_func as _flash_attn_func + from flash_attn import \ + flash_attn_varlen_func as _flash_attn_varlen_func + from flash_attn.bert_padding import \ + index_first_axis as _index_first_axis + from flash_attn.bert_padding import pad_input as _pad_input + from flash_attn.bert_padding import unpad_input as _unpad_input + flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func + pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input + except ImportError: + raise ImportError('flash_attn is not installed.') + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. 
+ """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2 +class InternLM2RMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + InternLM2RMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +try: + from functools import partial + + from apex.normalization import FusedRMSNorm + InternLM2RMSNorm = partial(FusedRMSNorm, eps=1e-6) # noqa + print('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternLM2RMSNorm') +except ImportError: + # using the normal LlamaRMSNorm + pass +except Exception: + print('discovered apex but it failed to load, falling back to InternLM2RMSNorm') + pass + + +# Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2 +class InternLM2RotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer('inv_freq', inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. 
+ self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype) + + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False) + self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +# Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2 +class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding): + """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype) + t = t / self.scaling_factor + + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False) + self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False) + + +# Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2 +class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding): + """InternLM2RotaryEmbedding extended with Dynamic NTK scaling. + Credits to the Reddit users /u/bloc97 and /u/emozilla. 
+ """ + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer('inv_freq', inv_freq, persistent=False) + + t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype) + + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False) + self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False) + + +# Copied from transformers.model.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2:] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors.""" + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class InternLM2MLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x)) + + return down_proj + + +# Copied from transformers.model.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +# Modified from transformers.model.llama.modeling_llama.LlamaAttention +class InternLM2Attention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: InternLM2Config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.is_causal = True + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}' + f' and `num_heads`: {self.num_heads}).' + ) + + self.wqkv = nn.Linear( + self.hidden_size, + (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim, + bias=config.bias, + ) + + self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias) + self._init_rope() + + def _init_rope(self): + if self.config.rope_scaling is None: + self.rotary_emb = InternLM2RotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.config.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling['type'] + scaling_factor = self.config.rope_scaling['factor'] + if scaling_type == 'dynamic': + self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.config.rope_theta, + scaling_factor=scaling_factor, + ) + elif scaling_type == 'linear': + self.rotary_emb = InternLM2LinearScalingRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.config.rope_theta, + scaling_factor=scaling_factor, + ) + else: + raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.") + return self.rotary_emb + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if 'padding_mask' in kwargs: + warnings.warn( + 'Passing `padding_mask` is deprecated and will be removed in v4.37. 
' + 'Please make sure use `attention_mask` instead.`' + ) + + bsz, q_len, _ = hidden_states.size() + + qkv_states = self.wqkv(hidden_states) + + qkv_states = rearrange( + qkv_states, + 'b q (h gs d) -> b q h gs d', + gs=2 + self.num_key_value_groups, + d=self.head_dim, + ) + + query_states = qkv_states[..., : self.num_key_value_groups, :] + query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d') + key_states = qkv_states[..., -2, :] + value_states = qkv_states[..., -1, :] + + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + prunded_sequence_length = kwargs["prunded_sequence_length"] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is' + f' {attn_weights.size()}' + ) + + if attention_mask is not None: + if attention_mask.size(-1) < kv_seq_len: + pad_width = kv_seq_len - attention_mask.size(-1) + attention_mask = F.pad(attention_mask, (0, pad_width), value=0) + elif attention_mask.size(-1) > kv_seq_len: + attention_mask = attention_mask[:, :, :, :kv_seq_len] + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}' + ) + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is' + f' {attn_output.size()}' + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.wo(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +# Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2 +class InternLM2FlashAttention2(InternLM2Attention): + """ + InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. 
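+
+    Pruning-aware detail (as implemented in the forward pass below): the layer receives
+    `prunded_sequence_length`, the number of visual tokens already dropped, and sizes the rotary
+    cache as `kv_seq_len + prunded_sequence_length`. The surviving tokens keep their original
+    `position_ids`, so, for example, a cache holding 1024 tokens after 256 visual tokens were
+    pruned can still index positions up to 1279 and the cos/sin tables must cover 1280 positions.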
+ """ + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + + + # InternLM2FlashAttention2 attention does not support output_attentions + if 'padding_mask' in kwargs: + warnings.warn( + 'Passing `padding_mask` is deprecated and will be removed in v4.37. ' + 'Please make sure use `attention_mask` instead.`' + ) + + # overwrite attention_mask with padding_mask + attention_mask = kwargs.pop('padding_mask') + + output_attentions = False + + bsz, q_len, _ = hidden_states.size() + + qkv_states = self.wqkv(hidden_states) + + qkv_states = rearrange( + qkv_states, + 'b q (h gs d) -> b q h gs d', + gs=2 + self.num_key_value_groups, + d=self.head_dim, + ) + + query_states = qkv_states[..., : self.num_key_value_groups, :] + query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d') + key_states = qkv_states[..., -2, :] + value_states = qkv_states[..., -1, :] + + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + prunded_sequence_length = kwargs["prunded_sequence_length"] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, q_len + ) + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.wo(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. 
Default to 1 / sqrt(head_dim) + """ + # Contains at least one padding token in the sequence + causal = self.is_causal and query_length != 1 + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
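+            # With left padding, the last `query_length` columns of the 2-D mask correspond to
+            # the query tokens actually present in this step; a right-padded batch would need a
+            # different slice.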
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q.to(torch.int64), + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +INTERNLM2_ATTENTION_CLASSES = { + 'eager': InternLM2Attention, + 'flash_attention_2': InternLM2FlashAttention2, +} + + +# Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer +class InternLM2DecoderLayer(nn.Module): + def __init__(self, config: InternLM2Config): + super().__init__() + self.hidden_size = config.hidden_size + + self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config) + + self.feed_forward = InternLM2MLP(config) + self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): + attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, + query_sequence_length, key_sequence_length)` if default attention is used. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + if 'padding_mask' in kwargs: + warnings.warn( + 'Passing `padding_mask` is deprecated and will be removed in v4.37. ' + 'Please make sure use `attention_mask` instead.`' + ) + + residual = hidden_states + + hidden_states = self.attention_norm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.attention( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.ffn_norm(hidden_states) + hidden_states = self.feed_forward(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +InternLM2_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`InternLM2Config`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2 +@add_start_docstrings( + 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.', + InternLM2_START_DOCSTRING, +) +class InternLM2PreTrainedModel(PreTrainedModel): + config_class = InternLM2Config + base_model_prefix = 'model' + supports_gradient_checkpointing = True + _no_split_modules = ['InternLM2DecoderLayer'] + _skip_keys_device_placement = 'past_key_values' + _supports_flash_attn_2 = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +InternLM2_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or + when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`. 
+ + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# Modified from transformers.model.llama.modeling_llama.LlamaModel +@add_start_docstrings( + 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.', + InternLM2_START_DOCSTRING, +) +class InternLM2Model(InternLM2PreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
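+
+    In this modified variant, `forward` additionally accepts visual-token pruning arguments
+    (`visual_token_index`, `visual_token_importance`, `large_model_prune_layer`,
+    `large_model_prune_ratio` and the related `large_model_*` options). When
+    `large_model_prune_layer` is set, the visual tokens to keep are chosen by
+    `select_visual_token_indices` once decoding reaches layer
+    K = int(num_hidden_layers * large_model_prune_layer); hidden states, attention mask and
+    position ids are re-indexed to the kept tokens, and the number of removed tokens is forwarded
+    to the attention layers as `prunded_sequence_length`.
+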
Each layer is a [`InternLM2DecoderLayer`] + + Args: + config: InternLM2Config + """ + + _auto_class = 'AutoModel' + + def __init__(self, config: InternLM2Config): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + self.config = config + if not has_flash_attn: + self.config.attn_implementation = 'eager' + print('Warning: Flash attention is not available, using eager attention instead.') + + self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + + self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.tok_embeddings + + def set_input_embeddings(self, value): + self.tok_embeddings = value + + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + visual_token_index: Optional[torch.Tensor] = None, + large_model_prune_layer: Optional[float] = None, + large_model_prune_ratio: Optional[float] = None, + large_model_prune_keep_count: Optional[int] = None, + large_model_prune_selection: Optional[str] = None, + large_model_similarity_target_coverage: Optional[float] = None, + large_model_similarity_min_gain: Optional[float] = None, + large_model_similarity_min_keep: Optional[int] = None, + large_model_similarity_max_keep_ratio: Optional[float] = None, + visual_token_importance: Optional[torch.Tensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.attn_implementation == 'flash_attention_2': + _import_flash_attn() + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError('You cannot specify both 
input_ids and inputs_embeds at the same time') + elif input_ids is not None: + batch_size, seq_length = input_ids.shape[:2] + elif inputs_embeds is not None: + batch_size, seq_length = inputs_embeds.shape[:2] + else: + raise ValueError('You have to specify either input_ids or inputs_embeds') + + seq_length_with_past = seq_length + past_key_values_length = 0 + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0) + + if inputs_embeds is None: + inputs_embeds = self.tok_embeddings(input_ids) + + if self.config.attn_implementation == 'flash_attention_2': + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + # embed positions + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + if large_model_prune_layer is not None: + token_prune = True + K = int(len(self.layers) * large_model_prune_layer) + keep_ratio = large_model_prune_ratio + visual_token_length = int(visual_token_index[1] - visual_token_index[0] + 1) + else: + token_prune = False + + + aggregated_viusal_token_attention = 0 if output_attentions else None + prunded_sequence_length = 0 + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, None) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + position_ids, + None, + ) + else: + + ##### 某一层 random pruning ######### + if token_prune: + if hidden_states.shape[1] != 1: + if idx == K: + device = hidden_states.device + selected_visual_index = select_visual_token_indices( + hidden_states, + visual_token_importance, + visual_token_index, + keep_ratio, + large_model_prune_selection or "topk", + similarity_target_coverage=large_model_similarity_target_coverage or 0.9, + similarity_min_gain=large_model_similarity_min_gain or 0.0, + similarity_min_keep=large_model_similarity_min_keep or 1, + similarity_max_keep_ratio=large_model_similarity_max_keep_ratio or 1.0, + ) + int(visual_token_index[0]) + keep_indexs = torch.cat(( + torch.arange(int(visual_token_index[0]), device=device), + selected_visual_index.to(device), + 
torch.arange(int(visual_token_index[1] + 1), seq_length, device=device), + )) + keep_indexs = keep_indexs.sort().values + hidden_states = hidden_states[:, keep_indexs,:] + if attention_mask is not None: + attention_mask = attention_mask[:,:,:hidden_states.shape[1], :hidden_states.shape[1]] + position_ids = keep_indexs.unsqueeze(0) + prunded_sequence_length = visual_token_length - ( + large_model_prune_keep_count if large_model_prune_keep_count is not None else selected_visual_index.numel() + ) + + + else: + if idx == K: + visual_token_length = visual_token_index[1] - visual_token_index[0] + 1 + kept_count = ( + large_model_prune_keep_count + if large_model_prune_keep_count is not None + else int(visual_token_length * keep_ratio) + ) + prunded_sequence_length = visual_token_length - kept_count + if attention_mask is not None: + attention_mask = attention_mask[:, :, :, prunded_sequence_length:] + + + + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + prunded_sequence_length=prunded_sequence_length + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + # all_self_attns += (layer_outputs[1],) + if layer_outputs[1].shape[2] != 1: + aggregated_viusal_token_attention = aggregated_viusal_token_attention + layer_outputs[1][:, :, visual_token_index[1]:, visual_token_index[0]:visual_token_index[1]+1].sum(dim=(0, 1, 2)) + else: + aggregated_viusal_token_attention = aggregated_viusal_token_attention + layer_outputs[1][:, :, :, visual_token_index[0]:visual_token_index[1]+1].sum(dim=(0, 1, 2)) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + + + out_dict = BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + out_dict.aggregated_viusal_token_attention = aggregated_viusal_token_attention + + return out_dict + + +# Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM +class InternLM2ForCausalLM(InternLM2PreTrainedModel): + _auto_class = 'AutoModelForCausalLM' + + _tied_weights_keys = ['output.weight'] + + def __init__(self, config): + super().__init__(config) + self.model = InternLM2Model(config) + self.vocab_size = config.vocab_size + self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.tok_embeddings + + def set_input_embeddings(self, value): + self.model.tok_embeddings = value + + def get_output_embeddings(self): + return self.output + + def set_output_embeddings(self, new_embeddings): + self.output = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: 
Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + visual_token_index: Optional[torch.Tensor] = None, + large_model_prune_layer: Optional[float] = None, + large_model_prune_ratio: Optional[float] = None, + large_model_prune_keep_count: Optional[int] = None, + large_model_prune_selection: Optional[str] = None, + large_model_similarity_target_coverage: Optional[float] = None, + large_model_similarity_min_gain: Optional[float] = None, + large_model_similarity_min_keep: Optional[int] = None, + large_model_similarity_max_keep_ratio: Optional[float] = None, + visual_token_importance: Optional[torch.Tensor] = None + + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, InternLM2ForCausalLM + + >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
+ ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + visual_token_index=visual_token_index, + large_model_prune_layer=large_model_prune_layer, + large_model_prune_ratio=large_model_prune_ratio, + large_model_prune_keep_count=large_model_prune_keep_count, + large_model_prune_selection=large_model_prune_selection, + large_model_similarity_target_coverage=large_model_similarity_target_coverage, + large_model_similarity_min_gain=large_model_similarity_min_gain, + large_model_similarity_min_keep=large_model_similarity_min_keep, + large_model_similarity_max_keep_ratio=large_model_similarity_max_keep_ratio, + visual_token_importance=visual_token_importance + ) + + hidden_states = outputs[0] + logits = self.output(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + device = input_ids.device if input_ids is not None else inputs_embeds.device + output = CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + output['aggregated_viusal_token_attention'] = outputs.aggregated_viusal_token_attention + output['logits'] = output['logits'].to(device) + return output + + + def _sample( + self, + input_ids: torch.LongTensor, + logits_processor: LogitsProcessorList, + stopping_criteria: StoppingCriteriaList, + generation_config: GenerationConfig, + synced_gpus: bool, + streamer: Optional["BaseStreamer"], + logits_warper: Optional[LogitsProcessorList], + **model_kwargs, + ) -> Union[GenerateNonBeamOutput, torch.LongTensor]: + # init values + pad_token_id = generation_config._pad_token_tensor + output_attentions = generation_config.output_attentions + output_hidden_states = generation_config.output_hidden_states + output_scores = generation_config.output_scores + output_logits = generation_config.output_logits + return_dict_in_generate = generation_config.return_dict_in_generate + max_length = generation_config.max_length + has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria) + do_sample = generation_config.do_sample + if do_sample is True and not isinstance(logits_warper, LogitsProcessorList): + raise ValueError( + "`do_sample` is set to `True`, `logits_warper` must be a `LogitsProcessorList` instance (it is " + 
f"{logits_warper})." + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + raw_logits = () if (return_dict_in_generate and output_logits) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + batch_size, cur_len = input_ids.shape + this_peer_finished = False + unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device) + model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs) + + aggregated_viusal_token_attention = 0 if output_attentions else None + while self._has_unfinished_sequences( + this_peer_finished, synced_gpus, device=input_ids.device, cur_len=cur_len, max_length=max_length + ): + # prepare model inputs + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # prepare variable output controls (note: some models won't accept all output controls) + model_inputs.update({"output_attentions": output_attentions} if output_attentions else {}) + model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {}) + + # forward pass to get next token + outputs = self(**model_inputs, return_dict=True) + if output_attentions: + aggregated_viusal_token_attention = aggregated_viusal_token_attention + outputs['aggregated_viusal_token_attention'] + + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration + # (the clone itself is always small) + next_token_logits = outputs.logits[:, -1, :].clone() + + # pre-process distribution + next_token_scores = logits_processor(input_ids, next_token_logits) + if do_sample: + next_token_scores = logits_warper(input_ids, next_token_scores) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores,) + if output_logits: + raw_logits += (next_token_logits,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # token selection + if do_sample: + probs = nn.functional.softmax(next_token_scores, dim=-1) + # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + else: + next_tokens = torch.argmax(next_token_scores, dim=-1) + + # finished sentences should have their next token be a padding token + if has_eos_stopping_criteria: + next_tokens = 
next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + if streamer is not None: + streamer.put(next_tokens.cpu()) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + + unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores) + this_peer_finished = unfinished_sequences.max() == 0 + cur_len += 1 + + # This is needed to properly delete outputs.logits which may be very large for first iteration + # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration + del outputs + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + return GenerateEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + out_dict = GenerateDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + out_dict["aggregated_viusal_token_attention"] = aggregated_viusal_token_attention + return out_dict + else: + return input_ids + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + + position_ids = kwargs.get('position_ids', None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1]:] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {'inputs_embeds': inputs_embeds} + else: + model_inputs = {'input_ids': input_ids} + + model_inputs.update( + { + 'position_ids': position_ids, + 'past_key_values': past_key_values, + 'use_cache': kwargs.get('use_cache'), + 'attention_mask': attention_mask, + 'visual_token_index': kwargs.get('visual_token_index'), + 'large_model_prune_layer': kwargs.get('large_model_prune_layer'), + 'large_model_prune_ratio': kwargs.get('large_model_prune_ratio'), + 'large_model_prune_keep_count': kwargs.get('large_model_prune_keep_count'), + 'large_model_prune_selection': kwargs.get('large_model_prune_selection'), + 'large_model_similarity_target_coverage': kwargs.get('large_model_similarity_target_coverage'), + 'large_model_similarity_min_gain': kwargs.get('large_model_similarity_min_gain'), + 'large_model_similarity_min_keep': kwargs.get('large_model_similarity_min_keep'), + 
'large_model_similarity_max_keep_ratio': kwargs.get('large_model_similarity_max_keep_ratio'), + 'visual_token_importance': kwargs.get('visual_token_importance') + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''): + if tokenizer.add_bos_token: + prompt = '' + else: + prompt = tokenizer.bos_token + if meta_instruction: + prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n""" + for record in history: + prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n""" + prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n""" + return tokenizer([prompt], return_tensors='pt') + + @torch.no_grad() + def chat( + self, + tokenizer, + query: str, + history: List[Tuple[str, str]] = [], + streamer: Optional[BaseStreamer] = None, + max_new_tokens: int = 1024, + do_sample: bool = True, + temperature: float = 0.8, + top_p: float = 0.8, + meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n' + '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n' + '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.', + **kwargs, + ): + inputs = self.build_inputs(tokenizer, query, history, meta_instruction) + inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)} + # also add end-of-assistant token in eos token id to avoid unnecessary generation + eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]] + outputs = self.generate( + **inputs, + streamer=streamer, + max_new_tokens=max_new_tokens, + do_sample=do_sample, + temperature=temperature, + top_p=top_p, + eos_token_id=eos_token_id, + **kwargs, + ) + outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]):] + response = tokenizer.decode(outputs, skip_special_tokens=True) + response = response.split('<|im_end|>')[0] + history = history + [(query, response)] + return response, history + + @torch.no_grad() + def stream_chat( + self, + tokenizer, + query: str, + history: List[Tuple[str, str]] = [], + max_new_tokens: int = 1024, + do_sample: bool = True, + temperature: float = 0.8, + top_p: float = 0.8, + **kwargs, + ): + """ + Return a generator in format: (response, history) + Eg. + ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')]) + ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')]) + """ + if BaseStreamer is None: + raise ModuleNotFoundError( + 'The version of `transformers` is too low. Please make sure ' + 'that you have installed `transformers>=4.28.0`.' 
+ ) + + response_queue = queue.Queue(maxsize=20) + + class ChatStreamer(BaseStreamer): + def __init__(self, tokenizer) -> None: + super().__init__() + self.tokenizer = tokenizer + self.queue = response_queue + self.query = query + self.history = history + self.response = '' + self.cache = [] + self.received_inputs = False + self.queue.put((self.response, history + [(self.query, self.response)])) + + def put(self, value): + if len(value.shape) > 1 and value.shape[0] > 1: + raise ValueError('ChatStreamer only supports batch size 1') + elif len(value.shape) > 1: + value = value[0] + + if not self.received_inputs: + # The first received value is input_ids, ignore here + self.received_inputs = True + return + + self.cache.extend(value.tolist()) + token = self.tokenizer.decode(self.cache, skip_special_tokens=True) + if token.strip() != '<|im_end|>': + self.response = self.response + token + history = self.history + [(self.query, self.response)] + self.queue.put((self.response, history)) + self.cache = [] + else: + self.end() + + def end(self): + self.queue.put(None) + + def stream_producer(): + return self.chat( + tokenizer=tokenizer, + query=query, + streamer=ChatStreamer(tokenizer=tokenizer), + history=history, + max_new_tokens=max_new_tokens, + do_sample=do_sample, + temperature=temperature, + top_p=top_p, + **kwargs, + ) + + def consumer(): + producer = threading.Thread(target=stream_producer) + producer.start() + while True: + res = response_queue.get() + if res is None: + return + yield res + + return consumer() + + +# Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2 +@add_start_docstrings( + """ + The InternLM2 Model transformer with a sequence classification head on top (linear layer). + + [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification, + as other causal models (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
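+
+    For example (illustrative values): with right padding and a defined `pad_token_id`, a row
+    `input_ids = [5, 6, 7, pad, pad]` gives
+    `sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1 = 2`,
+    so the classification logits are pooled from position 2, the last non-padding token.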
+ """, + InternLM2_START_DOCSTRING, +) +class InternLM2ForSequenceClassification(InternLM2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = InternLM2Model(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.tok_embeddings + + def set_input_embeddings(self, value): + self.model.tok_embeddings = value + + @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to( + logits.device + ) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = 'regression' + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = 'single_label_classification' + else: + self.config.problem_type = 'multi_label_classification' + + if self.config.problem_type == 'regression': + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == 'single_label_classification': + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == 'multi_label_classification': + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = 
(pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2.py new file mode 100644 index 0000000000000000000000000000000000000000..1be581da37ef678de65f2737493fc0ed7160446e --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2.py @@ -0,0 +1,235 @@ +# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved. +# +# This code is based on transformers/src/transformers/models/llama/tokenization_llama.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tokenization classes for InternLM.""" +import os +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple + +import sentencepiece as spm +from transformers.tokenization_utils import PreTrainedTokenizer +from transformers.utils import logging + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'} + +PRETRAINED_VOCAB_FILES_MAP = {} + + +# Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer +class InternLM2Tokenizer(PreTrainedTokenizer): + """ + Construct a InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding. + + Args: + vocab_file (`str`): + Path to the vocabulary file. 
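+
+    Example (illustrative sketch; the path is a placeholder)::
+
+        tokenizer = InternLM2Tokenizer(vocab_file="path/to/tokenizer.model")
+        ids = tokenizer("hello world").input_ids   # BOS is prepended because add_bos_token=True
+        text = tokenizer.decode(ids, skip_special_tokens=True)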
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + model_input_names = ['input_ids', 'attention_mask'] + _auto_class = 'AutoTokenizer' + + def __init__( + self, + vocab_file, + unk_token='', + bos_token='', + eos_token='', + pad_token='', + sp_model_kwargs: Optional[Dict[str, Any]] = None, + add_bos_token=True, + add_eos_token=False, + decode_with_prefix_space=False, + clean_up_tokenization_spaces=False, + **kwargs, + ): + self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs + self.vocab_file = vocab_file + self.add_bos_token = add_bos_token + self.add_eos_token = add_eos_token + self.decode_with_prefix_space = decode_with_prefix_space + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(vocab_file) + self._no_prefix_space_tokens = None + super().__init__( + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + pad_token=pad_token, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + + @property + def no_prefix_space_tokens(self): + if self._no_prefix_space_tokens is None: + vocab = self.convert_ids_to_tokens(list(range(self.vocab_size))) + self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')} + return self._no_prefix_space_tokens + + @property + def vocab_size(self): + """Returns vocab size""" + return self.sp_model.get_piece_size() + + @property + def bos_token_id(self) -> Optional[int]: + return self.sp_model.bos_id() + + @property + def eos_token_id(self) -> Optional[int]: + return self.sp_model.eos_id() + + def get_vocab(self): + """Returns vocab as a dict""" + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text): + """Returns a tokenized string.""" + return self.sp_model.encode(text, out_type=str) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.piece_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = self.sp_model.IdToPiece(index) + return token + + def _maybe_add_prefix_space(self, tokens, decoded): + if tokens and tokens[0] not in self.no_prefix_space_tokens: + return ' ' + decoded + else: + return decoded + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + current_sub_tokens = [] + out_string = '' + prev_is_special = False + for token in tokens: + # make sure that special tokens are not decoded using sentencepiece model + if token in self.all_special_tokens: + if not prev_is_special: + out_string += ' ' + out_string += self.sp_model.decode(current_sub_tokens) + token + prev_is_special = True + current_sub_tokens = [] + else: + current_sub_tokens.append(token) + prev_is_special = False + out_string += self.sp_model.decode(current_sub_tokens) + out_string = self.clean_up_tokenization(out_string) + out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string) + return out_string[1:] + + def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + + Returns: + `Tuple(str)`: Paths to the files saved. 
+ """ + if not os.path.isdir(save_directory): + logger.error(f'Vocabulary path ({save_directory}) should be a directory') + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, 'wb') as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + if self.add_bos_token: + bos_token_ids = [self.bos_token_id] + else: + bos_token_ids = [] + + output = bos_token_ids + token_ids_0 + + if token_ids_1 is not None: + output = output + token_ids_1 + + if self.add_eos_token: + output = output + [self.eos_token_id] + + return output + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make + use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + eos = [self.eos_token_id] + + if token_ids_1 is None: + return len(token_ids_0 + eos) * [0] + return len(token_ids_0 + eos + token_ids_1 + eos) * [0] diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2_fast.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..aa0fccbd0f1d029d79e19821f2edcb01b594537c --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2_fast.py @@ -0,0 +1,211 @@ +# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved. +# +# This code is based on transformers/src/transformers/models/llama/tokenization_llama_fast.py +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tokenization Fast class for InternLM.""" +import os +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple + +from tokenizers import Tokenizer, decoders, normalizers, processors +from tokenizers.models import BPE +from transformers.convert_slow_tokenizer import (SLOW_TO_FAST_CONVERTERS, + SentencePieceExtractor, + SpmConverter) +from transformers.tokenization_utils_fast import PreTrainedTokenizerFast +from transformers.utils import logging + +from .tokenization_internlm2 import InternLM2Tokenizer + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'} + + +# Modified from transformers.convert_slow_tokenizer.LlamaConverter +class InternLM2Converter(SpmConverter): + handle_byte_fallback = True + + def vocab(self, proto): + vocab = [ + ('', 0.0), + ('', 0.0), + ('', 0.0), + ] + vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] + return vocab + + def unk_id(self, proto): + unk_id = 0 + return unk_id + + def decoder(self, replacement, add_prefix_space): + return decoders.Sequence( + [ + decoders.Replace('▁', ' '), + decoders.ByteFallback(), + decoders.Fuse(), + decoders.Strip(content=' ', left=1), + ] + ) + + def tokenizer(self, proto): + model_type = proto.trainer_spec.model_type + vocab_scores = self.vocab(proto) + # special tokens + added_tokens = self.original_tokenizer.added_tokens_decoder + for i in range(len(vocab_scores)): + piece, score = vocab_scores[i] + if i in added_tokens: + vocab_scores[i] = (added_tokens[i].content, score) + if model_type == 1: + raise RuntimeError('InternLM2 is supposed to be a BPE model!') + + elif model_type == 2: + _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores) + bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)} + tokenizer = Tokenizer( + BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True) + ) + tokenizer.add_special_tokens( + [ added_token for index, added_token in added_tokens.items()] + ) + else: + raise Exception( + "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" + ) + + return tokenizer + + def normalizer(self, proto): + normalizers_list = [] + if proto.normalizer_spec.add_dummy_prefix: + normalizers_list.append(normalizers.Prepend(prepend='▁')) + normalizers_list.append(normalizers.Replace(pattern=' ', content='▁')) + return normalizers.Sequence(normalizers_list) + + def pre_tokenizer(self, replacement, add_prefix_space): + return None + + +SLOW_TO_FAST_CONVERTERS['InternLM2Tokenizer'] = InternLM2Converter + + +# Modified from transformers.model.llama.tokenization_llama_fast.LlamaTokenizerFast -> InternLM2TokenizerFast +class InternLM2TokenizerFast(PreTrainedTokenizerFast): + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class = InternLM2Tokenizer + padding_side = 'left' + model_input_names = ['input_ids', 'attention_mask'] + _auto_class = 'AutoTokenizer' + + def __init__( + self, + vocab_file, + unk_token='', + bos_token='', + eos_token='', + pad_token='', + sp_model_kwargs: 
Optional[Dict[str, Any]] = None, + add_bos_token=True, + add_eos_token=False, + decode_with_prefix_space=False, + clean_up_tokenization_spaces=False, + **kwargs, + ): + super().__init__( + vocab_file=vocab_file, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + sp_model_kwargs=sp_model_kwargs, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, + decode_with_prefix_space=decode_with_prefix_space, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + **kwargs, + ) + self._add_bos_token = add_bos_token + self._add_eos_token = add_eos_token + self.update_post_processor() + self.vocab_file = vocab_file + + @property + def can_save_slow_tokenizer(self) -> bool: + return os.path.isfile(self.vocab_file) if self.vocab_file else False + + def update_post_processor(self): + """ + Updates the underlying post processor with the current `bos_token` and `eos_token`. + """ + bos = self.bos_token + bos_token_id = self.bos_token_id + if bos is None and self.add_bos_token: + raise ValueError('add_bos_token = True but bos_token = None') + + eos = self.eos_token + eos_token_id = self.eos_token_id + if eos is None and self.add_eos_token: + raise ValueError('add_eos_token = True but eos_token = None') + + single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}" + pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}" + + special_tokens = [] + if self.add_bos_token: + special_tokens.append((bos, bos_token_id)) + if self.add_eos_token: + special_tokens.append((eos, eos_token_id)) + self._tokenizer.post_processor = processors.TemplateProcessing( + single=single, pair=pair, special_tokens=special_tokens + ) + + @property + def add_eos_token(self): + return self._add_eos_token + + @property + def add_bos_token(self): + return self._add_bos_token + + @add_eos_token.setter + def add_eos_token(self, value): + self._add_eos_token = value + self.update_post_processor() + + @add_bos_token.setter + def add_bos_token(self, value): + self._add_bos_token = value + self.update_post_processor() + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not self.can_save_slow_tokenizer: + raise ValueError( + 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' + 'tokenizer.' 
+ ) + + if not os.path.isdir(save_directory): + logger.error(f'Vocabulary path ({save_directory}) should be a directory') + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): + copyfile(self.vocab_file, out_vocab_file) + + return (out_vocab_file,) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/__init__.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2d57341208843b8ac0376b4f8cd3e3b9186e3621 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/__init__.py @@ -0,0 +1,13 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2024 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .configuration_intern_vit import InternVisionConfig +from .configuration_internvl_chat import InternVLChatConfig +from .modeling_intern_vit import InternVisionModel +from .modeling_internvl_chat import InternVLChatModel + +__all__ = ['InternVisionConfig', 'InternVisionModel', + 'InternVLChatConfig', 'InternVLChatModel'] diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_intern_vit.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_intern_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..ac60112c79abc35627a5b6b58e760c2f78e71839 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_intern_vit.py @@ -0,0 +1,119 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2024 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +import os +from typing import Union + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class InternVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to + instantiate a vision encoder according to the specified arguments, defining the model architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_channels (`int`, *optional*, defaults to 3): + Number of color channels in the input images (e.g., 3 for RGB). + patch_size (`int`, *optional*, defaults to 14): + The size (resolution) of each patch. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + qkv_bias (`bool`, *optional*, defaults to `False`): + Whether to add a bias to the queries and values in the self-attention layers. + hidden_size (`int`, *optional*, defaults to 3200): + Dimensionality of the encoder layers and the pooler layer. + num_attention_heads (`int`, *optional*, defaults to 25): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 12800): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. 
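+        norm_type (`str`, *optional*, defaults to `"rms_norm"`):
+            The normalization used inside each encoder block; this implementation supports `"rms_norm"` and `"layer_norm"`.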
+ qk_normalization (`bool`, *optional*, defaults to `True`): + Whether to normalize the queries and keys in the self-attention layers. + num_hidden_layers (`int`, *optional*, defaults to 48): + Number of hidden layers in the Transformer encoder. + use_flash_attn (`bool`, *optional*, defaults to `True`): + Whether to use flash attention mechanism. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-6): + The epsilon used by the layer normalization layers. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + drop_path_rate (`float`, *optional*, defaults to 0.0): + Dropout rate for stochastic depth. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 0.1): + A factor for layer scale. + """ + + model_type = 'intern_vit_6b' + + def __init__( + self, + num_channels=3, + patch_size=14, + image_size=224, + qkv_bias=False, + hidden_size=3200, + num_attention_heads=25, + intermediate_size=12800, + qk_normalization=True, + num_hidden_layers=48, + use_flash_attn=True, + hidden_act='gelu', + norm_type='rms_norm', + layer_norm_eps=1e-6, + dropout=0.0, + drop_path_rate=0.0, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=0.1, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.dropout = dropout + self.drop_path_rate = drop_path_rate + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + self.norm_type = norm_type + self.qkv_bias = qkv_bias + self.qk_normalization = qk_normalization + self.use_flash_attn = use_flash_attn + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + if 'vision_config' in config_dict: + config_dict = config_dict['vision_config'] + + if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
+ ) + + return cls.from_dict(config_dict, **kwargs) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_internvl_chat.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_internvl_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..866691793c82090434bffcc8e147a1c2c5a22f7b --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_internvl_chat.py @@ -0,0 +1,106 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2024 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +import copy + +from internvl.model.internlm2.configuration_internlm2 import InternLM2Config +from internvl.model.phi3.configuration_phi3 import Phi3Config +from internvl.model.llama.configuration_llama import LlamaConfig +from transformers import AutoConfig, Qwen2Config + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +from .configuration_intern_vit import InternVisionConfig + +logger = logging.get_logger(__name__) + + +class InternVLChatConfig(PretrainedConfig): + model_type = 'internvl_chat' + is_composition = True + + def __init__( + self, + vision_config=None, + llm_config=None, + use_backbone_lora=0, + use_llm_lora=0, + pad2square=False, + select_layer=-1, + force_image_size=None, + downsample_ratio=0.5, + template=None, + dynamic_image_size=False, + use_thumbnail=False, + ps_version='v1', + min_dynamic_patch=1, + max_dynamic_patch=6, + **kwargs): + super().__init__(**kwargs) + + if vision_config is None: + vision_config = {} + logger.info('vision_config is None. Initializing the InternVisionConfig with default values.') + + if llm_config is None: + llm_config = {} + logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).') + + self.vision_config = InternVisionConfig(**vision_config) + if llm_config['architectures'][0] == 'LlamaForCausalLM': + self.llm_config = LlamaConfig(**llm_config) + elif llm_config['architectures'][0] == 'InternLM2ForCausalLM': + self.llm_config = InternLM2Config(**llm_config) + elif llm_config['architectures'][0] == 'Phi3ForCausalLM': + self.llm_config = Phi3Config(**llm_config) + elif llm_config['architectures'][0] == 'Qwen2ForCausalLM': + self.llm_config = Qwen2Config(**llm_config) + else: + raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0])) + self.use_backbone_lora = use_backbone_lora + self.use_llm_lora = use_llm_lora + self.pad2square = pad2square + self.select_layer = select_layer + self.force_image_size = force_image_size + self.downsample_ratio = downsample_ratio + self.template = template + self.dynamic_image_size = dynamic_image_size + self.use_thumbnail = use_thumbnail + self.ps_version = ps_version # pixel shuffle version + self.min_dynamic_patch = min_dynamic_patch + self.max_dynamic_patch = max_dynamic_patch + + logger.info(f'vision_select_layer: {self.select_layer}') + logger.info(f'ps_version: {self.ps_version}') + logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}') + logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}') + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. 
+ + Returns: + `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, + """ + output = copy.deepcopy(self.__dict__) + output['vision_config'] = self.vision_config.to_dict() + output['llm_config'] = self.llm_config.to_dict() + output['model_type'] = self.__class__.model_type + output['use_backbone_lora'] = self.use_backbone_lora + output['use_llm_lora'] = self.use_llm_lora + output['pad2square'] = self.pad2square + output['select_layer'] = self.select_layer + output['force_image_size'] = self.force_image_size + output['downsample_ratio'] = self.downsample_ratio + output['template'] = self.template + output['dynamic_image_size'] = self.dynamic_image_size + output['use_thumbnail'] = self.use_thumbnail + output['ps_version'] = self.ps_version + output['min_dynamic_patch'] = self.min_dynamic_patch + output['max_dynamic_patch'] = self.max_dynamic_patch + + return output diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/flash_attention.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/flash_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..7cda9bfadd290da35bdd04cccd51725e2d419c2f --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/flash_attention.py @@ -0,0 +1,76 @@ +# https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py +import torch +import torch.nn as nn +from einops import rearrange + +try: # v1 + from flash_attn.flash_attn_interface import \ + flash_attn_unpadded_qkvpacked_func +except: # v2 + from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func + +from flash_attn.bert_padding import pad_input, unpad_input + + +class FlashAttention(nn.Module): + """Implement the scaled dot product attention with softmax. + Arguments + --------- + softmax_scale: The temperature to use for the softmax attention. + (default: 1/sqrt(d_keys) where d_keys is computed at + runtime) + attention_dropout: The dropout rate to apply to the attention + (default: 0.0) + """ + + def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None): + super().__init__() + self.softmax_scale = softmax_scale + self.dropout_p = attention_dropout + + def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None, + max_s=None, need_weights=False): + """Implements the multihead softmax attention. + Arguments + --------- + qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None + if unpadded: (nnz, 3, h, d) + key_padding_mask: a bool tensor of shape (B, S) + """ + assert not need_weights + assert qkv.dtype in [torch.float16, torch.bfloat16] + assert qkv.is_cuda + + if cu_seqlens is None: + batch_size = qkv.shape[0] + seqlen = qkv.shape[1] + if key_padding_mask is None: + qkv = rearrange(qkv, 'b s ... -> (b s) ...') + max_s = seqlen + cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, + device=qkv.device) + output = flash_attn_unpadded_qkvpacked_func( + qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + output = rearrange(output, '(b s) ... 
-> b s ...', b=batch_size) + else: + nheads = qkv.shape[-2] + x = rearrange(qkv, 'b s three h d -> b s (three h d)') + x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask) + x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads) + output_unpad = flash_attn_unpadded_qkvpacked_func( + x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), + indices, batch_size, seqlen), + 'b s (h d) -> b s h d', h=nheads) + else: + assert max_s is not None + output = flash_attn_unpadded_qkvpacked_func( + qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + + return output, None diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_intern_vit.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_intern_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..5d875f4b30e6b476d1555687f29129c94940a4f2 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_intern_vit.py @@ -0,0 +1,362 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2024 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +from typing import Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from einops import rearrange +from timm.models.layers import DropPath +from torch import nn +from transformers.activations import ACT2FN +from transformers.modeling_outputs import (BaseModelOutput, + BaseModelOutputWithPooling) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import logging + +from .configuration_intern_vit import InternVisionConfig + +try: + from .flash_attention import FlashAttention + has_flash_attn = True +except: + print('FlashAttention is not installed.') + has_flash_attn = False + +logger = logging.get_logger(__name__) + + +class InternRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +try: + from apex.normalization import FusedRMSNorm + + InternRMSNorm = FusedRMSNorm # noqa + + logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm') +except ImportError: + # using the normal InternRMSNorm + pass +except Exception: + logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm') + pass + + +NORM2FN = { + 'rms_norm': InternRMSNorm, + 'layer_norm': nn.LayerNorm, +} + + +class InternVisionEmbeddings(nn.Module): + def __init__(self, config: InternVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter( + torch.randn(1, 1, self.embed_dim), + ) + + self.patch_embedding = nn.Conv2d( + in_channels=3, out_channels=self.embed_dim, 
kernel_size=self.patch_size, stride=self.patch_size + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + + self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) + + def _get_pos_embed(self, pos_embed, H, W): + target_dtype = pos_embed.dtype + pos_embed = pos_embed.float().reshape( + 1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2) + pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \ + reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype) + return pos_embed + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values) # shape = [*, channel, width, height] + batch_size, _, height, width = patch_embeds.shape + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + position_embedding = torch.cat([ + self.position_embedding[:, :1, :], + self._get_pos_embed(self.position_embedding[:, 1:, :], height, width) + ], dim=1) + embeddings = embeddings + position_embedding.to(target_dtype) + return embeddings + + +class InternAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: InternVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.use_flash_attn = config.use_flash_attn and has_flash_attn + if config.use_flash_attn and not has_flash_attn: + print('Warning: Flash Attention is not available, use_flash_attn is set to False.') + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:' + f' {self.num_heads}).' 
+ ) + + self.scale = self.head_dim ** -0.5 + self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias) + self.attn_drop = nn.Dropout(config.attention_dropout) + self.proj_drop = nn.Dropout(config.dropout) + + self.qk_normalization = config.qk_normalization + + if self.qk_normalization: + self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) + self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) + + if self.use_flash_attn: + self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout) + self.proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _naive_attn(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + if self.qk_normalization: + B_, H_, N_, D_ = q.shape + q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2) + k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2) + + attn = ((q * self.scale) @ k.transpose(-2, -1)) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def _flash_attn(self, x, key_padding_mask=None, need_weights=False): + qkv = self.qkv(x) + qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads) + + if self.qk_normalization: + q, k, v = qkv.unbind(2) + q = self.q_norm(q.flatten(-2, -1)).view(q.shape) + k = self.k_norm(k.flatten(-2, -1)).view(k.shape) + qkv = torch.stack([q, k, v], dim=2) + + context, _ = self.inner_attn( + qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False + ) + outs = self.proj(rearrange(context, 'b s h d -> b s (h d)')) + outs = self.proj_drop(outs) + return outs + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states) + return x + + +class InternMLP(nn.Module): + def __init__(self, config: InternVisionConfig): + super().__init__() + self.config = config + self.act = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class InternVisionEncoderLayer(nn.Module): + def __init__(self, config: InternVisionConfig, drop_path_rate: float): + super().__init__() + self.embed_dim = config.hidden_size + self.intermediate_size = config.intermediate_size + self.norm_type = config.norm_type + + self.attn = InternAttention(config) + self.mlp = InternMLP(config) + self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps) + self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps) + + self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim)) + self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim)) + self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + + def forward( + self, + hidden_states: torch.Tensor, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]: + """ + Args: + hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)` + """ + hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1) + + hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2) + + return hidden_states + + +class InternVisionEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`InternEncoderLayer`]. + + Args: + config (`InternConfig`): + The corresponding vision configuration for the `InternEncoder`. + """ + + def __init__(self, config: InternVisionConfig): + super().__init__() + self.config = config + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)] + self.layers = nn.ModuleList([ + InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)]) + self.gradient_checkpointing = True + + def forward( + self, + inputs_embeds, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Embedded representation of the inputs. Should be float, not int tokens. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
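+        Returns:
+            A [`BaseModelOutput`] when `return_dict=True`, otherwise a tuple containing the final hidden
+            states and, when `output_hidden_states=True`, the hidden states of every layer.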
+ """ + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + hidden_states = inputs_embeds + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = torch.utils.checkpoint.checkpoint( + encoder_layer, + hidden_states) + else: + layer_outputs = encoder_layer( + hidden_states, + ) + hidden_states = layer_outputs + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states + ) + + +class InternVisionModel(PreTrainedModel): + main_input_name = 'pixel_values' + config_class = InternVisionConfig + _no_split_modules = ['InternVisionEncoderLayer'] + + def __init__(self, config: InternVisionConfig): + super().__init__(config) + self.config = config + + self.embeddings = InternVisionEmbeddings(config) + self.encoder = InternVisionEncoder(config) + + def resize_pos_embeddings(self, old_size, new_size, patch_size): + pos_emb = self.embeddings.position_embedding + _, num_positions, embed_dim = pos_emb.shape + cls_emb = pos_emb[:, :1, :] + pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2) + pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False) + pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1) + pos_emb = torch.cat([cls_emb, pos_emb], dim=1) + self.embeddings.position_embedding = nn.Parameter(pos_emb) + self.embeddings.image_size = new_size + logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size)) + + def get_input_embeddings(self): + return self.embeddings + + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + pixel_embeds: Optional[torch.FloatTensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None and pixel_embeds is None: + raise ValueError('You have to specify pixel_values or pixel_embeds') + + if pixel_embeds is not None: + hidden_states = pixel_embeds + else: + if len(pixel_values.shape) == 4: + hidden_states = self.embeddings(pixel_values) + else: + raise ValueError(f'wrong pixel_values size: {pixel_values.shape}') + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = encoder_outputs.last_hidden_state + pooled_output = last_hidden_state[:, 0, :] + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) diff --git 
a/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_internvl_chat.py b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_internvl_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..d65e8dd7db3a176e08b87929532c62782c04d03e --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_internvl_chat.py @@ -0,0 +1,506 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2024 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +import warnings +from typing import Any, List, Optional, Tuple, Union + +import torch.distributed as dist +import torch.utils.checkpoint +import transformers +from internvl.conversation import get_conv_template +from internvl.model.internlm2.modeling_internlm2 import InternLM2ForCausalLM +from internvl.model.phi3.modeling_phi3 import Phi3ForCausalLM +from internvl.model.llama.modeling_llama import LlamaForCausalLM +from internvl.model.qwen2.modeling_qwen2 import Qwen2ForCausalLM + +from peft import LoraConfig, get_peft_model +from torch import nn +from torch.nn import CrossEntropyLoss +from transformers import (AutoModel, GenerationConfig) +from transformers.modeling_outputs import CausalLMOutputWithPast +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ModelOutput, logging + +from .configuration_internvl_chat import InternVLChatConfig +from .modeling_intern_vit import InternVisionModel +import time +import torch + + +logger = logging.get_logger(__name__) + + +def version_cmp(v1, v2, op='eq'): + import operator + + from packaging import version + op_func = getattr(operator, op) + return op_func(version.parse(v1), version.parse(v2)) + + +def get_attention_rank(visual_token_index, attentions): + + # assert visual_token_index.shape[0] == 1 # batchsize = 1 + # visual_token_index = visual_token_index.view(-1).nonzero() + visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1] + + attentions = [torch.stack(attention, dim=1) for attention in attentions] # [n l heads tokens, tokens] + + + visual_token_importance = 0.0 + for i, attn in enumerate(attentions): + if i == 0: + visual_token_importance += attn[0].sum(dim=0).sum(dim=0)[visual_end_index+1:, visual_start_index:visual_end_index+1].sum(dim=0) + else: + visual_token_importance += attn[0].sum(dim=0).sum(dim=0)[0:1, visual_start_index:visual_end_index+1].sum(dim=0) + + return visual_token_importance + + + +class InternVLChatModel(PreTrainedModel): + config_class = InternVLChatConfig + main_input_name = 'pixel_values' + _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'InternLM2DecoderLayer', + 'Phi3DecoderLayer', 'Qwen2DecoderLayer'] + _supports_flash_attn_2 = True + + def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None): + super().__init__(config) + + assert version_cmp(transformers.__version__, '4.37.0', 'ge') + image_size = config.force_image_size or config.vision_config.image_size + patch_size = config.vision_config.patch_size + self.patch_size = patch_size + self.select_layer = config.select_layer + self.template = config.template + self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2)) + self.downsample_ratio = config.downsample_ratio + self.ps_version = config.ps_version + self.llm_arch_name = config.llm_config.architectures[0] + + 
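+        # The count logged below follows (image_size // patch_size) ** 2 * downsample_ratio ** 2,
+        # e.g. (448 // 14) ** 2 * 0.5 ** 2 = 256 visual tokens per image tile with the InternVL2 defaults.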
logger.info(f'num_image_token: {self.num_image_token}') + logger.info(f'ps_version: {self.ps_version}') + if vision_model is not None: + self.vision_model = vision_model + else: + self.vision_model = InternVisionModel(config.vision_config) + if language_model is not None: + self.language_model = language_model + else: + if config.llm_config.architectures[0] == 'LlamaForCausalLM': + self.language_model = LlamaForCausalLM(config.llm_config) + elif config.llm_config.architectures[0] == 'InternLM2ForCausalLM': + self.language_model = InternLM2ForCausalLM(config.llm_config) + elif config.llm_config.architectures[0] == 'Phi3ForCausalLM': + self.language_model = Phi3ForCausalLM(config.llm_config) + elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM': + self.language_model = Qwen2ForCausalLM(config.llm_config) + else: + raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.') + + vit_hidden_size = config.vision_config.hidden_size + llm_hidden_size = config.llm_config.hidden_size + + self.mlp1 = nn.Sequential( + nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2), + nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size), + nn.GELU(), + nn.Linear(llm_hidden_size, llm_hidden_size) + ) + + self.img_context_token_id = None + self.conv_template = get_conv_template(self.template) + self.system_message = self.conv_template.system_message + self.num_samples = 0 + + if config.use_backbone_lora: + self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora) + + if config.use_llm_lora: + self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora) + + def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05): + lora_config = LoraConfig( + r=r, + target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'], + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + ) + self.vision_model = get_peft_model(self.vision_model, lora_config) + self.vision_model.print_trainable_parameters() + + def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05): + # Determine the target modules based on the architecture of the language model + if self.llm_arch_name == 'InternLM2ForCausalLM': + target_modules = ['attention.wqkv', 'attention.wo', 'feed_forward.w1', 'feed_forward.w2', 'feed_forward.w3'] + elif self.llm_arch_name == 'Phi3ForCausalLM': + target_modules = ['mlp.down_proj', 'mlp.gate_up_proj', 'self_attn.o_proj', 'self_attn.qkv_proj'] + elif self.llm_arch_name in ['Qwen2ForCausalLM', 'LlamaForCausalLM']: + target_modules = ['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj', + 'mlp.gate_proj', 'mlp.down_proj', 'mlp.up_proj'] + else: + raise NotImplemented + lora_config = LoraConfig( + r=r, + target_modules=target_modules, + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + task_type='CAUSAL_LM' + ) + self.language_model = get_peft_model(self.language_model, lora_config) + self.language_model.enable_input_require_grads() + self.language_model.print_trainable_parameters() + + def forward( + self, + pixel_values: torch.FloatTensor, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + image_flags: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = 
None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + image_flags = image_flags.squeeze(-1) + input_embeds = self.language_model.get_input_embeddings()(input_ids).clone() + + vit_embeds = self.extract_feature(pixel_values) + vit_embeds = vit_embeds[image_flags == 1] + vit_batch_size = pixel_values.shape[0] + + B, N, C = input_embeds.shape + input_embeds = input_embeds.reshape(B * N, C) + + if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0: + print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}') + + input_ids = input_ids.reshape(B * N) + selected = (input_ids == self.img_context_token_id) + try: + input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C) + ignore_flag = False + except Exception as e: + vit_embeds = vit_embeds.reshape(-1, C) + print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, ' + f'vit_embeds.shape={vit_embeds.shape}') + n_token = selected.sum() + input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token] + ignore_flag = True + + input_embeds = input_embeds.reshape(B, N, C) + + outputs = self.language_model( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + logits = outputs.logits + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + if ignore_flag: + loss = loss * 0.0 + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def pixel_shuffle(self, x, scale_factor=0.5): + n, w, h, c = x.size() + # N, W, H, C --> N, W, H * scale, C // scale + x = x.view(n, w, int(h * scale_factor), int(c / scale_factor)) + # N, W, H * scale, C // scale --> N, H * scale, W, C // scale + x = x.permute(0, 2, 1, 3).contiguous() + # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2) + x = x.view(n, int(h * scale_factor), int(w * scale_factor), + int(c / (scale_factor * scale_factor))) + if self.ps_version == 'v1': + warnings.warn("In ps_version 'v1', the height and width have not been swapped back, " + 'which results in a transposed image.') + else: + x = x.permute(0, 2, 1, 3).contiguous() + return x + + def extract_feature(self, pixel_values): + if self.select_layer == -1: + vit_embeds = self.vision_model( + pixel_values=pixel_values, + output_hidden_states=False, + return_dict=True).last_hidden_state + else: + vit_embeds = self.vision_model( + pixel_values=pixel_values, + output_hidden_states=True, + return_dict=True).hidden_states[self.select_layer] + vit_embeds = vit_embeds[:, 1:, :] + + h = w = 
int(vit_embeds.shape[1] ** 0.5)
+        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
+        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
+        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
+        vit_embeds = self.mlp1(vit_embeds)
+        return vit_embeds
+
+    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
+                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
+                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
+        if history is not None or return_history:
+            print('Now multi-turn chat is not supported in batch_chat.')
+            raise NotImplementedError
+
+        if image_counts is not None:
+            num_patches_list = image_counts
+            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')
+
+        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
+        self.img_context_token_id = img_context_token_id
+
+        if verbose and pixel_values is not None:
+            image_bs = pixel_values.shape[0]
+            print(f'dynamic ViT batch size: {image_bs}')
+
+        queries = []
+        for idx, num_patches in enumerate(num_patches_list):
+            question = questions[idx]
+            if pixel_values is not None and '<image>' not in question:
+                question = '<image>\n' + question
+            template = get_conv_template(self.template)
+            template.append_message(template.roles[0], question)
+            template.append_message(template.roles[1], None)
+            query = template.get_prompt()
+
+            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
+            query = query.replace('<image>', image_tokens, 1)
+            queries.append(query)
+
+        tokenizer.padding_side = 'left'
+        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
+        input_ids = model_inputs['input_ids'].cuda()
+        attention_mask = model_inputs['attention_mask'].cuda()
+        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
+        generation_config['eos_token_id'] = eos_token_id
+        generation_output = self.generate(
+            pixel_values=pixel_values,
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            **generation_config
+        )
+        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
+        responses = [response.split(template.sep)[0].strip() for response in responses]
+        return responses
+
+    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
+             num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
+             verbose=False, large_model=False):
+
+        if history is None and pixel_values is not None and '<image>' not in question:
+            question = '<image>\n' + question
+
+        if num_patches_list is None:
+            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
+        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
+
+        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
+        self.img_context_token_id = img_context_token_id
+
+        template = get_conv_template(self.template)
+        template.system_message = self.system_message
+        eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
+
+        history = [] if history is None else history
+        for (old_question, old_answer) in history:
+            template.append_message(template.roles[0], old_question)
+            template.append_message(template.roles[1], old_answer)
+        template.append_message(template.roles[0], question)
+        template.append_message(template.roles[1], None)
+        query = template.get_prompt()
+
+        if verbose and pixel_values is not None:
+            image_bs = pixel_values.shape[0]
+
print(f'dynamic ViT batch size: {image_bs}') + + for num_patches in num_patches_list: + image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN + query = query.replace('', image_tokens, 1) + + model_inputs = tokenizer(query, return_tensors='pt') + input_ids = model_inputs['input_ids'].cuda() + attention_mask = model_inputs['attention_mask'].cuda() + generation_config['eos_token_id'] = eos_token_id + + + visual_token_index = (input_ids == self.img_context_token_id) + + visual_token_index = visual_token_index.view(-1).nonzero() + visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1] + + + if large_model: + generation_config["visual_token_index"] = (visual_start_index, visual_end_index) + assert (visual_end_index - visual_start_index + 1) == generation_config["visual_token_importance"].shape[0] + else: + generation_config['consistency_config']["visual_token_index"] = (visual_start_index, visual_end_index) + + + if not large_model: + generation_output, consistency_score, visual_token_importance = self.generate( + pixel_values=pixel_values, + input_ids=input_ids, + attention_mask=attention_mask, + large_model=large_model, + **generation_config + ) + + response = tokenizer.batch_decode(generation_output['sequences'], skip_special_tokens=True)[0] + response = response.split(template.sep)[0].strip() + history.append((question, response)) + + if return_history: + return response, history + else: + query_to_print = query.replace(IMG_CONTEXT_TOKEN, '') + query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '') + if verbose: + print(query_to_print, response) + return response, generation_output.scores, consistency_score, visual_token_importance + + + else: + generation_output = self.generate( + pixel_values=pixel_values, + input_ids=input_ids, + attention_mask=attention_mask, + large_model=large_model, + **generation_config + ) + + response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0] + response = response.split(template.sep)[0].strip() + history.append((question, response)) + if return_history: + return response, history + else: + query_to_print = query.replace(IMG_CONTEXT_TOKEN, '') + query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '') + if verbose: + print(query_to_print, response) + return response + + + + @torch.no_grad() + def generate( + self, + pixel_values: Optional[torch.FloatTensor] = None, + input_ids: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + visual_features: Optional[torch.FloatTensor] = None, + generation_config: Optional[GenerationConfig] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + large_model: Optional[bool] = False, + **generate_kwargs, + ) -> torch.LongTensor: + + assert self.img_context_token_id is not None + if pixel_values is not None: + if visual_features is not None: + vit_embeds = visual_features + else: + vit_embeds = self.extract_feature(pixel_values) + input_embeds = self.language_model.get_input_embeddings()(input_ids) + B, N, C = input_embeds.shape + input_embeds = input_embeds.reshape(B * N, C) + + input_ids = input_ids.reshape(B * N) + selected = (input_ids == self.img_context_token_id) + assert selected.sum() != 0 + input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device) + + input_embeds = input_embeds.reshape(B, N, C) + else: + input_embeds = 
self.language_model.get_input_embeddings()(input_ids) + + + + if not large_model: + consistency_generate_kwargs = generate_kwargs.pop('consistency_config') + generate_kwargs['visual_token_index'] = consistency_generate_kwargs['visual_token_index'] + outputs = self.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=generation_config, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + use_cache=True, + **generate_kwargs, + ) + + visual_token_importance = outputs.aggregated_viusal_token_attention + consistency_generate_kwargs['visual_token_importance'] = visual_token_importance + + new_input_ids_ = outputs['sequences'][0] + new_token_num = outputs['sequences'].shape[-1] + new_input_embedding = torch.concatenate((input_embeds, self.language_model.get_input_embeddings()(new_input_ids_).unsqueeze(0)), dim=1) + new_attention_mask = torch.concatenate((attention_mask, torch.ones((1, new_input_ids_.shape[0]), device=attention_mask.device, dtype=attention_mask.dtype)), dim=-1) + new_input_ids = torch.concatenate((input_ids, new_input_ids_), dim=-1) + consistency_generate_kwargs['inputs_embeds'] = new_input_embedding + consistency_generate_kwargs['attention_mask'] = new_attention_mask + consistency_generate_kwargs['output_scores'] = False + consistency_generate_kwargs['output_attentions'] = False + consistency_generate_kwargs = self.language_model._get_initial_cache_position(new_input_ids, consistency_generate_kwargs) + + model_inputs = self.language_model.prepare_inputs_for_generation(new_input_ids, **consistency_generate_kwargs) + consistency_output = self.language_model.forward(**model_inputs, return_dict=True) + consistency_score = torch.gather(consistency_output['logits'][:, -new_token_num-1:-1, :].softmax(dim=-1), index=new_input_ids_[None, :, None], dim=-1) + + consistency_score = torch.prod(consistency_score) + + + + return outputs, consistency_score, visual_token_importance + + + + else: + return self.language_model.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + generation_config=generation_config, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + use_cache=True, + **generate_kwargs, + ) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/llama/__init__.py b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..41febe935576e984146d660895148fd04af43736 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/__init__.py @@ -0,0 +1,116 @@ +# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
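+# Lazy-import scaffolding mirroring transformers' LLaMA package __init__: tokenizer, modeling and flax
+# symbols below are only resolved when their optional dependencies (sentencepiece, tokenizers, torch, flax)
+# are actually available.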
+from typing import TYPE_CHECKING + +from transformers.utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_sentencepiece_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_llama": ["LlamaConfig"], +} + +try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_llama"] = ["LlamaTokenizer"] + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_llama"] = [ + "LlamaForCausalLM", + "LlamaModel", + "LlamaPreTrainedModel", + "LlamaForSequenceClassification", + "LlamaForQuestionAnswering", + "LlamaForTokenClassification", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_llama"] = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"] + + +if TYPE_CHECKING: + from .configuration_llama import LlamaConfig + + try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_llama import LlamaTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_llama_fast import LlamaTokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_llama import ( + LlamaForCausalLM, + LlamaForQuestionAnswering, + LlamaForSequenceClassification, + LlamaForTokenClassification, + LlamaModel, + LlamaPreTrainedModel, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/llama/configuration_llama.py b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/configuration_llama.py new file mode 100644 index 0000000000000000000000000000000000000000..2b4f4db62d84d34e2fb8541ababbfe17280e664e --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/configuration_llama.py @@ -0,0 +1,203 @@ +# coding=utf-8 +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""LLaMA model configuration""" + +from transformers.configuration_utils import PretrainedConfig +from transformers.modeling_rope_utils import rope_config_validation + + +class LlamaConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the LLaMA-7B. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 32000): + Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`LlamaModel`] + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 11008): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer decoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer decoder. + num_key_value_heads (`int`, *optional*): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to + `num_attention_heads`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens, + Llama 2 up to 4096, CodeLlama up to 16384. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + pad_token_id (`int`, *optional*): + Padding token id. + bos_token_id (`int`, *optional*, defaults to 1): + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 2): + End of stream token id. 
+ pretraining_tp (`int`, *optional*, defaults to 1): + Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this + document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to + understand more about it. This value is necessary to ensure exact reproducibility of the pretraining + results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232). + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether to tie weight embeddings + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. + rope_scaling (`Dict`, *optional*): + Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type + and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value + accordingly. + Expected contents: + `rope_type` (`str`): + The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', + 'llama3'], with 'default' being the original RoPE implementation. + `factor` (`float`, *optional*): + Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In + most scaling types, a `factor` of x will enable the model to handle sequences of length x * + original maximum pre-trained length. + `original_max_position_embeddings` (`int`, *optional*): + Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during + pretraining. + `attention_factor` (`float`, *optional*): + Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention + computation. If unspecified, it defaults to value recommended by the implementation, using the + `factor` field to infer the suggested value. + `beta_fast` (`float`, *optional*): + Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear + ramp function. If unspecified, it defaults to 32. + `beta_slow` (`float`, *optional*): + Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear + ramp function. If unspecified, it defaults to 1. + `short_factor` (`List[float]`, *optional*): + Only used with 'longrope'. The scaling factor to be applied to short contexts (< + `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden + size divided by the number of attention heads divided by 2 + `long_factor` (`List[float]`, *optional*): + Only used with 'longrope'. The scaling factor to be applied to long contexts (< + `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden + size divided by the number of attention heads divided by 2 + `low_freq_factor` (`float`, *optional*): + Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE + `high_freq_factor` (`float`, *optional*): + Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE + attention_bias (`bool`, *optional*, defaults to `False`): + Whether to use a bias in the query, key, value and output projection layers during self-attention. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + mlp_bias (`bool`, *optional*, defaults to `False`): + Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers. 
+ + ```python + >>> from transformers import LlamaModel, LlamaConfig + + >>> # Initializing a LLaMA llama-7b style configuration + >>> configuration = LlamaConfig() + + >>> # Initializing a model from the llama-7b style configuration + >>> model = LlamaModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "llama" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=32000, + hidden_size=4096, + intermediate_size=11008, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=None, + hidden_act="silu", + max_position_embeddings=2048, + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + pad_token_id=None, + bos_token_id=1, + eos_token_id=2, + pretraining_tp=1, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + attention_bias=False, + attention_dropout=0.0, + mlp_bias=False, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.pretraining_tp = pretraining_tp + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + self.mlp_bias = mlp_bias + + # Validate the correctness of rotary position embeddings parameters + # BC: if there is a 'type' field, move it to 'rope_type'. + if self.rope_scaling is not None and "type" in self.rope_scaling: + self.rope_scaling["rope_type"] = self.rope_scaling["type"] + rope_config_validation(self) + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/llama/convert_llama_weights_to_hf.py b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/convert_llama_weights_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..a75ce5245eee60a170e24e480841ee8e268d98db --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/convert_llama_weights_to_hf.py @@ -0,0 +1,479 @@ +# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
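One behaviour of the constructor above that the docstring example does not show: a legacy `rope_scaling` dict that still uses the old `type` key is mirrored into `rope_type` before `rope_config_validation` runs. A small hedged sketch, assuming the vendored import path implied by this diff's layout and made-up model sizes:

```python
from internvl.model.llama.configuration_llama import LlamaConfig  # import path assumed from this diff's layout

config = LlamaConfig(
    hidden_size=2048,
    intermediate_size=5632,
    num_hidden_layers=16,
    num_attention_heads=16,
    max_position_embeddings=4096,
    rope_scaling={"type": "linear", "factor": 2.0},  # legacy 'type' key
)
# __init__ copies 'type' into 'rope_type' before validation, so downstream code can rely on 'rope_type'.
print(config.rope_scaling["rope_type"])  # -> "linear"
```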
+import argparse +import gc +import json +import os +import shutil +import warnings +from typing import List + +import torch + +from transformers import GenerationConfig, LlamaConfig, LlamaForCausalLM, LlamaTokenizer, PreTrainedTokenizerFast +from transformers.convert_slow_tokenizer import TikTokenConverter + + +try: + from transformers import LlamaTokenizerFast +except ImportError as e: + warnings.warn(e) + warnings.warn( + "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" + ) + LlamaTokenizerFast = None + +""" +Sample usage: + +``` +python src/transformers/models/llama/convert_llama_weights_to_hf.py \ + --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path +``` + +Thereafter, models can be loaded via: + +```py +from transformers import LlamaForCausalLM, LlamaTokenizer + +model = LlamaForCausalLM.from_pretrained("/output/path") +tokenizer = LlamaTokenizer.from_pretrained("/output/path") +``` + +Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions +come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). + +If you want you tokenizer to add a bos automatically you should update the tokenizer._tokenizers.post_processor: + +```py +from tokenizers import processors +bos = "<|begin_of_text|>" +tokenizer._tokenizers.post_processor = processors.Sequence( + [ + processors.ByteLevel(trim_offsets=False), + processors.TemplateProcessing( + single=f"{bos}:0 $A:0", + pair=f"{bos}:0 $A:0 {bos}:1 $B:1", + special_tokens=[ + (bos, tokenizer.encode(bos)), + ], + ), + ] +) +``` +""" + +NUM_SHARDS = { + "7B": 1, + "8B": 1, + "8Bf": 1, + "7Bf": 1, + "13B": 2, + "13Bf": 2, + "34B": 4, + "30B": 4, + "65B": 8, + "70B": 8, + "70Bf": 8, + "405B": 8, + "405B-MP16": 16, +} + +CONTEXT_LENGTH_FOR_VERSION = {"3.1": 131072, "3": 8192, "2": 4096, "1": 2048} + + +def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): + return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) + + +def read_json(path): + with open(path, "r") as f: + return json.load(f) + + +def write_json(text, path): + with open(path, "w") as f: + json.dump(text, f) + + +def write_model( + model_path, + input_base_path, + model_size=None, + safe_serialization=True, + llama_version="1", + vocab_size=None, + num_shards=None, + instruct=False, +): + os.makedirs(model_path, exist_ok=True) + tmp_model_path = os.path.join(model_path, "tmp") + os.makedirs(tmp_model_path, exist_ok=True) + + params = read_json(os.path.join(input_base_path, "params.json")) + num_shards = NUM_SHARDS[model_size] if num_shards is None else num_shards + params = params.get("model", params) + n_layers = params["n_layers"] + n_heads = params["n_heads"] + n_heads_per_shard = n_heads // num_shards + dim = params["dim"] + dims_per_head = dim // n_heads + base = params.get("rope_theta", 10000.0) + inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) + if base > 10000.0 and float(llama_version) < 3: + max_position_embeddings = 16384 + else: + max_position_embeddings = CONTEXT_LENGTH_FOR_VERSION[llama_version] + + if params.get("n_kv_heads", None) is not None: + num_key_value_heads = params["n_kv_heads"] # for GQA / MQA + num_key_value_heads_per_shard = num_key_value_heads // num_shards + key_value_dim = dims_per_head * num_key_value_heads + else: # 
compatibility with other checkpoints + num_key_value_heads = n_heads + num_key_value_heads_per_shard = n_heads_per_shard + key_value_dim = dim + + # permute for sliced rotary + def permute(w, n_heads, dim1=dim, dim2=dim): + return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) + + print(f"Fetching all parameters from the checkpoint at {input_base_path}.") + # Load weights + if num_shards == 1: + # Not sharded + # (The sharded implementation would also work, but this is simpler.) + loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu") + else: + # Sharded + checkpoint_list = sorted([file for file in os.listdir(input_base_path) if file.endswith(".pth")]) + print("Loading in order:", checkpoint_list) + loaded = [torch.load(os.path.join(input_base_path, file), map_location="cpu") for file in checkpoint_list] + param_count = 0 + index_dict = {"weight_map": {}} + for layer_i in range(n_layers): + filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" + if num_shards == 1: + # Unsharded + state_dict = { + f"model.layers.{layer_i}.self_attn.q_proj.weight": permute( + loaded[f"layers.{layer_i}.attention.wq.weight"], n_heads=n_heads + ), + f"model.layers.{layer_i}.self_attn.k_proj.weight": permute( + loaded[f"layers.{layer_i}.attention.wk.weight"], + n_heads=num_key_value_heads, + dim1=key_value_dim, + ), + f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"], + f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"], + f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"], + f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"], + f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"], + f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"], + f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"], + } + else: + # Sharded + # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share + # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is + # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
+ + state_dict = { + f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][ + f"layers.{layer_i}.attention_norm.weight" + ].clone(), + f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][ + f"layers.{layer_i}.ffn_norm.weight" + ].clone(), + } + state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute( + torch.cat( + [ + loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim) + for i in range(len(loaded)) + ], + dim=0, + ).reshape(dim, dim), + n_heads=n_heads, + ) + state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute( + torch.cat( + [ + loaded[i][f"layers.{layer_i}.attention.wk.weight"].view( + num_key_value_heads_per_shard, dims_per_head, dim + ) + for i in range(len(loaded)) + ], + dim=0, + ).reshape(key_value_dim, dim), + num_key_value_heads, + key_value_dim, + dim, + ) + state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat( + [ + loaded[i][f"layers.{layer_i}.attention.wv.weight"].view( + num_key_value_heads_per_shard, dims_per_head, dim + ) + for i in range(len(loaded)) + ], + dim=0, + ).reshape(key_value_dim, dim) + + state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat( + [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(len(loaded))], dim=1 + ) + state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat( + [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(len(loaded))], dim=0 + ) + state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( + [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(len(loaded))], dim=1 + ) + state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat( + [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(len(loaded))], dim=0 + ) + + state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + + filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" + if num_shards == 1: + # Unsharded + state_dict = { + "model.embed_tokens.weight": loaded["tok_embeddings.weight"], + "model.norm.weight": loaded["norm.weight"], + "lm_head.weight": loaded["output.weight"], + } + else: + concat_dim = 0 if llama_version in ["3", "3.1"] else 1 + state_dict = { + "model.norm.weight": loaded[0]["norm.weight"], + "model.embed_tokens.weight": torch.cat( + [loaded[i]["tok_embeddings.weight"] for i in range(len(loaded))], dim=concat_dim + ), + "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(len(loaded))], dim=0), + } + + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + + # Write configs + index_dict["metadata"] = {"total_size": param_count * 2} + write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) + ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 + multiple_of = params["multiple_of"] if "multiple_of" in params else 256 + + if llama_version in ["3", "3.1"]: + bos_token_id = 128000 + + if instruct: + eos_token_id = [128001, 128008, 128009] + else: + eos_token_id = 128001 + else: + bos_token_id = 1 + eos_token_id = 2 + + config = LlamaConfig( + hidden_size=dim, + intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, 
multiple_of), + num_attention_heads=params["n_heads"], + num_hidden_layers=params["n_layers"], + rms_norm_eps=params["norm_eps"], + num_key_value_heads=num_key_value_heads, + vocab_size=vocab_size, + rope_theta=base, + max_position_embeddings=max_position_embeddings, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + ) + config.save_pretrained(tmp_model_path) + + if instruct: + generation_config = GenerationConfig( + do_sample=True, + temperature=0.6, + top_p=0.9, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + ) + generation_config.save_pretrained(tmp_model_path) + + # Make space so we can load the model properly now. + del state_dict + del loaded + gc.collect() + + print("Loading the checkpoint in a Llama model.") + model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True) + # Avoid saving this as part of the config. + del model.config._name_or_path + model.config.torch_dtype = torch.float16 + print("Saving in the Transformers format.") + model.save_pretrained(model_path, safe_serialization=safe_serialization) + shutil.rmtree(tmp_model_path, ignore_errors=True) + + +class Llama3Converter(TikTokenConverter): + def __init__(self, vocab_file, special_tokens=None, instruct=False, model_max_length=None, **kwargs): + super().__init__(vocab_file, **kwargs) + tokenizer = self.converted() + chat_template = ( + "{% set loop_messages = messages %}" + "{% for message in loop_messages %}" + "{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}" + "{% if loop.index0 == 0 %}" + "{% set content = bos_token + content %}" + "{% endif %}" + "{{ content }}" + "{% endfor %}" + "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}" + ) + tokenizer.add_special_tokens(special_tokens) + + self.tokenizer = PreTrainedTokenizerFast( + tokenizer_object=tokenizer, + bos_token="<|begin_of_text|>", + eos_token="<|end_of_text|>" if not instruct else "<|eot_id|>", + chat_template=chat_template if instruct else None, + model_input_names=["input_ids", "attention_mask"], + model_max_length=model_max_length, + ) + + +def write_tokenizer(tokenizer_path, input_tokenizer_path, llama_version="2", special_tokens=None, instruct=False): + tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast + if llama_version in ["3", "3.1"]: + tokenizer = Llama3Converter( + input_tokenizer_path, special_tokens, instruct, model_max_length=CONTEXT_LENGTH_FOR_VERSION[llama_version] + ).tokenizer + else: + tokenizer = tokenizer_class(input_tokenizer_path) + print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.") + tokenizer.save_pretrained(tokenizer_path) + return tokenizer + + +DEFAULT_LLAMA_SPECIAL_TOKENS = { + "3": [ + "<|begin_of_text|>", + "<|end_of_text|>", + "<|reserved_special_token_0|>", + "<|reserved_special_token_1|>", + "<|reserved_special_token_2|>", + "<|reserved_special_token_3|>", + "<|start_header_id|>", + "<|end_header_id|>", + "<|reserved_special_token_4|>", + "<|eot_id|>", # end of turn + ] + + [f"<|reserved_special_token_{i}|>" for i in range(5, 256 - 5)], + "3.1": [ + "<|begin_of_text|>", + "<|end_of_text|>", + "<|reserved_special_token_0|>", + "<|reserved_special_token_1|>", + "<|finetune_right_pad_id|>", + "<|reserved_special_token_2|>", + "<|start_header_id|>", + "<|end_header_id|>", + "<|eom_id|>", # end of message + "<|eot_id|>", # end of turn + "<|python_tag|>", + ] + + [f"<|reserved_special_token_{i}|>" for i in range(3, 256 - 
8)], +} + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_dir", + help="Location of LLaMA weights, which contains tokenizer.model and model folders", + ) + parser.add_argument( + "--model_size", + default=None, + help="'f' Deprecated in favor of `num_shards`: models correspond to the finetuned versions, and are specific to the Llama2 official release. For more details on Llama2, checkout the original repo: https://huggingface.co/meta-llama", + ) + parser.add_argument( + "--output_dir", + help="Location to write HF model and tokenizer", + ) + parser.add_argument( + "--safe_serialization", default=True, type=bool, help="Whether or not to save using `safetensors`." + ) + # Different Llama versions used different default values for max_position_embeddings, hence the need to be able to specify which version is being used. + parser.add_argument( + "--llama_version", + choices=["1", "2", "3", "3.1"], + default="1", + type=str, + help="Version of the Llama model to convert. Currently supports Llama1 and Llama2. Controls the context size", + ) + parser.add_argument( + "--num_shards", + default=None, + type=int, + help="The number of individual shards used for the model. Does not have to be the same as the number of consolidated_xx.pth", + ) + parser.add_argument( + "--special_tokens", + default=None, + type=List[str], + help="The list of special tokens that should be added to the model.", + ) + parser.add_argument( + "--instruct", + default=False, + type=bool, + help="Whether the model is an instruct model or not. Will affect special tokens for llama 3.1.", + ) + args = parser.parse_args() + if args.model_size is None and args.num_shards is None: + raise ValueError("You have to set at least `num_shards` if you are not giving the `model_size`") + if args.special_tokens is None: + # no special tokens by default + args.special_tokens = DEFAULT_LLAMA_SPECIAL_TOKENS.get(str(args.llama_version), []) + + spm_path = os.path.join(args.input_dir, "tokenizer.model") + vocab_size = len( + write_tokenizer( + args.output_dir, + spm_path, + llama_version=args.llama_version, + special_tokens=args.special_tokens, + instruct=args.instruct, + ) + ) + if args.model_size != "tokenizer_only": + write_model( + model_path=args.output_dir, + input_base_path=args.input_dir, + model_size=args.model_size, + safe_serialization=args.safe_serialization, + llama_version=args.llama_version, + vocab_size=vocab_size, + num_shards=args.num_shards, + instruct=args.instruct, + ) + + +if __name__ == "__main__": + main() diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_flax_llama.py b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_flax_llama.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf00b3e7abb3e7d054c74d6c7b97ceb649eaec5 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_flax_llama.py @@ -0,0 +1,750 @@ +# coding=utf-8 +# Copyright 2023 Meta AI, EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Flax LLaMA model.""" + +from functools import partial +from typing import Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax + +from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput +from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring +from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging +from transformers.configuration_llama import LlamaConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "LlamaConfig" +_CHECKPOINT_FOR_DOC = "afmck/testing-llama-tiny" +_REAL_CHECKPOINT_FOR_DOC = "openlm-research/open_llama_3b_v2" + +LLAMA_START_DOCSTRING = r""" + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a Flax Linen + [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a + regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`LlamaConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`, or + `jax.numpy.bfloat16`. + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +LLAMA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): + Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +def create_sinusoidal_positions(num_pos, dim): + inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim)) + freqs = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32") + + emb = np.concatenate((freqs, freqs), axis=-1) + out = np.concatenate((np.sin(emb)[:, None, :], np.cos(emb)[:, None, :]), axis=-1) + return jnp.array(out[:, :, :num_pos]) + + +def rotate_half(tensor): + """Rotates half the hidden dims of the input.""" + rotate_half_tensor = jnp.concatenate( + (-tensor[..., tensor.shape[-1] // 2 :], tensor[..., : tensor.shape[-1] // 2]), axis=-1 + ) + return rotate_half_tensor + + +def apply_rotary_pos_emb(tensor, sin_pos, cos_pos): + return (tensor * cos_pos) + (rotate_half(tensor) * sin_pos) + + +class FlaxLlamaRMSNorm(nn.Module): + config: LlamaConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.epsilon = self.config.rms_norm_eps + self.weight = self.param("weight", lambda _, shape: jnp.ones(shape), self.config.hidden_size) + + def __call__(self, hidden_states): + variance = jnp.asarray(hidden_states, dtype=jnp.float32) + variance = jnp.power(variance, 2) + variance = variance.mean(-1, keepdims=True) + # use `jax.numpy.sqrt` as `jax.lax.rsqrt` does not match `torch.rsqrt` + hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon) + + return self.weight * jnp.asarray(hidden_states, dtype=self.dtype) + + +class FlaxLlamaRotaryEmbedding(nn.Module): + config: LlamaConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + head_dim = self.config.hidden_size // self.config.num_attention_heads + self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim) + + def __call__(self, key, query, position_ids): + sincos = self.sincos[position_ids] + sin_pos, cos_pos = jnp.split(sincos, 2, axis=-1) + + key = apply_rotary_pos_emb(key, sin_pos, cos_pos) + query = apply_rotary_pos_emb(query, sin_pos, cos_pos) + + key = jnp.asarray(key, dtype=self.dtype) + query = jnp.asarray(query, dtype=self.dtype) + + return key, query + + +class FlaxLlamaAttention(nn.Module): + config: LlamaConfig + dtype: jnp.dtype = jnp.float32 + causal: bool = True + is_cross_attention: bool = False + + def setup(self): + config = self.config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.attention_softmax_in_fp32 = self.dtype is not jnp.float32 + + dense = partial( + nn.Dense, + use_bias=config.attention_bias, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + + self.q_proj = dense(self.num_heads * self.head_dim) + self.k_proj = dense(self.num_key_value_heads * self.head_dim) + self.v_proj = dense(self.num_key_value_heads * self.head_dim) + self.o_proj = dense(self.embed_dim) + if (self.head_dim * self.num_heads) != self.embed_dim: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.embed_dim}" + f" and `num_heads`: {self.num_heads})." 
+ ) + + self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool") + self.rotary_emb = FlaxLlamaRotaryEmbedding(config, dtype=self.dtype) + + def _split_heads(self, hidden_states, num_heads): + return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim)) + + def _merge_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) + + @nn.compact + # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoSelfAttention._concatenate_to_cache + def _concatenate_to_cache(self, key, value, query, attention_mask): + """ + This function takes projected key, value states from a single input token and concatenates the states to cached + states from previous steps. This function is slighly adapted from the official Flax repository: + https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 + """ + # detect if we're initializing by absence of existing cache data. + is_initialized = self.has_variable("cache", "cached_key") + cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) + cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) + cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) + + if is_initialized: + *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape + # update key, value caches with our new 1d spatial slices + cur_index = cache_index.value + indices = (0,) * len(batch_dims) + (cur_index, 0, 0) + key = lax.dynamic_update_slice(cached_key.value, key, indices) + value = lax.dynamic_update_slice(cached_value.value, value, indices) + cached_key.value = key + cached_value.value = value + num_updated_cache_vectors = query.shape[1] + cache_index.value = cache_index.value + num_updated_cache_vectors + # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. 
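+ # Illustrative example (values assumed): with max_length=5, cur_index=2 and one newly written
+ # position, `jnp.arange(max_length) < cur_index + num_updated_cache_vectors` evaluates to
+ # [True, True, True, False, False]: the query sees the two cached keys plus its own slot and
+ # ignores the still-empty tail of the statically allocated cache.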
+ pad_mask = jnp.broadcast_to( + jnp.arange(max_length) < cur_index + num_updated_cache_vectors, + tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), + ) + attention_mask = combine_masks(pad_mask, attention_mask) + return key, value, attention_mask + + def __call__( + self, + hidden_states, + attention_mask, + position_ids, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query, self.num_heads) + key = self._split_heads(key, self.num_key_value_heads) + value = self._split_heads(value, self.num_key_value_heads) + + key, query = self.rotary_emb(key, query, position_ids) + + query_length, key_length = query.shape[1], key.shape[1] + + if self.has_variable("cache", "cached_key"): + mask_shift = self.variables["cache"]["cache_index"] + max_decoder_length = self.variables["cache"]["cached_key"].shape[1] + causal_mask = lax.dynamic_slice( + self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) + ) + else: + causal_mask = self.causal_mask[:, :, :query_length, :key_length] + + batch_size = hidden_states.shape[0] + causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) + + attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) + attention_mask = combine_masks(attention_mask, causal_mask) + + dropout_rng = None + if not deterministic and self.config.attention_dropout > 0.0: + dropout_rng = self.make_rng("dropout") + + # During fast autoregressive decoding, we feed one position at a time, + # and cache the keys and values step by step. + if self.has_variable("cache", "cached_key") or init_cache: + key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) + + key = jnp.repeat(key, self.num_key_value_groups, axis=2) + value = jnp.repeat(value, self.num_key_value_groups, axis=2) + + # transform boolean mask into float mask + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + + # usual dot product attention + attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype + attn_weights = dot_product_attention_weights( + query, + key, + bias=attention_bias, + dropout_rng=dropout_rng, + dropout_rate=self.config.attention_dropout, + deterministic=deterministic, + dtype=attention_dtype, + ) + + if self.attention_softmax_in_fp32: + attn_weights = attn_weights.astype(self.dtype) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) + attn_output = self._merge_heads(attn_output) + attn_output = self.o_proj(attn_output) + + outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) + return outputs + + +class FlaxLlamaMLP(nn.Module): + config: LlamaConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + embed_dim = self.config.hidden_size + inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim + + kernel_init = jax.nn.initializers.normal(self.config.initializer_range) + self.act = ACT2FN[self.config.hidden_act] + + self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) + self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) + self.up_proj = 
nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) + + def __call__(self, hidden_states): + up_proj_states = self.up_proj(hidden_states) + gate_states = self.act(self.gate_proj(hidden_states)) + + hidden_states = self.down_proj(up_proj_states * gate_states) + return hidden_states + + +class FlaxLlamaDecoderLayer(nn.Module): + config: LlamaConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.input_layernorm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype) + self.self_attn = FlaxLlamaAttention(self.config, dtype=self.dtype) + self.post_attention_layernorm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype) + self.mlp = FlaxLlamaMLP(self.config, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask=None, + position_ids=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + outputs = self.self_attn( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + # residual connection + attn_output = outputs[0] + hidden_states = residual + attn_output + + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + hidden_states + + return (hidden_states,) + outputs[1:] + + +# Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoPreTrainedModel with GPTNeo->Llama, GPT_NEO->LLAMA, transformer->model +class FlaxLlamaPreTrainedModel(FlaxPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = LlamaConfig + base_model_prefix = "model" + module_class: nn.Module = None + + def __init__( + self, + config: LlamaConfig, + input_shape: Tuple = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + input_ids = jnp.zeros(input_shape, dtype="i4") + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def init_cache(self, batch_size, max_length): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. 
+ """ + # init input variables to retrieve cache + input_ids = jnp.ones((batch_size, max_length)) + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + init_variables = self.module.init( + jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True + ) + return unfreeze(init_variables["cache"]) + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + params: dict = None, + past_key_values: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + batch_size, sequence_length = input_ids.shape + + if position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") + + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + if attention_mask is None: + attention_mask = jnp.ones((batch_size, sequence_length)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be changed by FlaxLlamaAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + outputs = self.module.apply( + inputs, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + jnp.array(position_ids, dtype="i4"), + not train, + False, + output_attentions, + output_hidden_states, + return_dict, + rngs=rngs, + mutable=mutable, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past_key_values = outputs + outputs["past_key_values"] = unfreeze(past_key_values["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past_key_values = outputs + outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] + + return outputs + + +class FlaxLlamaLayerCollection(nn.Module): + config: LlamaConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.blocks = [ + FlaxLlamaDecoderLayer(self.config, dtype=self.dtype, name=str(i)) + for i in range(self.config.num_hidden_layers) + ] + + def __call__( + self, + hidden_states, + attention_mask=None, + position_ids=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = False, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for block in self.blocks: + if output_hidden_states: + all_hidden_states += (hidden_states,) + layer_outputs = block( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions += (layer_outputs[1],) + + # this contains possible `None` values - `FlaxLlamaModule` will filter them out + outputs = (hidden_states, all_hidden_states, all_attentions) + + return outputs + + +class FlaxLlamaModule(nn.Module): + config: LlamaConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.hidden_size = self.config.hidden_size + embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range) + self.embed_tokens = nn.Embed( + self.config.vocab_size, + self.hidden_size, + embedding_init=embedding_init, + dtype=self.dtype, + ) + self.layers = FlaxLlamaLayerCollection(self.config, dtype=self.dtype) + self.norm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + deterministic=True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + input_embeds = self.embed_tokens(input_ids.astype("i4")) + + outputs = self.layers( + input_embeds, + position_ids=position_ids, + attention_mask=attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + hidden_states = self.norm(hidden_states) + + if output_hidden_states: + all_hidden_states = outputs[1] + (hidden_states,) + outputs = (hidden_states, all_hidden_states) + outputs[2:] + else: + outputs = (hidden_states,) + outputs[1:] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutput( + 
last_hidden_state=hidden_states, + hidden_states=outputs[1], + attentions=outputs[-1], + ) + + +@add_start_docstrings( + "The bare Llama Model transformer outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class FlaxLlamaModel(FlaxLlamaPreTrainedModel): + module_class = FlaxLlamaModule + + +append_call_sample_docstring( + FlaxLlamaModel, + _CHECKPOINT_FOR_DOC, + FlaxBaseModelOutput, + _CONFIG_FOR_DOC, + real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, +) + + +class FlaxLlamaForCausalLMModule(nn.Module): + config: LlamaConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.model = FlaxLlamaModule(self.config, dtype=self.dtype) + self.lm_head = nn.Dense( + self.config.vocab_size, + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + ) + + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + outputs = self.model( + input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + lm_logits = self.lm_head(hidden_states) + + if not return_dict: + return (lm_logits,) + outputs[1:] + + return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) + + +@add_start_docstrings( + """ + The Llama Model transformer with a language modeling head (linear layer) on top. + """, + LLAMA_START_DOCSTRING, +) +# Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJForCausalLM with GPTJ->Llama +class FlaxLlamaForCausalLM(FlaxLlamaPreTrainedModel): + module_class = FlaxLlamaForCausalLMModule + + def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): + # initializing the cache + batch_size, seq_length = input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since Llama uses a causal mask, those positions are masked anyways. 
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if attention_mask is not None: + position_ids = attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) + else: + position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) + + return { + "past_key_values": past_key_values, + "attention_mask": extended_attention_mask, + "position_ids": position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 + return model_kwargs + + +append_call_sample_docstring( + FlaxLlamaForCausalLM, + _CHECKPOINT_FOR_DOC, + FlaxCausalLMOutput, + _CONFIG_FOR_DOC, + real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, +) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_llama.py b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_llama.py new file mode 100644 index 0000000000000000000000000000000000000000..f991ccfb90c4a1e089a3882d8f1383dc3d7c5407 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_llama.py @@ -0,0 +1,1872 @@ +# coding=utf-8 +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
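As a side note on the Flax generation helpers above: `prepare_inputs_for_generation` derives position ids from the attention mask with a cumulative sum, so a left-padded prompt still gets positions starting at 0 on its first real token, while padded slots come out as -1 but are masked anyway. A minimal sketch with made-up mask values:

```python
import jax.numpy as jnp

attention_mask = jnp.array([[0, 0, 1, 1, 1],   # left-padded prompt
                            [1, 1, 1, 1, 1]], dtype="i4")
position_ids = attention_mask.cumsum(axis=-1) - 1
print(position_ids)
# [[-1 -1  0  1  2]
#  [ 0  1  2  3  4]]
```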
+import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache, StaticCache +from transformers.modeling_attn_mask_utils import AttentionMaskConverter +from transformers.modeling_flash_attention_utils import _flash_attention_forward +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + QuestionAnsweringModelOutput, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from .configuration_llama import LlamaConfig +from transformers import LogitsProcessorList, StoppingCriteriaList, GenerationConfig +from transformers.generation.utils import GenerateNonBeamOutput, GenerateDecoderOnlyOutput +from ..token_pruning import select_visual_token_indices + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "LlamaConfig" + + +def _prepare_4d_causal_attention_mask_with_cache_position( + attention_mask: torch.Tensor, + sequence_length: int, + target_length: int, + dtype: torch.dtype, + device: torch.device, + min_dtype: float, + cache_position: torch.Tensor, + batch_size: int, +): + """ + Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape + `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. + + Args: + attention_mask (`torch.Tensor`): + A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. + sequence_length (`int`): + The sequence length being processed. + target_length (`int`): + The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. + dtype (`torch.dtype`): + The dtype to use for the 4D attention mask. + device (`torch.device`): + The device to plcae the 4D attention mask on. + min_dtype (`float`): + The minimum value representable with the dtype `dtype`. + cache_position (`torch.Tensor`): + Indices depicting the position of the input sequence tokens in the sequence. + batch_size (`torch.Tensor`): + Batch size. + """ + if attention_mask is not None and attention_mask.dim() == 4: + # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
+ causal_mask = attention_mask + else: + causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) + if sequence_length != 1: + causal_mask = torch.triu(causal_mask, diagonal=1) + causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) + causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + mask_length = attention_mask.shape[-1] + padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] + padding_mask = padding_mask == 0 + causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( + padding_mask, min_dtype + ) + + return causal_mask + + +class LlamaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + LlamaRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + def extra_repr(self): + return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" + + +ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm) + + +class LlamaRotaryEmbedding(nn.Module): + def __init__( + self, + dim=None, + max_position_embeddings=2048, + base=10000, + device=None, + scaling_factor=1.0, + rope_type="default", + config: Optional[LlamaConfig] = None, + ): + super().__init__() + # TODO (joao): remove the `if` below, only used for BC + self.rope_kwargs = {} + if config is None: + logger.warning_once( + "`LlamaRotaryEmbedding` can now be fully parameterized by passing the model config through the " + "`config` argument. 
All other arguments will be removed in v4.45" + ) + self.rope_kwargs = { + "rope_type": rope_type, + "factor": scaling_factor, + "dim": dim, + "base": base, + "max_position_embeddings": max_position_embeddings, + } + self.rope_type = rope_type + self.max_seq_len_cached = max_position_embeddings + self.original_max_seq_len = max_position_embeddings + else: + # BC: "rope_type" was originally "type" + if config.rope_scaling is not None: + self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) + else: + self.rope_type = "default" + self.max_seq_len_cached = config.max_position_embeddings + self.original_max_seq_len = config.max_position_embeddings + + self.config = config + self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] + + inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs) + self.register_buffer("inv_freq", inv_freq, persistent=False) + self.original_inv_freq = self.inv_freq + + def _dynamic_frequency_update(self, position_ids, device): + """ + dynamic RoPE layers should recompute `inv_freq` in the following situations: + 1 - growing beyond the cached sequence length (allow scaling) + 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) + """ + seq_len = torch.max(position_ids) + 1 + if seq_len > self.max_seq_len_cached: # growth + inv_freq, self.attention_scaling = self.rope_init_fn( + self.config, device, seq_len=seq_len, **self.rope_kwargs + ) + self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation + self.max_seq_len_cached = seq_len + + if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset + self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) + self.max_seq_len_cached = self.original_max_seq_len + + @torch.no_grad() + def forward(self, x, position_ids): + if "dynamic" in self.rope_type: + self._dynamic_frequency_update(position_ids, device=x.device) + + # Core RoPE block + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + # Force float32 (see https://github.com/huggingface/transformers/pull/29285) + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + + # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention + cos = cos * self.attention_scaling + sin = sin * self.attention_scaling + + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" + + def __init__(self, *args, **kwargs): + logger.warning_once( + "`LlamaLinearScalingRotaryEmbedding` is deprecated an will be removed in v4.45. Please use " + "`LlamaRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__)." + ) + kwargs["rope_type"] = "linear" + super().__init__(*args, **kwargs) + + +class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+    def __init__(self, *args, **kwargs):
+        logger.warning_once(
+            "`LlamaDynamicNTKScalingRotaryEmbedding` is deprecated and will be removed in v4.45. Please use "
+            "`LlamaRotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to "
+            "__init__)."
+        )
+        kwargs["rope_type"] = "dynamic"
+        super().__init__(*args, **kwargs)
+
+
+def rotate_half(x):
+    """Rotates half the hidden dims of the input."""
+    x1 = x[..., : x.shape[-1] // 2]
+    x2 = x[..., x.shape[-1] // 2 :]
+    return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+    """Applies Rotary Position Embedding to the query and key tensors.
+
+    Args:
+        q (`torch.Tensor`): The query tensor.
+        k (`torch.Tensor`): The key tensor.
+        cos (`torch.Tensor`): The cosine part of the rotary embedding.
+        sin (`torch.Tensor`): The sine part of the rotary embedding.
+        position_ids (`torch.Tensor`, *optional*):
+            Deprecated and unused.
+        unsqueeze_dim (`int`, *optional*, defaults to 1):
+            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+    Returns:
+        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """ + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class LlamaMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + if self.config.pretraining_tp > 1: + slice = self.intermediate_size // self.config.pretraining_tp + gate_proj_slices = self.gate_proj.weight.split(slice, dim=0) + up_proj_slices = self.up_proj.weight.split(slice, dim=0) + down_proj_slices = self.down_proj.weight.split(slice, dim=1) + + gate_proj = torch.cat( + [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1 + ) + up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1) + + intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2) + down_proj = [ + F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp) + ] + down_proj = sum(down_proj) + else: + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + return down_proj + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class LlamaAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " + "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.attention_dropout = config.attention_dropout + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) + + # TODO (joao): remove in v4.45 (RoPE is computed in the model, not in the decoder layers) + self.rotary_emb = LlamaRotaryEmbedding(config=self.config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45 + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + if self.config.pretraining_tp > 1: + key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp + query_slices = self.q_proj.weight.split( + (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0 + ) + key_slices = self.k_proj.weight.split(key_value_slicing, dim=0) + value_slices = self.v_proj.weight.split(key_value_slicing, dim=0) + + query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)] + query_states = torch.cat(query_states, dim=-1) + + key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)] + key_states = torch.cat(key_states, dim=-1) + + value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)] + value_states = torch.cat(value_states, dim=-1) + + else: + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." 
+ ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + + attn_output = attn_output.reshape(bsz, q_len, -1) + + if self.config.pretraining_tp > 1: + attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2) + o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1) + attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)]) + else: + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaFlashAttention2(LlamaAttention): + """ + Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45 + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if isinstance(past_key_value, StaticCache): + raise ValueError( + "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " + "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers" + ) + + output_attentions = False + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + # therefore we just need to keep the original shape + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." + ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache + # to be able to avoid many of these transpose/reshape/view. + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + dropout_rate = self.attention_dropout if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. 
(LlamaRMSNorm handles it correctly) + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = _flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + position_ids=position_ids, + dropout=dropout_rate, + sliding_window=getattr(self, "sliding_window", None), + use_top_left_mask=self._flash_attn_uses_top_left_mask, + is_causal=self.is_causal, + ) + + attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaSdpaAttention(LlamaAttention): + """ + Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from LlamaAttention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45 + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
+ ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + position_embeddings=position_embeddings, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." + ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + if attention_mask is not None: + causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == "cuda" and causal_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment + # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. 
+ is_causal = True if causal_mask is None and q_len > 1 else False + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=is_causal, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, -1) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +LLAMA_ATTENTION_CLASSES = { + "eager": LlamaAttention, + "flash_attention_2": LlamaFlashAttention2, + "sdpa": LlamaSdpaAttention, +} + + +class LlamaDecoderLayer(nn.Module): + def __init__(self, config: LlamaConfig, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) + + self.mlp = LlamaMLP(config) + self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45 + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): + attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, + query_sequence_length, key_sequence_length)` if default attention is used. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence + position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): + Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, + with `head_dim` being the embedding dimension of each attention head. 
+ kwargs (`dict`, *optional*): + Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code + into the model + """ + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + position_embeddings=position_embeddings, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +LLAMA_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`LlamaConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class LlamaPreTrainedModel(PreTrainedModel): + config_class = LlamaConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["LlamaDecoderLayer"] + _skip_keys_device_placement = ["past_key_values"] + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + _supports_quantized_cache = True + _supports_static_cache = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +LLAMA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, + this tensor is not affected by padding. It is used to update the cache in the correct position and to infer + the complete sequence length. 
+""" + + +@add_start_docstrings( + "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", + LLAMA_START_DOCSTRING, +) +class LlamaModel(LlamaPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] + + Args: + config: LlamaConfig + """ + + def __init__(self, config: LlamaConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.rotary_emb = LlamaRotaryEmbedding(config=config) + self.gradient_checkpointing = False + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + visual_token_index: Optional[torch.Tensor] = None, + large_model_prune_layer: Optional[float] = None, + large_model_prune_ratio: Optional[float] = None, + large_model_prune_selection: Optional[str] = None, + large_model_similarity_target_coverage: Optional[float] = None, + large_model_similarity_min_gain: Optional[float] = None, + large_model_similarity_min_keep: Optional[int] = None, + large_model_similarity_max_keep_ratio: Optional[float] = None, + visual_token_importance: Optional[torch.Tensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" + ) + + if self.gradient_checkpointing and self.training and use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
+            )
+            use_cache = False
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
+        elif input_ids is not None:
+            batch_size, seq_length = input_ids.shape[:2]
+        elif inputs_embeds is not None:
+            batch_size, seq_length = inputs_embeds.shape[:2]
+        else:
+            raise ValueError('You have to specify either input_ids or inputs_embeds')
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)
+
+        return_legacy_cache = False
+        if (
+            use_cache and not isinstance(past_key_values, Cache) and not self.training
+        ):  # kept for BC (non `Cache` `past_key_values` inputs)
+            return_legacy_cache = True
+            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+            logger.warning_once(
+                "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. "
+                "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
+            )
+
+        if cache_position is None:
+            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+            cache_position = torch.arange(
+                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+            )
+        if position_ids is None:
+            position_ids = cache_position.unsqueeze(0)
+
+        causal_mask = self._update_causal_mask(
+            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+        )
+        hidden_states = inputs_embeds
+
+        # create position embeddings to be shared across the decoder layers
+        position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+        # decoder layers
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attns = () if output_attentions else None
+        next_decoder_cache = None
+
+        aggregated_viusal_token_attention = 0 if output_attentions else None
+        for idx, decoder_layer in enumerate(self.layers):
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            if self.gradient_checkpointing and self.training:
+                layer_outputs = self._gradient_checkpointing_func(
+                    decoder_layer.__call__,
+                    hidden_states,
+                    causal_mask,
+                    position_ids,
+                    past_key_values,
+                    output_attentions,
+                    use_cache,
+                    cache_position,
+                    position_embeddings,
+                )
+            else:
+                ##### visual token pruning at a selected layer #########
+                K = int(len(self.layers) * large_model_prune_layer)
+                keep_ratio = large_model_prune_ratio
+                visual_token_length = int(visual_token_index[1] - visual_token_index[0] + 1)
+
+                if hidden_states.shape[1] != 1:
+                    if idx == K:
+                        device = hidden_states.device
+                        selected_visual_index = select_visual_token_indices(
+                            hidden_states,
+                            visual_token_importance,
+                            visual_token_index,
+                            keep_ratio,
+                            large_model_prune_selection or "topk",
+                            similarity_target_coverage=large_model_similarity_target_coverage or 0.9,
+                            similarity_min_gain=large_model_similarity_min_gain or 0.0,
+                            similarity_min_keep=large_model_similarity_min_keep or 1,
+                            similarity_max_keep_ratio=large_model_similarity_max_keep_ratio or 1.0,
+                        ) + int(visual_token_index[0])
+                        keep_indexs = torch.cat((
+                            torch.arange(int(visual_token_index[0]), device=device),
+                            selected_visual_index.to(device),
+                            torch.arange(int(visual_token_index[1] + 1), seq_length, device=device),
+                        ))
+                        keep_indexs = keep_indexs.sort().values
+                        hidden_states = hidden_states[:, keep_indexs, :]
+                        position_embeddings = (position_embeddings[0].to(device)[:, keep_indexs, :],
+                                               position_embeddings[1].to(device)[:, keep_indexs, :],)
+
+                        if causal_mask is not None:
+                            causal_mask = causal_mask[:, :, :hidden_states.shape[1], :hidden_states.shape[1]]
+                        position_ids = keep_indexs.unsqueeze(0)
+                        pruned_sequence_length = visual_token_length - selected_visual_index.numel()
+
+                else:
+                    if idx == K:
+                        visual_token_length = visual_token_index[1] - visual_token_index[0] + 1
+                        pruned_sequence_length = visual_token_length - int(visual_token_length * keep_ratio)
+                        if causal_mask is not None:
+                            causal_mask = causal_mask[:, :, :, pruned_sequence_length:]
+
+                layer_outputs = decoder_layer(
+                    hidden_states,
+                    attention_mask=causal_mask,
+                    position_ids=position_ids,
+                    past_key_value=past_key_values,
+                    output_attentions=output_attentions,
+                    use_cache=use_cache,
+                    cache_position=cache_position,
+                    position_embeddings=position_embeddings
+                )
+
+            hidden_states = layer_outputs[0]
+
+            if use_cache:
+                next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+            if output_attentions:
+                # all_self_attns += (layer_outputs[1],)
+                if layer_outputs[1].shape[2] != 1:
+                    aggregated_viusal_token_attention = aggregated_viusal_token_attention + layer_outputs[1][:, :, visual_token_index[1]:, visual_token_index[0]:visual_token_index[1]+1].sum(dim=(0, 1, 2))
+                else:
+                    aggregated_viusal_token_attention = aggregated_viusal_token_attention + layer_outputs[1][:, :, :, visual_token_index[0]:visual_token_index[1]+1].sum(dim=(0, 1, 2))
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        next_cache = next_decoder_cache if use_cache else None
+        if return_legacy_cache:
+            next_cache = next_cache.to_legacy_cache()
+
+        if not return_dict:
+            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+
+        out_dict = BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=next_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attns,
+        )
+        out_dict.aggregated_viusal_token_attention = aggregated_viusal_token_attention
+        return out_dict
+
+    def _update_causal_mask(
+        self,
+        attention_mask: torch.Tensor,
+        input_tensor: torch.Tensor,
+        cache_position: torch.Tensor,
+        past_key_values: Cache,
+        output_attentions: bool,
+    ):
+        # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
+        # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
+        # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
+        # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
+
+        if self.config._attn_implementation == "flash_attention_2":
+            if attention_mask is not None and 0.0 in attention_mask:
+                return attention_mask
+            return None
+
+        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+        # to infer the attention mask.
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + using_static_cache = isinstance(past_key_values, StaticCache) + + # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward + if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: + if AttentionMaskConverter._ignore_causal_mask_sdpa( + attention_mask, + inputs_embeds=input_tensor, + past_key_values_length=past_seen_tokens, + is_training=self.training, + ): + return None + + dtype, device = input_tensor.dtype, input_tensor.device + min_dtype = torch.finfo(dtype).min + sequence_length = input_tensor.shape[1] + if using_static_cache: + target_length = past_key_values.get_max_length() + else: + target_length = ( + attention_mask.shape[-1] + if isinstance(attention_mask, torch.Tensor) + else past_seen_tokens + sequence_length + 1 + ) + + # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). + causal_mask = _prepare_4d_causal_attention_mask_with_cache_position( + attention_mask, + sequence_length=sequence_length, + target_length=target_length, + dtype=dtype, + device=device, + min_dtype=min_dtype, + cache_position=cache_position, + batch_size=input_tensor.shape[0], + ) + + if ( + self.config._attn_implementation == "sdpa" + and attention_mask is not None + and attention_mask.device.type == "cuda" + and not output_attentions + ): + # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when + # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. + # Details: https://github.com/pytorch/pytorch/issues/110213 + causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) + + return causal_mask + + +class LlamaForCausalLM(LlamaPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = LlamaModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + visual_token_index: Optional[torch.Tensor] = None, + large_model_prune_layer: Optional[float] = None, + large_model_prune_ratio: Optional[float] = None, + large_model_prune_selection: Optional[str] = None, + 
large_model_similarity_target_coverage: Optional[float] = None, + large_model_similarity_min_gain: Optional[float] = None, + large_model_similarity_min_keep: Optional[int] = None, + large_model_similarity_max_keep_ratio: Optional[float] = None, + visual_token_importance: Optional[torch.Tensor] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, LlamaForCausalLM + + >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf") + >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + cache_position=cache_position, + visual_token_index=visual_token_index, + large_model_prune_layer=large_model_prune_layer, + large_model_prune_ratio=large_model_prune_ratio, + large_model_prune_selection=large_model_prune_selection, + large_model_similarity_target_coverage=large_model_similarity_target_coverage, + large_model_similarity_min_gain=large_model_similarity_min_gain, + large_model_similarity_min_keep=large_model_similarity_min_keep, + large_model_similarity_max_keep_ratio=large_model_similarity_max_keep_ratio, + visual_token_importance=visual_token_importance + ) + + hidden_states = outputs[0] + if self.config.pretraining_tp > 1: + lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0) + logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)] + logits = torch.cat(logits, dim=-1) + else: + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) 
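+
+        # In addition to the standard CausalLMOutputWithPast fields, the aggregated visual-token
+        # attention computed by the base model is attached to the returned output below, so the
+        # calling evaluation code can read it (a custom addition; not part of upstream Llama).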
+ + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + + output = CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + output['aggregated_viusal_token_attention'] = outputs.aggregated_viusal_token_attention + return output + + def _sample( + self, + input_ids: torch.LongTensor, + logits_processor: LogitsProcessorList, + stopping_criteria: StoppingCriteriaList, + generation_config: GenerationConfig, + synced_gpus: bool, + streamer: Optional["BaseStreamer"], + logits_warper: Optional[LogitsProcessorList], + **model_kwargs, + ) -> Union[GenerateNonBeamOutput, torch.LongTensor]: + # init values + pad_token_id = generation_config._pad_token_tensor + output_attentions = generation_config.output_attentions + output_hidden_states = generation_config.output_hidden_states + output_scores = generation_config.output_scores + output_logits = generation_config.output_logits + return_dict_in_generate = generation_config.return_dict_in_generate + max_length = generation_config.max_length + has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria) + do_sample = generation_config.do_sample + if do_sample is True and not isinstance(logits_warper, LogitsProcessorList): + raise ValueError( + "`do_sample` is set to `True`, `logits_warper` must be a `LogitsProcessorList` instance (it is " + f"{logits_warper})." + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + raw_logits = () if (return_dict_in_generate and output_logits) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + batch_size, cur_len = input_ids.shape + this_peer_finished = False + unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device) + model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs) + + aggregated_viusal_token_attention = 0 if output_attentions else None + while self._has_unfinished_sequences( + this_peer_finished, synced_gpus, device=input_ids.device, cur_len=cur_len, max_length=max_length + ): + # prepare model inputs + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # prepare variable output controls (note: some models won't accept all output controls) + model_inputs.update({"output_attentions": output_attentions} if output_attentions else {}) + model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {}) + + # forward pass to get next token + outputs = self(**model_inputs, return_dict=True) + if output_attentions: + aggregated_viusal_token_attention = aggregated_viusal_token_attention + outputs['aggregated_viusal_token_attention'] + + if synced_gpus and 
this_peer_finished: + continue # don't waste resources running the code we don't need + + # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration + # (the clone itself is always small) + next_token_logits = outputs.logits[:, -1, :].clone() + + # pre-process distribution + next_token_scores = logits_processor(input_ids, next_token_logits) + if do_sample: + next_token_scores = logits_warper(input_ids, next_token_scores) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores,) + if output_logits: + raw_logits += (next_token_logits,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # token selection + if do_sample: + probs = nn.functional.softmax(next_token_scores, dim=-1) + # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + else: + next_tokens = torch.argmax(next_token_scores, dim=-1) + + # finished sentences should have their next token be a padding token + if has_eos_stopping_criteria: + next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + if streamer is not None: + streamer.put(next_tokens.cpu()) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + + unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores) + this_peer_finished = unfinished_sequences.max() == 0 + cur_len += 1 + + # This is needed to properly delete outputs.logits which may be very large for first iteration + # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration + del outputs + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + return GenerateEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + out_dict = GenerateDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + out_dict["aggregated_viusal_token_attention"] = aggregated_viusal_token_attention + return out_dict + else: + return input_ids + + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + cache_position=None, + position_ids=None, + use_cache=True, + **kwargs, + ): + # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens + # Exception 1: when passing input_embeds, input_ids may be missing 
entries + # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here + if past_key_values is not None: + if inputs_embeds is not None: # Exception 1 + input_ids = input_ids[:, -cache_position.shape[0] :] + elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2) + input_ids = input_ids[:, cache_position] + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s `mode="reduce-overhead`, as otherwise the input `position_ids` would have various stride during the decoding. Here, simply using `.contiguous()` is not sufficient as in the batch size = 1 case, `position_ids` is already contiguous but with varying stride which retriggers a capture. + position_ids = position_ids.clone(memory_format=torch.contiguous_format) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and cache_position[0] == 0: + model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None} + else: + # The clone here is for the same reason as for `position_ids`. + model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None} + + if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: + if model_inputs["inputs_embeds"] is not None: + batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape + device = model_inputs["inputs_embeds"].device + else: + batch_size, sequence_length = model_inputs["input_ids"].shape + device = model_inputs["input_ids"].device + + dtype = self.lm_head.weight.dtype + min_dtype = torch.finfo(dtype).min + + attention_mask = _prepare_4d_causal_attention_mask_with_cache_position( + attention_mask, + sequence_length=sequence_length, + target_length=past_key_values.get_max_length(), + dtype=dtype, + device=device, + min_dtype=min_dtype, + cache_position=cache_position, + batch_size=batch_size, + ) + + model_inputs.update( + { + "position_ids": position_ids, + "cache_position": cache_position, + "past_key_values": past_key_values, + "use_cache": use_cache, + "attention_mask": attention_mask, + 'visual_token_index': kwargs.get('visual_token_index'), + 'large_model_prune_layer': kwargs.get('large_model_prune_layer'), + 'large_model_prune_ratio': kwargs.get('large_model_prune_ratio'), + 'large_model_prune_selection': kwargs.get('large_model_prune_selection'), + 'large_model_similarity_target_coverage': kwargs.get('large_model_similarity_target_coverage'), + 'large_model_similarity_min_gain': kwargs.get('large_model_similarity_min_gain'), + 'large_model_similarity_min_keep': kwargs.get('large_model_similarity_min_keep'), + 'large_model_similarity_max_keep_ratio': kwargs.get('large_model_similarity_max_keep_ratio'), + 'visual_token_importance': kwargs.get('visual_token_importance') + } + ) + return model_inputs + + +@add_start_docstrings( + """ + The LLaMa Model transformer with a sequence classification head on top (linear layer). + + [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. 
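Because `prepare_inputs_for_generation` forwards these extra keyword arguments on every step, the pruning controls can be supplied directly to `generate()` and reach the forward pass throughout decoding. A hedged call sketch; the variable names and concrete values below are placeholders, and the accepted values for the pruning arguments are whatever the pruning code elsewhere in this repo defines:

```python
# Hypothetical usage sketch (not from this diff): extra kwargs passed to `generate()`
# flow into `prepare_inputs_for_generation` above via **kwargs.
output_ids = model.generate(
    input_ids=input_ids,                    # placeholder prompt ids
    attention_mask=attention_mask,          # placeholder mask
    max_new_tokens=32,
    visual_token_index=visual_token_index,  # positions of image tokens in the prompt
    large_model_prune_layer=8,              # layer at which visual tokens are pruned (assumed value)
    large_model_prune_ratio=0.5,            # pruning ratio (semantics defined by the pruning code)
    visual_token_importance=importance,     # per-token importance scores from the guiding pass
)
```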
+ + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + LLAMA_START_DOCSTRING, +) +class LlamaForSequenceClassification(LlamaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = LlamaModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ +The Llama Model transformer with a span classification head on top for extractive question-answering tasks like +SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
+ """, + LLAMA_START_DOCSTRING, +) +class LlamaForQuestionAnswering(LlamaPreTrainedModel): + base_model_prefix = "transformer" + + # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama + def __init__(self, config): + super().__init__(config) + self.transformer = LlamaModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, 2) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.transformer.embed_tokens + + def set_input_embeddings(self, value): + self.transformer.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.transformer( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1).to(start_logits.device) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1).to(end_logits.device) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + The Llama Model transformer with a token classification head on top (a linear layer on top of the hidden-states + output) e.g. for Named-Entity-Recognition (NER) tasks. + """, + LLAMA_START_DOCSTRING, +) +class LlamaForTokenClassification(LlamaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = LlamaModel(config) + if getattr(config, "classifier_dropout", None) is not None: + classifier_dropout = config.classifier_dropout + elif getattr(config, "hidden_dropout", None) is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.score = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. 
Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.model(
+            input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        sequence_output = outputs[0]
+        sequence_output = self.dropout(sequence_output)
+        logits = self.score(sequence_output)
+
+        loss = None
+        if labels is not None:
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+        if not return_dict:
+            output = (logits,) + outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TokenClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama.py b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5a31446e0004b1a59e0af248d66c6e7de77395f
--- /dev/null
+++ b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama.py
@@ -0,0 +1,412 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tokenization classes for LLaMA."""
+
+import os
+from shutil import copyfile
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from transformers.convert_slow_tokenizer import import_protobuf
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+if TYPE_CHECKING:
+    from transformers.tokenization_utils_base import TextInput
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+SPIECE_UNDERLINE = "▁"
+
+B_INST, E_INST = "[INST]", "[/INST]"
+B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+
+# fmt: off
+DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
+answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\
+ that your responses are socially unbiased and positive in nature.
+ +If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \ +correct. If you don't know the answer to a question, please don't share false information.""" +# fmt: on + + +class LlamaTokenizer(PreTrainedTokenizer): + """ + Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is + no padding token in the original model. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `""`): + The end of sequence token. + pad_token (`str` or `tokenizers.AddedToken`, *optional*): + A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by + attention mechanisms or loss computation. + sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*): + Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for + SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, + to set: + + - `enable_sampling`: Enable subword regularization. + - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. + + - `nbest_size = {0,1}`: No sampling is performed. + - `nbest_size > 1`: samples from the nbest_size results. + - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) + using forward-filtering-and-backward-sampling algorithm. + + - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for + BPE-dropout. + + add_bos_token (`bool`, *optional*, defaults to `True`): + Whether or not to add an `bos_token` at the start of sequences. + add_eos_token (`bool`, *optional*, defaults to `False`): + Whether or not to add an `eos_token` at the end of sequences. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like + extra spaces. + use_default_system_prompt (`bool`, *optional*, defaults to `False`): + Whether or not the default system prompt for Llama should be used. + spaces_between_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to add spaces between special tokens. + legacy (`bool`, *optional*): + Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622 + and #25224 which includes fixes to properly handle tokens that appear after special tokens. + Make sure to also set `from_slow` to `True`. + A simple example: + + - `legacy=True`: + ```python + >>> from transformers import LlamaTokenizerFast + + >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True) + >>> tokenizer.encode("Hello .") # 869 is '▁.' + [1, 15043, 29871, 1, 869] + ``` + - `legacy=False`: + ```python + >>> from transformers import LlamaTokenizerFast + + >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True) + >>> tokenizer.encode("Hello .") # 29889 is '.' 
+            [1, 15043, 29871, 1, 29889]
+            ```
+            Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
+        add_prefix_space (`bool`, *optional*, defaults to `True`):
+            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
+            other word. Again, this should be set with `from_slow=True` to make sure it's taken into account.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token=None,
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        add_bos_token=True,
+        add_eos_token=False,
+        clean_up_tokenization_spaces=False,
+        use_default_system_prompt=False,
+        spaces_between_special_tokens=False,
+        legacy=None,
+        add_prefix_space=True,
+        **kwargs,
+    ):
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
+        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
+        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
+        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
+
+        if legacy is None:
+            logger.warning_once(
+                f"You are using the default legacy behaviour of the {self.__class__}. This is"
+                " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
+                " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
+                " means, and thoroughly read the reason why this was added as explained in"
+                " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file"
+                " you can ignore this message"
+            )
+            legacy = True
+
+        self.legacy = legacy
+        self.vocab_file = vocab_file
+        self.add_bos_token = add_bos_token
+        self.add_eos_token = add_eos_token
+        self.use_default_system_prompt = use_default_system_prompt
+        self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
+        self.add_prefix_space = add_prefix_space
+
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            add_bos_token=add_bos_token,
+            add_eos_token=add_eos_token,
+            sp_model_kwargs=self.sp_model_kwargs,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            use_default_system_prompt=use_default_system_prompt,
+            spaces_between_special_tokens=spaces_between_special_tokens,
+            legacy=legacy,
+            add_prefix_space=add_prefix_space,
+            **kwargs,
+        )
+
+    @property
+    def unk_token_length(self):
+        return len(self.sp_model.encode(str(self.unk_token)))
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
+    def get_spm_processor(self, from_slow=False):
+        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        if self.legacy or from_slow:  # no dependency on protobuf
+            tokenizer.Load(self.vocab_file)
+            return tokenizer
+
+        with open(self.vocab_file, "rb") as f:
+            sp_model = f.read()
+            model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
+            model = model_pb2.ModelProto.FromString(sp_model)
+            normalizer_spec = model_pb2.NormalizerSpec()
+            normalizer_spec.add_dummy_prefix = False
+            model.normalizer_spec.MergeFrom(normalizer_spec)
sp_model = model.SerializeToString() + tokenizer.LoadFromSerializedProto(sp_model) + return tokenizer + + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + state["sp_model_proto"] = self.sp_model.serialized_model_proto() + return state + + def __setstate__(self, d): + self.__dict__ = d + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.LoadFromSerializedProto(self.sp_model_proto) + + @property + def vocab_size(self): + """Returns vocab size""" + return self.sp_model.get_piece_size() + + def get_vocab(self): + """Returns vocab as a dict""" + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize + def tokenize(self, text: "TextInput", **kwargs) -> List[str]: + """ + Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the + first token is special. + """ + if self.legacy or len(text) == 0: + return super().tokenize(text, **kwargs) + + text = text.replace(SPIECE_UNDERLINE, " ") + if self.add_prefix_space: + text = SPIECE_UNDERLINE + text + + tokens = super().tokenize(text, **kwargs) + + if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens: + tokens = tokens[1:] + return tokens + + # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize + def _tokenize(self, text, **kwargs): + """ + Returns a tokenized string. + + We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any + SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give + `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the + `unk_token`. Here is an example with `unk_token = ""` and `unk_token_length = 4`. + `self.tokenizer.sp_model.encode(" Hey", out_type = str)[4:]`. + """ + if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")): + return self.sp_model.encode(text, out_type=str) + + # 1. Encode string + prefix ex: " Hey" + tokens = self.sp_model.encode(self.unk_token + text, out_type=str) + # 2. 
Remove self.unk_token from ['<','unk','>', '▁Hey'] + return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.piece_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = self.sp_model.IdToPiece(index) + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + # since we manually add the prefix space, we have to remove it when decoding + if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space: + tokens[0] = tokens[0][1:] + + current_sub_tokens = [] + out_string = "" + prev_is_special = False + for i, token in enumerate(tokens): + # make sure that special tokens are not decoded using sentencepiece model + if token in self.all_special_tokens: + if not prev_is_special and i != 0 and self.legacy: + out_string += " " + out_string += self.sp_model.decode(current_sub_tokens) + token + prev_is_special = True + current_sub_tokens = [] + else: + if prev_is_special and i == 1 and self.add_prefix_space and not token.startswith(SPIECE_UNDERLINE): + out_string += " " + current_sub_tokens.append(token) + prev_is_special = False + out_string += self.sp_model.decode(current_sub_tokens) + return out_string + + def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = bos_token_id + token_ids_0 + eos_token_id + + if token_ids_1 is not None: + output = output + bos_token_id + token_ids_1 + eos_token_id + + return output + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
+ """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + bos_token_id = [1] if self.add_bos_token else [] + eos_token_id = [1] if self.add_eos_token else [] + + if token_ids_1 is None: + return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + return ( + bos_token_id + + ([0] * len(token_ids_0)) + + eos_token_id + + bos_token_id + + ([0] * len(token_ids_1)) + + eos_token_id + ) + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + if token_ids_1 is None, only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of ids. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) + + if token_ids_1 is not None: + output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) + + return output diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama_fast.py b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..8576cbef40baa9c36f4607ca4ec3f5c0abab4950 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama_fast.py @@ -0,0 +1,255 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +from shutil import copyfile +from typing import Optional, Tuple + +from tokenizers import processors + +from transformers.tokenization_utils_fast import PreTrainedTokenizerFast +from transformers.utils import is_sentencepiece_available, logging +from transformers.utils.versions import require_version + + +require_version("tokenizers>=0.13.3") + +if is_sentencepiece_available(): + from .tokenization_llama import LlamaTokenizer +else: + LlamaTokenizer = None + +logger = logging.get_logger(__name__) +VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"} + +B_INST, E_INST = "[INST]", "[/INST]" +B_SYS, E_SYS = "<>\n", "\n<>\n\n" + +# fmt: off +DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \ +answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. 
Please ensure\ + that your responses are socially unbiased and positive in nature. + +If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \ +correct. If you don't know the answer to a question, please don't share false information.""" +# fmt: on + + +class LlamaTokenizerFast(PreTrainedTokenizerFast): + """ + Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. + + This uses notably ByteFallback and no normalization. + + ```python + >>> from transformers import LlamaTokenizerFast + + >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer") + >>> tokenizer.encode("Hello this is a test") + [1, 15043, 445, 338, 263, 1243] + ``` + + If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or + call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the + values of the first token and final token of an encoded sequence will not be correct). For more details, checkout + [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation. + + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`, *optional*): + [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that + contains the vocabulary necessary to instantiate a tokenizer. + tokenizer_file (`str`, *optional*): + [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that + contains everything needed to load the tokenizer. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like + extra spaces. + unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `""`): + The end of sequence token. + add_bos_token (`bool`, *optional*, defaults to `True`): + Whether or not to add an `bos_token` at the start of sequences. + add_eos_token (`bool`, *optional*, defaults to `False`): + Whether or not to add an `eos_token` at the end of sequences. + use_default_system_prompt (`bool`, *optional*, defaults to `False`): + Whether or not the default system prompt for Llama should be used + legacy (`bool`, *optional*): + Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622 + and #25224 which includes fixes to properly handle tokens that appear after special tokens. + Make sure to also set `from_slow` to `True`. + A simple example: + + - `legacy=True`: + ```python + >>> from transformers import LlamaTokenizerFast + + >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True) + >>> tokenizer.encode("Hello .") # 869 is '▁.' 
+ [1, 15043, 29871, 1, 869] + ``` + - `legacy=False`: + ```python + >>> from transformers import LlamaTokenizerFast + + >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True) + >>> tokenizer.encode("Hello .") # 29889 is '.' + [1, 15043, 29871, 1, 29889] + ``` + Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details. + add_prefix_space (`bool`, *optional*): + Whether or not the tokenizer should automatically add a prefix space + """ + + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class = LlamaTokenizer + padding_side = "left" + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + clean_up_tokenization_spaces=False, + unk_token="", + bos_token="", + eos_token="", + add_bos_token=True, + add_eos_token=False, + use_default_system_prompt=False, + legacy=None, + add_prefix_space=None, + **kwargs, + ): + if legacy is None: + logger.warning_once( + f"You are using the default legacy behaviour of the {self.__class__}. This is" + " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you." + " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it" + " means, and thoroughly read the reason why this was added as explained in" + " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file" + " you can ignore this message." + ) + legacy = True + self.legacy = legacy + + if add_prefix_space is not None: + kwargs["from_slow"] = True + + super().__init__( + vocab_file=vocab_file, + tokenizer_file=tokenizer_file, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, + use_default_system_prompt=use_default_system_prompt, + add_prefix_space=add_prefix_space, + legacy=legacy, + **kwargs, + ) + self._add_bos_token = add_bos_token + self._add_eos_token = add_eos_token + self.update_post_processor() + self.use_default_system_prompt = use_default_system_prompt + self.vocab_file = vocab_file + + @property + def can_save_slow_tokenizer(self) -> bool: + return os.path.isfile(self.vocab_file) if self.vocab_file else False + + def update_post_processor(self): + """ + Updates the underlying post processor with the current `bos_token` and `eos_token`. 
+ """ + bos = self.bos_token + bos_token_id = self.bos_token_id + if bos is None and self.add_bos_token: + raise ValueError("add_bos_token = True but bos_token = None") + + eos = self.eos_token + eos_token_id = self.eos_token_id + if eos is None and self.add_eos_token: + raise ValueError("add_eos_token = True but eos_token = None") + + single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}" + pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}" + + special_tokens = [] + if self.add_bos_token: + special_tokens.append((bos, bos_token_id)) + if self.add_eos_token: + special_tokens.append((eos, eos_token_id)) + self._tokenizer.post_processor = processors.TemplateProcessing( + single=single, pair=pair, special_tokens=special_tokens + ) + + @property + def add_eos_token(self): + return self._add_eos_token + + @property + def add_bos_token(self): + return self._add_bos_token + + @add_eos_token.setter + def add_eos_token(self, value): + self._add_eos_token = value + self.update_post_processor() + + @add_bos_token.setter + def add_bos_token(self, value): + self._add_bos_token = value + self.update_post_processor() + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not self.can_save_slow_tokenizer: + raise ValueError( + "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " + "tokenizer." + ) + + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): + copyfile(self.vocab_file, out_vocab_file) + + return (out_vocab_file,) + + # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers + # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = bos_token_id + token_ids_0 + eos_token_id + + if token_ids_1 is not None: + output = output + bos_token_id + token_ids_1 + eos_token_id + + return output diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/phi3/configuration_phi3.py b/isolated/sim_greedy/upstream_sgl/internvl/model/phi3/configuration_phi3.py new file mode 100644 index 0000000000000000000000000000000000000000..c657051097ebd7655786d74f8ed75635bfc844c4 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/phi3/configuration_phi3.py @@ -0,0 +1,211 @@ +# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License atd +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" Phi-3 model configuration""" + + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + +PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = { + 'microsoft/Phi-3-mini-4k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json', + 'microsoft/Phi-3-mini-128k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json', +} + + +class Phi3Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3 + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the + [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 32064): + Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`Phi3Model`]. + hidden_size (`int`, *optional*, defaults to 3072): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 8192): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer decoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer decoder. + num_key_value_heads (`int`, *optional*): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to + `num_attention_heads`. + resid_pdrop (`float`, *optional*, defaults to 0.0): + Dropout probability for mlp outputs. + embd_pdrop (`int`, *optional*, defaults to 0.0): + The dropout ratio for the embeddings. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio after computing the attention scores. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 4096): + The maximum sequence length that this model might ever be used with. + original_max_position_embeddings (`int`, *optional*, defaults to 4096): + The maximum sequence length that this model was trained with. This is used to determine the size of the + original RoPE embeddings when using long scaling. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon value used for the RMSNorm. 
+ use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. Whether to tie weight embeddings or not. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether to tie weight embeddings + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. + rope_scaling (`dict`, *optional*): + The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must + contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or `yarn` and + the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size + divided by the number of attention heads divided by 2. + bos_token_id (`int`, *optional*, defaults to 1): + The id of the "beginning-of-sequence" token. + eos_token_id (`int`, *optional*, defaults to 32000): + The id of the "end-of-sequence" token. + pad_token_id (`int`, *optional*, defaults to 32000): + The id of the padding token. + sliding_window (`int`, *optional*): + Sliding window attention window size. If `None`, no sliding window is applied. + + Example: + + ```python + >>> from transformers import Phi3Model, Phi3Config + + >>> # Initializing a Phi-3 style configuration + >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct") + + >>> # Initializing a model from the configuration + >>> model = Phi3Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = 'phi3' + keys_to_ignore_at_inference = ['past_key_values'] + + def __init__( + self, + vocab_size=32064, + hidden_size=3072, + intermediate_size=8192, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=None, + resid_pdrop=0.0, + embd_pdrop=0.0, + attention_dropout=0.0, + hidden_act='silu', + max_position_embeddings=4096, + original_max_position_embeddings=4096, + initializer_range=0.02, + rms_norm_eps=1e-5, + use_cache=True, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + bos_token_id=1, + eos_token_id=32000, + pad_token_id=32000, + sliding_window=None, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.resid_pdrop = resid_pdrop + self.embd_pdrop = embd_pdrop + self.attention_dropout = attention_dropout + self.hidden_act = hidden_act + self.max_position_embeddings = max_position_embeddings + self.original_max_position_embeddings = original_max_position_embeddings + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self._rope_scaling_validation() + self.sliding_window = sliding_window + + super().__init__( + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + pad_token_id=pad_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + def _rope_scaling_validation(self): + """ + Validate the `rope_scaling` configuration. 
+ """ + if self.rope_scaling is None: + return + + if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3: + raise ValueError( + '`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, ' + f'got {self.rope_scaling}' + ) + rope_scaling_type = self.rope_scaling.get('type', None) + rope_scaling_short_factor = self.rope_scaling.get('short_factor', None) + rope_scaling_long_factor = self.rope_scaling.get('long_factor', None) + if rope_scaling_type is None or rope_scaling_type not in ['su', 'yarn']: + raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}") + if not ( + isinstance(rope_scaling_short_factor, list) + and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor) + ): + raise ValueError( + f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}" + ) + if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2: + raise ValueError( + f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}" + ) + if not ( + isinstance(rope_scaling_long_factor, list) + and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor) + ): + raise ValueError( + f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}" + ) + if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2: + raise ValueError( + f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}" + ) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/phi3/modeling_phi3.py b/isolated/sim_greedy/upstream_sgl/internvl/model/phi3/modeling_phi3.py new file mode 100644 index 0000000000000000000000000000000000000000..982b5480717d543c965b9047c85611f08fee7644 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/phi3/modeling_phi3.py @@ -0,0 +1,1601 @@ +# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" PyTorch Phi-3 model.""" + +import inspect +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache +from transformers.modeling_attn_mask_utils import \ + _prepare_4d_causal_attention_mask +from transformers.modeling_outputs import (BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, + TokenClassifierOutput) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import (add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, logging, + replace_return_docstrings) + +from .configuration_phi3 import Phi3Config + +logger = logging.get_logger(__name__) + +# Transformers scans dependencies in the modeling file, causing issues on conditional loading. The regex only ignores try/catch blocks, but not if statements +# if is_flash_attn_2_available(): +_flash_supports_window_size = False +try: + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import (index_first_axis, pad_input, # noqa + unpad_input) + + _flash_supports_window_size = 'window_size' in list(inspect.signature(flash_attn_func).parameters) +except ImportError as error: + logger.warning( + f'`flash-attention` package not found, consider installing for better performance: {error}.' + ) + if not _flash_supports_window_size: + logger.warning( + "Current `flash-attenton` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`." 
+ ) + +_CHECKPOINT_FOR_DOC = 'microsoft/Phi-3-mini-4k-instruct' +_CONFIG_FOR_DOC = 'Phi3Config' + +PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [ + 'microsoft/Phi-3-mini-4k-instruct', + 'microsoft/Phi-3-mini-128k-instruct', + # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3 +] + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3 +class Phi3RMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + Phi3RMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3 +class Phi3RotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + self.register_buffer('inv_freq', None, persistent=False) + + @torch.no_grad() + def forward(self, x, position_ids, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if self.inv_freq is None: + self.inv_freq = 1.0 / ( + self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim) + ) + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + # Force float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +class Phi3SuScaledRotaryEmbedding(Phi3RotaryEmbedding): + def __init__(self, dim, config, device=None): + super().__init__(dim, config.max_position_embeddings, config.rope_theta, device) + + self.short_factor = config.rope_scaling['short_factor'] + self.long_factor = config.rope_scaling['long_factor'] + self.original_max_position_embeddings = config.original_max_position_embeddings + + @torch.no_grad() + def forward(self, x, position_ids, seq_len=None): + seq_len = torch.max(position_ids) + 1 + if seq_len > self.original_max_position_embeddings: + ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device) + else: + ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device) + + inv_freq_shape = torch.arange(0, 
self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim + self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape) + + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + + # Force float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + + scale = self.max_position_embeddings / self.original_max_position_embeddings + if scale <= 1.0: + scaling_factor = 1.0 + else: + scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings)) + + cos = emb.cos() * scaling_factor + sin = emb.sin() * scaling_factor + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +class Phi3YarnScaledRotaryEmbedding(Phi3RotaryEmbedding): + def __init__(self, dim, config, device=None): + super().__init__(dim, config.max_position_embeddings, config.rope_theta, device) + + self.short_factor = config.rope_scaling['short_factor'] + self.long_factor = config.rope_scaling['long_factor'] + self.original_max_position_embeddings = config.original_max_position_embeddings + + @torch.no_grad() + def forward(self, x, position_ids, seq_len=None): + seq_len = torch.max(position_ids) + 1 + if seq_len > self.original_max_position_embeddings: + ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device) + else: + ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device) + + inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim + self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape) + + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + + # Force float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + + scale = self.max_position_embeddings / self.original_max_position_embeddings + if scale <= 1.0: + scaling_factor = 1.0 + else: + scaling_factor = 0.1 * math.log(scale) + 1.0 + + cos = emb.cos() * scaling_factor + sin = emb.sin() * scaling_factor + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. 
+ sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`, *optional*): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class Phi3MLP(nn.Module): + def __init__(self, config): + super().__init__() + + self.config = config + self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False) + self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False) + + self.activation_fn = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + up_states = self.gate_up_proj(hidden_states) + + gate, up_states = up_states.chunk(2, dim=-1) + up_states = up_states * self.activation_fn(gate) + + return self.down_proj(up_states) + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class Phi3Attention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will ' + 'lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` ' + 'when creating this class.' 
+ ) + + self.attention_dropout = config.attention_dropout + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.original_max_position_embeddings = config.original_max_position_embeddings + self.rope_theta = config.rope_theta + self.rope_scaling = config.rope_scaling + self.is_causal = True + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}' + f' and `num_heads`: {self.num_heads}).' + ) + + op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False) + self._init_rope() + + def _init_rope(self): + if self.rope_scaling is None: + self.rotary_emb = Phi3RotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling['type'] + if scaling_type == 'su': + self.rotary_emb = Phi3SuScaledRotaryEmbedding(self.head_dim, self.config) + elif scaling_type == 'yarn': + self.rotary_emb = Phi3YarnScaledRotaryEmbedding(self.head_dim, self.config) + else: + raise ValueError(f'Unknown RoPE scaling type {scaling_type}') + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + logger.warning_once('You are not running the flash-attention implementation, expect numerical differences.') + + bsz, q_len, _ = hidden_states.size() + + qkv = self.qkv_proj(hidden_states) + query_pos = self.num_heads * self.head_dim + query_states = qkv[..., :query_pos] + key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim] + value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :] + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} ' + 'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class ' + 'with a layer index.' 
+ ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is' + f' {attn_weights.size()}' + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}' + ) + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is' + f' {attn_output.size()}' + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class Phi3FlashAttention2(Phi3Attention): + """ + Phi-3 flash attention module. This module inherits from `Phi3Attention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + # Phi3FlashAttention2 attention does not support output_attentions + + if not _flash_supports_window_size: + logger.warning_once( + "The current flash attention version does not support sliding window attention. Please use `attn_implementation='eager'` or upgrade flash-attn library." + ) + raise ValueError('The current flash attention version does not support sliding window attention.') + + output_attentions = False + + if 'padding_mask' in kwargs: + warnings.warn( + 'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`' + ) + + # overwrite attention_mask with padding_mask + attention_mask = kwargs.pop('padding_mask') + + bsz, q_len, _ = hidden_states.size() + + qkv = self.qkv_proj(hidden_states) + query_pos = self.num_heads * self.head_dim + query_states = qkv[..., :query_pos] + key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim] + value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :] + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + # therefore we just need to keep the original shape + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} ' + 'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class ' + 'with a layer index.' + ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + + # Because the input can be padded, the absolute sequence length depends on the max position id. 
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1 + cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + use_sliding_windows = ( + _flash_supports_window_size + and getattr(self.config, 'sliding_window', None) is not None + and kv_seq_len > self.config.sliding_window + ) + + if past_key_value is not None: + # Activate slicing cache only if the config has a value `sliding_windows` attribute + cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 + if ( + getattr(self.config, 'sliding_window', None) is not None + and kv_seq_len > self.config.sliding_window + and cache_has_contents + ): + slicing_tokens = 1 - self.config.sliding_window + + past_key = past_key_value[self.layer_idx][0] + past_value = past_key_value[self.layer_idx][1] + + past_key = past_key[:, :, slicing_tokens:, :].contiguous() + past_value = past_value[:, :, slicing_tokens:, :].contiguous() + + if past_key.shape[-2] != self.config.sliding_window - 1: + raise ValueError( + f'past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got' + f' {past_key.shape}' + ) + + if attention_mask is not None: + attention_mask = attention_mask[:, slicing_tokens:] + attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) + + cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_dropout = self.attention_dropout if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. + + if query_states.dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, '_pre_quantization_dtype'): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.qkv_proj.weight.dtype + + logger.warning_once( + f'The input hidden states seems to be silently casted in float32, this might be related to' + f' the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in' + f' {target_dtype}.' 
+ ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + # Reashape to the expected shape for Flash Attention + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + attn_output = self._flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + dropout=attn_dropout, + use_sliding_windows=use_sliding_windows, + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, + query_states, + key_states, + value_states, + attention_mask, + query_length, + dropout=0.0, + softmax_scale=None, + use_sliding_windows=False, + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + use_sliding_windows (`bool`, *optional*): + Whether to activate sliding window attention. + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
+ causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + if not use_sliding_windows: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + if not use_sliding_windows: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + return attn_output + + # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape + + # On the first iteration we need to properly re-create the padding mask + # by slicing it on the proper place + if kv_seq_len != attention_mask.shape[-1]: + attention_mask_num_tokens = attention_mask.shape[-1] + attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] + + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + + key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +# copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3 +# TODO @Arthur no longer copied from LLama after static cache +class Phi3SdpaAttention(Phi3Attention): + """ + Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `Phi3Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from Phi3Attention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + 'Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, ' + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + bsz, q_len, _ = hidden_states.size() + + qkv = self.qkv_proj(hidden_states) + query_pos = self.num_heads * self.head_dim + query_states = qkv[..., :query_pos] + key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim] + value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :] + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}' + ) + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with 
custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == 'cuda' and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=attention_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. + is_causal=self.is_causal and attention_mask is None and q_len > 1, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +PHI3_ATTENTION_CLASSES = { + 'eager': Phi3Attention, + 'flash_attention_2': Phi3FlashAttention2, + 'sdpa': Phi3SdpaAttention, +} + + +class Phi3DecoderLayer(nn.Module): + def __init__(self, config: Phi3Config, layer_idx: int): + super().__init__() + + self.config = config + self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) + + self.mlp = Phi3MLP(config) + self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.resid_attn_dropout = nn.Dropout(config.resid_pdrop) + self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop) + self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + if 'padding_mask' in kwargs: + warnings.warn( + 'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`' + ) + """ + Args: + hidden_states (`torch.FloatTensor`): + input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range + `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). 
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + attn_outputs, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = residual + self.resid_attn_dropout(attn_outputs) + + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + self.resid_mlp_dropout(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +PHI3_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`Phi3Config`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + 'The bare Phi-3 model outputting raw hidden-states without any specific head on top.', + PHI3_START_DOCSTRING, +) +class Phi3PreTrainedModel(PreTrainedModel): + config_class = Phi3Config + base_model_prefix = 'model' + supports_gradient_checkpointing = True + _no_split_modules = ['Phi3DecoderLayer'] + _skip_keys_device_placement = 'past_key_values' + _supports_flash_attn_2 = True + _supports_sdpa = False + _supports_cache_class = True + + _version = '0.0.5' + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +PHI3_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
+ + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + 'The bare Phi-3 model outputting raw hidden-states without any specific head on top.', + PHI3_START_DOCSTRING, +) +class Phi3Model(Phi3PreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`Phi3DecoderLayer`] + + Args: + config: Phi3Config + """ + + def __init__(self, config: Phi3Config): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.embed_dropout = nn.Dropout(config.embd_pdrop) + self.layers = nn.ModuleList( + [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self._attn_implementation = config._attn_implementation + self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') + elif input_ids is not None: + batch_size, seq_length = input_ids.shape[:2] + elif inputs_embeds is not None: + batch_size, seq_length = inputs_embeds.shape[:2] + else: + raise ValueError('You have to specify either input_ids or inputs_embeds') + + past_key_values_length = 0 + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' 
+ ) + use_cache = False + + if use_cache: + use_legacy_cache = not isinstance(past_key_values, Cache) + if use_legacy_cache: + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + past_key_values_length = past_key_values.get_usable_length(seq_length) + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if attention_mask is not None and self._attn_implementation == 'flash_attention_2' and use_cache: + is_padding_right = attention_mask[:, -1].sum().item() != batch_size + if is_padding_right: + raise ValueError( + "You are attempting to perform batched generation with padding_side='right'" + ' this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to ' + " call `tokenizer.padding_side = 'left'` before tokenizing the input. " + ) + + if self._attn_implementation == 'flash_attention_2': + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window=self.config.sliding_window, + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class Phi3ForCausalLM(Phi3PreTrainedModel): + _tied_weights_keys = ['lm_head.weight'] + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3 + def __init__(self, config): + super().__init__(config) + self.model = Phi3Model(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, 
bias=False) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings + def get_input_embeddings(self): + return self.model.embed_tokens + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder + def set_decoder(self, decoder): + self.model = decoder + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder + def get_decoder(self): + return self.model + + # Ignore copy + @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, Phi3ForCausalLM + + >>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct") + + >>> prompt = "This is an example script ." + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + 'This is an example script .\n Certainly! 
Below is a sample script that demonstrates a simple task, such as calculating the sum' + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values is not None: + if isinstance(past_key_values, Cache): + cache_length = past_key_values.get_seq_length() + past_length = past_key_values.seen_tokens + max_cache_length = past_key_values.get_max_length() + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. 
+ if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get('position_ids', None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {'inputs_embeds': inputs_embeds} + else: + model_inputs = {'input_ids': input_ids} + + model_inputs.update( + { + 'position_ids': position_ids, + 'past_key_values': past_key_values, + 'use_cache': kwargs.get('use_cache'), + 'attention_mask': attention_mask, + } + ) + return model_inputs + + @staticmethod + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The [`Phi3Model`] with a sequence classification head on top (linear layer). + + [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + PHI3_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs +class Phi3ForSequenceClassification(Phi3PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = Phi3Model(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. 
Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = model_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = 'regression' + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = 'single_label_classification' + else: + self.config.problem_type = 'multi_label_classification' + + if self.config.problem_type == 'regression': + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == 'single_label_classification': + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == 'multi_label_classification': + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + model_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=model_outputs.past_key_values, + hidden_states=model_outputs.hidden_states, + attentions=model_outputs.attentions, + ) + + +@add_start_docstrings( + """ + [`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. 
+ """, + PHI3_START_DOCSTRING, +) +# Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs +class Phi3ForTokenClassification(Phi3PreTrainedModel): + def __init__(self, config: Phi3Config): + super().__init__(config) + self.num_labels = config.num_labels + + self.model = Phi3Model(config) + if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None: + classifier_dropout = config.classifier_dropout + elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + attention_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **deprecated_arguments, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_outputs = self.model( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = model_outputs[0] + hidden_states = self.dropout(hidden_states) + logits = self.classifier(hidden_states) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + batch_size, seq_length = labels.shape + loss_fct = CrossEntropyLoss() + loss = loss_fct( + logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length) + ) + + if not return_dict: + output = (logits,) + model_outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=model_outputs.hidden_states, + attentions=model_outputs.attentions, + ) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/__init__.py b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa30e79a5d329eb46b715d65b415c0740fcaee87 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/__init__.py @@ -0,0 +1,82 @@ +# Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from transformers.utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_qwen2": ["Qwen2Config"], + "tokenization_qwen2": ["Qwen2Tokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_qwen2_fast"] = ["Qwen2TokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_qwen2"] = [ + "Qwen2ForCausalLM", + "Qwen2Model", + "Qwen2PreTrainedModel", + "Qwen2ForSequenceClassification", + "Qwen2ForTokenClassification", + ] + + +if TYPE_CHECKING: + from .configuration_qwen2 import Qwen2Config + from .tokenization_qwen2 import Qwen2Tokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_qwen2_fast import Qwen2TokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_qwen2 import ( + Qwen2ForCausalLM, + Qwen2ForSequenceClassification, + Qwen2ForTokenClassification, + Qwen2Model, + Qwen2PreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/configuration_qwen2.py b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/configuration_qwen2.py new file mode 100644 index 0000000000000000000000000000000000000000..9403364f980faade8504f6ffedd1082bdb8c6097 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/configuration_qwen2.py @@ -0,0 +1,140 @@ +# coding=utf-8 +# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Qwen2 model configuration""" + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + + +class Qwen2Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Qwen2Model`]. 
It is used to instantiate a + Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of + Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 151936): + Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`Qwen2Model`] + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 22016): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + num_key_value_heads (`int`, *optional*, defaults to 32): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 32768): + The maximum sequence length that this model might ever be used with. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether the model's input and output word embeddings should be tied. + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. + use_sliding_window (`bool`, *optional*, defaults to `False`): + Whether to use sliding window attention. + sliding_window (`int`, *optional*, defaults to 4096): + Sliding window attention (SWA) window size. If not specified, will default to `4096`. + max_window_layers (`int`, *optional*, defaults to 28): + The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. 
+ + ```python + >>> from transformers import Qwen2Model, Qwen2Config + + >>> # Initializing a Qwen2 style configuration + >>> configuration = Qwen2Config() + + >>> # Initializing a model from the Qwen2-7B style configuration + >>> model = Qwen2Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "qwen2" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=151936, + hidden_size=4096, + intermediate_size=22016, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=32, + hidden_act="silu", + max_position_embeddings=32768, + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + tie_word_embeddings=False, + rope_theta=10000.0, + use_sliding_window=False, + sliding_window=4096, + max_window_layers=28, + attention_dropout=0.0, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.use_sliding_window = use_sliding_window + self.sliding_window = sliding_window if use_sliding_window else None + self.max_window_layers = max_window_layers + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.use_cache = use_cache + self.rope_theta = rope_theta + self.attention_dropout = attention_dropout + + super().__init__( + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/modeling_qwen2.py b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/modeling_qwen2.py new file mode 100644 index 0000000000000000000000000000000000000000..7efe0d98d472fcd77220c5ce94de1f1e1e5f720f --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/modeling_qwen2.py @@ -0,0 +1,1551 @@ +# coding=utf-8 +# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
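+#
+# NOTE (added note for this vendored copy): compared to the upstream transformers implementation, this file
+# threads visual-token pruning hooks (`visual_token_index`, `visual_token_importance`, the
+# `large_model_prune_*` / `large_model_similarity_*` knobs, and a `prunded_sequence_length` offset for the
+# rotary tables) through `Qwen2Model` and `Qwen2ForCausalLM`; everything else is intended to match upstream.
+#
+# Minimal usage sketch (illustrative only; `model`, `embeds`, `importance`, `start`, `end` are placeholders):
+#
+#     outputs = model(
+#         inputs_embeds=embeds,                           # fused text + vision embeddings
+#         visual_token_index=torch.tensor([start, end]),  # inclusive [start, end] span of the vision tokens
+#         large_model_prune_layer=0.5,                    # prune at decoder layer int(num_layers * 0.5)
+#         large_model_prune_ratio=0.25,                   # keep roughly 25% of the vision tokens
+#         visual_token_importance=importance,             # scores consumed by `select_visual_token_indices`
+#     )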
+"""PyTorch Qwen2 model.""" + +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache, StaticCache +from transformers.modeling_attn_mask_utils import AttentionMaskConverter +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from .configuration_qwen2 import Qwen2Config +from ..token_pruning import select_visual_token_indices + + +if is_flash_attn_2_available(): + from transformers.modeling_flash_attention_utils import _flash_attention_forward + + +logger = logging.get_logger(__name__) + + +_CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta" +_CONFIG_FOR_DOC = "Qwen2Config" + + +# Copied from transformers.models.llama.modeling_llama._prepare_4d_causal_attention_mask_with_cache_position +def _prepare_4d_causal_attention_mask_with_cache_position( + attention_mask: torch.Tensor, + sequence_length: int, + target_length: int, + dtype: torch.dtype, + device: torch.device, + min_dtype: float, + cache_position: torch.Tensor, + batch_size: int, +): + """ + Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape + `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. + + Args: + attention_mask (`torch.Tensor`): + A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. + sequence_length (`int`): + The sequence length being processed. + target_length (`int`): + The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. + dtype (`torch.dtype`): + The dtype to use for the 4D attention mask. + device (`torch.device`): + The device to plcae the 4D attention mask on. + min_dtype (`float`): + The minimum value representable with the dtype `dtype`. + cache_position (`torch.Tensor`): + Indices depicting the position of the input sequence tokens in the sequence. + batch_size (`torch.Tensor`): + Batch size. + """ + if attention_mask is not None and attention_mask.dim() == 4: + # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
+ causal_mask = attention_mask + else: + causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) + if sequence_length != 1: + causal_mask = torch.triu(causal_mask, diagonal=1) + causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) + causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + mask_length = attention_mask.shape[-1] + padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] + padding_mask = padding_mask == 0 + causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( + padding_mask, min_dtype + ) + + return causal_mask + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2 +class Qwen2RMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + Qwen2RMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + def extra_repr(self): + return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" + + +# Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2 +class Qwen2RotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. 
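+        # The cached tables have shape (seq_len, dim): row t holds cos/sin of t * inv_freq, with the
+        # dim/2 base frequencies duplicated along the last axis so they broadcast against full
+        # head_dim-sized query/key vectors in `apply_rotary_pos_emb`.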
+ self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.mixtral.modeling_mixtral.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2 +class Qwen2MLP(nn.Module): + def __init__(self, config): + super().__init__() + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, hidden_state): + return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state)) + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class Qwen2Attention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will " + "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + self.attention_dropout = config.attention_dropout + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + + self.rotary_emb = Qwen2RotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." + ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + + prunded_sequence_length = kwargs["prunded_sequence_length"] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = 
attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class Qwen2FlashAttention2(Qwen2Attention): + """ + Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention` + as the weights of the module stays untouched. The only required change would be on the forward pass + where it needs to correctly call the public API of flash attention and deal with padding tokens + in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom + config.max_window_layers layers. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ): + if output_attentions: + + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + **kwargs, + ) + + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." + ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + + # Because the input can be padded, the absolute sequence length depends on the max position id. 
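+        # Pruning note: once visual tokens have been dropped in `Qwen2Model.forward`, the surviving tokens
+        # keep their original position ids, so the rotary tables are sized with the extra
+        # `prunded_sequence_length` offset on top of the (already shortened) `kv_seq_len` below.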
+ rotary_seq_len = ( + max(kv_seq_len, position_ids[:, -1].max().item() + 1) if position_ids is not None else kv_seq_len + ) + + prunded_sequence_length = kwargs["prunded_sequence_length"] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # Activate slicing cache only if the config has a value `sliding_windows` attribute + cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 + if ( + getattr(self.config, "sliding_window", None) is not None + and kv_seq_len > self.config.sliding_window + and cache_has_contents + ): + slicing_tokens = 1 - self.config.sliding_window + + past_key = past_key_value[self.layer_idx][0] + past_value = past_key_value[self.layer_idx][1] + + past_key = past_key[:, :, slicing_tokens:, :].contiguous() + past_value = past_value[:, :, slicing_tokens:, :].contiguous() + + if past_key.shape[-2] != self.config.sliding_window - 1: + raise ValueError( + f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" + f" {past_key.shape}" + ) + + if attention_mask is not None: + attention_mask = attention_mask[:, slicing_tokens:] + attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) + + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + dropout_rate = 0.0 if not self.training else self.attention_dropout + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. + input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." 
+ ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + # Reashape to the expected shape for Flash Attention + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + if ( + self.config.use_sliding_window + and getattr(self.config, "sliding_window", None) is not None + and self.layer_idx >= self.config.max_window_layers + ): + sliding_window = self.config.sliding_window + else: + sliding_window = None + + attn_output = _flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + position_ids=position_ids, + dropout=dropout_rate, + sliding_window=sliding_window, + is_causal=self.is_causal, + use_top_left_mask=self._flash_attn_uses_top_left_mask, + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +# Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Qwen2 +class Qwen2SdpaAttention(Qwen2Attention): + """ + Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `Qwen2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from Qwen2Attention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
+ ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + **kwargs, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + + prunded_sequence_length = kwargs["prunded_sequence_length"] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == "cuda" and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment + # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. + # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. + is_causal = True if causal_mask is None and q_len > 1 else False + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=is_causal, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +QWEN2_ATTENTION_CLASSES = { + "eager": Qwen2Attention, + "flash_attention_2": Qwen2FlashAttention2, + "sdpa": Qwen2SdpaAttention, +} + + +class Qwen2DecoderLayer(nn.Module): + def __init__(self, config: Qwen2Config, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + if config.sliding_window and config._attn_implementation != "flash_attention_2": + logger.warning_once( + f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; " + "unexpected results may be encountered." 
+ ) + self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) + + self.mlp = Qwen2MLP(config) + self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. + kwargs (`dict`, *optional*): + Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code + into the model + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +QWEN2_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`Qwen2Config`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + + +@add_start_docstrings( + "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.", + QWEN2_START_DOCSTRING, +) +class Qwen2PreTrainedModel(PreTrainedModel): + config_class = Qwen2Config + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["Qwen2DecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +QWEN2_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. 
+ + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, + this tensor is not affected by padding. It is used to update the cache in the correct position and to infer + the complete sequence length. +""" + + +@add_start_docstrings( + "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.", + QWEN2_START_DOCSTRING, +) +class Qwen2Model(Qwen2PreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`Qwen2DecoderLayer`] + + Args: + config: Qwen2Config + """ + + def __init__(self, config: Qwen2Config): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self._attn_implementation = config._attn_implementation + self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + visual_token_index: Optional[torch.Tensor] = None, + large_model_prune_layer: Optional[float] = None, + large_model_prune_ratio: Optional[float] = None, + large_model_prune_selection: Optional[str] = None, + large_model_similarity_target_coverage: Optional[float] = None, + large_model_similarity_min_gain: Optional[float] = None, + large_model_similarity_min_keep: Optional[int] = None, + large_model_similarity_max_keep_ratio: Optional[float] = None, + visual_token_importance: Optional[torch.Tensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') + elif input_ids is not None: + batch_size, seq_length = input_ids.shape[:2] + elif inputs_embeds is not None: + batch_size, seq_length = inputs_embeds.shape[:2] + else: + raise ValueError('You have to specify either input_ids or inputs_embeds') + + + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" + ) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + use_legacy_cache = False + if use_cache and not isinstance(past_key_values, Cache) and not self.training: + use_legacy_cache = True + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + logger.warning_once( + "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. 
" + "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)" + ) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + causal_mask = self._update_causal_mask( + attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + + if large_model_prune_layer is not None: + token_prune = True + K = int(len(self.layers) * large_model_prune_layer) + keep_ratio = large_model_prune_ratio + visual_token_length = int(visual_token_index[1] - visual_token_index[0] + 1) + else: + token_prune = False + + prunded_sequence_length = 0 + + + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + causal_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + cache_position, + ) + else: + + + ##### 某一层 random pruning ######### + if token_prune: + if hidden_states.shape[1] != 1: + if idx == K: + device = hidden_states.device + selected_visual_index = select_visual_token_indices( + hidden_states, + visual_token_importance, + visual_token_index, + keep_ratio, + large_model_prune_selection or "topk", + similarity_target_coverage=large_model_similarity_target_coverage or 0.9, + similarity_min_gain=large_model_similarity_min_gain or 0.0, + similarity_min_keep=large_model_similarity_min_keep or 1, + similarity_max_keep_ratio=large_model_similarity_max_keep_ratio or 1.0, + ) + int(visual_token_index[0]) + keep_indexs = torch.cat(( + torch.arange(int(visual_token_index[0]), device=device), + selected_visual_index.to(device), + torch.arange(int(visual_token_index[1] + 1), seq_length, device=device), + )) + keep_indexs = keep_indexs.sort().values + hidden_states = hidden_states[:, keep_indexs,:] + if causal_mask is not None: + causal_mask = causal_mask[:,:,:hidden_states.shape[1], :hidden_states.shape[1]] + position_ids = keep_indexs.unsqueeze(0) + prunded_sequence_length = visual_token_length - int(visual_token_length * keep_ratio) + + + else: + if idx == K: + visual_token_length = visual_token_index[1] - visual_token_index[0] + 1 + prunded_sequence_length = visual_token_length - int(visual_token_length * keep_ratio) + if causal_mask is not None: + causal_mask = causal_mask[:, :, :, prunded_sequence_length:] + + + + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + prunded_sequence_length=prunded_sequence_length + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add 
hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache + + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask + def _update_causal_mask( + self, + attention_mask: torch.Tensor, + input_tensor: torch.Tensor, + cache_position: torch.Tensor, + past_key_values: Cache, + output_attentions: bool, + ): + # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static + # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes. + # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using + # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114 + + if self.config._attn_implementation == "flash_attention_2": + if attention_mask is not None and 0.0 in attention_mask: + return attention_mask + return None + + # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in + # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail + # to infer the attention mask. + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + using_static_cache = isinstance(past_key_values, StaticCache) + + # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward + if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: + if AttentionMaskConverter._ignore_causal_mask_sdpa( + attention_mask, + inputs_embeds=input_tensor, + past_key_values_length=past_seen_tokens, + is_training=self.training, + ): + return None + + dtype, device = input_tensor.dtype, input_tensor.device + min_dtype = torch.finfo(dtype).min + sequence_length = input_tensor.shape[1] + if using_static_cache: + target_length = past_key_values.get_max_length() + else: + target_length = ( + attention_mask.shape[-1] + if isinstance(attention_mask, torch.Tensor) + else past_seen_tokens + sequence_length + 1 + ) + + # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). + causal_mask = _prepare_4d_causal_attention_mask_with_cache_position( + attention_mask, + sequence_length=sequence_length, + target_length=target_length, + dtype=dtype, + device=device, + min_dtype=min_dtype, + cache_position=cache_position, + batch_size=input_tensor.shape[0], + ) + + if ( + self.config._attn_implementation == "sdpa" + and attention_mask is not None + and attention_mask.device.type == "cuda" + and not output_attentions + ): + # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when + # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
+ # Details: https://github.com/pytorch/pytorch/issues/110213 + causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) + + return causal_mask + + +class Qwen2ForCausalLM(Qwen2PreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = Qwen2Model(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + visual_token_index: Optional[torch.Tensor] = None, + large_model_prune_layer: Optional[float] = None, + large_model_prune_ratio: Optional[float] = None, + large_model_prune_selection: Optional[str] = None, + large_model_similarity_target_coverage: Optional[float] = None, + large_model_similarity_min_gain: Optional[float] = None, + large_model_similarity_min_keep: Optional[int] = None, + large_model_similarity_max_keep_ratio: Optional[float] = None, + visual_token_importance: Optional[torch.Tensor] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, Qwen2ForCausalLM + + >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
+ ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + cache_position=cache_position, + visual_token_index=visual_token_index, + large_model_prune_layer=large_model_prune_layer, + large_model_prune_ratio=large_model_prune_ratio, + large_model_prune_selection=large_model_prune_selection, + large_model_similarity_target_coverage=large_model_similarity_target_coverage, + large_model_similarity_min_gain=large_model_similarity_min_gain, + large_model_similarity_min_keep=large_model_similarity_min_keep, + large_model_similarity_max_keep_ratio=large_model_similarity_max_keep_ratio, + visual_token_importance=visual_token_importance + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + cache_position=None, + position_ids=None, + use_cache=True, + **kwargs, + ): + # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens + # Exception 1: when passing input_embeds, input_ids may be missing entries + # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here + if past_key_values is not None: + if inputs_embeds is not None: # Exception 1 + input_ids = input_ids[:, -cache_position.shape[0] :] + elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2) + input_ids = input_ids[:, cache_position] + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s `mode="reduce-overhead`, as otherwise the input `position_ids` would have various stride during the 
decoding. Here, simply using `.contiguous()` is not sufficient as in the batch size = 1 case, `position_ids` is already contiguous but with varying stride which retriggers a capture. + position_ids = position_ids.clone(memory_format=torch.contiguous_format) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and cache_position[0] == 0: + model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None} + else: + # The clone here is for the same reason as for `position_ids`. + model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None} + + if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: + if model_inputs["inputs_embeds"] is not None: + batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape + device = model_inputs["inputs_embeds"].device + else: + batch_size, sequence_length = model_inputs["input_ids"].shape + device = model_inputs["input_ids"].device + + dtype = self.lm_head.weight.dtype + min_dtype = torch.finfo(dtype).min + + attention_mask = _prepare_4d_causal_attention_mask_with_cache_position( + attention_mask, + sequence_length=sequence_length, + target_length=past_key_values.get_max_length(), + dtype=dtype, + device=device, + min_dtype=min_dtype, + cache_position=cache_position, + batch_size=batch_size, + ) + + model_inputs.update( + { + "position_ids": position_ids, + "cache_position": cache_position, + "past_key_values": past_key_values, + "use_cache": use_cache, + "attention_mask": attention_mask, + 'visual_token_index': kwargs.get('visual_token_index'), + 'large_model_prune_layer': kwargs.get('large_model_prune_layer'), + 'large_model_prune_ratio': kwargs.get('large_model_prune_ratio'), + 'large_model_prune_selection': kwargs.get('large_model_prune_selection'), + 'large_model_similarity_target_coverage': kwargs.get('large_model_similarity_target_coverage'), + 'large_model_similarity_min_gain': kwargs.get('large_model_similarity_min_gain'), + 'large_model_similarity_min_keep': kwargs.get('large_model_similarity_min_keep'), + 'large_model_similarity_max_keep_ratio': kwargs.get('large_model_similarity_max_keep_ratio'), + 'visual_token_importance': kwargs.get('visual_token_importance') + } + ) + return model_inputs + + +@add_start_docstrings( + """ + The Qwen2 Model transformer with a sequence classification head on top (linear layer). + + [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
+ """, + QWEN2_START_DOCSTRING, +) +class Qwen2ForSequenceClassification(Qwen2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = Qwen2Model(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif 
self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + The Qwen2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states + output) e.g. for Named-Entity-Recognition (NER) tasks. + """, + QWEN2_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2 +class Qwen2ForTokenClassification(Qwen2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = Qwen2Model(config) + if getattr(config, "classifier_dropout", None) is not None: + classifier_dropout = config.classifier_dropout + elif getattr(config, "hidden_dropout", None) is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.score = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + sequence_output = self.dropout(sequence_output) + logits = self.score(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2.py b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2.py new file mode 100644 index 0000000000000000000000000000000000000000..be2685430f649eab8bde99f217597afd282337c5 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2.py @@ -0,0 +1,339 @@ +# coding=utf-8 +# Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for Qwen2.""" + +import json +import os +import unicodedata +from functools import lru_cache +from typing import Optional, Tuple + +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", +} + + +MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768} + +PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""" + + +@lru_cache() +# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. 
+ """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class Qwen2Tokenizer(PreTrainedTokenizer): + """ + Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding. + + Same with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ```python + >>> from transformers import Qwen2Tokenizer + + >>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer") + >>> tokenizer("Hello world")["input_ids"] + [9707, 1879] + + >>> tokenizer(" Hello world")["input_ids"] + [21927, 1879] + ``` + This is expected. + + You should not use GPT2Tokenizer instead, because of the different pretokenization rules. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str`, *optional*): + The beginning of sequence token. Not applicable for this tokenizer. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The token used for padding, for example when batching sequences of different lengths. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not the model should cleanup the spaces that were added when splitting the input text during the + tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces. + split_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the special tokens should be split during the tokenization process. The default behavior is + to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then `tokenizer.tokenize("<|endoftext|>") = + ['<|endoftext|>`]. Otherwise, if `split_special_tokens=True`, then `tokenizer.tokenize("<|endoftext|>")` will be give `['<', + '|', 'endo', 'ft', 'ext', '|', '>']`. This argument is only supported for `slow` tokenizers for the moment. 
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + unk_token="<|endoftext|>", + bos_token=None, + eos_token="<|endoftext|>", + pad_token="<|endoftext|>", + clean_up_tokenization_spaces=False, + split_special_tokens=False, + **kwargs, + ): + # Qwen vocab does not contain control tokens; added tokens need to be special + bos_token = ( + AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False) + if isinstance(bos_token, str) + else bos_token + ) + eos_token = ( + AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False) + if isinstance(eos_token, str) + else eos_token + ) + unk_token = ( + AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False) + if isinstance(unk_token, str) + else unk_token + ) + pad_token = ( + AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False) + if isinstance(pad_token, str) + else pad_token + ) + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + bpe_merges = [] + with open(merges_file, encoding="utf-8") as merges_handle: + for i, line in enumerate(merges_handle): + line = line.strip() + if (i == 0 and line.startswith("#version:")) or not line: + continue + bpe_merges.append(tuple(line.split())) + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + # NOTE: the cache can grow without bound and will get really large for long running processes + # (esp. for texts of language that do not use space between word, e.g. Chinese); technically + # not a memory leak but appears as one. + # GPT2Tokenizer has the same problem, so let's be consistent. + self.cache = {} + + self.pat = re.compile(PRETOKENIZE_REGEX) + + if kwargs.get("add_prefix_space", False): + logger.warning_once( + f"{self.__class__.__name} does not support `add_prefix_space`, setting it to True has no effect." 
+ ) + + super().__init__( + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + unk_token=unk_token, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + split_special_tokens=split_special_tokens, + **kwargs, + ) + + @property + def vocab_size(self) -> int: + return len(self.encoder) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + def decode( + self, + token_ids, + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: Optional[bool] = False, + spaces_between_special_tokens: bool = False, + **kwargs, + ) -> str: + # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers + # and cannot be configured elsewhere, but it should default to False for Qwen2Tokenizer + return super().decode( + token_ids, + skip_special_tokens=skip_special_tokens, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + spaces_between_special_tokens=spaces_between_special_tokens, + **kwargs, + ) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not 
os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + def prepare_for_tokenization(self, text, **kwargs): + text = unicodedata.normalize("NFC", text) + return (text, kwargs) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2_fast.py b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..01d709ba6027c968d09cbeabfcfb0ecb4819a4b8 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2_fast.py @@ -0,0 +1,134 @@ +# coding=utf-8 +# Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for Qwen2.""" + +from typing import Optional, Tuple + +from transformers.tokenization_utils import AddedToken +from transformers.tokenization_utils_fast import PreTrainedTokenizerFast +from transformers.utils import logging +from transformers.tokenization_qwen2 import Qwen2Tokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", + "tokenizer_file": "tokenizer.json", +} + + +MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768} + + +class Qwen2TokenizerFast(PreTrainedTokenizerFast): + """ + Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level + Byte-Pair-Encoding. + + Same with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ```python + >>> from transformers import Qwen2TokenizerFast + + >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer") + >>> tokenizer("Hello world")["input_ids"] + [9707, 1879] + + >>> tokenizer(" Hello world")["input_ids"] + [21927, 1879] + ``` + This is expected. + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. 
Users should
+    refer to this superclass for more information regarding those methods.
+
+    Args:
+        vocab_file (`str`, *optional*):
+            Path to the vocabulary file.
+        merges_file (`str`, *optional*):
+            Path to the merges file.
+        tokenizer_file (`str`, *optional*):
+            Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
+            contains everything needed to load the tokenizer.
+        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead. Not applicable to this tokenizer.
+        bos_token (`str`, *optional*):
+            The beginning of sequence token. Not applicable for this tokenizer.
+        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+            The end of sequence token.
+        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    model_input_names = ["input_ids", "attention_mask"]
+    slow_tokenizer_class = Qwen2Tokenizer
+
+    def __init__(
+        self,
+        vocab_file=None,
+        merges_file=None,
+        tokenizer_file=None,
+        unk_token="<|endoftext|>",
+        bos_token=None,
+        eos_token="<|endoftext|>",
+        pad_token="<|endoftext|>",
+        **kwargs,
+    ):
+        # We need to at least pass vocab_file and merges_file to base class
+        # in case a slow tokenizer needs to be initialized; other can be
+        # configured through files.
+        # following GPT2TokenizerFast, also adding unk_token, bos_token, and eos_token
+
+        bos_token = (
+            AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
+            if isinstance(bos_token, str)
+            else bos_token
+        )
+        eos_token = (
+            AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
+            if isinstance(eos_token, str)
+            else eos_token
+        )
+        unk_token = (
+            AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
+            if isinstance(unk_token, str)
+            else unk_token
+        )
+        pad_token = (
+            AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
+            if isinstance(pad_token, str)
+            else pad_token
+        )
+
+        super().__init__(
+            vocab_file=vocab_file,
+            merges_file=merges_file,
+            tokenizer_file=tokenizer_file,
+            unk_token=unk_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            pad_token=pad_token,
+            **kwargs,
+        )
+
+    # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
+    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+        return tuple(files)
diff --git a/isolated/sim_greedy/upstream_sgl/internvl/train/__init__.py b/isolated/sim_greedy/upstream_sgl/internvl/train/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/isolated/sim_greedy/upstream_sgl/internvl/train/constants.py b/isolated/sim_greedy/upstream_sgl/internvl/train/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..28d4e69ec545d2fe9a747aa355aa13e66be2d653
--- /dev/null
+++ b/isolated/sim_greedy/upstream_sgl/internvl/train/constants.py
@@ -0,0 +1,15 @@
+IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
+IMG_START_TOKEN = '<img>'
+IMG_END_TOKEN = '</img>'
+QUAD_START_TOKEN = '<quad>'
+QUAD_END_TOKEN = '</quad>'
+REF_START_TOKEN = '<ref>'
+REF_END_TOKEN = '</ref>'
+BOX_START_TOKEN = '<box>'
+BOX_END_TOKEN = '</box>'
+IMAGENET_MEAN = (0.485, 0.456, 0.406)
+IMAGENET_STD = (0.229, 0.224, 0.225) +CLIP_MEAN = (0.4814546, 0.4578275, 0.40821073) +CLIP_STD = (0.2686295, 0.2613025, 0.2757711) +SIGLIP_MEAN = (0.5, 0.5, 0.5) +SIGLIP_STD = (0.5, 0.5, 0.5) diff --git a/isolated/sim_greedy/upstream_sgl/internvl/train/dataset.py b/isolated/sim_greedy/upstream_sgl/internvl/train/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..7c07ce9668e2eed3b2ee89db05c8274c9407fb54 --- /dev/null +++ b/isolated/sim_greedy/upstream_sgl/internvl/train/dataset.py @@ -0,0 +1,726 @@ +import io + +from transformers.trainer_pt_utils import LabelSmoother + +IGNORE_TOKEN_ID = LabelSmoother.ignore_index +import os +import random +from typing import Dict + +import cv2 +import imageio +import numpy as np +import torch +import torchvision.transforms as T +import transformers +from decord import VideoReader +from internvl.conversation import get_conv_template +from PIL import Image +from torch.utils.data import ConcatDataset, WeightedRandomSampler +from torchvision.transforms.functional import InterpolationMode + +from .constants import (CLIP_MEAN, CLIP_STD, IMAGENET_MEAN, IMAGENET_STD, + IMG_CONTEXT_TOKEN, IMG_END_TOKEN, IMG_START_TOKEN, + SIGLIP_MEAN, SIGLIP_STD) + +try: + from petrel_client.client import Client + from petrel_client.common.config import Config +except ImportError as E: + print('petrel_client is not installed. If you read data locally instead of from ceph, ignore it.') +import sys + + +def get_frame_indices(num_frames, vlen, sample='rand', fix_start=None, input_fps=1, max_num_frames=-1): + if sample in ['rand', 'middle']: # uniform sampling + acc_samples = min(num_frames, vlen) + # split the video into `acc_samples` intervals, and sample from each interval. + intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int) + ranges = [] + for idx, interv in enumerate(intervals[:-1]): + ranges.append((interv, intervals[idx + 1] - 1)) + if sample == 'rand': + try: + frame_indices = [random.choice(range(x[0], x[1])) for x in ranges] + except: + frame_indices = np.random.permutation(vlen)[:acc_samples] + frame_indices.sort() + frame_indices = list(frame_indices) + elif fix_start is not None: + frame_indices = [x[0] + fix_start for x in ranges] + elif sample == 'middle': + frame_indices = [(x[0] + x[1]) // 2 for x in ranges] + else: + raise NotImplementedError + + if len(frame_indices) < num_frames: # padded with last frame + padded_frame_indices = [frame_indices[-1]] * num_frames + padded_frame_indices[:len(frame_indices)] = frame_indices + frame_indices = padded_frame_indices + elif 'fps' in sample: # fps0.5, sequentially sample frames at 0.5 fps + output_fps = float(sample[3:]) + duration = float(vlen) / input_fps + delta = 1 / output_fps # gap between frames, this is also the clip length each frame represents + frame_seconds = np.arange(0 + delta / 2, duration + delta / 2, delta) + frame_indices = np.around(frame_seconds * input_fps).astype(int) + frame_indices = [e for e in frame_indices if e < vlen] + if max_num_frames > 0 and len(frame_indices) > max_num_frames: + frame_indices = frame_indices[:max_num_frames] + # frame_indices = np.linspace(0 + delta / 2, duration + delta / 2, endpoint=False, num=max_num_frames) + else: + raise ValueError + return frame_indices + + +def read_frames_gif( + video_path, num_frames, sample='rand', fix_start=None, + client=None, min_num_frames=4 +): + if 's3://' in video_path: + video_bytes = client.get(video_path) + gif = imageio.get_reader(io.BytesIO(video_bytes)) + else: + gif = 
imageio.get_reader(video_path) + vlen = len(gif) + + t_num_frames = np.random.randint(min_num_frames, num_frames + 1) + frame_indices = get_frame_indices( + t_num_frames, vlen, sample=sample, fix_start=fix_start + ) + frames = [] + for index, frame in enumerate(gif): + if index in frame_indices: + frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB).astype(np.uint8) + frame = Image.fromarray(frame) + frames.append(frame) + return frames + + +def read_frames_decord( + video_path, num_frames, sample='rand', fix_start=None, + client=None, clip=None, min_num_frames=4 +): + if 's3://' in video_path: + video_bytes = client.get(video_path) + video_reader = VideoReader(io.BytesIO(video_bytes), num_threads=1) + else: + video_reader = VideoReader(video_path, num_threads=1) + vlen = len(video_reader) + fps = video_reader.get_avg_fps() + duration = vlen / float(fps) + if clip: + start, end = clip + duration = end - start + vlen = int(duration * fps) + start_index = int(start * fps) + + # t_num_frames = min(max(int(duration * sample_fps), min_num_frames), num_frames) + t_num_frames = np.random.randint(min_num_frames, num_frames + 1) + + frame_indices = get_frame_indices( + t_num_frames, vlen, sample=sample, fix_start=fix_start, + input_fps=fps + ) + if clip: + frame_indices = [f + start_index for f in frame_indices] + frames = video_reader.get_batch(frame_indices).asnumpy() # (T, H, W, C), np.uint8 + frames = [Image.fromarray(frames[i]) for i in range(frames.shape[0])] + return frames + + +def read_frames_folder( + video_path, num_frames, sample='rand', fix_start=None, + client=None, clip=None, min_num_frames=4 +): + if 's3://' in video_path: + image_list = client.list(video_path) + frames = [] + for image in image_list: + fp = os.path.join(video_path, image) + frame = Image.open(io.BytesIO(client.get(fp))) + frames.append(frame) + else: + image_list = sorted(list(os.listdir(video_path))) + frames = [] + for image in image_list: + fp = os.path.join(video_path, image) + frame = Image.open(fp).convert('RGB') + frames.append(frame) + vlen = len(frames) + + t_num_frames = np.random.randint(min_num_frames, num_frames + 1) + + if vlen > t_num_frames: + frame_indices = get_frame_indices( + t_num_frames, vlen, sample=sample, fix_start=fix_start + ) + frames = [frames[i] for i in frame_indices] + return frames + + +class WeightedConcatDataset(ConcatDataset): + def __init__(self, datasets, weights): + super().__init__(datasets) + self.weights = torch.DoubleTensor(weights) + self.total_size = sum(len(d) for d in datasets) + self.sampler = WeightedRandomSampler(weights=self.weights, num_samples=self.total_size, replacement=True) + + def __iter__(self): + return iter(self.sampler) + + def __len__(self): + return self.total_size + + +def pil_loader(img_str): + buff = io.BytesIO(img_str) + img = Image.open(buff) + return img.convert('RGB') + + +class TCSLoader(object): + + def __init__(self, conf_path, sc_config_key='sensecore'): + print(f'[TCSLoader] config_path: {conf_path}') + print('--> before Client(conf_path)') + self.client = Client(conf_path) + self.sc_config_key = sc_config_key + print('--> after Client(conf_path)') + + def __call__(self, fn, image_type='image', max_num_frames=-1, min_num_frames=4, sample='rand', clip=None): + if image_type == 'image': + img_value_str = self.client.get(fn) + img = pil_loader(img_value_str) + return img + + elif image_type == 'video': + if fn.endswith('/'): + frames = read_frames_folder(fn, num_frames=max_num_frames, min_num_frames=min_num_frames, + client=self.client, 
sample=sample) + elif fn.endswith('.gif'): + frames = read_frames_gif(fn, num_frames=max_num_frames, min_num_frames=min_num_frames, + client=self.client, sample=sample) + else: + frames = read_frames_decord(fn, num_frames=max_num_frames, min_num_frames=min_num_frames, + client=self.client, sample=sample, clip=clip) + return frames + + +def expand2square(pil_img, background_color): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + + +def simulate_jpeg_degradation(quality): + def jpeg_degrade(img): + with io.BytesIO() as output: + img.convert('RGB').save(output, format='JPEG', quality=quality) + output.seek(0) # Move the reading cursor to the start of the stream + img_jpeg = Image.open(output).copy() # Use .copy() to make sure the image is loaded in memory + return img_jpeg + return jpeg_degrade + + +# Define the JPEG compression quality range, pre-create all JPEG compression functions +qualities = list(range(75, 101)) +jpeg_degrade_functions = {quality: simulate_jpeg_degradation(quality) for quality in qualities} + + +def build_transform(is_train, input_size, pad2square=False, normalize_type='imagenet'): + if normalize_type == 'imagenet': + MEAN, STD = IMAGENET_MEAN, IMAGENET_STD + elif normalize_type == 'clip': + MEAN, STD = CLIP_MEAN, CLIP_STD + elif normalize_type == 'siglip': + MEAN, STD = SIGLIP_MEAN, SIGLIP_STD + else: + raise NotImplementedError + if is_train: # use data augumentation + transform = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.RandomChoice([T.Lambda(jpeg_degrade_functions[quality]) for quality in qualities]), + T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=MEAN, std=STD) + ]) + else: + if pad2square is False: # now we use this transform function by default + transform = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=MEAN, std=STD) + ]) + else: + transform = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Lambda(lambda img: expand2square(img, tuple(int(x * 255) for x in MEAN))), + T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=MEAN, std=STD) + ]) + + return transform + + +def preprocess( + template_name, + sources, + tokenizer: transformers.PreTrainedTokenizer, + num_image_token_list: list, + text_only: bool = False, + group_by_length: bool = False, + use_packed_ds: bool = False, + ds_name: str = None, + num_image: int = 1 +) -> Dict: + conv = get_conv_template(template_name) + roles = {'human': conv.roles[0], 'gpt': conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]['from']] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence['from']] + assert role == conv.roles[j % 2], f'{i}' + conv.append_message(role, sentence['value']) + conversations.append(conv.get_prompt()) + + if not text_only: + 
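+        # Expand the per-image '<image>' placeholder into IMG_START_TOKEN + IMG_CONTEXT_TOKEN * num_image_token_list[i] + IMG_END_TOKEN,
+        # so each image reserves exactly num_image_token_list[i] visual-token positions in the tokenized prompt.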
new_conversations = [] + for conversation in conversations: + for i in range(num_image): + image_tokens = f'{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token_list[i]}{IMG_END_TOKEN}' + conversation = conversation.replace('', image_tokens, 1) + new_conversations.append(conversation) + conversations = new_conversations + + # Tokenize conversations + input_ids = tokenizer( + conversations, + return_tensors='pt', + padding=False if group_by_length or use_packed_ds else 'max_length', + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + targets = input_ids.clone() + + # assert conv.sep_style == SeparatorStyle.ADD_COLON_TWO + + # Mask targets. Only compute loss on the assistant outputs. + sep = conv.sep + conv.roles[1] + ': ' + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + turns = conversation.split(conv.sep2) + cur_len = 1 + target[:cur_len] = IGNORE_TOKEN_ID + for i, turn in enumerate(turns): + if turn == '': + break + turn_len = len(tokenizer(turn).input_ids) + + parts = turn.split(sep) + if len(parts) != 2: + break + parts[0] += sep + # "-2" is hardcoded for the Llama tokenizer to make the offset correct. + instruction_len = len(tokenizer(parts[0]).input_ids) - 2 + + if i != 0 and not tokenizer.legacy: + # The legacy and non-legacy modes handle special tokens differently + instruction_len -= 1 + + # Ignore the user instructions + target[cur_len: cur_len + instruction_len] = IGNORE_TOKEN_ID + cur_len += turn_len + + if i != 0 and not tokenizer.legacy: + # The legacy and non-legacy modes handle special tokens differently + cur_len -= 1 + + target[cur_len:] = IGNORE_TOKEN_ID + + if False: # Inspect and check the correctness of masking + z = target.clone() + z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z) + logger.info(tokenizer.decode(z)) + exit() + + if cur_len < tokenizer.model_max_length: + if cur_len != total_len: + target[:] = IGNORE_TOKEN_ID + print( + f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}.' + f' #turn = {len(turns) - 1}. (ignored). This dataset is {ds_name}.' 
+ ) + sys.stdout.flush() + + return dict( + input_ids=input_ids, + labels=targets, + attention_mask=input_ids.ne(tokenizer.pad_token_id), + ) + + +def preprocess_mpt( + template_name, + sources, + tokenizer: transformers.PreTrainedTokenizer, + num_image_token_list: list, + text_only: bool = False, + group_by_length: bool = False, + use_packed_ds: bool = False, + ds_name: str = None, + num_image: int = 1 +) -> Dict: + conv = get_conv_template(template_name) + roles = {'human': conv.roles[0], 'gpt': conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]['from']] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence['from']] + assert role == conv.roles[j % 2], f'{i}' + conv.append_message(role, sentence['value']) + conversations.append(conv.get_prompt()) + + if not text_only: + new_conversations = [] + for conversation in conversations: + for i in range(num_image): + image_tokens = f'{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token_list[i]}{IMG_END_TOKEN}' + conversation = conversation.replace('', image_tokens, 1) + new_conversations.append(conversation) + conversations = new_conversations + + # Tokenize conversations + input_ids = tokenizer( + conversations, + return_tensors='pt', + padding=False if group_by_length or use_packed_ds else 'max_length', + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + targets = input_ids.clone() + + # Mask targets. Only compute loss on the assistant outputs. + sep = conv.sep + conv.roles[1] # <|im_end|><|im_start|>assistant\n + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + turns = conversation.split(conv.sep) + re_turns = [conv.sep.join(turns[:3])] # system + user + gpt + for conv_idx in range(3, len(turns), 2): + re_turns.append(conv.sep.join(turns[conv_idx:conv_idx + 2])) # user + gpt + cur_len = 0 + target[:cur_len] = IGNORE_TOKEN_ID + for i, turn in enumerate(re_turns): + if turn == '': + break + turn_len = len(tokenizer(turn).input_ids) + 1 + + parts = turn.split(sep) + if len(parts) != 2: + break + parts[0] += sep + instruction_len = len(tokenizer(parts[0]).input_ids) + + # Ignore the user instructions + target[cur_len: cur_len + instruction_len] = IGNORE_TOKEN_ID + # print(f'[question {i}]', tokenizer.decode(input_ids[:, cur_len: cur_len + instruction_len][0])) + # print(f'[answer {i}]', tokenizer.decode(input_ids[:, cur_len + instruction_len: cur_len + turn_len][0])) + # print(f'[label {i}]', target[cur_len + instruction_len: cur_len + turn_len]) + cur_len += turn_len + + target[cur_len:] = IGNORE_TOKEN_ID + + if cur_len < tokenizer.model_max_length: + if cur_len != total_len: + target[:] = IGNORE_TOKEN_ID + print( + f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}.' + f' #turn = {len(turns) - 1}. (ignored). This dataset is {ds_name}.' 
+ ) + sys.stdout.flush() + + return dict( + input_ids=input_ids, + labels=targets, + attention_mask=input_ids.ne(tokenizer.pad_token_id), + ) + + +def preprocess_phi3( + template_name, + sources, + tokenizer: transformers.PreTrainedTokenizer, + num_image_token_list: list, + text_only: bool = False, + group_by_length: bool = False, + use_packed_ds: bool = False, + ds_name: str = None, + num_image: int = 1 +) -> Dict: + conv = get_conv_template(template_name) + roles = {'human': conv.roles[0], 'gpt': conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]['from']] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence['from']] + assert role == conv.roles[j % 2], f'{i}' + conv.append_message(role, sentence['value']) + conversations.append(conv.get_prompt()) + + if not text_only: + new_conversations = [] + for conversation in conversations: + for i in range(num_image): + image_tokens = f'{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token_list[i]}{IMG_END_TOKEN}' + conversation = conversation.replace('', image_tokens, 1) + new_conversations.append(conversation) + conversations = new_conversations + + # Tokenize conversations + tokenizer.padding_side = 'right' + input_ids = tokenizer( + conversations, + return_tensors='pt', + padding=False if group_by_length or use_packed_ds else 'max_length', + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + targets = input_ids.clone() + + # Mask targets. Only compute loss on the assistant outputs. + sep = conv.sep + conv.roles[1] # <|end|>\n<|assistant|> + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(int(tokenizer.pad_token_id)).sum()) + + turns = conversation.split(conv.sep) + re_turns = [conv.sep.join(turns[:3])] # system + user + gpt + for conv_idx in range(3, len(turns), 2): + re_turns.append(conv.sep.join(turns[conv_idx:conv_idx + 2])) # user + gpt + cur_len = 1 + target[:cur_len] = IGNORE_TOKEN_ID + endoftext_id = tokenizer.convert_tokens_to_ids('<|endoftext|>') + target[target == endoftext_id] = IGNORE_TOKEN_ID + + for i, turn in enumerate(re_turns): + if turn == '': + break + if i == 0: + turn_len = len(tokenizer(turn).input_ids) + else: + turn_len = len(tokenizer(turn).input_ids) - 1 + parts = turn.split(sep) + if len(parts) != 2: + break + parts[0] += sep + + if i == 0: + instruction_len = len(tokenizer(parts[0]).input_ids) - 1 + else: + instruction_len = len(tokenizer(parts[0]).input_ids) - 2 + + # Ignore the user instructions + target[cur_len: cur_len + instruction_len] = IGNORE_TOKEN_ID + # print(f'[question {i}]', tokenizer.decode(input_ids[:, cur_len: cur_len + instruction_len][0])) + # print(f'[answer {i}]', tokenizer.decode(input_ids[:, cur_len + instruction_len: cur_len + turn_len][0])) + # print(f'[label {i}]', target[cur_len + instruction_len: cur_len + turn_len]) + cur_len += turn_len + + target[cur_len:] = IGNORE_TOKEN_ID + + if False: # Inspect and check the correctness of masking + z = target.clone() + z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z) + print(repr(tokenizer.decode(z))) + + if cur_len < tokenizer.model_max_length: + if cur_len != total_len: + target[:] = IGNORE_TOKEN_ID + print( + f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}.' + f' #turn = {len(turns) - 1}. (ignored). This dataset is {ds_name}.' 
+                )
+                sys.stdout.flush()
+
+    return dict(
+        input_ids=input_ids,
+        labels=targets,
+        attention_mask=input_ids.ne(tokenizer.pad_token_id),
+    )
+
+
+def preprocess_internlm(
+    template_name,
+    sources,
+    tokenizer: transformers.PreTrainedTokenizer,
+    num_image_token_list: list,
+    text_only: bool = False,
+    group_by_length: bool = False,
+    use_packed_ds: bool = False,
+    ds_name: str = None,
+    num_image: int = 1
+) -> Dict:
+    conv = get_conv_template(template_name)
+    roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
+
+    # Apply prompt templates
+    conversations = []
+    for i, source in enumerate(sources):
+        if roles[source[0]['from']] != conv.roles[0]:
+            # Skip the first one if it is not from human
+            source = source[1:]
+
+        conv.messages = []
+        for j, sentence in enumerate(source):
+            role = roles[sentence['from']]
+            assert role == conv.roles[j % 2], f'{i}'
+            sentence['value'] = sentence['value'].strip()
+            conv.append_message(role, sentence['value'])
+        conversations.append(conv.get_prompt())
+
+    if not text_only:
+        new_conversations = []
+        for conversation in conversations:
+            for i in range(num_image):
+                image_tokens = f'{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token_list[i]}{IMG_END_TOKEN}'
+                conversation = conversation.replace('<image>', image_tokens, 1)
+            new_conversations.append(conversation)
+        conversations = new_conversations
+
+    # Tokenize conversations
+    input_ids = tokenizer(
+        conversations,
+        return_tensors='pt',
+        padding=False if group_by_length or use_packed_ds else 'max_length',
+        max_length=tokenizer.model_max_length,
+        truncation=True,
+    ).input_ids
+    targets = input_ids.clone()
+
+    for conversation, target in zip(conversations, targets):
+        total_len = int(target.ne(tokenizer.pad_token_id).sum())  # for InternLM, pad_token_id == eos_token_id
+        cur_len = 1
+        target[:cur_len] = IGNORE_TOKEN_ID  # <s>
+        parts = conversation.split(conv.roles[1])  # [UNUSED_TOKEN_146]assistant\n
+        info = parts[0] + conv.roles[1]
+        temp_len = len(tokenizer(info).input_ids) - 1  # exclude the tokenizer's leading <s>
+        target[cur_len: cur_len + temp_len] = IGNORE_TOKEN_ID
+        cur_len = cur_len + temp_len
+
+        for index in range(1, len(parts) - 1):
+            info = parts[index]
+            part1, part2 = info.split(conv.roles[0])
+            temp_len = len(tokenizer(part1).input_ids) - 1
+            cur_len = cur_len + temp_len
+            part = conv.roles[0] + part2 + conv.roles[1]
+            temp_len = len(tokenizer(part).input_ids) - 1
+            target[cur_len: cur_len + temp_len] = IGNORE_TOKEN_ID
+            cur_len = cur_len + temp_len
+        last_info = parts[-1]
+        temp_len = len(tokenizer(last_info).input_ids) - 1
+        cur_len = cur_len + temp_len
+
+        target[cur_len:] = IGNORE_TOKEN_ID
+        if False:  # Inspect and check the correctness of masking
+            z = target.clone()
+            z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z)
+            print(repr(tokenizer.decode(z)))
+
+        if cur_len < tokenizer.model_max_length:
+            if cur_len != total_len:
+                target[:] = IGNORE_TOKEN_ID
+                print(f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}. 
+                sys.stdout.flush()
+
+    return dict(
+        input_ids=input_ids,
+        labels=targets,
+        attention_mask=input_ids.ne(tokenizer.pad_token_id),
+    )
+
+
+def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
+    best_ratio_diff = float('inf')
+    best_ratio = (1, 1)
+    area = width * height
+    for ratio in target_ratios:
+        target_aspect_ratio = ratio[0] / ratio[1]
+        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
+        if ratio_diff < best_ratio_diff:
+            best_ratio_diff = ratio_diff
+            best_ratio = ratio
+        elif ratio_diff == best_ratio_diff:
+            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
+                best_ratio = ratio
+    # print(f'width: {width}, height: {height}, best_ratio: {best_ratio}')
+    return best_ratio
+
+
+def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False):
+    orig_width, orig_height = image.size
+    aspect_ratio = orig_width / orig_height
+
+    # calculate the existing image aspect ratio
+    target_ratios = set(
+        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
+        i * j <= max_num and i * j >= min_num)
+    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
+
+    # find the closest aspect ratio to the target
+    target_aspect_ratio = find_closest_aspect_ratio(
+        aspect_ratio, target_ratios, orig_width, orig_height, image_size)
+
+    # calculate the target width and height
+    target_width = image_size * target_aspect_ratio[0]
+    target_height = image_size * target_aspect_ratio[1]
+    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
+
+    # resize the image
+    resized_img = image.resize((target_width, target_height))
+    processed_images = []
+    for i in range(blocks):
+        box = (
+            (i % (target_width // image_size)) * image_size,
+            (i // (target_width // image_size)) * image_size,
+            ((i % (target_width // image_size)) + 1) * image_size,
+            ((i // (target_width // image_size)) + 1) * image_size
+        )
+        # split the image
+        split_img = resized_img.crop(box)
+        processed_images.append(split_img)
+    assert len(processed_images) == blocks
+    if use_thumbnail and len(processed_images) != 1:
+        thumbnail_img = image.resize((image_size, image_size))
+        processed_images.append(thumbnail_img)
+    return processed_images
diff --git a/isolated/sim_greedy/upstream_sgl/internvl/train/internvl_chat_finetune.py b/isolated/sim_greedy/upstream_sgl/internvl/train/internvl_chat_finetune.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ace5ea16a32e5fe71fdac52fe96ff137489743f
--- /dev/null
+++ b/isolated/sim_greedy/upstream_sgl/internvl/train/internvl_chat_finetune.py
@@ -0,0 +1,847 @@
+import gc
+import json
+import logging
+import math
+import os
+import random
+import sys
+import traceback
+import warnings
+from copy import deepcopy
+from dataclasses import dataclass, field
+from typing import Dict, Optional
+
+import numpy as np
+import torch
+import torch.distributed as dist
+import transformers
+from internvl.dist_utils import init_dist
+from internvl.model.internlm2.modeling_internlm2 import InternLM2ForCausalLM
+from internvl.model.internvl_chat import (InternVisionConfig,
+                                          InternVisionModel,
+                                          InternVLChatConfig,
+                                          InternVLChatModel)
+from internvl.patch import (concat_pad_data_collator,
+                            replace_llama_rmsnorm_with_fused_rmsnorm,
+                            replace_train_sampler)
+from internvl.train.constants import (BOX_END_TOKEN, BOX_START_TOKEN,
+                                      IMG_CONTEXT_TOKEN, IMG_END_TOKEN,
+                                      IMG_START_TOKEN, QUAD_END_TOKEN,
+                                      QUAD_START_TOKEN, REF_END_TOKEN,
+                                      REF_START_TOKEN)
+from internvl.train.dataset import (ConcatDataset, TCSLoader,
+                                    WeightedConcatDataset, build_transform,
+                                    dynamic_preprocess, preprocess,
+                                    preprocess_internlm, preprocess_mpt,
+                                    preprocess_phi3)
+from internvl.train.trainer_monkey_patch import replace_create_optimizer
+from PIL import Image, ImageFile, PngImagePlugin, UnidentifiedImageError
+from torch.utils.data import Dataset
+from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
+                          HfArgumentParser, Trainer, TrainingArguments,
+                          set_seed)
+from transformers.trainer_utils import get_last_checkpoint
+from transformers.utils.logging import (enable_default_handler,
+                                        enable_explicit_format, set_verbosity)
+
+# Apply necessary patches for the transformers library
+replace_llama_rmsnorm_with_fused_rmsnorm()
+replace_train_sampler()
+
+# Try to import petrel_client for image loading, fallback to PIL if unavailable
+try:
+    from petrel_client.client import Client
+    from petrel_client.common.config import Config
+    has_tcs_loader = True
+except ImportError as E:
+    print('petrel_client is not installed. Using PIL to load images.')
+    has_tcs_loader = False
+
+# Set constants for image processing and logging
+IGNORE_INDEX = -100
+Image.MAX_IMAGE_PIXELS = None
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+MaximumDecompressedSize = 1024
+MegaByte = 2 ** 20
+PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
+
+warnings.filterwarnings('ignore')
+logger = logging.getLogger(__name__)
+
+os.environ['TOKENIZERS_PARALLELISM'] = 'true'
+
+
+@dataclass
+class ModelArguments:
+    """
+    Arguments for specifying model, tokenizer, and configurations.
+    """
+    model_name_or_path: Optional[str] = field(
+        default=None,
+        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
+    )
+    vision_path: Optional[str] = field(
+        default=None,
+        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
+    )
+    llm_path: Optional[str] = field(
+        default=None,
+        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
+    )
+    mlp_path: Optional[str] = field(
+        default=None,
+        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
+    )
+    freeze_llm: bool = field(
+        default=False,
+        metadata={'help': 'Set to True to freeze the LLM decoder.'},
+    )
+    freeze_backbone: bool = field(
+        default=False,
+        metadata={'help': 'Set to True to freeze the vision backbone of the model.'},
+    )
+    freeze_mlp: bool = field(
+        default=False,
+        metadata={'help': 'Set to True to freeze the MLP layers of the model.'},
+    )
+    unfreeze_vit_layers: int = field(
+        default=0,
+        metadata={'help': 'Specify the number of ViT layers to unfreeze. Default is 0.'},
+    )
+    vision_select_layer: int = field(
+        default=-1,
+        metadata={'help': 'Specify the layer of ViT feature map to use. Default is last layer.'},
+    )
+    use_backbone_lora: int = field(
+        default=0,
+        metadata={'help': 'Set the LoRA adapter rank for the backbone model. Default is 0.'}
+    )
+    use_llm_lora: int = field(
+        default=0,
+        metadata={'help': 'Set the LoRA adapter rank for the LLM. Default is 0.'}
+    )
+    unfreeze_lm_head: bool = field(
+        default=False,
+        metadata={'help': "Set to True to unfreeze the language model's head."},
+    )
+    use_custom_trainer: bool = field(
+        default=False,
+        metadata={'help': 'Set to True to enable the use of a custom trainer.'},
+    )
+    grad_checkpoint: Optional[bool] = field(
+        default=False,
+        metadata={'help': 'Set to True to use gradient checkpointing.'},
+    )
+    drop_path_rate: float = field(
+        default=0.0,
+        metadata={'help': 'Set the drop path rate for the ViT model. Default is 0.'},
+    )
+    ps_version: str = field(
+        default='v2',
+        metadata={'help': 'Specify the version of pixel shuffle implementation. Default is `v2`. '
+                          'Please use `v2` to fix the bug of transposed image.'}
+    )
+
+
+@dataclass
+class DataTrainingArguments:
+    """
+    Arguments for specifying data input for training and evaluation.
+    """
+    max_seq_length: Optional[int] = field(
+        default=2048,
+        metadata={
+            'help': (
+                'The maximum total input sequence length after tokenization. Sequences longer '
+                'than this will be truncated, sequences shorter will be padded.'
+            )
+        },
+    )
+    force_image_size: Optional[int] = field(
+        default=448,
+        metadata={'help': 'Set the desired size for the image. Default is 448.'},
+    )
+    down_sample_ratio: Optional[float] = field(
+        default=0.5,
+        metadata={'help': 'Set the desired down-sampling ratio for the image. Default is 0.5.'},
+    )
+    pad2square: Optional[bool] = field(
+        default=False,
+        metadata={'help': 'Pad the image to a square shape if set to True.'},
+    )
+    conv_style: Optional[str] = field(
+        default='internlm2-chat', metadata={'help': 'Prompt style for a conversation.'}
+    )
+    meta_path: Optional[str] = field(
+        default=None,
+        metadata={'help': 'The path of the meta file of datasets.'},
+    )
+    use_data_resampling: Optional[bool] = field(
+        default=False,
+        metadata={'help': 'Set to True to use data resampling.'},
+    )
+    dynamic_image_size: Optional[bool] = field(
+        default=False,
+        metadata={'help': 'Set to True to use dynamic image size.'},
+    )
+    use_thumbnail: Optional[bool] = field(
+        default=False,
+        metadata={'help': 'Set to True to add a thumbnail image.'},
+    )
+    min_dynamic_patch: Optional[int] = field(
+        default=1,
+        metadata={'help': 'The minimum number of dynamic patches. Default is 1.'},
+    )
+    max_dynamic_patch: Optional[int] = field(
+        default=12,
+        metadata={'help': 'The maximum number of dynamic patches. Default is 12.'},
+    )
+    normalize_type: Optional[str] = field(
+        default='imagenet',
+        metadata={'help': 'The normalize type for the image. Default is imagenet.'},
+    )
+
+
+class LazySupervisedDataset(Dataset):
+    """Dataset for supervised fine-tuning."""
+
+    def __init__(
+        self,
+        template_name,
+        meta,
+        tokenizer,
+        tcs_loader,
+        ds_name,
+        num_image_token,
+        image_size=224,
+        is_train=True,
+        pad2square=False,
+        group_by_length=False,
+        dynamic_image_size=False,
+        use_thumbnail=False,
+        min_dynamic_patch=1,
+        max_dynamic_patch=6,
+        min_num_frame=4,  # for video data
+        max_num_frame=12,  # for video data
+        sampling_method='rand',  # for video data
+        repeat_time=1,
+        normalize_type='imagenet',
+        random_seed=0,
+    ):
+        super(LazySupervisedDataset, self).__init__()
+        self.ds_name = ds_name
+        self.tokenizer = tokenizer
+        self.template_name = template_name
+        self.num_image_token = num_image_token
+        logger.info(f'[Dataset] num_image_token: {num_image_token}')
+        logger.info(f'[Dataset] dynamic_image_size: {dynamic_image_size}')
+        logger.info(f'[Dataset] use_thumbnail: {use_thumbnail}')
+        logger.info(f'[Dataset] min_dynamic_patch: {min_dynamic_patch}, max_dynamic_patch: {max_dynamic_patch}')
+
+        self.image_size = image_size
+        self.is_train = is_train
+        self.pad2square = pad2square
+        self.max_num_frame = max_num_frame
+        self.min_num_frame = min_num_frame
+        self.sampling_method = sampling_method
+
+        logger.info('Formatting inputs...Skip in lazy mode')
+        assert meta['annotation'].endswith('jsonl'), f'annotation must be jsonl, but got {meta["annotation"]}'
+
+        with open(meta['annotation'], 'r') as f:
+            self.raw_data = f.readlines()
+            if repeat_time < 1:
+                # If repeat_time is less than 1, select a portion of the data
+                self.raw_data = self.raw_data[:int(len(self.raw_data) * repeat_time)]
+            if repeat_time > 1:
+                assert isinstance(repeat_time, int)
+                # Repeat the list if repeat_time is greater than 1
+                self.raw_data = self.raw_data * repeat_time
+
+        self.rng = np.random.default_rng(seed=random_seed)
+        self.rng.shuffle(self.raw_data)
+
+        gc.collect()
+        self.root = meta['root']
+        self.cached_data_dict = {}
+        self.tcs_loader = tcs_loader
+        self.group_by_length = group_by_length
+        self.dynamic_image_size = dynamic_image_size
+        self.use_thumbnail = use_thumbnail
+        self.min_dynamic_patch = min_dynamic_patch
+        self.max_dynamic_patch = max_dynamic_patch
+        self.normalize_type = normalize_type
+
+        # If the precomputed length does not exist, roughly estimate the length of
+        # each sample to improve the efficiency of group_by_length.
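+        # The estimate caches a token count per raw conversation string length
+        # (conv2length) and adds the maximum possible image-token budget, so
+        # length-grouped sampling only tokenizes each distinct size once.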
+        if self.group_by_length:
+            self.conv2length = {}  # Using a dictionary to speed up token length calculation
+            self.length = []
+            for data_item in self.raw_data:
+                data_item = json.loads(data_item)
+                if 'length' in data_item:
+                    token_length = data_item['length']  # Use precomputed length if available
+                else:
+                    # Compute token length using the tokenizer
+                    conversations = '\n'.join([temp['value'] for temp in data_item['conversations']])
+                    str_length = len(conversations)
+                    if str_length not in self.conv2length:
+                        token_length = tokenizer(
+                            conversations, return_tensors='pt', padding=False, truncation=False,
+                        ).input_ids.size(1)
+                        self.conv2length[str_length] = token_length + num_image_token * (
+                            max_dynamic_patch + use_thumbnail)
+                    else:
+                        token_length = self.conv2length[str_length]
+                self.length.append(token_length)
+        gc.collect()
+
+    def __len__(self):
+        return len(self.raw_data)
+
+    def get_preprocess_function(self):
+        # Select the appropriate preprocessing function based on the template name
+        if self.template_name == 'Hermes-2':
+            preprocess_function = preprocess_mpt
+        elif self.template_name == 'internlm2-chat':
+            preprocess_function = preprocess_internlm
+        elif self.template_name == 'phi3-chat':
+            preprocess_function = preprocess_phi3
+        else:
+            preprocess_function = preprocess
+        return preprocess_function
+
+    def load_image(self, image_path):
+        # Load the image using tcs_loader if available, otherwise use PIL
+        if self.tcs_loader is not None and 's3://' in image_path:
+            return self.tcs_loader(image_path)
+        return Image.open(image_path).convert('RGB')
+
+    def get_image_path(self, image_path):
+        if image_path.startswith('s3://'):  # for ceph
+            image_path = self.root + image_path
+        else:  # for local image
+            image_path = os.path.join(self.root, image_path)
+        return image_path
+
+    def get_transform(self):
+        # Build transformation function
+        transform = build_transform(is_train=self.is_train, input_size=self.image_size,
+                                    pad2square=self.pad2square, normalize_type=self.normalize_type)
+        return transform
+
+    def multi_modal_get_item(self, data_item):
+        # Build transformation function
+        transform = self.get_transform()
+
+        # Ensure the first conversation contains an image placeholder
+        if '<image>' not in data_item['conversations'][0]['value']:
+            data_item['conversations'][0]['value'] = '<image>\n' + data_item['conversations'][0]['value']
+
+        # Merge the image path
+        image_path = self.get_image_path(data_item['image'])
+
+        # Load the image using tcs_loader if available, otherwise use PIL
+        image = self.load_image(image_path)
+
+        if self.dynamic_image_size:  # If dynamic image size is enabled, preprocess the image dynamically
+            images = dynamic_preprocess(image, min_num=self.min_dynamic_patch, max_num=self.max_dynamic_patch,
+                                        image_size=self.image_size, use_thumbnail=self.use_thumbnail)
+        else:  # Otherwise, use the original image as a single patch
+            images = [image]
+
+        # Apply the transformation to each image and stack the results into a tensor
+        pixel_values = [transform(image) for image in images]
+        pixel_values = torch.stack(pixel_values)
+
+        # Ensure that there is only one patch if dynamic image size is not enabled
+        num_patches = pixel_values.size(0)
+        if not self.dynamic_image_size:
+            assert num_patches == 1, f'The number of patches should be 1, but got {num_patches}.'
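+        # The text preprocessing below receives a budget of num_image_token * num_patches
+        # image context tokens, matching the number of tiles produced above.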
+
+        # Select the appropriate preprocessing function based on the template name
+        preprocess_function = self.get_preprocess_function()
+
+        # Preprocess the conversations and generate the return dictionary
+        ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
+                                  self.tokenizer, [self.num_image_token * num_patches],
+                                  group_by_length=self.group_by_length, ds_name=self.ds_name)
+
+        # Create the final return dictionary
+        ret = dict(
+            input_ids=ret['input_ids'][0],
+            labels=ret['labels'][0],
+            attention_mask=ret['attention_mask'][0],
+            pixel_values=pixel_values,
+            image_flags=torch.tensor([1] * num_patches, dtype=torch.long)
+        )
+        return ret
+
+    def multi_modal_multi_image_get_item(self, data_item):
+        # Build transformation function
+        transform = self.get_transform()
+
+        images, num_tiles = [], []
+        num_image = len(data_item['image'])
+        for image_path in data_item['image']:
+            # Merge the image path
+            image_path = self.get_image_path(image_path)
+            # Load the image using tcs_loader if available, otherwise use PIL
+            image = self.load_image(image_path)
+            if self.dynamic_image_size:  # If dynamic image size is enabled, preprocess the image dynamically
+                image = dynamic_preprocess(image, min_num=self.min_dynamic_patch,
+                                           max_num=self.max_dynamic_patch // num_image,
+                                           image_size=self.image_size, use_thumbnail=self.use_thumbnail)
+                images += image
+                num_tiles.append(len(image))
+            else:  # Otherwise, use the original image as a single patch
+                images.append(image)
+                num_tiles.append(1)
+        pixel_values = [transform(image) for image in images]
+        pixel_values = torch.stack(pixel_values)
+        num_patches = pixel_values.size(0)
+
+        # Select the appropriate preprocessing function based on the template name
+        preprocess_function = self.get_preprocess_function()
+
+        # Preprocess the conversations and generate the return dictionary
+        num_image_tokens = [self.num_image_token * num_tile for num_tile in num_tiles]
+        ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
+                                  self.tokenizer, num_image_tokens, group_by_length=self.group_by_length,
+                                  ds_name=self.ds_name, num_image=num_image)
+
+        # Create the final return dictionary
+        ret = dict(
+            input_ids=ret['input_ids'][0],
+            labels=ret['labels'][0],
+            attention_mask=ret['attention_mask'][0],
+            pixel_values=pixel_values,
+            image_flags=torch.tensor([1] * num_patches, dtype=torch.long)
+        )
+        return ret
+
+    def video_get_item(self, data_item):
+        # Build transformation function
+        transform = self.get_transform()
+
+        # Ensure the first conversation contains a video placeholder
+        if '