xiaohaoWillX committed (verified)
Commit 5316f3e · 1 Parent(s): e130339

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. eval/vqa/run_full_textvqa_native.sh +34 -0
  2. eval/vqa/run_shared_vision_guided_textvqa.py +1751 -0
  3. eval/vqa/run_single_model_native.py +603 -0
  4. isolated/sim_greedy/eval/vqa/run_shared_vision_guided_textvqa.py +1742 -0
  5. isolated/sim_greedy/upstream_sgl/eval/vqa/textvqa_eval.py +345 -0
  6. isolated/sim_greedy/upstream_sgl/internvl/model/__init__.py +0 -0
  7. isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/configuration_internlm2.py +150 -0
  8. isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/modeling_internlm2.py +1709 -0
  9. isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2.py +235 -0
  10. isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2_fast.py +211 -0
  11. isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/__init__.py +13 -0
  12. isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_intern_vit.py +119 -0
  13. isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_internvl_chat.py +106 -0
  14. isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/flash_attention.py +76 -0
  15. isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_intern_vit.py +362 -0
  16. isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_internvl_chat.py +506 -0
  17. isolated/sim_greedy/upstream_sgl/internvl/model/llama/__init__.py +116 -0
  18. isolated/sim_greedy/upstream_sgl/internvl/model/llama/configuration_llama.py +203 -0
  19. isolated/sim_greedy/upstream_sgl/internvl/model/llama/convert_llama_weights_to_hf.py +479 -0
  20. isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_flax_llama.py +750 -0
  21. isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_llama.py +1872 -0
  22. isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama.py +412 -0
  23. isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama_fast.py +255 -0
  24. isolated/sim_greedy/upstream_sgl/internvl/model/phi3/configuration_phi3.py +211 -0
  25. isolated/sim_greedy/upstream_sgl/internvl/model/phi3/modeling_phi3.py +1601 -0
  26. isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/__init__.py +82 -0
  27. isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/configuration_qwen2.py +140 -0
  28. isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/modeling_qwen2.py +1551 -0
  29. isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2.py +339 -0
  30. isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2_fast.py +134 -0
  31. isolated/sim_greedy/upstream_sgl/internvl/train/__init__.py +0 -0
  32. isolated/sim_greedy/upstream_sgl/internvl/train/constants.py +15 -0
  33. isolated/sim_greedy/upstream_sgl/internvl/train/dataset.py +726 -0
  34. isolated/sim_greedy/upstream_sgl/internvl/train/internvl_chat_finetune.py +847 -0
  35. isolated/sim_greedy/upstream_sgl/internvl/train/internvl_chat_pretrain.py +885 -0
  36. isolated/sim_greedy/upstream_sgl/internvl/train/trainer_monkey_patch.py +159 -0
  37. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/run.log +0 -0
  38. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/textvqa_shared_vision_1bguide_8btext_keep09_random.filter_debug.json +0 -0
  39. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/textvqa_shared_vision_1bguide_8btext_keep09_random.json +0 -0
  40. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/textvqa_shared_vision_1bguide_8btext_keep09_random.summary.json +24 -0
  41. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/run.log +0 -0
  42. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/textvqa_shared_vision_1bguide_8btext_keep09_random_gpu1.filter_debug.json +0 -0
  43. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/textvqa_shared_vision_1bguide_8btext_keep09_random_gpu1.json +0 -0
  44. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/textvqa_shared_vision_1bguide_8btext_keep09_random_gpu1.summary.json +24 -0
  45. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/run.log +0 -0
  46. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/textvqa_shared_vision_1bguide_8btext_keep40_random.filter_debug.json +0 -0
  47. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/textvqa_shared_vision_1bguide_8btext_keep40_random.json +0 -0
  48. outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/textvqa_shared_vision_1bguide_8btext_keep40_random.summary.json +24 -0
  49. outputs/full_shared_vision_1bguide_8btext_rawalign_prune0p09_restart/full_shared_vision_1bguide_8btext_rawalign_prune0p09_restart.json +0 -0
  50. outputs/full_shared_vision_1bguide_8btext_rawalign_prune0p09_restart/run.log +0 -0
eval/vqa/run_full_textvqa_native.sh ADDED
@@ -0,0 +1,34 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ if [[ $# -ne 1 ]]; then
+     echo "Usage: $0 RUN_ROOT" >&2
+     exit 1
+ fi
+
+ RUN_ROOT="$1"
+ mkdir -p "$RUN_ROOT"
+
+ SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
+ REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." && pwd)"
+
+ export CUDA_VISIBLE_DEVICES=0
+ export PYTHONPATH="${REPO_ROOT}:${PYTHONPATH:-}"
+
+ PY=${PYTHON_BIN:-python}
+ SCRIPT="${REPO_ROOT}/eval/vqa/run_single_model_native.py"
+ CHECKPOINT_ROOT=${CHECKPOINT_ROOT:-"${REPO_ROOT}/checkpoints"}
+
+ "$PY" "$SCRIPT" \
+     --checkpoint "${CHECKPOINT_ROOT}/models--OpenGVLab--InternVL2-2B" \
+     --mode textvqa_eval \
+     --dataset textvqa_val \
+     --run-name native_textvqa_2b_full \
+     --out-dir "$RUN_ROOT" | tee "$RUN_ROOT/native_textvqa_2b_full.log"
+
+ "$PY" "$SCRIPT" \
+     --checkpoint "${CHECKPOINT_ROOT}/models--OpenGVLab--InternVL2-8B" \
+     --mode textvqa_eval \
+     --dataset textvqa_val \
+     --run-name native_textvqa_8b_full \
+     --out-dir "$RUN_ROOT" | tee "$RUN_ROOT/native_textvqa_8b_full.log"
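
For reference, a minimal invocation sketch of the script above, run from the repository root. The environment overrides PYTHON_BIN and CHECKPOINT_ROOT are the ones the script already reads; the run directory name is illustrative, not taken from the commit:

    # optional: point at a non-default checkpoint cache before running
    export CHECKPOINT_ROOT=/data/checkpoints
    bash eval/vqa/run_full_textvqa_native.sh outputs/native_textvqa_full
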
eval/vqa/run_shared_vision_guided_textvqa.py ADDED
@@ -0,0 +1,1751 @@
+ import argparse
+ import inspect
+ import json
+ import math
+ import os
+ import random
+ import re
+ import sys
+ import time
+ from functools import wraps
+ from pathlib import Path
+ from typing import Dict, List, Optional, Tuple
+
+ import torch
+ from PIL import Image
+ from transformers import AutoTokenizer
+ from transformers.generation.logits_process import LogitsProcessorList
+
+
+ REPO_ROOT = Path(__file__).resolve().parents[2]
+ DEFAULT_UPSTREAM_SGL_ROOT = Path(os.environ.get("UPSTREAM_SGL_ROOT", "/home/yf/snap/SGL"))
+ if str(DEFAULT_UPSTREAM_SGL_ROOT) not in sys.path:
+     sys.path.insert(0, str(DEFAULT_UPSTREAM_SGL_ROOT))
+ eval_vqa_path = DEFAULT_UPSTREAM_SGL_ROOT / "eval" / "vqa"
+ if str(eval_vqa_path) not in sys.path:
+     sys.path.insert(0, str(eval_vqa_path))
+
+ from internvl.conversation import get_conv_template
+ from internvl.model.internvl_chat import InternVLChatModel
+ from internvl.model.internvl_chat.configuration_internvl_chat import InternVLChatConfig
+ from internvl.train.dataset import build_transform, dynamic_preprocess
+ from textvqa_eval import TextVQAAccuracyEvaluator
+
+
+ BASE_PROMPT = "Answer the question using a single word or phrase."
+ BASE_PROMPT_SUFFIX = " " + BASE_PROMPT
+ HIDDEN_REASONING_INSTRUCTION = (
+     "Think through the relevant visual evidence and any text in the image step by step internally before answering."
+ )
+ EXPLICIT_REASONING_INSTRUCTION = (
+     "Explain your reasoning step by step using the relevant visual evidence and any text in the image."
+ )
+ DEFAULT_FINAL_ANSWER_INSTRUCTION = "Provide the final answer only."
+ GUIDE_ATTENTION_COT_PROMPT_TEMPLATE = """You are solving a TextVQA task.
+ Read the image carefully, especially visible text.
+ Reason through the answer in at least 5 explicit steps.
+ Do not skip the reasoning.
+ Question: {question}
+ 1.
+ 2.
+ 3.
+ 4.
+ 5.
+ Final answer:"""
+ GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE = """You are solving a TextVQA task.
+
+ Read the image carefully, especially all visible text.
+ Reason using only evidence from the image and OCR text.
+ You must output exactly 5 numbered reasoning steps.
+ Each step must be a short sentence.
+ Do not provide the final answer.
+ Do not provide a summary.
+ Do not output any text other than the 5 numbered steps.
+
+ Question: {question}
+
+ 1. Identify the most relevant visible text or object.
+ 2. Explain how that evidence relates to the question.
+ 3. Check for another supporting clue in the image.
+ 4. Resolve any ambiguity using the strongest evidence.
+ 5. State the final reasoning conclusion without giving the final answer."""
+ GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION = (
+     "First reason step by step using the relevant visual evidence and OCR text. "
+     "Then end with a new line in the exact format: Answer: <short answer>."
+ )
+ GUIDE_TEXT_HINT_INSTRUCTION = (
+     "Give a very short guide hint grounded in the image and OCR text. Use a short phrase, not a full sentence."
+ )
+ GUIDED_DECODE_INSTRUCTION = (
+     "Use the guide hint only if it matches the image. Answer the question using a single word or phrase."
+ )
+
+ REASONING_FILTER_STOPWORDS = {
+     "a", "an", "and", "are", "as", "at", "be", "because", "but", "by", "for", "from", "has",
+     "have", "if", "in", "into", "is", "it", "its", "of", "on", "or", "that", "the", "their",
+     "there", "this", "those", "to", "was", "were", "with",
+ }
+ REASONING_FILTER_TEMPLATE_WORDS = {
+     "answer", "conclusion", "directly", "evidence", "final", "identify", "indicating",
+     "question", "reason", "reasoning", "relates", "relevant", "resolve", "shows", "state",
+     "strongest", "supporting", "supports", "using", "visible",
+ }
+ REASONING_FILTER_POSITION_WORDS = {
+     "left", "right", "top", "bottom", "middle", "center", "centre", "upper", "lower",
+ }
+ REASONING_FILTER_COLOR_WORDS = {
+     "black", "blue", "brown", "gold", "gray", "green", "grey", "orange", "pink",
+     "purple", "red", "silver", "white", "yellow",
+ }
+ REASONING_FILTER_KEEP_POS = {"NOUN", "PROPN", "ADJ"}
+ SPACY_REASONING_NLP = None
+ SPACY_REASONING_LOAD_ATTEMPTED = False
+ SPACY_REASONING_FALLBACK_WARNED = False
+
+
+ def resolve_hf_snapshot(path: str) -> str:
+     path = os.path.abspath(path)
+     config_path = os.path.join(path, "config.json")
+     if os.path.isfile(config_path):
+         return path
+
+     refs_main = os.path.join(path, "refs", "main")
+     if os.path.isfile(refs_main):
+         with open(refs_main) as f:
+             revision = f.read().strip()
+         snapshot_path = os.path.join(path, "snapshots", revision)
+         if os.path.isfile(os.path.join(snapshot_path, "config.json")):
+             return snapshot_path
+
+     raise FileNotFoundError(f"Could not resolve checkpoint snapshot from: {path}")
+
+
+ def configure_model(checkpoint_path: str, use_flash_attn: bool) -> InternVLChatConfig:
+     checkpoint_path = resolve_hf_snapshot(checkpoint_path)
+     config = InternVLChatConfig.from_json_file(os.path.join(checkpoint_path, "config.json"))
+     llm_arch = config.llm_config.architectures[0]
+     if llm_arch == "InternLM2ForCausalLM":
+         config.llm_config.attn_implementation = "eager"
+     else:
+         config.llm_config._attn_implementation = "eager"
+     config.vision_config.use_flash_attn = use_flash_attn
+     return config
+
+
+ def patch_internlm2_sample_signature(model: InternVLChatModel) -> None:
+     language_model_cls = model.language_model.__class__
+     sample_fn = getattr(language_model_cls, "_sample", None)
+     if sample_fn is None or getattr(sample_fn, "_sgl_logits_warper_compat", False):
+         return
+
+     signature = inspect.signature(sample_fn)
+     logits_warper_param = signature.parameters.get("logits_warper")
+     if logits_warper_param is None or logits_warper_param.default is not inspect._empty:
+         return
+
+     @wraps(sample_fn)
+     def compat_sample(
+         self,
+         input_ids: torch.LongTensor,
+         logits_processor,
+         stopping_criteria,
+         generation_config,
+         synced_gpus: bool,
+         streamer=None,
+         logits_warper=None,
+         **model_kwargs,
+     ):
+         # transformers>=4.49 folds samplers into logits_processor and no longer
+         # passes logits_warper to custom _sample overrides.
+         if logits_warper is None:
+             logits_warper = LogitsProcessorList()
+         return sample_fn(
+             self,
+             input_ids=input_ids,
+             logits_processor=logits_processor,
+             stopping_criteria=stopping_criteria,
+             generation_config=generation_config,
+             synced_gpus=synced_gpus,
+             streamer=streamer,
+             logits_warper=logits_warper,
+             **model_kwargs,
+         )
+
+     compat_sample._sgl_logits_warper_compat = True
+     language_model_cls._sample = compat_sample
+
+
+ def load_model(
+     checkpoint_path: str,
+     config: InternVLChatConfig,
+     auto: bool,
+     load_in_8bit: bool,
+     load_in_4bit: bool,
+ ) -> InternVLChatModel:
+     checkpoint_path = resolve_hf_snapshot(checkpoint_path)
+     kwargs = {"device_map": "auto"} if auto else {}
+     model = InternVLChatModel.from_pretrained(
+         checkpoint_path,
+         config=config,
+         low_cpu_mem_usage=True,
+         torch_dtype=torch.bfloat16,
+         load_in_8bit=load_in_8bit,
+         load_in_4bit=load_in_4bit,
+         **kwargs,
+     ).eval()
+     if not auto and not load_in_8bit and not load_in_4bit:
+         model = model.cuda()
+     patch_internlm2_sample_signature(model)
+     return model
+
+
+ def build_decode_model(
+     guide_model: InternVLChatModel,
+     large_checkpoint: str,
+     use_flash_attn: bool,
+     auto: bool,
+     load_in_8bit: bool,
+     load_in_4bit: bool,
+ ) -> Tuple[InternVLChatModel, AutoTokenizer]:
+     large_checkpoint = resolve_hf_snapshot(large_checkpoint)
+     large_config = configure_model(large_checkpoint, use_flash_attn=use_flash_attn)
+     large_source = load_model(
+         large_checkpoint,
+         large_config,
+         auto=auto,
+         load_in_8bit=load_in_8bit,
+         load_in_4bit=load_in_4bit,
+     )
+
+     decode_model = InternVLChatModel(
+         large_config,
+         vision_model=guide_model.vision_model,
+         language_model=large_source.language_model,
+     )
+     decode_model.config.vision_config = guide_model.config.vision_config
+     decode_model.vision_model.config = guide_model.config.vision_config
+     decode_model.mlp1 = large_source.mlp1
+     decode_model.template = large_source.template
+     decode_model.system_message = large_source.system_message
+     decode_model.num_image_token = large_source.num_image_token
+     decode_model.ps_version = guide_model.ps_version
+     decode_model.select_layer = guide_model.select_layer
+     decode_model.downsample_ratio = guide_model.downsample_ratio
+     decode_model.img_context_token_id = large_source.img_context_token_id
+     decode_model.eval()
+     patch_internlm2_sample_signature(decode_model)
+
+     large_tokenizer = AutoTokenizer.from_pretrained(
+         large_checkpoint,
+         trust_remote_code=True,
+         use_fast=False,
+     )
+     return decode_model, large_tokenizer
+
+
+ def model_text_device(model: InternVLChatModel) -> torch.device:
+     return next(model.language_model.get_input_embeddings().parameters()).device
+
+
+ def model_vision_device(model: InternVLChatModel) -> torch.device:
+     return next(model.vision_model.parameters()).device
+
+
+ def resolve_image_path(image_path: str, data_root: str, jsonl_dir: str) -> str:
+     candidates = []
+     if os.path.isabs(image_path):
+         candidates.append(image_path)
+     candidates.append(os.path.join(data_root, image_path))
+     if image_path.startswith("data/"):
+         candidates.append(os.path.join(data_root, image_path[len("data/"):]))
+     candidates.append(os.path.join(jsonl_dir, image_path))
+     candidates.append(os.path.join(jsonl_dir, os.path.basename(image_path)))
+
+     for candidate in candidates:
+         if os.path.exists(candidate):
+             return candidate
+     raise FileNotFoundError(f"Could not resolve image path: {image_path}")
+
+
+ class TextVQADataset:
+     def __init__(self, jsonl_path: str, data_root: str, image_size: int, dynamic: bool, use_thumbnail: bool, max_num: int):
+         with open(jsonl_path) as f:
+             self.items = [json.loads(line) for line in f if line.strip()]
+         self.jsonl_dir = os.path.dirname(jsonl_path)
+         self.data_root = data_root
+         self.image_size = image_size
+         self.dynamic = dynamic
+         self.use_thumbnail = use_thumbnail
+         self.max_num = max_num
+         self.transform = build_transform(is_train=False, input_size=image_size)
+
+     def __len__(self) -> int:
+         return len(self.items)
+
+     def __getitem__(self, idx: int) -> Dict[str, object]:
+         item = self.items[idx]
+         image_path = resolve_image_path(item["image"], self.data_root, self.jsonl_dir)
+         image = Image.open(image_path).convert("RGB")
+         if self.dynamic:
+             images = dynamic_preprocess(
+                 image,
+                 image_size=self.image_size,
+                 use_thumbnail=self.use_thumbnail,
+                 max_num=self.max_num,
+             )
+         else:
+             images = [image]
+         pixel_values = torch.stack([self.transform(img) for img in images])
+         return {
+             "question_id": item["question_id"],
+             "question": item["question"],
+             "pixel_values": pixel_values,
+             "annotation": item.get("answer", ""),
+         }
+
+
+ def load_annotations(annotation_file: str) -> Dict[int, List[str]]:
+     with open(annotation_file) as f:
+         annotations = json.load(f)["annotations"]
+     return {
+         item["question_id"]: [answer["answer"] for answer in item["answers"]]
+         for item in annotations
+     }
+
+
+ def build_query(model: InternVLChatModel, tokenizer, question: str, num_patches: int):
+     img_context_token = "<IMG_CONTEXT>"
+     img_start_token = "<img>"
+     img_end_token = "</img>"
+
+     if "<image>" not in question:
+         question = "<image>\n" + question
+
+     model.img_context_token_id = tokenizer.convert_tokens_to_ids(img_context_token)
+
+     template = get_conv_template(model.template)
+     template.system_message = model.system_message
+     template.append_message(template.roles[0], question)
+     template.append_message(template.roles[1], None)
+     query = template.get_prompt()
+
+     image_tokens = img_start_token + img_context_token * model.num_image_token * num_patches + img_end_token
+     query = query.replace("<image>", image_tokens, 1)
+     return query, template
+
+
+ @torch.inference_mode()
+ def extract_shared_raw_visual_tokens(model: InternVLChatModel, pixel_values: torch.Tensor) -> torch.Tensor:
+     vision_device = model_vision_device(model)
+     pixel_values = pixel_values.to(device=vision_device, dtype=torch.bfloat16)
+     if model.select_layer == -1:
+         vit_embeds = model.vision_model(
+             pixel_values=pixel_values,
+             output_hidden_states=False,
+             return_dict=True,
+         ).last_hidden_state
+     else:
+         vit_embeds = model.vision_model(
+             pixel_values=pixel_values,
+             output_hidden_states=True,
+             return_dict=True,
+         ).hidden_states[model.select_layer]
+     vit_embeds = vit_embeds[:, 1:, :]
+     h = w = int(vit_embeds.shape[1] ** 0.5)
+     vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
+     vit_embeds = model.pixel_shuffle(vit_embeds, scale_factor=model.downsample_ratio)
+     return vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
+
+
+ @torch.inference_mode()
+ def project_visual_tokens(model: InternVLChatModel, raw_visual_tokens: torch.Tensor) -> torch.Tensor:
+     mlp_device = next(model.mlp1.parameters()).device
+     raw_visual_tokens = raw_visual_tokens.to(device=mlp_device, dtype=torch.bfloat16)
+     return model.mlp1(raw_visual_tokens)
+
+
+ @torch.inference_mode()
+ def build_input_embeds_from_visual_features(
+     model: InternVLChatModel,
+     input_ids: torch.Tensor,
+     visual_features: torch.Tensor,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     input_embeds = model.language_model.get_input_embeddings()(input_ids)
+     batch_size, seq_len, hidden_size = input_embeds.shape
+     flat_input_embeds = input_embeds.reshape(batch_size * seq_len, hidden_size)
+     flat_input_ids = input_ids.reshape(batch_size * seq_len)
+     selected = flat_input_ids == model.img_context_token_id
+     if selected.sum().item() == 0:
+         raise ValueError("No image context tokens found in input_ids.")
+     flat_input_embeds[selected] = visual_features.reshape(-1, hidden_size).to(flat_input_embeds.device)
+     return flat_input_embeds.reshape(batch_size, seq_len, hidden_size), flat_input_ids
+
+
+ @torch.inference_mode()
+ def run_guide_generation(
+     model: InternVLChatModel,
+     tokenizer,
+     projected_visual_tokens: torch.Tensor,
+     question: str,
+     generation_config: dict,
+ ) -> Dict[str, object]:
+     query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
+     model_inputs = tokenizer(query, return_tensors="pt")
+     input_device = model_text_device(model)
+     input_ids = model_inputs["input_ids"].to(input_device)
+     attention_mask = model_inputs["attention_mask"].to(input_device)
+     eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
+     input_embeds, flat_input_ids = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
+
+     visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero()
+     visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1]
+
+     run_config = dict(generation_config)
+     run_config["eos_token_id"] = eos_token_id
+
+     outputs = model.language_model.generate(
+         inputs_embeds=input_embeds,
+         attention_mask=attention_mask,
+         generation_config=None,
+         output_hidden_states=None,
+         return_dict=None,
+         use_cache=True,
+         visual_token_index=(visual_start_index, visual_end_index),
+         **run_config,
+     )
+     response = tokenizer.batch_decode(outputs["sequences"], skip_special_tokens=True)[0]
+     response = response.split(template.sep)[0].strip()
+     return {
+         "response": response,
+         "outputs": outputs,
+         "input_embeds": input_embeds,
+         "flat_input_ids": flat_input_ids,
+         "attention_mask": attention_mask,
+         "visual_token_index": (visual_start_index, visual_end_index),
+     }
+
+
+ def aggregate_attention_from_step(attentions, visual_token_index: Tuple[int, int]) -> torch.Tensor:
+     visual_start_index, visual_end_index = visual_token_index
+     visual_token_num = visual_end_index - visual_start_index + 1
+     visual_token_importance = None
+
+     for attention in attentions:
+         if attention is None:
+             continue
+         if visual_token_importance is None:
+             visual_token_importance = torch.zeros(
+                 visual_token_num,
+                 device=attention.device,
+                 dtype=torch.float32,
+             )
+
+         merged_attention = attention[0].sum(dim=0)
+         if attention.shape[2] != 1:
+             visual_token_importance += merged_attention[
+                 visual_end_index + 1 :,
+                 visual_start_index : visual_end_index + 1,
+             ].sum(dim=0)
+         else:
+             visual_token_importance += merged_attention[
+                 0:1,
+                 visual_start_index : visual_end_index + 1,
+             ].sum(dim=0)
+
+     if visual_token_importance is None:
+         raise RuntimeError("Guide model did not return layer attentions for the current decoding step.")
+     return visual_token_importance
+
+
+ def count_attention_query_tokens_from_step(attentions, visual_token_index: Tuple[int, int]) -> int:
+     _, visual_end_index = visual_token_index
+     for attention in attentions:
+         if attention is None:
+             continue
+         query_length = int(attention.shape[2])
+         if query_length != 1:
+             return max(query_length - int(visual_end_index) - 1, 0)
+         return 1
+     return 0
+
+
+ def count_generated_tokens(outputs) -> int:
+     sequences = getattr(outputs, "sequences", None)
+     if sequences is None and isinstance(outputs, dict):
+         sequences = outputs.get("sequences")
+     if sequences is None:
+         return 0
+     if sequences.ndim == 0:
+         return 0
+     return int(sequences.shape[-1])
+
+
+ def count_attention_query_tokens_from_generation_outputs(
+     outputs,
+     visual_token_index: Tuple[int, int],
+     step_mask: Optional[List[bool]] = None,
+ ) -> int:
+     attentions = getattr(outputs, "attentions", None)
+     if not attentions:
+         return 0
+
+     token_count = 0
+     for step_idx, step_attentions in enumerate(attentions):
+         if step_mask is not None and (step_idx >= len(step_mask) or not step_mask[step_idx]):
+             continue
+         token_count += count_attention_query_tokens_from_step(step_attentions, visual_token_index)
+     if token_count == 0 and step_mask is not None:
+         return count_attention_query_tokens_from_generation_outputs(outputs, visual_token_index, step_mask=None)
+     return token_count
+
+
+ def count_question_and_answer_attention_query_tokens(
+     outputs,
+     visual_token_index: Tuple[int, int],
+ ) -> Tuple[int, int]:
+     attentions = getattr(outputs, "attentions", None)
+     if not attentions:
+         return 0, 0
+
+     question_token_count = 0
+     answer_token_count = 0
+     for step_idx, step_attentions in enumerate(attentions):
+         step_token_count = count_attention_query_tokens_from_step(step_attentions, visual_token_index)
+         if step_idx == 0:
+             question_token_count += step_token_count
+         else:
+             answer_token_count += step_token_count
+     return question_token_count, answer_token_count
+
+
+ def get_reasoning_spacy_nlp():
+     global SPACY_REASONING_NLP, SPACY_REASONING_LOAD_ATTEMPTED
+     if SPACY_REASONING_LOAD_ATTEMPTED:
+         return SPACY_REASONING_NLP
+
+     SPACY_REASONING_LOAD_ATTEMPTED = True
+     try:
+         import spacy
+
+         SPACY_REASONING_NLP = spacy.load("en_core_web_sm", disable=["parser", "lemmatizer"])
+     except Exception:
+         SPACY_REASONING_NLP = None
+     return SPACY_REASONING_NLP
+
+
+ def should_keep_reasoning_heuristic_token(token_text: str) -> bool:
+     stripped = token_text.strip()
+     if not stripped:
+         return False
+
+     lowered = stripped.lower()
+     if re.fullmatch(r"\d+[.)]?", stripped):
+         return False
+     if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS:
+         return False
+     if lowered in REASONING_FILTER_POSITION_WORDS or lowered in REASONING_FILTER_COLOR_WORDS:
+         return True
+     if any(ch.isdigit() for ch in stripped):
+         return True
+     if any(ch.isupper() for ch in stripped):
+         return True
+     if any(ch in ".:/-@&" for ch in stripped):
+         return True
+     alpha_count = sum(ch.isalpha() for ch in stripped)
+     return alpha_count >= 4
+
+
+ def should_keep_reasoning_doc_token(token) -> bool:
+     stripped = token.text.strip()
+     if not stripped:
+         return False
+
+     lowered = stripped.lower()
+     if token.is_punct or token.is_space:
+         return False
+     if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS:
+         return False
+     if token.pos_ in REASONING_FILTER_KEEP_POS:
+         return True
+     return False
+
+
+ def build_generated_token_spans(tokenizer, generated_ids: torch.Tensor) -> Tuple[str, List[Tuple[int, int]]]:
+     decoded_text = ""
+     token_spans: List[Tuple[int, int]] = []
+     for token_id in generated_ids.detach().cpu().tolist():
+         piece = tokenizer.decode([int(token_id)], skip_special_tokens=True, clean_up_tokenization_spaces=False)
+         start = len(decoded_text)
+         decoded_text += piece
+         token_spans.append((start, len(decoded_text)))
+     return decoded_text, token_spans
+
+
+ def analyze_reasoning_filter(text: str, args) -> Tuple[List[Tuple[int, int]], str, List[Dict[str, object]]]:
+     if args.guide_reasoning_filter_mode == "none":
+         return [], "none", []
+
+     if args.guide_reasoning_filter_mode == "pos_ner":
+         nlp = get_reasoning_spacy_nlp()
+         if nlp is not None:
+             doc = nlp(text)
+             token_analysis = []
+             intervals = [
+                 (token.idx, token.idx + len(token))
+                 for token in doc
+                 if should_keep_reasoning_doc_token(token)
+             ]
+             for token in doc:
+                 token_analysis.append(
+                     {
+                         "text": token.text,
+                         "lemma": token.lemma_,
+                         "pos": token.pos_,
+                         "tag": token.tag_,
+                         "dep": token.dep_,
+                         "ent_type": token.ent_type_,
+                         "like_num": bool(getattr(token, "like_num", False)),
+                         "like_url": bool(getattr(token, "like_url", False)),
+                         "is_stop": bool(token.is_stop),
+                         "keep": should_keep_reasoning_doc_token(token),
+                     }
+                 )
+             return intervals, "spacy_pos_ner", token_analysis
+
+     token_analysis = []
+     intervals = [
+         (match.start(), match.end())
+         for match in re.finditer(r"\S+", text)
+         if should_keep_reasoning_heuristic_token(match.group(0))
+     ]
+     for match in re.finditer(r"\S+", text):
+         token_text = match.group(0)
+         token_analysis.append(
+             {
+                 "text": token_text,
+                 "lemma": token_text.lower(),
+                 "pos": "",
+                 "tag": "",
+                 "dep": "",
+                 "ent_type": "",
+                 "like_num": any(ch.isdigit() for ch in token_text),
+                 "like_url": "http" in token_text.lower() or "www." in token_text.lower(),
+                 "is_stop": token_text.lower() in REASONING_FILTER_STOPWORDS,
+                 "keep": should_keep_reasoning_heuristic_token(token_text),
+             }
+         )
+     return intervals, "heuristic_fallback", token_analysis
+
+
+ def build_reasoning_attention_step_mask_and_debug(tokenizer, outputs, args) -> Tuple[Optional[List[bool]], Dict[str, object]]:
+     if args.guide_reasoning_filter_mode == "none":
+         return None, {"backend": "none", "kept_tokens": [], "token_analysis": []}
+
+     sequences = outputs["sequences"][0]
+     decoded_text, token_spans = build_generated_token_spans(tokenizer, sequences)
+     intervals, backend, token_analysis = analyze_reasoning_filter(decoded_text, args)
+
+     global SPACY_REASONING_FALLBACK_WARNED
+     if backend == "heuristic_fallback" and not SPACY_REASONING_FALLBACK_WARNED:
+         print("Warning: spaCy POS/NER model unavailable; guide reasoning filter is using heuristic fallback.")
+         SPACY_REASONING_FALLBACK_WARNED = True
+
+     debug_info = {
+         "backend": backend,
+         "token_analysis": token_analysis,
+         "kept_tokens": [token["text"] for token in token_analysis if token.get("keep")],
+     }
+     if not intervals:
+         return None, debug_info
+
+     step_mask = []
+     for start, end in token_spans:
+         if start == end:
+             step_mask.append(False)
+             continue
+         keep = any(start < interval_end and end > interval_start for interval_start, interval_end in intervals)
+         step_mask.append(keep)
+
+     debug_info["step_mask"] = step_mask
+     if not any(step_mask):
+         return None, debug_info
+     return step_mask, debug_info
+
+
+ def aggregate_attention_from_generation_outputs(
+     outputs,
+     visual_token_index: Tuple[int, int],
+     step_mask: Optional[List[bool]] = None,
+ ) -> torch.Tensor:
+     aggregated = getattr(outputs, "aggregated_viusal_token_attention", None)
+     if aggregated is not None and step_mask is None:
+         return aggregated.detach().float()
+
+     attentions = getattr(outputs, "attentions", None)
+     if not attentions:
+         raise RuntimeError("Guide generation did not return attentions; enable output_attentions.")
+
+     visual_token_importance = None
+     for step_idx, step_attentions in enumerate(attentions):
+         if step_mask is not None and (step_idx >= len(step_mask) or not step_mask[step_idx]):
+             continue
+         step_importance = aggregate_attention_from_step(step_attentions, visual_token_index)
+         if visual_token_importance is None:
+             visual_token_importance = step_importance
+         else:
+             visual_token_importance = visual_token_importance + step_importance
+
+     if visual_token_importance is None:
+         if step_mask is not None:
+             return aggregate_attention_from_generation_outputs(outputs, visual_token_index, step_mask=None)
+         raise RuntimeError("Guide generation returned no attention steps.")
+     return visual_token_importance
+
+
+ def aggregate_question_and_answer_attention_from_generation_outputs(
+     outputs,
+     visual_token_index: Tuple[int, int],
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     attentions = getattr(outputs, "attentions", None)
+     if not attentions:
+         raise RuntimeError("Guide generation did not return attentions; enable output_attentions.")
+
+     question_visual_token_importance = None
+     answer_visual_token_importance = None
+     for step_idx, step_attentions in enumerate(attentions):
+         step_importance = aggregate_attention_from_step(step_attentions, visual_token_index)
+         if step_idx == 0:
+             if question_visual_token_importance is None:
+                 question_visual_token_importance = step_importance
+             else:
+                 question_visual_token_importance = question_visual_token_importance + step_importance
+         else:
+             if answer_visual_token_importance is None:
+                 answer_visual_token_importance = step_importance
+             else:
+                 answer_visual_token_importance = answer_visual_token_importance + step_importance
+
+     if question_visual_token_importance is None and answer_visual_token_importance is None:
+         raise RuntimeError("Guide generation returned no attention steps.")
+     if question_visual_token_importance is None:
+         question_visual_token_importance = torch.zeros_like(answer_visual_token_importance)
+     if answer_visual_token_importance is None:
+         answer_visual_token_importance = torch.zeros_like(question_visual_token_importance)
+     return question_visual_token_importance, answer_visual_token_importance
+
+
+ @torch.inference_mode()
+ def compute_consistency_score(
+     model: InternVLChatModel,
+     input_embeds: torch.Tensor,
+     flat_input_ids: torch.Tensor,
+     attention_mask: torch.Tensor,
+     generated_ids: torch.Tensor,
+     visual_token_importance: torch.Tensor,
+     visual_token_index: Tuple[int, int],
+     consistency_token_ratio: float,
+     large_model_prune_selection: str,
+ ) -> torch.Tensor:
+     visual_start_index, visual_end_index = visual_token_index
+     new_input_ids_ = generated_ids
+     new_token_num = new_input_ids_.shape[-1]
+     new_input_embedding = torch.concatenate(
+         (input_embeds, model.language_model.get_input_embeddings()(new_input_ids_).unsqueeze(0)),
+         dim=1,
+     )
+     new_attention_mask = torch.concatenate(
+         (
+             attention_mask,
+             torch.ones((1, new_input_ids_.shape[0]), device=attention_mask.device, dtype=attention_mask.dtype),
+         ),
+         dim=-1,
+     )
+     new_input_ids = torch.concatenate((flat_input_ids, new_input_ids_), dim=-1)
+     consistency_generate_kwargs = {
+         "large_model_prune_layer": 0.0,
+         "large_model_prune_ratio": consistency_token_ratio,
+         "large_model_prune_selection": large_model_prune_selection,
+         "visual_token_index": (visual_start_index, visual_end_index),
+         "visual_token_importance": visual_token_importance,
+         "inputs_embeds": new_input_embedding,
+         "attention_mask": new_attention_mask,
+         "output_scores": False,
+         "output_attentions": False,
+         "return_dict_in_generate": False,
+         "use_cache": True,
+     }
+     consistency_generate_kwargs["inputs_embeds"] = new_input_embedding
+     consistency_generate_kwargs["attention_mask"] = new_attention_mask
+     consistency_generate_kwargs["output_scores"] = False
+     consistency_generate_kwargs["output_attentions"] = False
+     consistency_generate_kwargs = model.language_model._get_initial_cache_position(new_input_ids, consistency_generate_kwargs)
+     model_inputs = model.language_model.prepare_inputs_for_generation(new_input_ids, **consistency_generate_kwargs)
+     consistency_output = model.language_model.forward(**model_inputs, return_dict=True)
+     consistency_score = torch.gather(
+         consistency_output["logits"][:, -new_token_num - 1 : -1, :].softmax(dim=-1),
+         index=new_input_ids_[None, :, None],
+         dim=-1,
+     )
+     return torch.prod(consistency_score)
+
+
+ @torch.inference_mode()
+ def run_guide_branch(
+     model: InternVLChatModel,
+     tokenizer,
+     projected_visual_tokens: torch.Tensor,
+     question: str,
+     generation_config: dict,
+     consistency_token_ratio: float,
+     args,
+ ) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Dict[str, int]]:
+     generation_result = run_guide_generation(
+         model,
+         tokenizer,
+         projected_visual_tokens,
+         question,
+         generation_config,
+     )
+     outputs = generation_result["outputs"]
+     question_visual_token_importance, answer_visual_token_importance = (
+         aggregate_question_and_answer_attention_from_generation_outputs(
+             outputs,
+             generation_result["visual_token_index"],
+         )
+     )
+     question_attention_token_count, answer_attention_token_count = count_question_and_answer_attention_query_tokens(
+         outputs,
+         generation_result["visual_token_index"],
+     )
+     visual_token_importance = combine_question_and_answer_attention(
+         question_visual_token_importance,
+         answer_visual_token_importance,
+         args,
+     )
+     if args.large_model_prune_selection == "similarity_greedy":
+         consistency_score = torch.tensor(1.0, device=visual_token_importance.device)
+     else:
+         consistency_score = compute_consistency_score(
+             model,
+             generation_result["input_embeds"],
+             generation_result["flat_input_ids"],
+             generation_result["attention_mask"],
+             outputs["sequences"][0],
+             visual_token_importance,
+             generation_result["visual_token_index"],
+             consistency_token_ratio,
+             args.large_model_prune_selection,
+         )
+     return (
+         generation_result["response"],
+         outputs.scores,
+         consistency_score,
+         visual_token_importance,
+         question_visual_token_importance,
+         answer_visual_token_importance,
+         {
+             "question_attention_token_count": question_attention_token_count,
+             "answer_attention_token_count": answer_attention_token_count,
+             "reasoning_attention_token_count": 0,
+             "guide_answer_generated_token_count": count_generated_tokens(outputs),
+             "guide_reasoning_generated_token_count": 0,
+         },
+     )
+
+
+ @torch.inference_mode()
+ def run_decode_branch(
+     model: InternVLChatModel,
+     tokenizer,
+     projected_visual_tokens: torch.Tensor,
+     question: str,
+     generation_config: dict,
+     visual_token_importance: torch.Tensor,
+     large_model_prune_layer: float,
+     large_model_prune_ratio: float,
+     large_model_prune_selection: str,
+ ) -> str:
+     query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
+     model_inputs = tokenizer(query, return_tensors="pt")
+     input_device = model_text_device(model)
+     input_ids = model_inputs["input_ids"].to(input_device)
+     attention_mask = model_inputs["attention_mask"].to(input_device)
+     eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
+     input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
+
+     visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero()
+     visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1]
+
+     run_config = dict(generation_config)
+     run_config["eos_token_id"] = eos_token_id
+     run_config["return_dict_in_generate"] = False
+     run_config["output_scores"] = False
+     run_config["output_attentions"] = False
+     run_config["large_model_prune_layer"] = large_model_prune_layer
+     run_config["large_model_prune_ratio"] = large_model_prune_ratio
+     run_config["large_model_prune_selection"] = large_model_prune_selection
+     run_config["visual_token_importance"] = visual_token_importance
+     run_config["visual_token_index"] = (visual_start_index, visual_end_index)
+
+     output_ids = model.language_model.generate(
+         inputs_embeds=input_embeds,
+         attention_mask=attention_mask,
+         generation_config=None,
+         output_hidden_states=None,
+         return_dict=None,
+         use_cache=True,
+         **run_config,
+     )
+     response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
+     return response.split(template.sep)[0].strip()
+
+
+ def make_generation_config(args) -> dict:
+     generation_config = {
+         "num_beams": args.num_beams,
+         "max_new_tokens": args.max_new_tokens,
+         "min_new_tokens": 1,
+         "do_sample": args.temperature > 0,
+         "return_dict_in_generate": True,
+         "output_scores": True,
+         "output_attentions": True,
+     }
+     if args.temperature > 0:
+         generation_config["temperature"] = args.temperature
+     return generation_config
+
+
+ def append_instruction(question: str, instruction: str) -> str:
+     instruction = instruction.strip()
+     if not instruction:
+         return question
+     return f"{question.rstrip()}\n{instruction}"
+
+
+ def make_reasoning_generation_config(base_generation_config: dict, args) -> dict:
+     generation_config = dict(base_generation_config)
+     generation_config["max_new_tokens"] = args.reasoning_max_new_tokens
+     generation_config["return_dict_in_generate"] = True
+     generation_config["output_scores"] = True
+     generation_config["output_attentions"] = True
+     temperature = args.reasoning_temperature
+     generation_config["do_sample"] = temperature > 0
+     if temperature > 0:
+         generation_config["temperature"] = temperature
+     else:
+         generation_config.pop("temperature", None)
+     return generation_config
+
+
+ def make_custom_generation_config(
+     base_generation_config: dict,
+     max_new_tokens: int,
+     temperature: float,
+     return_dict_in_generate: bool,
+     output_scores: bool,
+     output_attentions: bool,
+ ) -> dict:
+     generation_config = dict(base_generation_config)
+     generation_config["max_new_tokens"] = max_new_tokens
+     generation_config["return_dict_in_generate"] = return_dict_in_generate
+     generation_config["output_scores"] = output_scores
+     generation_config["output_attentions"] = output_attentions
+     generation_config["do_sample"] = temperature > 0
+     if temperature > 0:
+         generation_config["temperature"] = temperature
+     else:
+         generation_config.pop("temperature", None)
+     return generation_config
+
+
+ def normalize_generated_text(text: str) -> str:
+     return " ".join(text.strip().split())
+
+
+ def strip_base_prompt(question: str) -> str:
+     if question.endswith(BASE_PROMPT_SUFFIX):
+         return question[: -len(BASE_PROMPT_SUFFIX)].rstrip()
+     return question
+
+
+ def summarize_visual_token_importance(visual_token_importance: torch.Tensor, topk: int) -> Dict[str, object]:
+     values = visual_token_importance.detach().float().view(-1).cpu()
+     total = values.sum().item()
+     if total > 0:
+         normalized = values / total
+     else:
+         normalized = torch.full_like(values, 1.0 / max(values.numel(), 1))
+
+     topk = min(topk, normalized.numel())
+     top_values, top_indices = torch.topk(normalized, k=topk)
+     entropy = -(normalized * torch.clamp(normalized, min=1e-12).log()).sum().item()
+     return {
+         "raw_sum": total,
+         "entropy": entropy,
+         "max_weight": normalized.max().item(),
+         "top_indices": top_indices.tolist(),
+         "top_weights": top_values.tolist(),
+         "weights": normalized.tolist(),
+     }
+
+
+ def normalize_visual_token_importance(visual_token_importance: torch.Tensor) -> torch.Tensor:
+     visual_token_importance = visual_token_importance.detach().float()
+     total = visual_token_importance.sum()
+     if total.item() > 0:
+         return visual_token_importance / total
+     return torch.full_like(visual_token_importance, 1.0 / max(visual_token_importance.numel(), 1))
+
+
+ def prepare_decode_visual_token_importance(
+     visual_token_importance: torch.Tensor,
+     selection_mode: str,
+ ) -> torch.Tensor:
+     raw_importance = visual_token_importance.detach().float()
+     if selection_mode in {"topk", "similarity_greedy"}:
+         return raw_importance
+     if selection_mode == "random":
+         return torch.rand_like(raw_importance)
+     raise ValueError(f"Unsupported large model prune selection mode: {selection_mode}")
+
+
1012
+ def maybe_normalize_visual_token_importance(visual_token_importance: torch.Tensor, args) -> torch.Tensor:
1013
+ if args.guide_attention_aggregation_mode == "normalized":
1014
+ return normalize_visual_token_importance(visual_token_importance)
1015
+ return visual_token_importance.detach().float()
1016
+
1017
+
1018
+ def combine_question_and_answer_attention(
1019
+ question_visual_token_importance: torch.Tensor,
1020
+ answer_visual_token_importance: torch.Tensor,
1021
+ args,
1022
+ ) -> torch.Tensor:
1023
+ question_weight = args.guide_question_attention_weight
1024
+ answer_weight = args.guide_answer_attention_weight
1025
+ if question_weight == 0 and answer_weight == 0:
1026
+ raise ValueError("At least one guide question/answer attention weight must be > 0.")
1027
+
1028
+ return (
1029
+ question_weight * maybe_normalize_visual_token_importance(question_visual_token_importance, args)
1030
+ + answer_weight * maybe_normalize_visual_token_importance(answer_visual_token_importance, args)
1031
+ )
1032
+
1033
+
1034
+ def resolve_guide_attention_source(args) -> str:
1035
+ if args.guide_attention_source != "default":
1036
+ return args.guide_attention_source
1037
+ if args.guide_reasoning_mode == "two_pass_explicit":
1038
+ return "combined"
1039
+ return "answer"
1040
+
1041
+
1042
+ def combine_reasoning_and_answer_attention(
1043
+ reasoning_visual_token_importance: torch.Tensor,
1044
+ answer_visual_token_importance: torch.Tensor,
1045
+ args,
1046
+ ) -> torch.Tensor:
1047
+ attention_source = resolve_guide_attention_source(args)
1048
+ if attention_source == "reasoning":
1049
+ return args.guide_reasoning_attention_weight * maybe_normalize_visual_token_importance(
1050
+ reasoning_visual_token_importance,
1051
+ args,
1052
+ )
1053
+ if attention_source == "answer":
1054
+ return args.guide_answer_attention_weight * maybe_normalize_visual_token_importance(
1055
+ answer_visual_token_importance,
1056
+ args,
1057
+ )
1058
+
1059
+ reasoning_weight = args.guide_reasoning_attention_weight
1060
+ answer_weight = args.guide_answer_attention_weight
1061
+ if reasoning_weight == 0 and answer_weight == 0:
1062
+ raise ValueError("At least one guide attention weight must be > 0.")
1063
+
1064
+ return (
1065
+ reasoning_weight * maybe_normalize_visual_token_importance(reasoning_visual_token_importance, args)
1066
+ + answer_weight * maybe_normalize_visual_token_importance(answer_visual_token_importance, args)
1067
+ )
1068
+
1069
+
1070
+ def combine_question_reasoning_and_answer_attention(
1071
+ question_visual_token_importance: torch.Tensor,
1072
+ reasoning_visual_token_importance: torch.Tensor,
1073
+ answer_visual_token_importance: torch.Tensor,
1074
+ args,
1075
+ ) -> torch.Tensor:
1076
+ attention_source = resolve_guide_attention_source(args)
1077
+ if attention_source == "reasoning":
1078
+ return args.guide_reasoning_attention_weight * maybe_normalize_visual_token_importance(
1079
+ reasoning_visual_token_importance,
1080
+ args,
1081
+ )
1082
+ if attention_source == "answer":
1083
+ return combine_question_and_answer_attention(
1084
+ question_visual_token_importance,
1085
+ answer_visual_token_importance,
1086
+ args,
1087
+ )
1088
+
1089
+ return combine_question_and_answer_attention(
1090
+ question_visual_token_importance,
1091
+ answer_visual_token_importance,
1092
+ args,
1093
+ ) + args.guide_reasoning_attention_weight * reasoning_visual_token_importance.detach().float()
1094
+
1095
+
1096
+ def build_guide_attention_question(question: str, args) -> str:
1097
+ if args.guide_reasoning_mode == "short_cot":
1098
+ return GUIDE_ATTENTION_COT_PROMPT_TEMPLATE.replace("{question}", strip_base_prompt(question))
1099
+ if args.guide_reasoning_mode == "explicit_cot":
1100
+ return append_instruction(strip_base_prompt(question), GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION)
1101
+ return question
1102
+
1103
+
1104
+ def build_guide_reasoning_question(question: str) -> str:
1105
+ return GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE.replace(
1106
+ "{question}",
1107
+ strip_base_prompt(question),
1108
+ )
1109
+
1110
+
1111
+ def build_guide_text_question(question: str) -> str:
1112
+ return append_instruction(question, GUIDE_TEXT_HINT_INSTRUCTION)
1113
+
1114
+
1115
+ def build_decode_question(question: str, guide_text_hint: Optional[str]) -> str:
1116
+ if not guide_text_hint:
1117
+ return question
1118
+ return append_instruction(
1119
+ question,
1120
+ f"Guide hint: {guide_text_hint}\n{GUIDED_DECODE_INSTRUCTION}",
1121
+ )
1122
+
1123
+
1124
+ def make_guide_attention_generation_config(base_generation_config: dict, args) -> dict:
1125
+ if args.guide_reasoning_mode in {"short_cot", "explicit_cot", "two_pass_explicit"}:
1126
+ return make_custom_generation_config(
1127
+ base_generation_config,
1128
+ max_new_tokens=args.guide_reasoning_max_new_tokens,
1129
+ temperature=args.guide_reasoning_temperature,
1130
+ return_dict_in_generate=True,
1131
+ output_scores=True,
1132
+ output_attentions=True,
1133
+ )
1134
+ return dict(base_generation_config)
1135
+
1136
+
1137
+ def make_guide_text_generation_config(base_generation_config: dict, args) -> dict:
1138
+ return make_custom_generation_config(
1139
+ base_generation_config,
1140
+ max_new_tokens=args.guide_text_max_new_tokens,
1141
+ temperature=args.guide_text_temperature,
1142
+ return_dict_in_generate=False,
1143
+ output_scores=False,
1144
+ output_attentions=False,
1145
+ )
1146
+
1147
+
1148
+ @torch.inference_mode()
1149
+ def run_text_generation_branch(
1150
+ model: InternVLChatModel,
1151
+ tokenizer,
1152
+ projected_visual_tokens: torch.Tensor,
1153
+ question: str,
1154
+ generation_config: dict,
1155
+ ) -> str:
1156
+ query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
1157
+ model_inputs = tokenizer(query, return_tensors="pt")
1158
+ input_device = model_text_device(model)
1159
+ input_ids = model_inputs["input_ids"].to(input_device)
1160
+ attention_mask = model_inputs["attention_mask"].to(input_device)
1161
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
1162
+ input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
1163
+
1164
+ run_config = dict(generation_config)
1165
+ run_config["eos_token_id"] = eos_token_id
1166
+ output_ids = model.language_model.generate(
1167
+ inputs_embeds=input_embeds,
1168
+ attention_mask=attention_mask,
1169
+ generation_config=None,
1170
+ output_hidden_states=None,
1171
+ return_dict=None,
1172
+ use_cache=True,
1173
+ **run_config,
1174
+ )
1175
+ response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
1176
+ return response.split(template.sep)[0].strip()
1177
+
1178
+
1179
+ def run_decode_answer(
1180
+ model: InternVLChatModel,
1181
+ tokenizer,
1182
+ projected_visual_tokens: torch.Tensor,
1183
+ question: str,
1184
+ generation_config: dict,
1185
+ visual_token_importance: torch.Tensor,
1186
+ args,
1187
+ ) -> str:
1188
+ return run_decode_branch(
1189
+ model,
1190
+ tokenizer,
1191
+ projected_visual_tokens,
1192
+ question,
1193
+ generation_config,
1194
+ prepare_decode_visual_token_importance(
1195
+ visual_token_importance,
1196
+ args.large_model_prune_selection,
1197
+ ),
1198
+ args.large_model_prune_layer,
1199
+ args.large_model_prune_ratio,
1200
+ args.large_model_prune_selection,
1201
+ )
1202
+
1203
+
1204
+ @torch.inference_mode()
1205
+ def run_guide_two_pass_explicit_branch(
1206
+ model: InternVLChatModel,
1207
+ tokenizer,
1208
+ projected_visual_tokens: torch.Tensor,
1209
+ question: str,
1210
+ reasoning_generation_config: dict,
1211
+ answer_generation_config: dict,
1212
+ consistency_token_ratio: float,
1213
+ args,
1214
+ ) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, str, Dict[str, object], Dict[str, int]]:
1215
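+ # Two guide passes: a short-answer pass on the original question and a reasoning-only pass.
+ # Their attention over visual tokens is combined below with the question/answer attention.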
+ answer_result = run_guide_generation(
1216
+ model,
1217
+ tokenizer,
1218
+ projected_visual_tokens,
1219
+ question,
1220
+ answer_generation_config,
1221
+ )
1222
+ reasoning_result = run_guide_generation(
1223
+ model,
1224
+ tokenizer,
1225
+ projected_visual_tokens,
1226
+ build_guide_reasoning_question(question),
1227
+ reasoning_generation_config,
1228
+ )
1229
+ reasoning = reasoning_result["response"]
1230
+
1231
+ reasoning_step_mask, reasoning_filter_debug = build_reasoning_attention_step_mask_and_debug(
1232
+ tokenizer,
1233
+ reasoning_result["outputs"],
1234
+ args,
1235
+ )
1236
+ reasoning_visual_token_importance = aggregate_attention_from_generation_outputs(
1237
+ reasoning_result["outputs"],
1238
+ reasoning_result["visual_token_index"],
1239
+ reasoning_step_mask,
1240
+ )
1241
+ reasoning_attention_token_count = count_attention_query_tokens_from_generation_outputs(
1242
+ reasoning_result["outputs"],
1243
+ reasoning_result["visual_token_index"],
1244
+ reasoning_step_mask,
1245
+ )
1246
+ question_visual_token_importance, answer_visual_token_importance = (
1247
+ aggregate_question_and_answer_attention_from_generation_outputs(
1248
+ answer_result["outputs"],
1249
+ answer_result["visual_token_index"],
1250
+ )
1251
+ )
1252
+ question_attention_token_count, answer_attention_token_count = count_question_and_answer_attention_query_tokens(
1253
+ answer_result["outputs"],
1254
+ answer_result["visual_token_index"],
1255
+ )
1256
+ visual_token_importance = combine_question_reasoning_and_answer_attention(
1257
+ question_visual_token_importance,
1258
+ reasoning_visual_token_importance,
1259
+ answer_visual_token_importance,
1260
+ args,
1261
+ )
1262
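+ # For similarity_greedy selection the consistency score is fixed at 1.0 instead of being
+ # recomputed from the guide outputs.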
+ if args.large_model_prune_selection == "similarity_greedy":
1263
+ consistency_score = torch.tensor(1.0, device=visual_token_importance.device)
1264
+ else:
1265
+ consistency_score = compute_consistency_score(
1266
+ model,
1267
+ answer_result["input_embeds"],
1268
+ answer_result["flat_input_ids"],
1269
+ answer_result["attention_mask"],
1270
+ answer_result["outputs"]["sequences"][0],
1271
+ visual_token_importance,
1272
+ answer_result["visual_token_index"],
1273
+ consistency_token_ratio,
1274
+ args.large_model_prune_selection,
1275
+ )
1276
+ return (
1277
+ answer_result["response"],
1278
+ answer_result["outputs"].scores,
1279
+ consistency_score,
1280
+ visual_token_importance,
1281
+ reasoning,
1282
+ reasoning_filter_debug,
1283
+ {
1284
+ "question_attention_token_count": question_attention_token_count,
1285
+ "answer_attention_token_count": answer_attention_token_count,
1286
+ "reasoning_attention_token_count": reasoning_attention_token_count,
1287
+ "guide_answer_generated_token_count": count_generated_tokens(answer_result["outputs"]),
1288
+ "guide_reasoning_generated_token_count": count_generated_tokens(reasoning_result["outputs"]),
1289
+ },
1290
+ )
1291
+
1292
+
1293
+ def generate_with_reasoning(
1294
+ guide_model: InternVLChatModel,
1295
+ guide_tokenizer,
1296
+ decode_model: InternVLChatModel,
1297
+ large_tokenizer,
1298
+ projected_visual_tokens: torch.Tensor,
1299
+ question: str,
1300
+ generation_config: dict,
1301
+ reasoning_generation_config: dict,
1302
+ visual_token_importance: torch.Tensor,
1303
+ args,
1304
+ ) -> Tuple[str, str]:
1305
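+ # Decode-side two-pass mode: first elicit explicit reasoning from the decode model, then ask
+ # again with that reasoning appended and request only the final answer. Both passes use the
+ # same pruned visual tokens.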
+ reasoning_question = append_instruction(question, EXPLICIT_REASONING_INSTRUCTION)
1306
+ reasoning = run_decode_answer(
1307
+ decode_model,
1308
+ large_tokenizer,
1309
+ projected_visual_tokens,
1310
+ reasoning_question,
1311
+ reasoning_generation_config,
1312
+ visual_token_importance,
1313
+ args,
1314
+ )
1315
+ final_question = append_instruction(
1316
+ question,
1317
+ f"Reasoning:\n{reasoning}\n{DEFAULT_FINAL_ANSWER_INSTRUCTION}",
1318
+ )
1319
+ answer = run_decode_answer(
1320
+ decode_model,
1321
+ large_tokenizer,
1322
+ projected_visual_tokens,
1323
+ final_question,
1324
+ generation_config,
1325
+ visual_token_importance,
1326
+ args,
1327
+ )
1328
+ return answer, reasoning
1329
+
1330
+
1331
+ def evaluate(args):
1332
+ guide_checkpoint = resolve_hf_snapshot(args.guide_checkpoint)
1333
+ large_checkpoint = resolve_hf_snapshot(args.large_checkpoint)
1334
+
1335
+ guide_tokenizer = AutoTokenizer.from_pretrained(guide_checkpoint, trust_remote_code=True, use_fast=False)
1336
+ guide_config = configure_model(guide_checkpoint, use_flash_attn=args.use_flash_attn)
1337
+ guide_model = load_model(
1338
+ guide_checkpoint,
1339
+ guide_config,
1340
+ auto=args.auto,
1341
+ load_in_8bit=args.load_in_8bit,
1342
+ load_in_4bit=args.load_in_4bit,
1343
+ )
1344
+ decode_model, large_tokenizer = build_decode_model(
1345
+ guide_model,
1346
+ large_checkpoint,
1347
+ use_flash_attn=args.use_flash_attn,
1348
+ auto=args.auto,
1349
+ load_in_8bit=args.load_in_8bit,
1350
+ load_in_4bit=args.load_in_4bit,
1351
+ )
1352
+
1353
+ guide_image_size = guide_model.config.force_image_size or guide_model.config.vision_config.image_size
1354
+ large_image_size = decode_model.config.force_image_size or decode_model.config.vision_config.image_size
1355
+ if guide_image_size != large_image_size:
1356
+ raise ValueError(f"Guide and decode image size mismatch: {guide_image_size} vs {large_image_size}")
1357
+ if guide_model.num_image_token != decode_model.num_image_token:
1358
+ raise ValueError(
1359
+ f"Guide and decode image token count mismatch: {guide_model.num_image_token} vs {decode_model.num_image_token}"
1360
+ )
1361
+
1362
+ data_root = os.path.abspath(args.data_root)
1363
+ textvqa_root = os.path.abspath(args.textvqa_root) if args.textvqa_root else os.path.join(data_root, "data", "textvqa")
1364
+ dataset = TextVQADataset(
1365
+ jsonl_path=os.path.join(textvqa_root, "textvqa_val.jsonl"),
1366
+ data_root=data_root,
1367
+ image_size=guide_image_size,
1368
+ dynamic=args.dynamic,
1369
+ use_thumbnail=guide_model.config.use_thumbnail,
1370
+ max_num=args.max_num,
1371
+ )
1372
+ question_id_to_answers = load_annotations(os.path.join(textvqa_root, "textvqa_val_annotations.json"))
1373
+ generation_config = make_generation_config(args)
1374
+ guide_attention_generation_config = make_guide_attention_generation_config(generation_config, args)
1375
+ guide_text_generation_config = None
1376
+ if args.guide_text_mode != "none":
1377
+ guide_text_generation_config = make_guide_text_generation_config(generation_config, args)
1378
+ reasoning_generation_config = None
1379
+ if args.reasoning_mode == "two_pass":
1380
+ reasoning_generation_config = make_reasoning_generation_config(generation_config, args)
1381
+
1382
+ num_items = len(dataset) if args.limit is None else min(len(dataset), args.limit)
1383
+ results = []
1384
+ filter_debug_results = []
1385
+
1386
+ for idx in range(num_items):
1387
+ sample = dataset[idx]
1388
+ question = sample["question"] + " " + BASE_PROMPT
1389
+ pixel_values = sample["pixel_values"]
1390
+ guide_attention_question = build_guide_attention_question(question, args)
1391
+
1392
+ torch.cuda.synchronize()
1393
+ start = time.time()
1394
+ raw_visual_tokens = extract_shared_raw_visual_tokens(guide_model, pixel_values)
1395
+ guide_visual_tokens = project_visual_tokens(guide_model, raw_visual_tokens)
1396
+ guide_reasoning = None
1397
+ guide_reasoning_filter_debug = {"backend": "none", "kept_tokens": [], "token_analysis": []}
1398
+ guide_attention_token_counts = {
1399
+ "question_attention_token_count": 0,
1400
+ "answer_attention_token_count": 0,
1401
+ "reasoning_attention_token_count": 0,
1402
+ "guide_answer_generated_token_count": 0,
1403
+ "guide_reasoning_generated_token_count": 0,
1404
+ }
1405
+ question_visual_token_importance = None
1406
+ answer_visual_token_importance = None
1407
+ if args.guide_reasoning_mode == "two_pass_explicit":
1408
+ (
1409
+ guide_answer,
1410
+ guide_scores,
1411
+ consistency_score,
1412
+ visual_token_importance,
1413
+ guide_reasoning,
1414
+ guide_reasoning_filter_debug,
1415
+ guide_attention_token_counts,
1416
+ ) = (
1417
+ run_guide_two_pass_explicit_branch(
1418
+ guide_model,
1419
+ guide_tokenizer,
1420
+ guide_visual_tokens,
1421
+ question,
1422
+ guide_attention_generation_config,
1423
+ generation_config,
1424
+ args.consistency_token_ratio,
1425
+ args,
1426
+ )
1427
+ )
1428
+ else:
1429
+ (
1430
+ guide_answer,
1431
+ guide_scores,
1432
+ consistency_score,
1433
+ visual_token_importance,
1434
+ question_visual_token_importance,
1435
+ answer_visual_token_importance,
1436
+ guide_attention_token_counts,
1437
+ ) = run_guide_branch(
1438
+ guide_model,
1439
+ guide_tokenizer,
1440
+ guide_visual_tokens,
1441
+ guide_attention_question,
1442
+ guide_attention_generation_config,
1443
+ args.consistency_token_ratio,
1444
+ args,
1445
+ )
1446
+ guide_text_hint = None
1447
+ if args.guide_text_mode != "none":
1448
+ if guide_text_generation_config is None:
1449
+ raise ValueError("guide_text_generation_config is required when guide_text_mode is enabled.")
1450
+ guide_text_hint = normalize_generated_text(
1451
+ run_text_generation_branch(
1452
+ guide_model,
1453
+ guide_tokenizer,
1454
+ guide_visual_tokens,
1455
+ build_guide_text_question(question),
1456
+ guide_text_generation_config,
1457
+ )
1458
+ )
1459
+ torch.cuda.synchronize()
1460
+ end = time.time()
1461
+ small_model_time = end - start
1462
+
1463
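+ # Guide confidence: geometric mean of the per-step maximum token probabilities.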
+ scores = torch.concatenate(guide_scores, dim=0)
1464
+ scores, _ = scores.softmax(dim=-1).max(dim=-1)
1465
+ original_confidence = math.pow(torch.prod(scores).item(), 1 / len(scores))
1466
+
1467
+ torch.cuda.synchronize()
1468
+ start = time.time()
1469
+ large_visual_tokens = project_visual_tokens(decode_model, raw_visual_tokens)
1470
+ decode_question = build_decode_question(question, guide_text_hint)
1471
+ reasoning = None
1472
+ if args.reasoning_mode == "none":
1473
+ large_answer = run_decode_answer(
1474
+ decode_model,
1475
+ large_tokenizer,
1476
+ large_visual_tokens,
1477
+ decode_question,
1478
+ generation_config,
1479
+ visual_token_importance,
1480
+ args,
1481
+ )
1482
+ elif args.reasoning_mode == "prompt":
1483
+ prompted_question = append_instruction(decode_question, HIDDEN_REASONING_INSTRUCTION)
1484
+ large_answer = run_decode_answer(
1485
+ decode_model,
1486
+ large_tokenizer,
1487
+ large_visual_tokens,
1488
+ prompted_question,
1489
+ generation_config,
1490
+ visual_token_importance,
1491
+ args,
1492
+ )
1493
+ else:
1494
+ if reasoning_generation_config is None:
1495
+ raise ValueError("reasoning_generation_config is required when reasoning_mode='two_pass'.")
1496
+ large_answer, reasoning = generate_with_reasoning(
1497
+ guide_model,
1498
+ guide_tokenizer,
1499
+ decode_model,
1500
+ large_tokenizer,
1501
+ large_visual_tokens,
1502
+ decode_question,
1503
+ generation_config,
1504
+ reasoning_generation_config,
1505
+ visual_token_importance,
1506
+ args,
1507
+ )
1508
+ torch.cuda.synchronize()
1509
+ end = time.time()
1510
+ large_model_time = end - start
1511
+
1512
+ visual_token_count = visual_token_importance.shape[0]
1513
+ kept_visual_token_count = max(1, int(visual_token_count * args.large_model_prune_ratio))
1514
+ result_item = {
1515
+ "question_id": sample["question_id"],
1516
+ "question": sample["question"],
1517
+ "answer": large_answer,
1518
+ "pred_answer": large_answer,
1519
+ "gt_answers": question_id_to_answers[sample["question_id"]],
1520
+ "small_answer": guide_answer,
1521
+ "guide_attention_output": guide_answer,
1522
+ "large_answer": large_answer,
1523
+ "small_model_time": small_model_time,
1524
+ "large_model_time": large_model_time,
1525
+ "original_confidence": original_confidence,
1526
+ "consistency_score": consistency_score.item(),
1527
+ "visual_token_count": visual_token_count,
1528
+ "kept_visual_token_count": kept_visual_token_count,
1529
+ "guide_attention_token_counts": guide_attention_token_counts,
1530
+ }
1531
+ if args.save_visual_token_importance:
1532
+ result_item["visual_token_importance_stats"] = summarize_visual_token_importance(
1533
+ visual_token_importance,
1534
+ topk=args.visual_token_importance_topk,
1535
+ )
1536
+ if question_visual_token_importance is not None:
1537
+ result_item["question_visual_token_importance_stats"] = summarize_visual_token_importance(
1538
+ question_visual_token_importance,
1539
+ topk=args.visual_token_importance_topk,
1540
+ )
1541
+ if answer_visual_token_importance is not None:
1542
+ result_item["answer_visual_token_importance_stats"] = summarize_visual_token_importance(
1543
+ answer_visual_token_importance,
1544
+ topk=args.visual_token_importance_topk,
1545
+ )
1546
+ if guide_text_hint is not None:
1547
+ result_item["guide_text_hint"] = guide_text_hint
1548
+ if args.save_reasoning and guide_reasoning is not None:
1549
+ result_item["guide_reasoning"] = guide_reasoning
1550
+ if args.save_reasoning and reasoning is not None:
1551
+ result_item["large_reasoning"] = reasoning
1552
+ results.append(result_item)
1553
+ filter_debug_results.append(
1554
+ {
1555
+ "question_id": sample["question_id"],
1556
+ "question": sample["question"],
1557
+ "small_answer": guide_answer,
1558
+ "large_answer": large_answer,
1559
+ "guide_reasoning": guide_reasoning,
1560
+ "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode,
1561
+ "guide_reasoning_filter_backend": guide_reasoning_filter_debug.get("backend", "none"),
1562
+ "kept_tokens": guide_reasoning_filter_debug.get("kept_tokens", []),
1563
+ "token_analysis": guide_reasoning_filter_debug.get("token_analysis", []),
1564
+ }
1565
+ )
1566
+ if (idx + 1) % args.log_every == 0 or idx + 1 == num_items:
1567
+ status = (
1568
+ f"[{idx + 1}/{num_items}] question_id={sample['question_id']} "
1569
+ f"small={guide_answer} large={large_answer} kept={kept_visual_token_count}/{visual_token_count}"
1570
+ )
1571
+ if guide_text_hint is not None:
1572
+ status += f" hint={guide_text_hint}"
1573
+ print(status)
1574
+ sys.stdout.flush()
1575
+
1576
+ evaluator = TextVQAAccuracyEvaluator()
1577
+ accuracy = evaluator.eval_pred_list(results)
1578
+
1579
+ os.makedirs(args.out_dir, exist_ok=True)
1580
+ run_name = args.run_name or "textvqa_shared_vision_2bguide_8btext"
1581
+ result_path = os.path.join(args.out_dir, f"{run_name}.json")
1582
+ summary_path = os.path.join(args.out_dir, f"{run_name}.summary.json")
1583
+ filter_debug_path = os.path.join(args.out_dir, f"{run_name}.filter_debug.json")
1584
+
1585
+ with open(result_path, "w") as f:
1586
+ json.dump(results, f, ensure_ascii=False, indent=2)
1587
+ with open(filter_debug_path, "w") as f:
1588
+ json.dump(filter_debug_results, f, ensure_ascii=False, indent=2)
1589
+
1590
+ token_count_keys = [
1591
+ "question_attention_token_count",
1592
+ "answer_attention_token_count",
1593
+ "reasoning_attention_token_count",
1594
+ "guide_answer_generated_token_count",
1595
+ "guide_reasoning_generated_token_count",
1596
+ ]
1597
+ avg_guide_attention_token_counts = {
1598
+ key: (
1599
+ sum(item.get("guide_attention_token_counts", {}).get(key, 0) for item in results)
1600
+ / max(len(results), 1)
1601
+ )
1602
+ for key in token_count_keys
1603
+ }
1604
+
1605
+ summary = {
1606
+ "mode": "shared_vision_guided",
1607
+ "guide_checkpoint": guide_checkpoint,
1608
+ "large_checkpoint": large_checkpoint,
1609
+ "count": num_items,
1610
+ "accuracy": accuracy,
1611
+ "large_model_prune_layer": args.large_model_prune_layer,
1612
+ "large_model_prune_ratio": args.large_model_prune_ratio,
1613
+ "large_model_prune_selection": args.large_model_prune_selection,
1614
+ "consistency_token_ratio": args.consistency_token_ratio,
1615
+ "guide_reasoning_mode": args.guide_reasoning_mode,
1616
+ "guide_reasoning_max_new_tokens": args.guide_reasoning_max_new_tokens,
1617
+ "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode,
1618
+ "guide_attention_aggregation_mode": args.guide_attention_aggregation_mode,
1619
+ "guide_attention_source": resolve_guide_attention_source(args),
1620
+ "guide_reasoning_attention_weight": args.guide_reasoning_attention_weight,
1621
+ "guide_answer_attention_weight": args.guide_answer_attention_weight,
1622
+ "guide_question_attention_weight": args.guide_question_attention_weight,
1623
+ "guide_text_mode": args.guide_text_mode,
1624
+ "guide_text_max_new_tokens": args.guide_text_max_new_tokens,
1625
+ "avg_guide_attention_token_counts": avg_guide_attention_token_counts,
1626
+ "avg_small_model_time": sum(item["small_model_time"] for item in results) / max(len(results), 1),
1627
+ "avg_large_model_time": sum(item["large_model_time"] for item in results) / max(len(results), 1),
1628
+ "results_file": result_path,
1629
+ "filter_debug_file": filter_debug_path,
1630
+ }
1631
+ with open(summary_path, "w") as f:
1632
+ json.dump(summary, f, ensure_ascii=False, indent=2)
1633
+
1634
+ print(f"accuracy: {accuracy:.6f}")
1635
+ print(f"results_file: {result_path}")
1636
+ print(f"summary_file: {summary_path}")
1637
+
1638
+
1639
+ def main():
1640
+ parser = argparse.ArgumentParser()
1641
+ parser.add_argument("--guide-checkpoint", type=str, required=True)
1642
+ parser.add_argument("--large-checkpoint", type=str, required=True)
1643
+ parser.add_argument("--data-root", type=str, default=str(REPO_ROOT))
1644
+ parser.add_argument("--textvqa-root", type=str, default="")
1645
+ parser.add_argument("--out-dir", type=str, default=str(REPO_ROOT / "outputs" / "shared_vision_guided"))
1646
+ parser.add_argument("--run-name", type=str, default="")
1647
+ parser.add_argument("--limit", type=int, default=None)
1648
+ parser.add_argument("--max-new-tokens", type=int, default=10)
1649
+ parser.add_argument("--num-beams", type=int, default=1)
1650
+ parser.add_argument("--temperature", type=float, default=0.0)
1651
+ parser.add_argument("--reasoning-mode", type=str, choices=["none", "prompt", "two_pass"], default="none")
1652
+ parser.add_argument("--reasoning-max-new-tokens", type=int, default=64)
1653
+ parser.add_argument("--reasoning-temperature", type=float, default=0.0)
1654
+ parser.add_argument("--save-reasoning", action="store_true")
1655
+ parser.add_argument(
1656
+ "--guide-reasoning-mode",
1657
+ type=str,
1658
+ choices=["none", "short_cot", "explicit_cot", "two_pass_explicit"],
1659
+ default="none",
1660
+ )
1661
+ parser.add_argument("--guide-reasoning-max-new-tokens", type=int, default=1024)
1662
+ parser.add_argument("--guide-reasoning-temperature", type=float, default=0.0)
1663
+ parser.add_argument(
1664
+ "--guide-reasoning-filter-mode",
1665
+ type=str,
1666
+ choices=["none", "pos_ner"],
1667
+ default="none",
1668
+ )
1669
+ parser.add_argument(
1670
+ "--guide-attention-source",
1671
+ type=str,
1672
+ choices=["default", "reasoning", "answer", "combined"],
1673
+ default="default",
1674
+ )
1675
+ parser.add_argument(
1676
+ "--guide-attention-aggregation-mode",
1677
+ type=str,
1678
+ choices=["raw", "normalized"],
1679
+ default="raw",
1680
+ )
1681
+ parser.add_argument("--guide-question-attention-weight", type=float, default=1.0)
1682
+ parser.add_argument("--guide-reasoning-attention-weight", type=float, default=1.0)
1683
+ parser.add_argument("--guide-answer-attention-weight", type=float, default=1.0)
1684
+ parser.add_argument("--guide-text-mode", type=str, choices=["none", "short_rationale"], default="none")
1685
+ parser.add_argument("--guide-text-max-new-tokens", type=int, default=12)
1686
+ parser.add_argument("--guide-text-temperature", type=float, default=0.0)
1687
+ parser.add_argument("--save-visual-token-importance", action="store_true")
1688
+ parser.add_argument("--visual-token-importance-topk", type=int, default=16)
1689
+ parser.add_argument("--dynamic", action="store_true")
1690
+ parser.add_argument("--max-num", type=int, default=6)
1691
+ parser.add_argument("--log-every", type=int, default=20)
1692
+ parser.add_argument("--seed", type=int, default=0)
1693
+ parser.add_argument("--large-model-prune-layer", type=float, default=0.0)
1694
+ parser.add_argument("--large-model-prune-ratio", type=float, default=0.4)
1695
+ parser.add_argument(
1696
+ "--large-model-prune-selection",
1697
+ type=str,
1698
+ choices=["topk", "random", "similarity_greedy"],
1699
+ default="topk",
1700
+ )
1701
+ parser.add_argument("--consistency-token-ratio", type=float, default=0.05)
1702
+ parser.add_argument("--auto", action="store_true")
1703
+ parser.add_argument("--load-in-8bit", action="store_true")
1704
+ parser.add_argument("--load-in-4bit", action="store_true")
1705
+ parser.add_argument("--use-flash-attn", action="store_true")
1706
+ args = parser.parse_args()
1707
+
1708
+ if not torch.cuda.is_available():
1709
+ raise RuntimeError("CUDA is required for shared-vision guided evaluation.")
1710
+ if args.large_model_prune_ratio <= 0 or args.large_model_prune_ratio > 1:
1711
+ raise ValueError("large-model-prune-ratio must be in (0, 1].")
1712
+ if args.consistency_token_ratio <= 0 or args.consistency_token_ratio > 1:
1713
+ raise ValueError("consistency-token-ratio must be in (0, 1].")
1714
+ if args.guide_reasoning_attention_weight < 0 or args.guide_answer_attention_weight < 0:
1715
+ raise ValueError("guide reasoning/answer attention weights must be >= 0.")
1716
+ if args.guide_question_attention_weight < 0:
1717
+ raise ValueError("guide question attention weight must be >= 0.")
1718
+ if args.guide_reasoning_mode == "two_pass_explicit":
1719
+ attention_source = resolve_guide_attention_source(args)
1720
+ if attention_source == "reasoning" and args.guide_reasoning_attention_weight == 0:
1721
+ raise ValueError("guide_reasoning_attention_weight must be > 0 when guide-attention-source=reasoning.")
1722
+ if (
1723
+ attention_source == "answer"
1724
+ and args.guide_question_attention_weight == 0
1725
+ and args.guide_answer_attention_weight == 0
1726
+ ):
1727
+ raise ValueError(
1728
+ "At least one of guide_question_attention_weight or guide_answer_attention_weight "
1729
+ "must be > 0 when guide-attention-source=answer."
1730
+ )
1731
+ if (
1732
+ attention_source == "combined"
1733
+ and args.guide_question_attention_weight == 0
1734
+ and args.guide_reasoning_attention_weight == 0
1735
+ and args.guide_answer_attention_weight == 0
1736
+ ):
1737
+ raise ValueError("At least one guide attention weight must be > 0 for two_pass_explicit.")
1738
+ if (
1739
+ args.guide_reasoning_mode != "two_pass_explicit"
1740
+ and args.guide_question_attention_weight == 0
1741
+ and args.guide_answer_attention_weight == 0
1742
+ ):
1743
+ raise ValueError("At least one guide question/answer attention weight must be > 0.")
1744
+
1745
+ random.seed(args.seed)
1746
+ torch.manual_seed(args.seed)
1747
+ evaluate(args)
1748
+
1749
+
1750
+ if __name__ == "__main__":
1751
+ main()
eval/vqa/run_single_model_native.py ADDED
@@ -0,0 +1,603 @@
1
+ import argparse
2
+ import json
3
+ import math
4
+ import os
5
+ import random
6
+ import re
7
+ import sys
8
+ from pathlib import Path
9
+ from typing import Optional, Tuple
10
+
11
+ import torch
12
+ from PIL import Image
13
+ from transformers import AutoTokenizer
14
+
15
+ from internvl.conversation import get_conv_template
16
+ from internvl.conversation import register_conv_template
17
+ from internvl.conversation import Conversation
18
+ from internvl.conversation import SeparatorStyle
19
+ from internvl.model.internvl_chat import InternVLChatModel
20
+ from internvl.model.internvl_chat.configuration_internvl_chat import InternVLChatConfig
21
+ from internvl.train.dataset import build_transform, dynamic_preprocess
22
+
23
+ from evaluate_vqa import VQADataset, ds_collections
24
+ from textvqa_eval import TextVQAAccuracyEvaluator
25
+
26
+
27
+ BASE_PROMPT = "Answer the question using a single word or phrase."
28
+ VIZWIZ_PROMPT = "When the provided information is insufficient, respond with 'Unanswerable'. "
29
+ INFOGRAPHICSVQA_PROMPT = "Answer the question using a single word or phrase."
30
+ AI2D_PROMPT = ""
31
+ HIDDEN_REASONING_INSTRUCTION = (
32
+ "Think through the relevant visual evidence and any text in the image step by step internally before answering."
33
+ )
34
+ EXPLICIT_REASONING_INSTRUCTION = (
35
+ "Explain your reasoning step by step using the relevant visual evidence and any text in the image."
36
+ )
37
+ DEFAULT_FINAL_ANSWER_INSTRUCTION = "Provide the final answer only."
38
+ REPO_ROOT = Path(__file__).resolve().parents[2]
39
+
40
+
41
+ def ensure_internvl2_5_template() -> None:
42
+ try:
43
+ get_conv_template("internvl2_5")
44
+ return
45
+ except KeyError:
46
+ pass
47
+
48
+ register_conv_template(
49
+ Conversation(
50
+ name="internvl2_5",
51
+ system_template="<|im_start|>system\n{system_message}",
52
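+ # System message gloss: "You are InternVL (书生·万象), a multimodal large language model
+ # jointly developed by Shanghai AI Laboratory, Tsinghua University, and partner institutions."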
+ system_message="你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。",
53
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
54
+ sep_style=SeparatorStyle.MPT,
55
+ sep="<|im_end|>\n",
56
+ )
57
+ )
58
+
59
+
60
+ def configure_model(checkpoint_path: str) -> InternVLChatConfig:
61
+ config = InternVLChatConfig.from_json_file(os.path.join(checkpoint_path, "config.json"))
62
+ match = re.search(r"InternVL2-(\d+B)", checkpoint_path)
63
+ model_size = match.group(1) if match else checkpoint_path.split("-")[-1]
64
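+ # Force eager attention for the language model (the config attribute name differs by
+ # checkpoint size); the vision tower keeps flash attention enabled.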
+ if model_size in ["1B", "40B"]:
65
+ config.llm_config._attn_implementation = "eager"
66
+ else:
67
+ config.llm_config.attn_implementation = "eager"
68
+ config.vision_config.use_flash_attn = True
69
+ return config
70
+
71
+
72
+ def split_model(num_layers: int, gpus_per_model: int) -> dict:
73
+ if gpus_per_model < 1:
74
+ raise ValueError("gpus_per_model must be >= 1")
75
+
76
+ device_map = {}
77
+ if gpus_per_model == 1:
78
+ for layer_idx in range(num_layers):
79
+ device_map[f"language_model.model.layers.{layer_idx}"] = 0
80
+ else:
81
+ # Keep the vision tower and embeddings on GPU 0 and spread decoder layers.
82
+ num_layers_per_gpu = math.ceil(num_layers / (gpus_per_model - 0.5))
83
+ num_layers_per_gpu = [num_layers_per_gpu] * gpus_per_model
84
+ num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
85
+
86
+ layer_cnt = 0
87
+ for gpu_idx, layer_count in enumerate(num_layers_per_gpu):
88
+ for _ in range(layer_count):
89
+ if layer_cnt >= num_layers:
90
+ break
91
+ device_map[f"language_model.model.layers.{layer_cnt}"] = gpu_idx
92
+ layer_cnt += 1
93
+
94
+ if layer_cnt < num_layers:
95
+ for layer_idx in range(layer_cnt, num_layers):
96
+ device_map[f"language_model.model.layers.{layer_idx}"] = gpus_per_model - 1
97
+
98
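+ # Pin the vision tower, projector, embeddings, norm, and output head to GPU 0 alongside the
+ # first decoder layers.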
+ device_map["vision_model"] = 0
99
+ device_map["mlp1"] = 0
100
+ device_map["language_model.model.tok_embeddings"] = 0
101
+ device_map["language_model.model.rotary_emb"] = 0
102
+ device_map["language_model.model.embed_tokens"] = 0
103
+ device_map["language_model.output"] = 0
104
+ device_map["language_model.model.norm"] = 0
105
+ device_map["language_model.lm_head"] = 0
106
+ if num_layers > 1 and gpus_per_model > 1:
107
+ device_map[f"language_model.model.layers.{num_layers - 1}"] = 1
108
+ return device_map
109
+
110
+
111
+ def load_model(checkpoint_path: str, config: InternVLChatConfig, args) -> InternVLChatModel:
112
+ ensure_internvl2_5_template()
113
+ kwargs = {"device_map": "auto"} if args.auto else {}
114
+ if args.gpus_per_model > 1 and not args.auto:
115
+ if args.gpus_per_model > torch.cuda.device_count():
116
+ raise ValueError(
117
+ f"gpus_per_model={args.gpus_per_model} exceeds visible CUDA devices={torch.cuda.device_count()}"
118
+ )
119
+ kwargs["device_map"] = split_model(config.llm_config.num_hidden_layers, args.gpus_per_model)
120
+
121
+ model = InternVLChatModel.from_pretrained(
122
+ checkpoint_path,
123
+ config=config,
124
+ low_cpu_mem_usage=True,
125
+ torch_dtype=torch.bfloat16,
126
+ load_in_8bit=args.load_in_8bit,
127
+ load_in_4bit=args.load_in_4bit,
128
+ **kwargs,
129
+ ).eval()
130
+
131
+ if args.gpus_per_model == 1 and not args.auto and not args.load_in_8bit and not args.load_in_4bit:
132
+ model = model.cuda()
133
+ return model
134
+
135
+
136
+ def dataset_prompt(dataset_name: str) -> str:
137
+ if "vizwiz" in dataset_name:
138
+ return VIZWIZ_PROMPT + BASE_PROMPT
139
+ if "ai2d" in dataset_name:
140
+ return AI2D_PROMPT
141
+ if "infographicsvqa" in dataset_name:
142
+ return INFOGRAPHICSVQA_PROMPT
143
+ return BASE_PROMPT
144
+
145
+
146
+ def resolve_dataset_path(data_root: str, path: str) -> str:
147
+ if os.path.isabs(path):
148
+ return path
149
+ return os.path.join(data_root, path)
150
+
151
+
152
+ def resolve_image_path(image_path: str, data_root: str, jsonl_path: str = "") -> str:
153
+ candidates = []
154
+ if os.path.isabs(image_path):
155
+ candidates.append(image_path)
156
+
157
+ jsonl_dir = os.path.dirname(jsonl_path) if jsonl_path else ""
158
+ candidates.append(os.path.join(data_root, image_path))
159
+ if image_path.startswith("data/"):
160
+ candidates.append(os.path.join(data_root, image_path[len("data/"):]))
161
+ if jsonl_dir:
162
+ candidates.append(os.path.join(jsonl_dir, image_path))
163
+ candidates.append(os.path.join(jsonl_dir, os.path.basename(image_path)))
164
+
165
+ for candidate in candidates:
166
+ if candidate and os.path.exists(candidate):
167
+ return candidate
168
+ raise FileNotFoundError(f"Could not resolve image path: {image_path}")
169
+
170
+
171
+ def load_textvqa_sample(jsonl_path: str, sample_index: int) -> Tuple[str, str, Optional[int], Optional[str]]:
172
+ with open(jsonl_path) as f:
173
+ for idx, line in enumerate(f):
174
+ if idx == sample_index:
175
+ item = json.loads(line)
176
+ return item["image"], item["question"], item.get("question_id"), item.get("answer")
177
+ raise IndexError(f"sample_index {sample_index} is out of range for {jsonl_path}")
178
+
179
+
180
+ def build_pixel_values(
181
+ image_path: str,
182
+ image_size: int,
183
+ dynamic: bool,
184
+ use_thumbnail: bool,
185
+ max_num: int,
186
+ ) -> torch.Tensor:
187
+ transform = build_transform(is_train=False, input_size=image_size)
188
+ image = Image.open(image_path).convert("RGB")
189
+ if dynamic:
190
+ images = dynamic_preprocess(
191
+ image,
192
+ image_size=image_size,
193
+ use_thumbnail=use_thumbnail,
194
+ max_num=max_num,
195
+ )
196
+ else:
197
+ images = [image]
198
+ return torch.stack([transform(img) for img in images])
199
+
200
+
201
+ def build_query(model: InternVLChatModel, tokenizer, question: str, num_patches: int):
202
+ img_context_token = "<IMG_CONTEXT>"
203
+ img_start_token = "<img>"
204
+ img_end_token = "</img>"
205
+
206
+ if "<image>" not in question:
207
+ question = "<image>\n" + question
208
+
209
+ model.img_context_token_id = tokenizer.convert_tokens_to_ids(img_context_token)
210
+
211
+ template = get_conv_template(model.template)
212
+ template.system_message = model.system_message
213
+ template.append_message(template.roles[0], question)
214
+ template.append_message(template.roles[1], None)
215
+ query = template.get_prompt()
216
+
217
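+ # Expand the <image> placeholder into <img>...</img> wrapping num_image_token context tokens
+ # per patch, the layout InternVL's chat format expects.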
+ image_tokens = img_start_token + img_context_token * model.num_image_token * num_patches + img_end_token
218
+ query = query.replace("<image>", image_tokens, 1)
219
+ return query, template
220
+
221
+
222
+ def model_input_device(model: InternVLChatModel) -> torch.device:
223
+ return next(model.vision_model.parameters()).device
224
+
225
+
226
+ @torch.inference_mode()
227
+ def generate_answer(
228
+ model: InternVLChatModel,
229
+ tokenizer,
230
+ pixel_values: torch.Tensor,
231
+ question: str,
232
+ generation_config: dict,
233
+ ) -> str:
234
+ query, template = build_query(model, tokenizer, question, pixel_values.shape[0])
235
+ model_inputs = tokenizer(query, return_tensors="pt")
236
+
237
+ device = model_input_device(model)
238
+ input_ids = model_inputs["input_ids"].to(device)
239
+ attention_mask = model_inputs["attention_mask"].to(device)
240
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
241
+
242
+ output_ids = model.generate(
243
+ pixel_values=pixel_values.to(device=device, dtype=torch.bfloat16),
244
+ input_ids=input_ids,
245
+ attention_mask=attention_mask,
246
+ large_model=True,
247
+ eos_token_id=eos_token_id,
248
+ **generation_config,
249
+ )
250
+ response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
251
+ return response.split(template.sep)[0].strip()
252
+
253
+
254
+ def build_eval_entries(result_items, annotation_file: str):
255
+ evaluator = TextVQAAccuracyEvaluator()
256
+ with open(annotation_file) as f:
257
+ annotations = json.load(f)["annotations"]
258
+ question_id_to_answers = {
259
+ item["question_id"]: [answer["answer"] for answer in item["answers"]]
260
+ for item in annotations
261
+ }
262
+ eval_entries = [
263
+ {
264
+ "question_id": item["question_id"],
265
+ "answer": item["answer"],
266
+ "pred_answer": item["answer"],
267
+ "gt_answers": question_id_to_answers[item["question_id"]],
268
+ }
269
+ for item in result_items
270
+ ]
271
+ return evaluator, eval_entries
272
+
273
+
274
+ def make_generation_config(num_beams: int, max_new_tokens: int, temperature: float) -> dict:
275
+ generation_config = {
276
+ "num_beams": num_beams,
277
+ "max_new_tokens": max_new_tokens,
278
+ "min_new_tokens": 1,
279
+ "do_sample": temperature > 0,
280
+ }
281
+ if temperature > 0:
282
+ generation_config["temperature"] = temperature
283
+ return generation_config
284
+
285
+
286
+ def append_instruction(question: str, instruction: str) -> str:
287
+ instruction = instruction.strip()
288
+ if not instruction:
289
+ return question
290
+ return f"{question.rstrip()}\n{instruction}"
291
+
292
+
293
+ def render_custom_prompt(question: str, prompt_template: str) -> str:
294
+ prompt_template = prompt_template.strip()
295
+ if not prompt_template:
296
+ raise ValueError("custom_prompt_template must be non-empty when reasoning_mode='custom_prompt'.")
297
+ if "{question}" in prompt_template:
298
+ return prompt_template.replace("{question}", question)
299
+ if "Question:" in prompt_template or "Question:" in prompt_template:
300
+ return f"{prompt_template.rstrip()} {question}"
301
+ return f"{prompt_template.rstrip()}\nQuestion: {question}"
302
+
303
+
304
+ def extract_final_answer(response: str, final_answer_prefix: str) -> str:
305
+ final_answer_prefix = final_answer_prefix.strip()
306
+ if not final_answer_prefix:
307
+ return response.strip()
308
+
309
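+ # Match the prefix at the start of a line, case-insensitively; if nothing follows on the same
+ # line, fall back to the next non-empty line.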
+ pattern = re.compile(rf"(?im)^{re.escape(final_answer_prefix)}\s*(.*)$")
310
+ match = pattern.search(response)
311
+ if not match:
312
+ return response.strip()
313
+
314
+ inline_answer = match.group(1).strip()
315
+ if inline_answer:
316
+ return inline_answer
317
+
318
+ trailing_lines = response[match.end():].splitlines()
319
+ for line in trailing_lines:
320
+ stripped = line.strip()
321
+ if stripped:
322
+ return stripped
323
+ return ""
324
+
325
+
326
+ def make_reasoning_generation_config(base_generation_config: dict, args) -> dict:
327
+ generation_config = dict(base_generation_config)
328
+ generation_config["max_new_tokens"] = args.reasoning_max_new_tokens
329
+ temperature = args.reasoning_temperature
330
+ generation_config["do_sample"] = temperature > 0
331
+ if temperature > 0:
332
+ generation_config["temperature"] = temperature
333
+ else:
334
+ generation_config.pop("temperature", None)
335
+ return generation_config
336
+
337
+
338
+ def generate_answer_with_reasoning(
339
+ model: InternVLChatModel,
340
+ tokenizer,
341
+ pixel_values: torch.Tensor,
342
+ question: str,
343
+ generation_config: dict,
344
+ reasoning_mode: str,
345
+ reasoning_generation_config: Optional[dict] = None,
346
+ final_answer_instruction: str = "",
347
+ ) -> Tuple[str, Optional[str]]:
348
+ if reasoning_mode == "none":
349
+ return generate_answer(model, tokenizer, pixel_values, question, generation_config), None
350
+
351
+ if reasoning_mode == "prompt":
352
+ prompted_question = append_instruction(question, HIDDEN_REASONING_INSTRUCTION)
353
+ return generate_answer(model, tokenizer, pixel_values, prompted_question, generation_config), None
354
+
355
+ if reasoning_generation_config is None:
356
+ raise ValueError("reasoning_generation_config is required when reasoning_mode='two_pass'.")
357
+
358
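+ # Two-pass mode: elicit explicit reasoning first, then re-ask with the reasoning included and
+ # request only the final answer.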
+ reasoning_question = append_instruction(question, EXPLICIT_REASONING_INSTRUCTION)
359
+ reasoning = generate_answer(model, tokenizer, pixel_values, reasoning_question, reasoning_generation_config)
360
+ final_instruction = final_answer_instruction or DEFAULT_FINAL_ANSWER_INSTRUCTION
361
+ final_question = append_instruction(
362
+ question,
363
+ f"Reasoning:\n{reasoning}\n{final_instruction}",
364
+ )
365
+ answer = generate_answer(model, tokenizer, pixel_values, final_question, generation_config)
366
+ return answer, reasoning
367
+
368
+
369
+ def run_single(args):
370
+ tokenizer = AutoTokenizer.from_pretrained(
371
+ args.checkpoint,
372
+ trust_remote_code=True,
373
+ use_fast=False,
374
+ )
375
+ config = configure_model(args.checkpoint)
376
+ model = load_model(args.checkpoint, config, args)
377
+
378
+ if args.textvqa_jsonl:
379
+ image_path, prompt, question_id, answer = load_textvqa_sample(args.textvqa_jsonl, args.sample_index)
380
+ image_path = resolve_image_path(image_path, args.data_root, args.textvqa_jsonl)
381
+ else:
382
+ image_path = args.image_path
383
+ prompt = args.prompt
384
+ question_id = None
385
+ answer = None
386
+
387
+ if not image_path or not prompt:
388
+ raise ValueError("Provide either --image-path and --prompt, or --textvqa-jsonl.")
389
+ if not os.path.exists(image_path):
390
+ raise FileNotFoundError(f"image not found: {image_path}")
391
+
392
+ image_size = config.force_image_size or config.vision_config.image_size
393
+ pixel_values = build_pixel_values(
394
+ image_path=image_path,
395
+ image_size=image_size,
396
+ dynamic=args.dynamic,
397
+ use_thumbnail=config.use_thumbnail,
398
+ max_num=args.max_num,
399
+ )
400
+
401
+ generation_config = make_generation_config(
402
+ num_beams=args.num_beams,
403
+ max_new_tokens=args.max_new_tokens,
404
+ temperature=args.temperature,
405
+ )
406
+ reasoning_generation_config = None
407
+ if args.reasoning_mode == "two_pass":
408
+ reasoning_generation_config = make_reasoning_generation_config(generation_config, args)
409
+ raw_prediction = None
410
+ if args.reasoning_mode == "custom_prompt":
411
+ raw_prediction = generate_answer(
412
+ model,
413
+ tokenizer,
414
+ pixel_values,
415
+ render_custom_prompt(prompt, args.custom_prompt_template),
416
+ generation_config,
417
+ )
418
+ prediction = (
419
+ extract_final_answer(raw_prediction, args.final_answer_prefix)
420
+ if args.extract_final_answer
421
+ else raw_prediction
422
+ )
423
+ reasoning = None
424
+ else:
425
+ prediction, reasoning = generate_answer_with_reasoning(
426
+ model=model,
427
+ tokenizer=tokenizer,
428
+ pixel_values=pixel_values,
429
+ question=prompt,
430
+ generation_config=generation_config,
431
+ reasoning_mode=args.reasoning_mode,
432
+ reasoning_generation_config=reasoning_generation_config,
433
+ final_answer_instruction=args.answer_format_prompt,
434
+ )
435
+
436
+ print(f"checkpoint: {args.checkpoint}")
437
+ print(f"image_path: {image_path}")
438
+ if question_id is not None:
439
+ print(f"question_id: {question_id}")
440
+ if answer is not None:
441
+ print(f"reference_answer: {answer}")
442
+ print(f"prompt: {prompt}")
443
+ if reasoning is not None:
444
+ print(f"reasoning: {reasoning}")
445
+ if raw_prediction is not None:
446
+ print(f"raw_prediction: {raw_prediction}")
447
+ print(f"prediction: {prediction}")
448
+
449
+
450
+ def run_textvqa_eval(args):
451
+ if args.dataset not in ds_collections:
452
+ raise KeyError(f"unknown dataset: {args.dataset}")
453
+
454
+ ds_cfg = ds_collections[args.dataset]
455
+ test_file = args.test_file or resolve_dataset_path(args.data_root, ds_cfg["test"])
456
+ train_file = args.train_file or resolve_dataset_path(args.data_root, ds_cfg["train"])
457
+ annotation_file = args.annotation_file or resolve_dataset_path(args.data_root, ds_cfg["annotation"])
458
+
459
+ tokenizer = AutoTokenizer.from_pretrained(
460
+ args.checkpoint,
461
+ trust_remote_code=True,
462
+ use_fast=False,
463
+ )
464
+ config = configure_model(args.checkpoint)
465
+ model = load_model(args.checkpoint, config, args)
466
+
467
+ image_size = config.force_image_size or config.vision_config.image_size
468
+ prompt = args.prompt or dataset_prompt(args.dataset)
469
+ dataset = VQADataset(
470
+ train=train_file,
471
+ test=test_file,
472
+ prompt=prompt,
473
+ few_shot=0,
474
+ input_size=image_size,
475
+ dynamic_image_size=args.dynamic,
476
+ use_thumbnail=config.use_thumbnail,
477
+ max_num=args.max_num,
478
+ )
479
+
480
+ num_items = len(dataset) if args.limit is None else min(len(dataset), args.limit)
481
+ result_items = []
482
+ generation_config = make_generation_config(
483
+ num_beams=args.num_beams,
484
+ max_new_tokens=args.max_new_tokens or ds_cfg["max_new_tokens"],
485
+ temperature=args.temperature,
486
+ )
487
+ reasoning_generation_config = None
488
+ if args.reasoning_mode == "two_pass":
489
+ reasoning_generation_config = make_reasoning_generation_config(generation_config, args)
490
+
491
+ for idx in range(num_items):
492
+ sample = dataset[idx]
493
+ raw_prediction = None
494
+ if args.reasoning_mode == "custom_prompt":
495
+ raw_prediction = generate_answer(
496
+ model,
497
+ tokenizer,
498
+ sample["pixel_values"],
499
+ render_custom_prompt(sample["question"], args.custom_prompt_template),
500
+ generation_config,
501
+ )
502
+ prediction = (
503
+ extract_final_answer(raw_prediction, args.final_answer_prefix)
504
+ if args.extract_final_answer
505
+ else raw_prediction
506
+ )
507
+ reasoning = None
508
+ else:
509
+ prediction, reasoning = generate_answer_with_reasoning(
510
+ model=model,
511
+ tokenizer=tokenizer,
512
+ pixel_values=sample["pixel_values"],
513
+ question=sample["question"],
514
+ generation_config=generation_config,
515
+ reasoning_mode=args.reasoning_mode,
516
+ reasoning_generation_config=reasoning_generation_config,
517
+ )
518
+ result_item = {
519
+ "question": sample["question"],
520
+ "question_id": sample["question_id"],
521
+ "answer": prediction,
522
+ "annotation": sample["annotation"],
523
+ }
524
+ if raw_prediction is not None:
525
+ result_item["raw_answer"] = raw_prediction
526
+ if args.save_reasoning and reasoning is not None:
527
+ result_item["reasoning"] = reasoning
528
+ result_items.append(result_item)
529
+ if (idx + 1) % args.log_every == 0 or idx + 1 == num_items:
530
+ print(f"[{idx + 1}/{num_items}] question_id={sample['question_id']} prediction={prediction}")
531
+ sys.stdout.flush()
532
+
533
+ os.makedirs(args.out_dir, exist_ok=True)
534
+ run_name = args.run_name or f"{args.dataset}_{os.path.basename(args.checkpoint)}"
535
+ output_file = os.path.join(args.out_dir, f"{run_name}.json")
536
+ with open(output_file, "w") as f:
537
+ json.dump(result_items, f, ensure_ascii=False, indent=2)
538
+
539
+ evaluator, eval_entries = build_eval_entries(result_items, annotation_file)
540
+ accuracy = evaluator.eval_pred_list(eval_entries)
541
+ print(f"dataset: {args.dataset}")
542
+ print(f"checkpoint: {args.checkpoint}")
543
+ print(f"count: {num_items}")
544
+ print(f"accuracy: {accuracy:.6f}")
545
+ print(f"results_file: {output_file}")
546
+
547
+
548
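+ # Example invocation (hypothetical paths):
+ #   python eval/vqa/run_single_model_native.py --checkpoint /path/to/InternVL2-8B --mode textvqa_eval --dynamic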
+ def main():
549
+ parser = argparse.ArgumentParser()
550
+ parser.add_argument("--checkpoint", type=str, required=True)
551
+ parser.add_argument("--mode", type=str, choices=["single", "textvqa_eval"], default="single")
552
+ parser.add_argument("--image-path", type=str, default="")
553
+ parser.add_argument("--prompt", type=str, default="")
554
+ parser.add_argument("--textvqa-jsonl", type=str, default="")
555
+ parser.add_argument("--sample-index", type=int, default=0)
556
+ parser.add_argument("--dataset", type=str, default="textvqa_val")
557
+ parser.add_argument("--data-root", type=str, default=str(REPO_ROOT))
558
+ parser.add_argument("--test-file", type=str, default="")
559
+ parser.add_argument("--train-file", type=str, default="")
560
+ parser.add_argument("--annotation-file", type=str, default="")
561
+ parser.add_argument("--out-dir", type=str, default=str(REPO_ROOT / "outputs" / "native_single"))
562
+ parser.add_argument("--run-name", type=str, default="")
563
+ parser.add_argument("--limit", type=int, default=None)
564
+ parser.add_argument("--max-new-tokens", type=int, default=0)
565
+ parser.add_argument("--num-beams", type=int, default=1)
566
+ parser.add_argument("--temperature", type=float, default=0.0)
567
+ parser.add_argument("--reasoning-mode", type=str, choices=["none", "prompt", "two_pass", "custom_prompt"], default="none")
568
+ parser.add_argument("--reasoning-max-new-tokens", type=int, default=64)
569
+ parser.add_argument("--reasoning-temperature", type=float, default=0.0)
570
+ parser.add_argument("--save-reasoning", action="store_true")
571
+ parser.add_argument("--answer-format-prompt", type=str, default="")
572
+ parser.add_argument("--custom-prompt-template", type=str, default="")
573
+ parser.add_argument("--extract-final-answer", action="store_true")
574
+ parser.add_argument("--final-answer-prefix", type=str, default="Final answer:")
575
+ parser.add_argument("--dynamic", action="store_true")
576
+ parser.add_argument("--max-num", type=int, default=6)
577
+ parser.add_argument("--log-every", type=int, default=20)
578
+ parser.add_argument("--seed", type=int, default=0)
579
+ parser.add_argument("--gpus-per-model", type=int, default=1)
580
+ parser.add_argument("--auto", action="store_true")
581
+ parser.add_argument("--load-in-8bit", action="store_true")
582
+ parser.add_argument("--load-in-4bit", action="store_true")
583
+ args = parser.parse_args()
584
+
585
+ if not torch.cuda.is_available():
586
+ raise RuntimeError("CUDA is required for native InternVL inference.")
587
+
588
+ random.seed(args.seed)
589
+ torch.manual_seed(args.seed)
590
+
591
+ if args.mode == "single":
592
+ if args.max_new_tokens == 0:
593
+ args.max_new_tokens = 32
594
+ run_single(args)
595
+ return
596
+
597
+ if args.max_new_tokens == 0:
598
+ args.max_new_tokens = None
599
+ run_textvqa_eval(args)
600
+
601
+
602
+ if __name__ == "__main__":
603
+ main()
isolated/sim_greedy/eval/vqa/run_shared_vision_guided_textvqa.py ADDED
@@ -0,0 +1,1742 @@
1
+ import argparse
2
+ import inspect
3
+ import json
4
+ import math
5
+ import os
6
+ import random
7
+ import re
8
+ import sys
9
+ import time
10
+ from functools import wraps
11
+ from pathlib import Path
12
+ from typing import Dict, List, Optional, Tuple
13
+
14
+ import torch
15
+ from PIL import Image
16
+ from transformers import AutoTokenizer
17
+ from transformers.generation.logits_process import LogitsProcessorList
18
+
19
+
20
+ REPO_ROOT = Path(__file__).resolve().parents[2]
21
+ DEFAULT_UPSTREAM_SGL_ROOT = Path(os.environ.get("UPSTREAM_SGL_ROOT", str(REPO_ROOT / "upstream_sgl")))
22
+ if str(DEFAULT_UPSTREAM_SGL_ROOT) not in sys.path:
23
+ sys.path.insert(0, str(DEFAULT_UPSTREAM_SGL_ROOT))
24
+ eval_vqa_path = DEFAULT_UPSTREAM_SGL_ROOT / "eval" / "vqa"
25
+ if str(eval_vqa_path) not in sys.path:
26
+ sys.path.insert(0, str(eval_vqa_path))
27
+
28
+ from internvl.conversation import get_conv_template
29
+ from internvl.model.internvl_chat import InternVLChatModel
30
+ from internvl.model.internvl_chat.configuration_internvl_chat import InternVLChatConfig
31
+ from internvl.train.dataset import build_transform, dynamic_preprocess
32
+ from textvqa_eval import TextVQAAccuracyEvaluator
33
+
34
+
35
+ BASE_PROMPT = "Answer the question using a single word or phrase."
36
+ BASE_PROMPT_SUFFIX = " " + BASE_PROMPT
37
+ HIDDEN_REASONING_INSTRUCTION = (
38
+ "Think through the relevant visual evidence and any text in the image step by step internally before answering."
39
+ )
40
+ EXPLICIT_REASONING_INSTRUCTION = (
41
+ "Explain your reasoning step by step using the relevant visual evidence and any text in the image."
42
+ )
43
+ DEFAULT_FINAL_ANSWER_INSTRUCTION = "Provide the final answer only."
44
+ GUIDE_ATTENTION_COT_PROMPT_TEMPLATE = """You are solving a TextVQA task.
45
+ Read the image carefully, especially visible text.
46
+ Reason through the answer in at least 5 explicit steps.
47
+ Do not skip the reasoning.
48
+ Question: {question}
49
+ 1.
50
+ 2.
51
+ 3.
52
+ 4.
53
+ 5.
54
+ Final answer:"""
55
+ GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE = """You are solving a TextVQA task.
56
+
57
+ Read the image carefully, especially all visible text.
58
+ Reason using only evidence from the image and OCR text.
59
+ You must output exactly 5 numbered reasoning steps.
60
+ Each step must be a short sentence.
61
+ Do not provide the final answer.
62
+ Do not provide a summary.
63
+ Do not output any text other than the 5 numbered steps.
64
+
65
+ Question: {question}
66
+
67
+ 1. Identify the most relevant visible text or object.
68
+ 2. Explain how that evidence relates to the question.
69
+ 3. Check for another supporting clue in the image.
70
+ 4. Resolve any ambiguity using the strongest evidence.
71
+ 5. State the final reasoning conclusion without giving the final answer."""
72
+ GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION = (
73
+ "First reason step by step using the relevant visual evidence and OCR text. "
74
+ "Then end with a new line in the exact format: Answer: <short answer>."
75
+ )
76
+ GUIDE_TEXT_HINT_INSTRUCTION = (
77
+ "Give a very short guide hint grounded in the image and OCR text. Use a short phrase, not a full sentence."
78
+ )
79
+ GUIDED_DECODE_INSTRUCTION = (
80
+ "Use the guide hint only if it matches the image. Answer the question using a single word or phrase."
81
+ )
82
+
83
+ REASONING_FILTER_STOPWORDS = {
84
+ "a", "an", "and", "are", "as", "at", "be", "because", "but", "by", "for", "from", "has",
85
+ "have", "if", "in", "into", "is", "it", "its", "of", "on", "or", "that", "the", "their",
86
+ "there", "this", "those", "to", "was", "were", "with",
87
+ }
88
+ REASONING_FILTER_TEMPLATE_WORDS = {
89
+ "answer", "conclusion", "directly", "evidence", "final", "identify", "indicating",
90
+ "question", "reason", "reasoning", "relates", "relevant", "resolve", "shows", "state",
91
+ "strongest", "supporting", "supports", "using", "visible",
92
+ }
93
+ REASONING_FILTER_POSITION_WORDS = {
94
+ "left", "right", "top", "bottom", "middle", "center", "centre", "upper", "lower",
95
+ }
96
+ REASONING_FILTER_COLOR_WORDS = {
97
+ "black", "blue", "brown", "gold", "gray", "green", "grey", "orange", "pink",
98
+ "purple", "red", "silver", "white", "yellow",
99
+ }
100
+ REASONING_FILTER_KEEP_POS = {"NOUN", "PROPN", "ADJ"}
101
+ SPACY_REASONING_NLP = None
102
+ SPACY_REASONING_LOAD_ATTEMPTED = False
103
+ SPACY_REASONING_FALLBACK_WARNED = False
104
+
105
+
106
+ def resolve_hf_snapshot(path: str) -> str:
107
+ path = os.path.abspath(path)
108
+ config_path = os.path.join(path, "config.json")
109
+ if os.path.isfile(config_path):
110
+ return path
111
+
112
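+ # A bare Hugging Face cache directory stores weights under snapshots/<revision>; refs/main
+ # records the revision currently checked out.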
+ refs_main = os.path.join(path, "refs", "main")
113
+ if os.path.isfile(refs_main):
114
+ with open(refs_main) as f:
115
+ revision = f.read().strip()
116
+ snapshot_path = os.path.join(path, "snapshots", revision)
117
+ if os.path.isfile(os.path.join(snapshot_path, "config.json")):
118
+ return snapshot_path
119
+
120
+ raise FileNotFoundError(f"Could not resolve checkpoint snapshot from: {path}")
121
+
122
+
123
+ def configure_model(checkpoint_path: str, use_flash_attn: bool) -> InternVLChatConfig:
124
+ checkpoint_path = resolve_hf_snapshot(checkpoint_path)
125
+ config = InternVLChatConfig.from_json_file(os.path.join(checkpoint_path, "config.json"))
126
+ llm_arch = config.llm_config.architectures[0]
127
+ if llm_arch == "InternLM2ForCausalLM":
128
+ config.llm_config.attn_implementation = "eager"
129
+ else:
130
+ config.llm_config._attn_implementation = "eager"
131
+ config.vision_config.use_flash_attn = use_flash_attn
132
+ return config
133
+
134
+
135
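+ # Compatibility shim: older InternLM2 remote code defines _sample(..., logits_warper)
+ # without a default, while newer transformers no longer passes logits_warper to custom
+ # _sample overrides. Wrap _sample so a missing warper becomes an empty LogitsProcessorList.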
+ def patch_internlm2_sample_signature(model: InternVLChatModel) -> None:
136
+ language_model_cls = model.language_model.__class__
137
+ sample_fn = getattr(language_model_cls, "_sample", None)
138
+ if sample_fn is None or getattr(sample_fn, "_sgl_logits_warper_compat", False):
139
+ return
140
+
141
+ signature = inspect.signature(sample_fn)
142
+ logits_warper_param = signature.parameters.get("logits_warper")
143
+ if logits_warper_param is None or logits_warper_param.default is not inspect._empty:
144
+ return
145
+
146
+ @wraps(sample_fn)
147
+ def compat_sample(
148
+ self,
149
+ input_ids: torch.LongTensor,
150
+ logits_processor,
151
+ stopping_criteria,
152
+ generation_config,
153
+ synced_gpus: bool,
154
+ streamer=None,
155
+ logits_warper=None,
156
+ **model_kwargs,
157
+ ):
158
+ # transformers>=4.49 folds samplers into logits_processor and no longer
159
+ # passes logits_warper to custom _sample overrides.
160
+ if logits_warper is None:
161
+ logits_warper = LogitsProcessorList()
162
+ return sample_fn(
163
+ self,
164
+ input_ids=input_ids,
165
+ logits_processor=logits_processor,
166
+ stopping_criteria=stopping_criteria,
167
+ generation_config=generation_config,
168
+ synced_gpus=synced_gpus,
169
+ streamer=streamer,
170
+ logits_warper=logits_warper,
171
+ **model_kwargs,
172
+ )
173
+
174
+ compat_sample._sgl_logits_warper_compat = True
175
+ language_model_cls._sample = compat_sample
176
+
177
+
178
+ def load_model(
179
+ checkpoint_path: str,
180
+ config: InternVLChatConfig,
181
+ auto: bool,
182
+ load_in_8bit: bool,
183
+ load_in_4bit: bool,
184
+ ) -> InternVLChatModel:
185
+ checkpoint_path = resolve_hf_snapshot(checkpoint_path)
186
+ kwargs = {"device_map": "auto"} if auto else {}
187
+ model = InternVLChatModel.from_pretrained(
188
+ checkpoint_path,
189
+ config=config,
190
+ low_cpu_mem_usage=True,
191
+ torch_dtype=torch.bfloat16,
192
+ load_in_8bit=load_in_8bit,
193
+ load_in_4bit=load_in_4bit,
194
+ **kwargs,
195
+ ).eval()
196
+ if not auto and not load_in_8bit and not load_in_4bit:
197
+ model = model.cuda()
198
+ patch_internlm2_sample_signature(model)
199
+ return model
200
+
201
+
202
+ def build_decode_model(
203
+ guide_model: InternVLChatModel,
204
+ large_checkpoint: str,
205
+ use_flash_attn: bool,
206
+ auto: bool,
207
+ load_in_8bit: bool,
208
+ load_in_4bit: bool,
209
+ ) -> Tuple[InternVLChatModel, AutoTokenizer]:
210
+ large_checkpoint = resolve_hf_snapshot(large_checkpoint)
211
+ large_config = configure_model(large_checkpoint, use_flash_attn=use_flash_attn)
212
+ large_source = load_model(
213
+ large_checkpoint,
214
+ large_config,
215
+ auto=auto,
216
+ load_in_8bit=load_in_8bit,
217
+ load_in_4bit=load_in_4bit,
218
+ )
219
+
220
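+ # Build the decode model by pairing the large checkpoint's language model and mlp1
+ # projector with the guide model's vision tower, so both branches share one set of
+ # raw visual tokens per image.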
+ decode_model = InternVLChatModel(
221
+ large_config,
222
+ vision_model=guide_model.vision_model,
223
+ language_model=large_source.language_model,
224
+ )
225
+ decode_model.config.vision_config = guide_model.config.vision_config
226
+ decode_model.vision_model.config = guide_model.config.vision_config
227
+ decode_model.mlp1 = large_source.mlp1
228
+ decode_model.template = large_source.template
229
+ decode_model.system_message = large_source.system_message
230
+ decode_model.num_image_token = large_source.num_image_token
231
+ decode_model.ps_version = guide_model.ps_version
232
+ decode_model.select_layer = guide_model.select_layer
233
+ decode_model.downsample_ratio = guide_model.downsample_ratio
234
+ decode_model.img_context_token_id = large_source.img_context_token_id
235
+ decode_model.eval()
236
+ patch_internlm2_sample_signature(decode_model)
237
+
238
+ large_tokenizer = AutoTokenizer.from_pretrained(
239
+ large_checkpoint,
240
+ trust_remote_code=True,
241
+ use_fast=False,
242
+ )
243
+ return decode_model, large_tokenizer
244
+
245
+
246
+ def model_text_device(model: InternVLChatModel) -> torch.device:
247
+ return next(model.language_model.get_input_embeddings().parameters()).device
248
+
249
+
250
+ def model_vision_device(model: InternVLChatModel) -> torch.device:
251
+ return next(model.vision_model.parameters()).device
252
+
253
+
254
+ def resolve_image_path(image_path: str, data_root: str, jsonl_dir: str) -> str:
255
+ candidates = []
256
+ if os.path.isabs(image_path):
257
+ candidates.append(image_path)
258
+ candidates.append(os.path.join(data_root, image_path))
259
+ if image_path.startswith("data/"):
260
+ candidates.append(os.path.join(data_root, image_path[len("data/"):]))
261
+ candidates.append(os.path.join(jsonl_dir, image_path))
262
+ candidates.append(os.path.join(jsonl_dir, os.path.basename(image_path)))
263
+
264
+ for candidate in candidates:
265
+ if os.path.exists(candidate):
266
+ return candidate
267
+ raise FileNotFoundError(f"Could not resolve image path: {image_path}")
268
+
269
+
270
+ class TextVQADataset:
271
+ def __init__(self, jsonl_path: str, data_root: str, image_size: int, dynamic: bool, use_thumbnail: bool, max_num: int):
272
+ with open(jsonl_path) as f:
273
+ self.items = [json.loads(line) for line in f if line.strip()]
274
+ self.jsonl_dir = os.path.dirname(jsonl_path)
275
+ self.data_root = data_root
276
+ self.image_size = image_size
277
+ self.dynamic = dynamic
278
+ self.use_thumbnail = use_thumbnail
279
+ self.max_num = max_num
280
+ self.transform = build_transform(is_train=False, input_size=image_size)
281
+
282
+ def __len__(self) -> int:
283
+ return len(self.items)
284
+
285
+ def __getitem__(self, idx: int) -> Dict[str, object]:
286
+ item = self.items[idx]
287
+ image_path = resolve_image_path(item["image"], self.data_root, self.jsonl_dir)
288
+ image = Image.open(image_path).convert("RGB")
289
+ if self.dynamic:
290
+ images = dynamic_preprocess(
291
+ image,
292
+ image_size=self.image_size,
293
+ use_thumbnail=self.use_thumbnail,
294
+ max_num=self.max_num,
295
+ )
296
+ else:
297
+ images = [image]
298
+ pixel_values = torch.stack([self.transform(img) for img in images])
299
+ return {
300
+ "question_id": item["question_id"],
301
+ "question": item["question"],
302
+ "pixel_values": pixel_values,
303
+ "annotation": item.get("answer", ""),
304
+ }
305
+
306
+
307
+ def load_annotations(annotation_file: str) -> Dict[int, List[str]]:
308
+ with open(annotation_file) as f:
309
+ annotations = json.load(f)["annotations"]
310
+ return {
311
+ item["question_id"]: [answer["answer"] for answer in item["answers"]]
312
+ for item in annotations
313
+ }
314
+
315
+
316
+ def build_query(model: InternVLChatModel, tokenizer, question: str, num_patches: int):
317
+ img_context_token = "<IMG_CONTEXT>"
318
+ img_start_token = "<img>"
319
+ img_end_token = "</img>"
320
+
321
+ if "<image>" not in question:
322
+ question = "<image>\n" + question
323
+
324
+ model.img_context_token_id = tokenizer.convert_tokens_to_ids(img_context_token)
325
+
326
+ template = get_conv_template(model.template)
327
+ template.system_message = model.system_message
328
+ template.append_message(template.roles[0], question)
329
+ template.append_message(template.roles[1], None)
330
+ query = template.get_prompt()
331
+
332
+ image_tokens = img_start_token + img_context_token * model.num_image_token * num_patches + img_end_token
333
+ query = query.replace("<image>", image_tokens, 1)
334
+ return query, template
335
+
336
+
337
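+ # Encode the image once with the shared ViT and return pixel-shuffled patch features
+ # before any language projection, so the guide and decode branches can each apply
+ # their own mlp1 to the same raw visual tokens.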
+ @torch.inference_mode()
338
+ def extract_shared_raw_visual_tokens(model: InternVLChatModel, pixel_values: torch.Tensor) -> torch.Tensor:
339
+ vision_device = model_vision_device(model)
340
+ pixel_values = pixel_values.to(device=vision_device, dtype=torch.bfloat16)
341
+ if model.select_layer == -1:
342
+ vit_embeds = model.vision_model(
343
+ pixel_values=pixel_values,
344
+ output_hidden_states=False,
345
+ return_dict=True,
346
+ ).last_hidden_state
347
+ else:
348
+ vit_embeds = model.vision_model(
349
+ pixel_values=pixel_values,
350
+ output_hidden_states=True,
351
+ return_dict=True,
352
+ ).hidden_states[model.select_layer]
353
+ vit_embeds = vit_embeds[:, 1:, :]
354
+ h = w = int(vit_embeds.shape[1] ** 0.5)
355
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
356
+ vit_embeds = model.pixel_shuffle(vit_embeds, scale_factor=model.downsample_ratio)
357
+ return vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
358
+
359
+
360
+ @torch.inference_mode()
361
+ def project_visual_tokens(model: InternVLChatModel, raw_visual_tokens: torch.Tensor) -> torch.Tensor:
362
+ mlp_device = next(model.mlp1.parameters()).device
363
+ raw_visual_tokens = raw_visual_tokens.to(device=mlp_device, dtype=torch.bfloat16)
364
+ return model.mlp1(raw_visual_tokens)
365
+
366
+
367
+ @torch.inference_mode()
368
+ def build_input_embeds_from_visual_features(
369
+ model: InternVLChatModel,
370
+ input_ids: torch.Tensor,
371
+ visual_features: torch.Tensor,
372
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
373
+ input_embeds = model.language_model.get_input_embeddings()(input_ids)
374
+ batch_size, seq_len, hidden_size = input_embeds.shape
375
+ flat_input_embeds = input_embeds.reshape(batch_size * seq_len, hidden_size)
376
+ flat_input_ids = input_ids.reshape(batch_size * seq_len)
377
+ selected = flat_input_ids == model.img_context_token_id
378
+ if selected.sum().item() == 0:
379
+ raise ValueError("No image context tokens found in input_ids.")
380
+ flat_input_embeds[selected] = visual_features.reshape(-1, hidden_size).to(flat_input_embeds.device)
381
+ return flat_input_embeds.reshape(batch_size, seq_len, hidden_size), flat_input_ids
382
+
383
+
384
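+ # Guide-branch generation: build the chat prompt, splice the projected visual tokens
+ # into the input embeddings at the <IMG_CONTEXT> positions, and generate, returning
+ # attentions when the config requests them so per-step visual-token importance can be
+ # aggregated afterwards.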
+ @torch.inference_mode()
385
+ def run_guide_generation(
386
+ model: InternVLChatModel,
387
+ tokenizer,
388
+ projected_visual_tokens: torch.Tensor,
389
+ question: str,
390
+ generation_config: dict,
391
+ ) -> Dict[str, object]:
392
+ query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
393
+ model_inputs = tokenizer(query, return_tensors="pt")
394
+ input_device = model_text_device(model)
395
+ input_ids = model_inputs["input_ids"].to(input_device)
396
+ attention_mask = model_inputs["attention_mask"].to(input_device)
397
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
398
+ input_embeds, flat_input_ids = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
399
+
400
+ visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero()
401
+ visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1]
402
+
403
+ run_config = dict(generation_config)
404
+ run_config["eos_token_id"] = eos_token_id
405
+
406
+ outputs = model.language_model.generate(
407
+ inputs_embeds=input_embeds,
408
+ attention_mask=attention_mask,
409
+ generation_config=None,
410
+ output_hidden_states=None,
411
+ return_dict=None,
412
+ use_cache=True,
413
+ visual_token_index=(visual_start_index, visual_end_index),
414
+ **run_config,
415
+ )
416
+ response = tokenizer.batch_decode(outputs["sequences"], skip_special_tokens=True)[0]
417
+ response = response.split(template.sep)[0].strip()
418
+ return {
419
+ "response": response,
420
+ "outputs": outputs,
421
+ "input_embeds": input_embeds,
422
+ "flat_input_ids": flat_input_ids,
423
+ "attention_mask": attention_mask,
424
+ "visual_token_index": (visual_start_index, visual_end_index),
425
+ }
426
+
427
+
428
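+ # For a single decoding step, accumulate the attention mass that text positions place
+ # on the visual-token span, summed over layers and heads. The prefill step carries the
+ # full query length (attention.shape[2] != 1); later steps attend from one new token.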
+ def aggregate_attention_from_step(attentions, visual_token_index: Tuple[int, int]) -> torch.Tensor:
429
+ visual_start_index, visual_end_index = visual_token_index
430
+ visual_token_num = visual_end_index - visual_start_index + 1
431
+ visual_token_importance = None
432
+
433
+ for attention in attentions:
434
+ if attention is None:
435
+ continue
436
+ if visual_token_importance is None:
437
+ visual_token_importance = torch.zeros(
438
+ visual_token_num,
439
+ device=attention.device,
440
+ dtype=torch.float32,
441
+ )
442
+
443
+ merged_attention = attention[0].sum(dim=0)
444
+ if attention.shape[2] != 1:
445
+ visual_token_importance += merged_attention[
446
+ visual_end_index + 1 :,
447
+ visual_start_index : visual_end_index + 1,
448
+ ].sum(dim=0)
449
+ else:
450
+ visual_token_importance += merged_attention[
451
+ 0:1,
452
+ visual_start_index : visual_end_index + 1,
453
+ ].sum(dim=0)
454
+
455
+ if visual_token_importance is None:
456
+ raise RuntimeError("Guide model did not return layer attentions for the current decoding step.")
457
+ return visual_token_importance
458
+
459
+
460
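+ # Lazily load spaCy's en_core_web_sm once per process; if it cannot be loaded, the
+ # reasoning filter falls back to the character-level heuristics defined below.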
+ def get_reasoning_spacy_nlp():
461
+ global SPACY_REASONING_NLP, SPACY_REASONING_LOAD_ATTEMPTED
462
+ if SPACY_REASONING_LOAD_ATTEMPTED:
463
+ return SPACY_REASONING_NLP
464
+
465
+ SPACY_REASONING_LOAD_ATTEMPTED = True
466
+ try:
467
+ import spacy
468
+
469
+ SPACY_REASONING_NLP = spacy.load("en_core_web_sm", disable=["parser", "lemmatizer"])
470
+ except Exception:
471
+ SPACY_REASONING_NLP = None
472
+ return SPACY_REASONING_NLP
473
+
474
+
475
+ def should_keep_reasoning_heuristic_token(token_text: str) -> bool:
476
+ stripped = token_text.strip()
477
+ if not stripped:
478
+ return False
479
+
480
+ lowered = stripped.lower()
481
+ if re.fullmatch(r"\d+[.)]?", stripped):
482
+ return False
483
+ if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS:
484
+ return False
485
+ if lowered in REASONING_FILTER_POSITION_WORDS or lowered in REASONING_FILTER_COLOR_WORDS:
486
+ return True
487
+ if any(ch.isdigit() for ch in stripped):
488
+ return True
489
+ if any(ch.isupper() for ch in stripped):
490
+ return True
491
+ if any(ch in ".:/-@&" for ch in stripped):
492
+ return True
493
+ alpha_count = sum(ch.isalpha() for ch in stripped)
494
+ return alpha_count >= 4
495
+
496
+
497
+ def should_keep_reasoning_doc_token(token) -> bool:
498
+ stripped = token.text.strip()
499
+ if not stripped:
500
+ return False
501
+
502
+ lowered = stripped.lower()
503
+ if token.is_punct or token.is_space:
504
+ return False
505
+ if lowered in REASONING_FILTER_STOPWORDS or lowered in REASONING_FILTER_TEMPLATE_WORDS:
506
+ return False
507
+ if token.pos_ in REASONING_FILTER_KEEP_POS:
508
+ return True
509
+ return False
510
+
511
+
512
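+ # Decode the generated ids token by token and record each token's character interval
+ # in the concatenated string, so word-level keep/drop decisions can later be projected
+ # back onto individual attention steps.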
+ def build_generated_token_spans(tokenizer, generated_ids: torch.Tensor) -> Tuple[str, List[Tuple[int, int]]]:
513
+ decoded_text = ""
514
+ token_spans: List[Tuple[int, int]] = []
515
+ for token_id in generated_ids.detach().cpu().tolist():
516
+ piece = tokenizer.decode([int(token_id)], skip_special_tokens=True, clean_up_tokenization_spaces=False)
517
+ start = len(decoded_text)
518
+ decoded_text += piece
519
+ token_spans.append((start, len(decoded_text)))
520
+ return decoded_text, token_spans
521
+
522
+
523
+ def analyze_reasoning_filter(text: str, args) -> Tuple[List[Tuple[int, int]], str, List[Dict[str, object]]]:
524
+ if args.guide_reasoning_filter_mode == "none":
525
+ return [], "none", []
526
+
527
+ if args.guide_reasoning_filter_mode == "pos_ner":
528
+ nlp = get_reasoning_spacy_nlp()
529
+ if nlp is not None:
530
+ doc = nlp(text)
531
+ token_analysis = []
532
+ intervals = [
533
+ (token.idx, token.idx + len(token))
534
+ for token in doc
535
+ if should_keep_reasoning_doc_token(token)
536
+ ]
537
+ for token in doc:
538
+ token_analysis.append(
539
+ {
540
+ "text": token.text,
541
+ "lemma": token.lemma_,
542
+ "pos": token.pos_,
543
+ "tag": token.tag_,
544
+ "dep": token.dep_,
545
+ "ent_type": token.ent_type_,
546
+ "like_num": bool(getattr(token, "like_num", False)),
547
+ "like_url": bool(getattr(token, "like_url", False)),
548
+ "is_stop": bool(token.is_stop),
549
+ "keep": should_keep_reasoning_doc_token(token),
550
+ }
551
+ )
552
+ return intervals, "spacy_pos_ner", token_analysis
553
+
554
+ token_analysis = []
555
+ intervals = [
556
+ (match.start(), match.end())
557
+ for match in re.finditer(r"\S+", text)
558
+ if should_keep_reasoning_heuristic_token(match.group(0))
559
+ ]
560
+ for match in re.finditer(r"\S+", text):
561
+ token_text = match.group(0)
562
+ token_analysis.append(
563
+ {
564
+ "text": token_text,
565
+ "lemma": token_text.lower(),
566
+ "pos": "",
567
+ "tag": "",
568
+ "dep": "",
569
+ "ent_type": "",
570
+ "like_num": any(ch.isdigit() for ch in token_text),
571
+ "like_url": "http" in token_text.lower() or "www." in token_text.lower(),
572
+ "is_stop": token_text.lower() in REASONING_FILTER_STOPWORDS,
573
+ "keep": should_keep_reasoning_heuristic_token(token_text),
574
+ }
575
+ )
576
+ return intervals, "heuristic_fallback", token_analysis
577
+
578
+
579
+ def build_reasoning_attention_step_mask_and_debug(tokenizer, outputs, args) -> Tuple[Optional[List[bool]], Dict[str, object]]:
580
+ if args.guide_reasoning_filter_mode == "none":
581
+ return None, {"backend": "none", "kept_tokens": [], "token_analysis": []}
582
+
583
+ sequences = outputs["sequences"][0]
584
+ decoded_text, token_spans = build_generated_token_spans(tokenizer, sequences)
585
+ intervals, backend, token_analysis = analyze_reasoning_filter(decoded_text, args)
586
+
587
+ global SPACY_REASONING_FALLBACK_WARNED
588
+ if backend == "heuristic_fallback" and not SPACY_REASONING_FALLBACK_WARNED:
589
+ print("Warning: spaCy POS/NER model unavailable; guide reasoning filter is using heuristic fallback.")
590
+ SPACY_REASONING_FALLBACK_WARNED = True
591
+
592
+ debug_info = {
593
+ "backend": backend,
594
+ "token_analysis": token_analysis,
595
+ "kept_tokens": [token["text"] for token in token_analysis if token.get("keep")],
596
+ }
597
+ if not intervals:
598
+ return None, debug_info
599
+
600
+ step_mask = []
601
+ for start, end in token_spans:
602
+ if start == end:
603
+ step_mask.append(False)
604
+ continue
605
+ keep = any(start < interval_end and end > interval_start for interval_start, interval_end in intervals)
606
+ step_mask.append(keep)
607
+
608
+ debug_info["step_mask"] = step_mask
609
+ if not any(step_mask):
610
+ return None, debug_info
611
+ return step_mask, debug_info
612
+
613
+
614
+ def aggregate_attention_from_generation_outputs(
615
+ outputs,
616
+ visual_token_index: Tuple[int, int],
617
+ step_mask: Optional[List[bool]] = None,
618
+ ) -> torch.Tensor:
619
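+ # Note: the attribute name "aggregated_viusal_token_attention" (sic) is left as-is; it
+ # appears to match the spelling used by the generation code that sets it, so do not
+ # "correct" it here without also changing the producer side.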
+ aggregated = getattr(outputs, "aggregated_viusal_token_attention", None)
620
+ if aggregated is not None and step_mask is None:
621
+ return aggregated.detach().float()
622
+
623
+ attentions = getattr(outputs, "attentions", None)
624
+ if not attentions:
625
+ raise RuntimeError("Guide generation did not return attentions; enable output_attentions.")
626
+
627
+ visual_token_importance = None
628
+ for step_idx, step_attentions in enumerate(attentions):
629
+ if step_mask is not None and (step_idx >= len(step_mask) or not step_mask[step_idx]):
630
+ continue
631
+ step_importance = aggregate_attention_from_step(step_attentions, visual_token_index)
632
+ if visual_token_importance is None:
633
+ visual_token_importance = step_importance
634
+ else:
635
+ visual_token_importance = visual_token_importance + step_importance
636
+
637
+ if visual_token_importance is None:
638
+ if step_mask is not None:
639
+ return aggregate_attention_from_generation_outputs(outputs, visual_token_index, step_mask=None)
640
+ raise RuntimeError("Guide generation returned no attention steps.")
641
+ return visual_token_importance
642
+
643
+
644
+ def aggregate_question_and_answer_attention_from_generation_outputs(
645
+ outputs,
646
+ visual_token_index: Tuple[int, int],
647
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
648
+ attentions = getattr(outputs, "attentions", None)
649
+ if not attentions:
650
+ raise RuntimeError("Guide generation did not return attentions; enable output_attentions.")
651
+
652
+ question_visual_token_importance = None
653
+ answer_visual_token_importance = None
654
+ for step_idx, step_attentions in enumerate(attentions):
655
+ step_importance = aggregate_attention_from_step(step_attentions, visual_token_index)
656
+ if step_idx == 0:
657
+ if question_visual_token_importance is None:
658
+ question_visual_token_importance = step_importance
659
+ else:
660
+ question_visual_token_importance = question_visual_token_importance + step_importance
661
+ else:
662
+ if answer_visual_token_importance is None:
663
+ answer_visual_token_importance = step_importance
664
+ else:
665
+ answer_visual_token_importance = answer_visual_token_importance + step_importance
666
+
667
+ if question_visual_token_importance is None and answer_visual_token_importance is None:
668
+ raise RuntimeError("Guide generation returned no attention steps.")
669
+ if question_visual_token_importance is None:
670
+ question_visual_token_importance = torch.zeros_like(answer_visual_token_importance)
671
+ if answer_visual_token_importance is None:
672
+ answer_visual_token_importance = torch.zeros_like(question_visual_token_importance)
673
+ return question_visual_token_importance, answer_visual_token_importance
674
+
675
+
676
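+ # Consistency score: teacher-force the guide answer through the same model with the
+ # pruning kwargs attached and take the product of the per-token probabilities; the
+ # actual visual-token dropping is handled by the customized language-model path, not here.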
+ @torch.inference_mode()
677
+ def compute_consistency_score(
678
+ model: InternVLChatModel,
679
+ input_embeds: torch.Tensor,
680
+ flat_input_ids: torch.Tensor,
681
+ attention_mask: torch.Tensor,
682
+ generated_ids: torch.Tensor,
683
+ visual_token_importance: torch.Tensor,
684
+ visual_token_index: Tuple[int, int],
685
+ consistency_token_ratio: float,
686
+ large_model_prune_selection: str,
687
+ ) -> torch.Tensor:
688
+ visual_start_index, visual_end_index = visual_token_index
689
+ new_input_ids_ = generated_ids
690
+ new_token_num = new_input_ids_.shape[-1]
691
+ new_input_embedding = torch.concatenate(
692
+ (input_embeds, model.language_model.get_input_embeddings()(new_input_ids_).unsqueeze(0)),
693
+ dim=1,
694
+ )
695
+ new_attention_mask = torch.concatenate(
696
+ (
697
+ attention_mask,
698
+ torch.ones((1, new_input_ids_.shape[0]), device=attention_mask.device, dtype=attention_mask.dtype),
699
+ ),
700
+ dim=-1,
701
+ )
702
+ new_input_ids = torch.concatenate((flat_input_ids, new_input_ids_), dim=-1)
703
+ consistency_generate_kwargs = {
704
+ "large_model_prune_layer": 0.0,
705
+ "large_model_prune_ratio": consistency_token_ratio,
706
+ "large_model_prune_selection": large_model_prune_selection,
707
+ "visual_token_index": (visual_start_index, visual_end_index),
708
+ "visual_token_importance": visual_token_importance,
709
+ "inputs_embeds": new_input_embedding,
710
+ "attention_mask": new_attention_mask,
711
+ "output_scores": False,
712
+ "output_attentions": False,
713
+ "return_dict_in_generate": False,
714
+ "use_cache": True,
715
+ }
+ consistency_generate_kwargs = model.language_model._get_initial_cache_position(new_input_ids, consistency_generate_kwargs)
721
+ model_inputs = model.language_model.prepare_inputs_for_generation(new_input_ids, **consistency_generate_kwargs)
722
+ consistency_output = model.language_model.forward(**model_inputs, return_dict=True)
723
+ consistency_score = torch.gather(
724
+ consistency_output["logits"][:, -new_token_num - 1 : -1, :].softmax(dim=-1),
725
+ index=new_input_ids_[None, :, None],
726
+ dim=-1,
727
+ )
728
+ return torch.prod(consistency_score)
729
+
730
+
731
+ @torch.inference_mode()
732
+ def run_guide_branch(
733
+ model: InternVLChatModel,
734
+ tokenizer,
735
+ projected_visual_tokens: torch.Tensor,
736
+ question: str,
737
+ generation_config: dict,
738
+ consistency_token_ratio: float,
739
+ args,
740
+ ) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
741
+ generation_result = run_guide_generation(
742
+ model,
743
+ tokenizer,
744
+ projected_visual_tokens,
745
+ question,
746
+ generation_config,
747
+ )
748
+ outputs = generation_result["outputs"]
749
+ question_visual_token_importance, answer_visual_token_importance = (
750
+ aggregate_question_and_answer_attention_from_generation_outputs(
751
+ outputs,
752
+ generation_result["visual_token_index"],
753
+ )
754
+ )
755
+ visual_token_importance = combine_question_and_answer_attention(
756
+ question_visual_token_importance,
757
+ answer_visual_token_importance,
758
+ args,
759
+ )
760
+ if args.large_model_prune_selection in {"similarity_greedy", "similarity_cover_greedy"}:
761
+ consistency_score = torch.tensor(1.0, device=visual_token_importance.device)
762
+ else:
763
+ consistency_score = compute_consistency_score(
764
+ model,
765
+ generation_result["input_embeds"],
766
+ generation_result["flat_input_ids"],
767
+ generation_result["attention_mask"],
768
+ outputs["sequences"][0],
769
+ visual_token_importance,
770
+ generation_result["visual_token_index"],
771
+ consistency_token_ratio,
772
+ args.large_model_prune_selection,
773
+ )
774
+ return (
775
+ generation_result["response"],
776
+ outputs.scores,
777
+ consistency_score,
778
+ visual_token_importance,
779
+ question_visual_token_importance,
780
+ answer_visual_token_importance,
781
+ )
782
+
783
+
784
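+ # Decode branch: generate with the large language model while passing the visual-token
+ # importance and prune plan through the generate kwargs, so the customized generate path
+ # can drop low-importance visual tokens during decoding.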
+ @torch.inference_mode()
785
+ def run_decode_branch(
786
+ model: InternVLChatModel,
787
+ tokenizer,
788
+ projected_visual_tokens: torch.Tensor,
789
+ question: str,
790
+ generation_config: dict,
791
+ visual_token_importance: torch.Tensor,
792
+ large_model_prune_layer: float,
793
+ large_model_prune_ratio: float,
794
+ large_model_prune_keep_count: int,
795
+ large_model_prune_selection: str,
796
+ large_model_similarity_target_coverage: float,
797
+ large_model_similarity_min_gain: float,
798
+ large_model_similarity_min_keep: int,
799
+ large_model_similarity_max_keep_ratio: float,
800
+ ) -> str:
801
+ query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
802
+ model_inputs = tokenizer(query, return_tensors="pt")
803
+ input_device = model_text_device(model)
804
+ input_ids = model_inputs["input_ids"].to(input_device)
805
+ attention_mask = model_inputs["attention_mask"].to(input_device)
806
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
807
+ input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
808
+
809
+ visual_token_index = (input_ids == model.img_context_token_id).view(-1).nonzero()
810
+ visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1]
811
+
812
+ run_config = dict(generation_config)
813
+ run_config["eos_token_id"] = eos_token_id
814
+ run_config["return_dict_in_generate"] = False
815
+ run_config["output_scores"] = False
816
+ run_config["output_attentions"] = False
817
+ run_config["large_model_prune_layer"] = large_model_prune_layer
818
+ run_config["large_model_prune_ratio"] = large_model_prune_ratio
819
+ run_config["large_model_prune_keep_count"] = large_model_prune_keep_count
820
+ run_config["large_model_prune_selection"] = large_model_prune_selection
821
+ run_config["large_model_similarity_target_coverage"] = large_model_similarity_target_coverage
822
+ run_config["large_model_similarity_min_gain"] = large_model_similarity_min_gain
823
+ run_config["large_model_similarity_min_keep"] = large_model_similarity_min_keep
824
+ run_config["large_model_similarity_max_keep_ratio"] = large_model_similarity_max_keep_ratio
825
+ run_config["visual_token_importance"] = visual_token_importance
826
+ run_config["visual_token_index"] = (visual_start_index, visual_end_index)
827
+
828
+ output_ids = model.language_model.generate(
829
+ inputs_embeds=input_embeds,
830
+ attention_mask=attention_mask,
831
+ generation_config=None,
832
+ output_hidden_states=None,
833
+ return_dict=None,
834
+ use_cache=True,
835
+ **run_config,
836
+ )
837
+ response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
838
+ return response.split(template.sep)[0].strip()
839
+
840
+
841
+ def make_generation_config(args) -> dict:
842
+ generation_config = {
843
+ "num_beams": args.num_beams,
844
+ "max_new_tokens": args.max_new_tokens,
845
+ "min_new_tokens": 1,
846
+ "do_sample": args.temperature > 0,
847
+ "return_dict_in_generate": True,
848
+ "output_scores": True,
849
+ "output_attentions": True,
850
+ }
851
+ if args.temperature > 0:
852
+ generation_config["temperature"] = args.temperature
853
+ return generation_config
854
+
855
+
856
+ def append_instruction(question: str, instruction: str) -> str:
857
+ instruction = instruction.strip()
858
+ if not instruction:
859
+ return question
860
+ return f"{question.rstrip()}\n{instruction}"
861
+
862
+
863
+ def make_reasoning_generation_config(base_generation_config: dict, args) -> dict:
864
+ generation_config = dict(base_generation_config)
865
+ generation_config["max_new_tokens"] = args.reasoning_max_new_tokens
866
+ generation_config["return_dict_in_generate"] = True
867
+ generation_config["output_scores"] = True
868
+ generation_config["output_attentions"] = True
869
+ temperature = args.reasoning_temperature
870
+ generation_config["do_sample"] = temperature > 0
871
+ if temperature > 0:
872
+ generation_config["temperature"] = temperature
873
+ else:
874
+ generation_config.pop("temperature", None)
875
+ return generation_config
876
+
877
+
878
+ def make_custom_generation_config(
879
+ base_generation_config: dict,
880
+ max_new_tokens: int,
881
+ temperature: float,
882
+ return_dict_in_generate: bool,
883
+ output_scores: bool,
884
+ output_attentions: bool,
885
+ ) -> dict:
886
+ generation_config = dict(base_generation_config)
887
+ generation_config["max_new_tokens"] = max_new_tokens
888
+ generation_config["return_dict_in_generate"] = return_dict_in_generate
889
+ generation_config["output_scores"] = output_scores
890
+ generation_config["output_attentions"] = output_attentions
891
+ generation_config["do_sample"] = temperature > 0
892
+ if temperature > 0:
893
+ generation_config["temperature"] = temperature
894
+ else:
895
+ generation_config.pop("temperature", None)
896
+ return generation_config
897
+
898
+
899
+ def normalize_generated_text(text: str) -> str:
900
+ return " ".join(text.strip().split())
901
+
902
+
903
+ def strip_base_prompt(question: str) -> str:
904
+ if question.endswith(BASE_PROMPT_SUFFIX):
905
+ return question[: -len(BASE_PROMPT_SUFFIX)].rstrip()
906
+ return question
907
+
908
+
909
+ def summarize_visual_token_importance(visual_token_importance: torch.Tensor, topk: int) -> Dict[str, object]:
910
+ values = visual_token_importance.detach().float().view(-1).cpu()
911
+ total = values.sum().item()
912
+ if total > 0:
913
+ normalized = values / total
914
+ else:
915
+ normalized = torch.full_like(values, 1.0 / max(values.numel(), 1))
916
+
917
+ topk = min(topk, normalized.numel())
918
+ top_values, top_indices = torch.topk(normalized, k=topk)
919
+ entropy = -(normalized * torch.clamp(normalized, min=1e-12).log()).sum().item()
920
+ return {
921
+ "raw_sum": total,
922
+ "entropy": entropy,
923
+ "max_weight": normalized.max().item(),
924
+ "top_indices": top_indices.tolist(),
925
+ "top_weights": top_values.tolist(),
926
+ "weights": normalized.tolist(),
927
+ }
928
+
929
+
930
+ def normalize_visual_token_importance(visual_token_importance: torch.Tensor) -> torch.Tensor:
931
+ visual_token_importance = visual_token_importance.detach().float()
932
+ total = visual_token_importance.sum()
933
+ if total.item() > 0:
934
+ return visual_token_importance / total
935
+ return torch.full_like(visual_token_importance, 1.0 / max(visual_token_importance.numel(), 1))
936
+
937
+
938
+ def prepare_decode_visual_token_importance(
939
+ visual_token_importance: torch.Tensor,
940
+ selection_mode: str,
941
+ ) -> torch.Tensor:
942
+ raw_importance = visual_token_importance.detach().float()
943
+ if selection_mode in {"topk", "similarity_greedy", "similarity_cover_greedy"}:
944
+ return raw_importance
945
+ if selection_mode == "random":
946
+ return torch.rand_like(raw_importance)
947
+ raise ValueError(f"Unsupported large model prune selection mode: {selection_mode}")
948
+
949
+
950
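+ # For "similarity_cover_greedy", pick visual tokens by farthest-point sampling on cosine
+ # distance, seeded at the highest-attention token and nudged by a small attention bonus,
+ # then hand the kept set to the decoder as a top-k style importance vector. Other modes
+ # pass the (possibly randomized) importance scores through unchanged.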
+ def resolve_decode_prune_plan(
951
+ selection_visual_tokens: torch.Tensor,
952
+ visual_token_importance: torch.Tensor,
953
+ args,
954
+ ) -> Tuple[torch.Tensor, float, str, int]:
955
+ raw_importance = visual_token_importance.detach().float().view(-1)
956
+ visual_token_count = raw_importance.numel()
957
+ if args.large_model_prune_selection != "similarity_cover_greedy":
958
+ prepared = prepare_decode_visual_token_importance(raw_importance, args.large_model_prune_selection)
959
+ kept_count = max(1, int(visual_token_count * args.large_model_prune_ratio))
960
+ return prepared, args.large_model_prune_ratio, args.large_model_prune_selection, kept_count
961
+
962
+ weights = normalize_visual_token_importance(raw_importance)
963
+ features = selection_visual_tokens.detach().float()
964
+ if features.dim() == 3:
965
+ features = features.reshape(-1, features.shape[-1])
966
+ features = torch.nn.functional.normalize(features, dim=-1)
967
+ similarity = (features @ features.T).clamp_min(0.0)
968
+ distance = 1.0 - similarity
969
+ keep_count = max(1, min(visual_token_count, int(math.ceil(visual_token_count * args.large_model_prune_ratio))))
970
+ seed_index = int(torch.argmax(weights).item())
971
+ selected_list = [seed_index]
972
+ selected_mask = torch.zeros(visual_token_count, device=features.device, dtype=torch.bool)
973
+ selected_mask[seed_index] = True
974
+ min_distance_to_selected = distance[:, seed_index].clone()
975
+
976
+ while len(selected_list) < keep_count:
977
+ candidate_scores = min_distance_to_selected + 0.05 * weights.to(features.device)
978
+ candidate_scores = candidate_scores.masked_fill(selected_mask, float("-inf"))
979
+ next_index = int(torch.argmax(candidate_scores).item())
980
+ selected_list.append(next_index)
981
+ selected_mask[next_index] = True
982
+ min_distance_to_selected = torch.minimum(min_distance_to_selected, distance[:, next_index])
983
+
984
+ selected_indices = torch.tensor(selected_list, device=raw_importance.device, dtype=torch.long)
985
+ prepared = torch.zeros_like(raw_importance)
986
+ prepared[selected_indices] = 1.0 + weights[selected_indices].to(prepared.device)
987
+ kept_count = int(selected_indices.numel())
988
+ keep_ratio = min(1.0, (kept_count + 1e-6) / max(visual_token_count, 1))
989
+ return prepared, keep_ratio, "topk", kept_count
990
+
991
+
992
+ def maybe_normalize_visual_token_importance(visual_token_importance: torch.Tensor, args) -> torch.Tensor:
993
+ if args.guide_attention_aggregation_mode == "normalized":
994
+ return normalize_visual_token_importance(visual_token_importance)
995
+ return visual_token_importance.detach().float()
996
+
997
+
998
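+ # Blend the attention the guide paid to the image while reading the question (prefill
+ # step) with the attention paid while writing the answer, using the configured weights;
+ # each part is optionally normalized first depending on the aggregation mode.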
+ def combine_question_and_answer_attention(
999
+ question_visual_token_importance: torch.Tensor,
1000
+ answer_visual_token_importance: torch.Tensor,
1001
+ args,
1002
+ ) -> torch.Tensor:
1003
+ question_weight = args.guide_question_attention_weight
1004
+ answer_weight = args.guide_answer_attention_weight
1005
+ if question_weight == 0 and answer_weight == 0:
1006
+ raise ValueError("At least one guide question/answer attention weight must be > 0.")
1007
+
1008
+ return (
1009
+ question_weight * maybe_normalize_visual_token_importance(question_visual_token_importance, args)
1010
+ + answer_weight * maybe_normalize_visual_token_importance(answer_visual_token_importance, args)
1011
+ )
1012
+
1013
+
1014
+ def resolve_guide_attention_source(args) -> str:
1015
+ if args.guide_attention_source != "default":
1016
+ return args.guide_attention_source
1017
+ if args.guide_reasoning_mode == "two_pass_explicit":
1018
+ return "combined"
1019
+ return "answer"
1020
+
1021
+
1022
+ def combine_reasoning_and_answer_attention(
1023
+ reasoning_visual_token_importance: torch.Tensor,
1024
+ answer_visual_token_importance: torch.Tensor,
1025
+ args,
1026
+ ) -> torch.Tensor:
1027
+ attention_source = resolve_guide_attention_source(args)
1028
+ if attention_source == "reasoning":
1029
+ return args.guide_reasoning_attention_weight * maybe_normalize_visual_token_importance(
1030
+ reasoning_visual_token_importance,
1031
+ args,
1032
+ )
1033
+ if attention_source == "answer":
1034
+ return args.guide_answer_attention_weight * maybe_normalize_visual_token_importance(
1035
+ answer_visual_token_importance,
1036
+ args,
1037
+ )
1038
+
1039
+ reasoning_weight = args.guide_reasoning_attention_weight
1040
+ answer_weight = args.guide_answer_attention_weight
1041
+ if reasoning_weight == 0 and answer_weight == 0:
1042
+ raise ValueError("At least one guide attention weight must be > 0.")
1043
+
1044
+ return (
1045
+ reasoning_weight * maybe_normalize_visual_token_importance(reasoning_visual_token_importance, args)
1046
+ + answer_weight * maybe_normalize_visual_token_importance(answer_visual_token_importance, args)
1047
+ )
1048
+
1049
+
1050
+ def combine_question_reasoning_and_answer_attention(
1051
+ question_visual_token_importance: torch.Tensor,
1052
+ reasoning_visual_token_importance: torch.Tensor,
1053
+ answer_visual_token_importance: torch.Tensor,
1054
+ args,
1055
+ ) -> torch.Tensor:
1056
+ attention_source = resolve_guide_attention_source(args)
1057
+ if attention_source == "reasoning":
1058
+ return args.guide_reasoning_attention_weight * maybe_normalize_visual_token_importance(
1059
+ reasoning_visual_token_importance,
1060
+ args,
1061
+ )
1062
+ if attention_source == "answer":
1063
+ return combine_question_and_answer_attention(
1064
+ question_visual_token_importance,
1065
+ answer_visual_token_importance,
1066
+ args,
1067
+ )
1068
+
1069
+ return combine_question_and_answer_attention(
1070
+ question_visual_token_importance,
1071
+ answer_visual_token_importance,
1072
+ args,
1073
+ ) + args.guide_reasoning_attention_weight * reasoning_visual_token_importance.detach().float()
1074
+
1075
+
1076
+ def build_guide_attention_question(question: str, args) -> str:
1077
+ if args.guide_reasoning_mode == "short_cot":
1078
+ return GUIDE_ATTENTION_COT_PROMPT_TEMPLATE.replace("{question}", strip_base_prompt(question))
1079
+ if args.guide_reasoning_mode == "explicit_cot":
1080
+ return append_instruction(strip_base_prompt(question), GUIDE_ATTENTION_EXPLICIT_COT_INSTRUCTION)
1081
+ return question
1082
+
1083
+
1084
+ def build_guide_reasoning_question(question: str) -> str:
1085
+ return GUIDE_ATTENTION_REASONING_ONLY_PROMPT_TEMPLATE.replace(
1086
+ "{question}",
1087
+ strip_base_prompt(question),
1088
+ )
1089
+
1090
+
1091
+ def build_guide_text_question(question: str) -> str:
1092
+ return append_instruction(question, GUIDE_TEXT_HINT_INSTRUCTION)
1093
+
1094
+
1095
+ def build_decode_question(question: str, guide_text_hint: Optional[str]) -> str:
1096
+ if not guide_text_hint:
1097
+ return question
1098
+ return append_instruction(
1099
+ question,
1100
+ f"Guide hint: {guide_text_hint}\n{GUIDED_DECODE_INSTRUCTION}",
1101
+ )
1102
+
1103
+
1104
+ def make_guide_attention_generation_config(base_generation_config: dict, args) -> dict:
1105
+ if args.guide_reasoning_mode in {"short_cot", "explicit_cot", "two_pass_explicit"}:
1106
+ return make_custom_generation_config(
1107
+ base_generation_config,
1108
+ max_new_tokens=args.guide_reasoning_max_new_tokens,
1109
+ temperature=args.guide_reasoning_temperature,
1110
+ return_dict_in_generate=True,
1111
+ output_scores=True,
1112
+ output_attentions=True,
1113
+ )
1114
+ return dict(base_generation_config)
1115
+
1116
+
1117
+ def make_guide_text_generation_config(base_generation_config: dict, args) -> dict:
1118
+ return make_custom_generation_config(
1119
+ base_generation_config,
1120
+ max_new_tokens=args.guide_text_max_new_tokens,
1121
+ temperature=args.guide_text_temperature,
1122
+ return_dict_in_generate=False,
1123
+ output_scores=False,
1124
+ output_attentions=False,
1125
+ )
1126
+
1127
+
1128
+ @torch.inference_mode()
1129
+ def run_text_generation_branch(
1130
+ model: InternVLChatModel,
1131
+ tokenizer,
1132
+ projected_visual_tokens: torch.Tensor,
1133
+ question: str,
1134
+ generation_config: dict,
1135
+ ) -> str:
1136
+ query, template = build_query(model, tokenizer, question, projected_visual_tokens.shape[0])
1137
+ model_inputs = tokenizer(query, return_tensors="pt")
1138
+ input_device = model_text_device(model)
1139
+ input_ids = model_inputs["input_ids"].to(input_device)
1140
+ attention_mask = model_inputs["attention_mask"].to(input_device)
1141
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
1142
+ input_embeds, _ = build_input_embeds_from_visual_features(model, input_ids, projected_visual_tokens)
1143
+
1144
+ run_config = dict(generation_config)
1145
+ run_config["eos_token_id"] = eos_token_id
1146
+ output_ids = model.language_model.generate(
1147
+ inputs_embeds=input_embeds,
1148
+ attention_mask=attention_mask,
1149
+ generation_config=None,
1150
+ output_hidden_states=None,
1151
+ return_dict=None,
1152
+ use_cache=True,
1153
+ **run_config,
1154
+ )
1155
+ response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
1156
+ return response.split(template.sep)[0].strip()
1157
+
1158
+
1159
+ def run_decode_answer(
1160
+ model: InternVLChatModel,
1161
+ tokenizer,
1162
+ projected_visual_tokens: torch.Tensor,
1163
+ question: str,
1164
+ generation_config: dict,
1165
+ decode_visual_token_importance: torch.Tensor,
1166
+ decode_prune_ratio: float,
1167
+ decode_prune_keep_count: int,
1168
+ decode_prune_selection: str,
1169
+ args,
1170
+ ) -> str:
1171
+ return run_decode_branch(
1172
+ model,
1173
+ tokenizer,
1174
+ projected_visual_tokens,
1175
+ question,
1176
+ generation_config,
1177
+ decode_visual_token_importance,
1178
+ args.large_model_prune_layer,
1179
+ decode_prune_ratio,
1180
+ decode_prune_keep_count,
1181
+ decode_prune_selection,
1182
+ args.large_model_similarity_target_coverage,
1183
+ args.large_model_similarity_min_gain,
1184
+ args.large_model_similarity_min_keep,
1185
+ args.large_model_similarity_max_keep_ratio,
1186
+ )
1187
+
1188
+
1189
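+ # Two-pass explicit guiding: run a normal answer pass and a reasoning-only pass, filter
+ # the reasoning tokens (via spaCy when available) to decide which steps' attention to
+ # keep, then combine question, reasoning, and answer attention into one importance vector.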
+ @torch.inference_mode()
1190
+ def run_guide_two_pass_explicit_branch(
1191
+ model: InternVLChatModel,
1192
+ tokenizer,
1193
+ projected_visual_tokens: torch.Tensor,
1194
+ question: str,
1195
+ reasoning_generation_config: dict,
1196
+ answer_generation_config: dict,
1197
+ consistency_token_ratio: float,
1198
+ args,
1199
+ ) -> Tuple[str, List[torch.Tensor], torch.Tensor, torch.Tensor, str, Dict[str, object]]:
1200
+ answer_result = run_guide_generation(
1201
+ model,
1202
+ tokenizer,
1203
+ projected_visual_tokens,
1204
+ question,
1205
+ answer_generation_config,
1206
+ )
1207
+ reasoning_result = run_guide_generation(
1208
+ model,
1209
+ tokenizer,
1210
+ projected_visual_tokens,
1211
+ build_guide_reasoning_question(question),
1212
+ reasoning_generation_config,
1213
+ )
1214
+ reasoning = reasoning_result["response"]
1215
+
1216
+ reasoning_step_mask, reasoning_filter_debug = build_reasoning_attention_step_mask_and_debug(
1217
+ tokenizer,
1218
+ reasoning_result["outputs"],
1219
+ args,
1220
+ )
1221
+ reasoning_visual_token_importance = aggregate_attention_from_generation_outputs(
1222
+ reasoning_result["outputs"],
1223
+ reasoning_result["visual_token_index"],
1224
+ reasoning_step_mask,
1225
+ )
1226
+ question_visual_token_importance, answer_visual_token_importance = (
1227
+ aggregate_question_and_answer_attention_from_generation_outputs(
1228
+ answer_result["outputs"],
1229
+ answer_result["visual_token_index"],
1230
+ )
1231
+ )
1232
+ visual_token_importance = combine_question_reasoning_and_answer_attention(
1233
+ question_visual_token_importance,
1234
+ reasoning_visual_token_importance,
1235
+ answer_visual_token_importance,
1236
+ args,
1237
+ )
1238
+ if args.large_model_prune_selection in {"similarity_greedy", "similarity_cover_greedy"}:
1239
+ consistency_score = torch.tensor(1.0, device=visual_token_importance.device)
1240
+ else:
1241
+ consistency_score = compute_consistency_score(
1242
+ model,
1243
+ answer_result["input_embeds"],
1244
+ answer_result["flat_input_ids"],
1245
+ answer_result["attention_mask"],
1246
+ answer_result["outputs"]["sequences"][0],
1247
+ visual_token_importance,
1248
+ answer_result["visual_token_index"],
1249
+ consistency_token_ratio,
1250
+ args.large_model_prune_selection,
1251
+ )
1252
+ return (
1253
+ answer_result["response"],
1254
+ answer_result["outputs"].scores,
1255
+ consistency_score,
1256
+ visual_token_importance,
1257
+ reasoning,
1258
+ reasoning_filter_debug,
1259
+ )
1260
+
1261
+
1262
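+ # Two-pass decoding with explicit reasoning: first ask the pruned decode model to write
+ # its reasoning, then append that reasoning plus the final-answer instruction to the
+ # question and decode the short answer with the same prune plan.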
+ def generate_with_reasoning(
1263
+ guide_model: InternVLChatModel,
1264
+ guide_tokenizer,
1265
+ decode_model: InternVLChatModel,
1266
+ large_tokenizer,
1267
+ selection_visual_tokens: torch.Tensor,
1268
+ projected_visual_tokens: torch.Tensor,
1269
+ question: str,
1270
+ generation_config: dict,
1271
+ reasoning_generation_config: dict,
1272
+ visual_token_importance: torch.Tensor,
1273
+ args,
1274
+ ) -> Tuple[str, str]:
1275
+ reasoning_question = append_instruction(question, EXPLICIT_REASONING_INSTRUCTION)
1276
+ decode_visual_token_importance, decode_prune_ratio, decode_prune_selection, decode_prune_keep_count = resolve_decode_prune_plan(
1277
+ selection_visual_tokens,
1278
+ visual_token_importance,
1279
+ args,
1280
+ )
1281
+ reasoning = run_decode_answer(
1282
+ decode_model,
1283
+ large_tokenizer,
1284
+ projected_visual_tokens,
1285
+ reasoning_question,
1286
+ reasoning_generation_config,
1287
+ decode_visual_token_importance,
1288
+ decode_prune_ratio,
1289
+ decode_prune_keep_count,
1290
+ decode_prune_selection,
1291
+ args,
1292
+ )
1293
+ final_question = append_instruction(
1294
+ question,
1295
+ f"Reasoning:\n{reasoning}\n{DEFAULT_FINAL_ANSWER_INSTRUCTION}",
1296
+ )
1297
+ answer = run_decode_answer(
1298
+ decode_model,
1299
+ large_tokenizer,
1300
+ projected_visual_tokens,
1301
+ final_question,
1302
+ generation_config,
1303
+ decode_visual_token_importance,
1304
+ decode_prune_ratio,
1305
+ decode_prune_keep_count,
1306
+ decode_prune_selection,
1307
+ args,
1308
+ )
1309
+ return answer, reasoning
1310
+
1311
+
1312
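+ # End-to-end evaluation: encode each image once with the shared ViT, run the guide
+ # branch to obtain a draft answer plus visual-token importance, prune and decode with
+ # the large model, then score predictions with the TextVQA accuracy evaluator.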
+ def evaluate(args):
1313
+ guide_checkpoint = resolve_hf_snapshot(args.guide_checkpoint)
1314
+ large_checkpoint = resolve_hf_snapshot(args.large_checkpoint)
1315
+
1316
+ guide_tokenizer = AutoTokenizer.from_pretrained(guide_checkpoint, trust_remote_code=True, use_fast=False)
1317
+ guide_config = configure_model(guide_checkpoint, use_flash_attn=args.use_flash_attn)
1318
+ guide_model = load_model(
1319
+ guide_checkpoint,
1320
+ guide_config,
1321
+ auto=args.auto,
1322
+ load_in_8bit=args.load_in_8bit,
1323
+ load_in_4bit=args.load_in_4bit,
1324
+ )
1325
+ decode_model, large_tokenizer = build_decode_model(
1326
+ guide_model,
1327
+ large_checkpoint,
1328
+ use_flash_attn=args.use_flash_attn,
1329
+ auto=args.auto,
1330
+ load_in_8bit=args.load_in_8bit,
1331
+ load_in_4bit=args.load_in_4bit,
1332
+ )
1333
+
1334
+ guide_image_size = guide_model.config.force_image_size or guide_model.config.vision_config.image_size
1335
+ large_image_size = decode_model.config.force_image_size or decode_model.config.vision_config.image_size
1336
+ if guide_image_size != large_image_size:
1337
+ raise ValueError(f"Guide and decode image size mismatch: {guide_image_size} vs {large_image_size}")
1338
+ if guide_model.num_image_token != decode_model.num_image_token:
1339
+ raise ValueError(
1340
+ f"Guide and decode image token count mismatch: {guide_model.num_image_token} vs {decode_model.num_image_token}"
1341
+ )
1342
+
1343
+ data_root = os.path.abspath(args.data_root)
1344
+ textvqa_root = os.path.abspath(args.textvqa_root) if args.textvqa_root else os.path.join(data_root, "data", "textvqa")
1345
+ dataset = TextVQADataset(
1346
+ jsonl_path=os.path.join(textvqa_root, "textvqa_val.jsonl"),
1347
+ data_root=data_root,
1348
+ image_size=guide_image_size,
1349
+ dynamic=args.dynamic,
1350
+ use_thumbnail=guide_model.config.use_thumbnail,
1351
+ max_num=args.max_num,
1352
+ )
1353
+ question_id_to_answers = load_annotations(os.path.join(textvqa_root, "textvqa_val_annotations.json"))
1354
+ generation_config = make_generation_config(args)
1355
+ guide_attention_generation_config = make_guide_attention_generation_config(generation_config, args)
1356
+ guide_text_generation_config = None
1357
+ if args.guide_text_mode != "none":
1358
+ guide_text_generation_config = make_guide_text_generation_config(generation_config, args)
1359
+ reasoning_generation_config = None
1360
+ if args.reasoning_mode == "two_pass":
1361
+ reasoning_generation_config = make_reasoning_generation_config(generation_config, args)
1362
+
1363
+ num_items = len(dataset) if args.limit is None else min(len(dataset), args.limit)
1364
+ results = []
1365
+ filter_debug_results = []
1366
+
1367
+ for idx in range(num_items):
1368
+ sample = dataset[idx]
1369
+ question = sample["question"] + " " + BASE_PROMPT
1370
+ pixel_values = sample["pixel_values"]
1371
+ guide_attention_question = build_guide_attention_question(question, args)
1372
+
1373
+ torch.cuda.synchronize()
1374
+ start = time.time()
1375
+ raw_visual_tokens = extract_shared_raw_visual_tokens(guide_model, pixel_values)
1376
+ guide_visual_tokens = project_visual_tokens(guide_model, raw_visual_tokens)
1377
+ guide_reasoning = None
1378
+ guide_reasoning_filter_debug = {"backend": "none", "kept_tokens": [], "token_analysis": []}
1379
+ question_visual_token_importance = None
1380
+ answer_visual_token_importance = None
1381
+ if args.guide_reasoning_mode == "two_pass_explicit":
1382
+ (
1383
+ guide_answer,
1384
+ guide_scores,
1385
+ consistency_score,
1386
+ visual_token_importance,
1387
+ guide_reasoning,
1388
+ guide_reasoning_filter_debug,
1389
+ ) = (
1390
+ run_guide_two_pass_explicit_branch(
1391
+ guide_model,
1392
+ guide_tokenizer,
1393
+ guide_visual_tokens,
1394
+ question,
1395
+ guide_attention_generation_config,
1396
+ generation_config,
1397
+ args.consistency_token_ratio,
1398
+ args,
1399
+ )
1400
+ )
1401
+ else:
1402
+ (
1403
+ guide_answer,
1404
+ guide_scores,
1405
+ consistency_score,
1406
+ visual_token_importance,
1407
+ question_visual_token_importance,
1408
+ answer_visual_token_importance,
1409
+ ) = run_guide_branch(
1410
+ guide_model,
1411
+ guide_tokenizer,
1412
+ guide_visual_tokens,
1413
+ guide_attention_question,
1414
+ guide_attention_generation_config,
1415
+ args.consistency_token_ratio,
1416
+ args,
1417
+ )
1418
+ guide_text_hint = None
1419
+ if args.guide_text_mode != "none":
1420
+ if guide_text_generation_config is None:
1421
+ raise ValueError("guide_text_generation_config is required when guide_text_mode is enabled.")
1422
+ guide_text_hint = normalize_generated_text(
1423
+ run_text_generation_branch(
1424
+ guide_model,
1425
+ guide_tokenizer,
1426
+ guide_visual_tokens,
1427
+ build_guide_text_question(question),
1428
+ guide_text_generation_config,
1429
+ )
1430
+ )
1431
+ torch.cuda.synchronize()
1432
+ end = time.time()
1433
+ small_model_time = end - start
1434
+
1435
+ scores = torch.concatenate(guide_scores, dim=0)
1436
+ scores, _ = scores.softmax(dim=-1).max(dim=-1)
1437
+ original_confidence = math.pow(torch.prod(scores).item(), 1 / len(scores))
1438
+
1439
+ torch.cuda.synchronize()
1440
+ start = time.time()
1441
+ large_visual_tokens = project_visual_tokens(decode_model, raw_visual_tokens)
1442
+ (
1443
+ decode_visual_token_importance,
1444
+ decode_prune_ratio,
1445
+ decode_prune_selection,
1446
+ kept_visual_token_count,
1447
+ ) = resolve_decode_prune_plan(
1448
+ raw_visual_tokens,
1449
+ visual_token_importance,
1450
+ args,
1451
+ )
1452
+ decode_question = build_decode_question(question, guide_text_hint)
1453
+ reasoning = None
1454
+ if args.reasoning_mode == "none":
1455
+ large_answer = run_decode_answer(
1456
+ decode_model,
1457
+ large_tokenizer,
1458
+ large_visual_tokens,
1459
+ decode_question,
1460
+ generation_config,
1461
+ decode_visual_token_importance,
1462
+ decode_prune_ratio,
1463
+ kept_visual_token_count,
1464
+ decode_prune_selection,
1465
+ args,
1466
+ )
1467
+ elif args.reasoning_mode == "prompt":
1468
+ prompted_question = append_instruction(decode_question, HIDDEN_REASONING_INSTRUCTION)
1469
+ large_answer = run_decode_answer(
1470
+ decode_model,
1471
+ large_tokenizer,
1472
+ large_visual_tokens,
1473
+ prompted_question,
1474
+ generation_config,
1475
+ decode_visual_token_importance,
1476
+ decode_prune_ratio,
1477
+ kept_visual_token_count,
1478
+ decode_prune_selection,
1479
+ args,
1480
+ )
1481
+ else:
1482
+ if reasoning_generation_config is None:
1483
+ raise ValueError("reasoning_generation_config is required when reasoning_mode='two_pass'.")
1484
+ large_answer, reasoning = generate_with_reasoning(
1485
+ guide_model,
1486
+ guide_tokenizer,
1487
+ decode_model,
1488
+ large_tokenizer,
1489
+ raw_visual_tokens,
1490
+ large_visual_tokens,
1491
+ decode_question,
1492
+ generation_config,
1493
+ reasoning_generation_config,
1494
+ visual_token_importance,
1495
+ args,
1496
+ )
1497
+ torch.cuda.synchronize()
1498
+ end = time.time()
1499
+ large_model_time = end - start
1500
+
1501
+ visual_token_count = visual_token_importance.shape[0]
1502
+ result_item = {
1503
+ "question_id": sample["question_id"],
1504
+ "question": sample["question"],
1505
+ "answer": large_answer,
1506
+ "pred_answer": large_answer,
1507
+ "gt_answers": question_id_to_answers[sample["question_id"]],
1508
+ "small_answer": guide_answer,
1509
+ "guide_attention_output": guide_answer,
1510
+ "large_answer": large_answer,
1511
+ "small_model_time": small_model_time,
1512
+ "large_model_time": large_model_time,
1513
+ "original_confidence": original_confidence,
1514
+ "consistency_score": consistency_score.item(),
1515
+ "visual_token_count": visual_token_count,
1516
+ "kept_visual_token_count": kept_visual_token_count,
1517
+ }
1518
+ if args.save_visual_token_importance:
1519
+ result_item["visual_token_importance_stats"] = summarize_visual_token_importance(
1520
+ visual_token_importance,
1521
+ topk=args.visual_token_importance_topk,
1522
+ )
1523
+ if question_visual_token_importance is not None:
1524
+ result_item["question_visual_token_importance_stats"] = summarize_visual_token_importance(
1525
+ question_visual_token_importance,
1526
+ topk=args.visual_token_importance_topk,
1527
+ )
1528
+ if answer_visual_token_importance is not None:
1529
+ result_item["answer_visual_token_importance_stats"] = summarize_visual_token_importance(
1530
+ answer_visual_token_importance,
1531
+ topk=args.visual_token_importance_topk,
1532
+ )
1533
+ if guide_text_hint is not None:
1534
+ result_item["guide_text_hint"] = guide_text_hint
1535
+ if args.save_reasoning and guide_reasoning is not None:
1536
+ result_item["guide_reasoning"] = guide_reasoning
1537
+ if args.save_reasoning and reasoning is not None:
1538
+ result_item["large_reasoning"] = reasoning
1539
+ results.append(result_item)
1540
+ filter_debug_results.append(
1541
+ {
1542
+ "question_id": sample["question_id"],
1543
+ "question": sample["question"],
1544
+ "small_answer": guide_answer,
1545
+ "large_answer": large_answer,
1546
+ "guide_reasoning": guide_reasoning,
1547
+ "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode,
1548
+ "guide_reasoning_filter_backend": guide_reasoning_filter_debug.get("backend", "none"),
1549
+ "kept_tokens": guide_reasoning_filter_debug.get("kept_tokens", []),
1550
+ "token_analysis": guide_reasoning_filter_debug.get("token_analysis", []),
1551
+ }
1552
+ )
1553
+ if (idx + 1) % args.log_every == 0 or idx + 1 == num_items:
1554
+ status = (
1555
+ f"[{idx + 1}/{num_items}] question_id={sample['question_id']} "
1556
+ f"small={guide_answer} large={large_answer} kept={kept_visual_token_count}/{visual_token_count}"
1557
+ )
1558
+ if guide_text_hint is not None:
1559
+ status += f" hint={guide_text_hint}"
1560
+ print(status)
1561
+ sys.stdout.flush()
1562
+
1563
+ evaluator = TextVQAAccuracyEvaluator()
1564
+ accuracy = evaluator.eval_pred_list(results)
1565
+
1566
+ os.makedirs(args.out_dir, exist_ok=True)
1567
+ run_name = args.run_name or "textvqa_shared_vision_2bguide_8btext"
1568
+ result_path = os.path.join(args.out_dir, f"{run_name}.json")
1569
+ summary_path = os.path.join(args.out_dir, f"{run_name}.summary.json")
1570
+ filter_debug_path = os.path.join(args.out_dir, f"{run_name}.filter_debug.json")
1571
+
1572
+ with open(result_path, "w") as f:
1573
+ json.dump(results, f, ensure_ascii=False, indent=2)
1574
+ with open(filter_debug_path, "w") as f:
1575
+ json.dump(filter_debug_results, f, ensure_ascii=False, indent=2)
1576
+
1577
+ avg_kept_visual_token_count = sum(item["kept_visual_token_count"] for item in results) / max(len(results), 1)
1578
+ avg_visual_token_count = sum(item["visual_token_count"] for item in results) / max(len(results), 1)
1579
+ avg_kept_visual_token_ratio = sum(
1580
+ item["kept_visual_token_count"] / max(item["visual_token_count"], 1)
1581
+ for item in results
1582
+ ) / max(len(results), 1)
1583
+
1584
+ summary = {
1585
+ "mode": "shared_vision_guided",
1586
+ "guide_checkpoint": guide_checkpoint,
1587
+ "large_checkpoint": large_checkpoint,
1588
+ "count": num_items,
1589
+ "accuracy": accuracy,
1590
+ "large_model_prune_layer": args.large_model_prune_layer,
1591
+ "large_model_prune_ratio": args.large_model_prune_ratio,
1592
+ "large_model_prune_selection": args.large_model_prune_selection,
1593
+ "large_model_similarity_target_coverage": args.large_model_similarity_target_coverage,
1594
+ "large_model_similarity_min_gain": args.large_model_similarity_min_gain,
1595
+ "large_model_similarity_min_keep": args.large_model_similarity_min_keep,
1596
+ "large_model_similarity_max_keep_ratio": args.large_model_similarity_max_keep_ratio,
1597
+ "consistency_token_ratio": args.consistency_token_ratio,
1598
+ "guide_reasoning_mode": args.guide_reasoning_mode,
1599
+ "guide_reasoning_max_new_tokens": args.guide_reasoning_max_new_tokens,
1600
+ "guide_reasoning_filter_mode": args.guide_reasoning_filter_mode,
1601
+ "guide_attention_aggregation_mode": args.guide_attention_aggregation_mode,
1602
+ "guide_attention_source": resolve_guide_attention_source(args),
1603
+ "guide_reasoning_attention_weight": args.guide_reasoning_attention_weight,
1604
+ "guide_answer_attention_weight": args.guide_answer_attention_weight,
1605
+ "guide_question_attention_weight": args.guide_question_attention_weight,
1606
+ "guide_text_mode": args.guide_text_mode,
1607
+ "guide_text_max_new_tokens": args.guide_text_max_new_tokens,
1608
+ "avg_visual_token_count": avg_visual_token_count,
1609
+ "avg_kept_visual_token_count": avg_kept_visual_token_count,
1610
+ "avg_kept_visual_token_ratio": avg_kept_visual_token_ratio,
1611
+ "avg_small_model_time": sum(item["small_model_time"] for item in results) / max(len(results), 1),
1612
+ "avg_large_model_time": sum(item["large_model_time"] for item in results) / max(len(results), 1),
1613
+ "results_file": result_path,
1614
+ "filter_debug_file": filter_debug_path,
1615
+ }
1616
+ with open(summary_path, "w") as f:
1617
+ json.dump(summary, f, ensure_ascii=False, indent=2)
1618
+
1619
+ print(f"accuracy: {accuracy:.6f}")
1620
+ print(f"avg_kept_visual_token_ratio: {avg_kept_visual_token_ratio:.6f}")
1621
+ print(f"avg_kept_visual_token_count: {avg_kept_visual_token_count:.2f}")
1622
+ print(f"results_file: {result_path}")
1623
+ print(f"summary_file: {summary_path}")
1624
+
1625
+
1626
+ def main():
1627
+ parser = argparse.ArgumentParser()
1628
+ parser.add_argument("--guide-checkpoint", type=str, required=True)
1629
+ parser.add_argument("--large-checkpoint", type=str, required=True)
1630
+ parser.add_argument("--data-root", type=str, default=str(REPO_ROOT))
1631
+ parser.add_argument("--textvqa-root", type=str, default="")
1632
+ parser.add_argument("--out-dir", type=str, default=str(REPO_ROOT / "outputs" / "shared_vision_guided"))
1633
+ parser.add_argument("--run-name", type=str, default="")
1634
+ parser.add_argument("--limit", type=int, default=None)
1635
+ parser.add_argument("--max-new-tokens", type=int, default=10)
1636
+ parser.add_argument("--num-beams", type=int, default=1)
1637
+ parser.add_argument("--temperature", type=float, default=0.0)
1638
+ parser.add_argument("--reasoning-mode", type=str, choices=["none", "prompt", "two_pass"], default="none")
1639
+ parser.add_argument("--reasoning-max-new-tokens", type=int, default=64)
1640
+ parser.add_argument("--reasoning-temperature", type=float, default=0.0)
1641
+ parser.add_argument("--save-reasoning", action="store_true")
1642
+ parser.add_argument(
1643
+ "--guide-reasoning-mode",
1644
+ type=str,
1645
+ choices=["none", "short_cot", "explicit_cot", "two_pass_explicit"],
1646
+ default="none",
1647
+ )
1648
+ parser.add_argument("--guide-reasoning-max-new-tokens", type=int, default=1024)
1649
+ parser.add_argument("--guide-reasoning-temperature", type=float, default=0.0)
1650
+ parser.add_argument(
1651
+ "--guide-reasoning-filter-mode",
1652
+ type=str,
1653
+ choices=["none", "pos_ner"],
1654
+ default="none",
1655
+ )
1656
+ parser.add_argument(
1657
+ "--guide-attention-source",
1658
+ type=str,
1659
+ choices=["default", "reasoning", "answer", "combined"],
1660
+ default="default",
1661
+ )
1662
+ parser.add_argument(
1663
+ "--guide-attention-aggregation-mode",
1664
+ type=str,
1665
+ choices=["raw", "normalized"],
1666
+ default="raw",
1667
+ )
1668
+ parser.add_argument("--guide-question-attention-weight", type=float, default=1.0)
1669
+ parser.add_argument("--guide-reasoning-attention-weight", type=float, default=1.0)
1670
+ parser.add_argument("--guide-answer-attention-weight", type=float, default=1.0)
1671
+ parser.add_argument("--guide-text-mode", type=str, choices=["none", "short_rationale"], default="none")
1672
+ parser.add_argument("--guide-text-max-new-tokens", type=int, default=12)
1673
+ parser.add_argument("--guide-text-temperature", type=float, default=0.0)
1674
+ parser.add_argument("--save-visual-token-importance", action="store_true")
1675
+ parser.add_argument("--visual-token-importance-topk", type=int, default=16)
1676
+ parser.add_argument("--dynamic", action="store_true")
1677
+ parser.add_argument("--max-num", type=int, default=6)
1678
+ parser.add_argument("--log-every", type=int, default=20)
1679
+ parser.add_argument("--seed", type=int, default=0)
1680
+ parser.add_argument("--large-model-prune-layer", type=float, default=0.0)
1681
+ parser.add_argument("--large-model-prune-ratio", type=float, default=0.4)
1682
+ parser.add_argument(
1683
+ "--large-model-prune-selection",
1684
+ type=str,
1685
+ choices=["topk", "random", "similarity_greedy", "similarity_cover_greedy"],
1686
+ default="topk",
1687
+ )
1688
+ parser.add_argument("--large-model-similarity-target-coverage", type=float, default=0.9)
1689
+ parser.add_argument("--large-model-similarity-min-gain", type=float, default=0.0)
1690
+ parser.add_argument("--large-model-similarity-min-keep", type=int, default=1)
1691
+ parser.add_argument("--large-model-similarity-max-keep-ratio", type=float, default=1.0)
1692
+ parser.add_argument("--consistency-token-ratio", type=float, default=0.05)
1693
+ parser.add_argument("--auto", action="store_true")
1694
+ parser.add_argument("--load-in-8bit", action="store_true")
1695
+ parser.add_argument("--load-in-4bit", action="store_true")
1696
+ parser.add_argument("--use-flash-attn", action="store_true")
1697
+ args = parser.parse_args()
1698
+
1699
+ if not torch.cuda.is_available():
1700
+ raise RuntimeError("CUDA is required for shared-vision guided evaluation.")
1701
+ if args.large_model_prune_ratio <= 0 or args.large_model_prune_ratio > 1:
1702
+ raise ValueError("large-model-prune-ratio must be in (0, 1].")
1703
+ if args.consistency_token_ratio <= 0 or args.consistency_token_ratio > 1:
1704
+ raise ValueError("consistency-token-ratio must be in (0, 1].")
1705
+ if args.guide_reasoning_attention_weight < 0 or args.guide_answer_attention_weight < 0:
1706
+ raise ValueError("guide reasoning/answer attention weights must be >= 0.")
1707
+ if args.guide_question_attention_weight < 0:
1708
+ raise ValueError("guide question attention weight must be >= 0.")
1709
+ if args.guide_reasoning_mode == "two_pass_explicit":
1710
+ attention_source = resolve_guide_attention_source(args)
1711
+ if attention_source == "reasoning" and args.guide_reasoning_attention_weight == 0:
1712
+ raise ValueError("guide_reasoning_attention_weight must be > 0 when guide-attention-source=reasoning.")
1713
+ if (
1714
+ attention_source == "answer"
1715
+ and args.guide_question_attention_weight == 0
1716
+ and args.guide_answer_attention_weight == 0
1717
+ ):
1718
+ raise ValueError(
1719
+ "At least one of guide_question_attention_weight or guide_answer_attention_weight "
1720
+ "must be > 0 when guide-attention-source=answer."
1721
+ )
1722
+ if (
1723
+ attention_source == "combined"
1724
+ and args.guide_question_attention_weight == 0
1725
+ and args.guide_reasoning_attention_weight == 0
1726
+ and args.guide_answer_attention_weight == 0
1727
+ ):
1728
+ raise ValueError("At least one guide attention weight must be > 0 for two_pass_explicit.")
1729
+ if (
1730
+ args.guide_reasoning_mode != "two_pass_explicit"
1731
+ and args.guide_question_attention_weight == 0
1732
+ and args.guide_answer_attention_weight == 0
1733
+ ):
1734
+ raise ValueError("At least one guide question/answer attention weight must be > 0.")
1735
+
1736
+ random.seed(args.seed)
1737
+ torch.manual_seed(args.seed)
1738
+ evaluate(args)
1739
+
1740
+
1741
+ if __name__ == "__main__":
1742
+ main()
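
Note (added for readability): each run of the script above writes three JSON artifacts — the per-question results, a run summary, and the guide-reasoning filter debug dump. The minimal sketch below shows how they might be inspected; the output directory and run name are illustrative assumptions that simply mirror the defaults in this script, not values taken from this commit.

    import json
    from pathlib import Path

    out_dir = Path("outputs/shared_vision_guided")      # default --out-dir (assumed, relative to the repo root)
    run_name = "textvqa_shared_vision_2bguide_8btext"   # default run name (assumed)

    summary = json.loads((out_dir / f"{run_name}.summary.json").read_text())
    print(summary["accuracy"], summary["avg_kept_visual_token_ratio"])

    results = json.loads(Path(summary["results_file"]).read_text())
    for item in results[:3]:
        # per-question visual-token budget actually given to the large model
        print(item["question_id"], item["kept_visual_token_count"], "/", item["visual_token_count"])
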
isolated/sim_greedy/upstream_sgl/eval/vqa/textvqa_eval.py ADDED
@@ -0,0 +1,345 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # copied from https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/m4c_evaluator.py
3
+ import re
4
+
5
+ from tqdm import tqdm
6
+
7
+
8
+ class EvalAIAnswerProcessor:
9
+ """
10
+ Processes an answer similar to Eval AI
11
+ copied from
12
+ https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
13
+ """
14
+
15
+ CONTRACTIONS = {
16
+ 'aint': "ain't",
17
+ 'arent': "aren't",
18
+ 'cant': "can't",
19
+ 'couldve': "could've",
20
+ 'couldnt': "couldn't",
21
+ "couldn'tve": "couldn't've",
22
+ "couldnt've": "couldn't've",
23
+ 'didnt': "didn't",
24
+ 'doesnt': "doesn't",
25
+ 'dont': "don't",
26
+ 'hadnt': "hadn't",
27
+ "hadnt've": "hadn't've",
28
+ "hadn'tve": "hadn't've",
29
+ 'hasnt': "hasn't",
30
+ 'havent': "haven't",
31
+ 'hed': "he'd",
32
+ "hed've": "he'd've",
33
+ "he'dve": "he'd've",
34
+ 'hes': "he's",
35
+ 'howd': "how'd",
36
+ 'howll': "how'll",
37
+ 'hows': "how's",
38
+ "Id've": "I'd've",
39
+ "I'dve": "I'd've",
40
+ 'Im': "I'm",
41
+ 'Ive': "I've",
42
+ 'isnt': "isn't",
43
+ 'itd': "it'd",
44
+ "itd've": "it'd've",
45
+ "it'dve": "it'd've",
46
+ 'itll': "it'll",
47
+ "let's": "let's",
48
+ 'maam': "ma'am",
49
+ 'mightnt': "mightn't",
50
+ "mightnt've": "mightn't've",
51
+ "mightn'tve": "mightn't've",
52
+ 'mightve': "might've",
53
+ 'mustnt': "mustn't",
54
+ 'mustve': "must've",
55
+ 'neednt': "needn't",
56
+ 'notve': "not've",
57
+ 'oclock': "o'clock",
58
+ 'oughtnt': "oughtn't",
59
+ "ow's'at": "'ow's'at",
60
+ "'ows'at": "'ow's'at",
61
+ "'ow'sat": "'ow's'at",
62
+ 'shant': "shan't",
63
+ "shed've": "she'd've",
64
+ "she'dve": "she'd've",
65
+ "she's": "she's",
66
+ 'shouldve': "should've",
67
+ 'shouldnt': "shouldn't",
68
+ "shouldnt've": "shouldn't've",
69
+ "shouldn'tve": "shouldn't've",
70
+ "somebody'd": 'somebodyd',
71
+ "somebodyd've": "somebody'd've",
72
+ "somebody'dve": "somebody'd've",
73
+ 'somebodyll': "somebody'll",
74
+ 'somebodys': "somebody's",
75
+ 'someoned': "someone'd",
76
+ "someoned've": "someone'd've",
77
+ "someone'dve": "someone'd've",
78
+ 'someonell': "someone'll",
79
+ 'someones': "someone's",
80
+ 'somethingd': "something'd",
81
+ "somethingd've": "something'd've",
82
+ "something'dve": "something'd've",
83
+ 'somethingll': "something'll",
84
+ 'thats': "that's",
85
+ 'thered': "there'd",
86
+ "thered've": "there'd've",
87
+ "there'dve": "there'd've",
88
+ 'therere': "there're",
89
+ 'theres': "there's",
90
+ 'theyd': "they'd",
91
+ "theyd've": "they'd've",
92
+ "they'dve": "they'd've",
93
+ 'theyll': "they'll",
94
+ 'theyre': "they're",
95
+ 'theyve': "they've",
96
+ 'twas': "'twas",
97
+ 'wasnt': "wasn't",
98
+ "wed've": "we'd've",
99
+ "we'dve": "we'd've",
100
+ 'weve': "we've",
101
+ 'werent': "weren't",
102
+ 'whatll': "what'll",
103
+ 'whatre': "what're",
104
+ 'whats': "what's",
105
+ 'whatve': "what've",
106
+ 'whens': "when's",
107
+ 'whered': "where'd",
108
+ 'wheres': "where's",
109
+ 'whereve': "where've",
110
+ 'whod': "who'd",
111
+ "whod've": "who'd've",
112
+ "who'dve": "who'd've",
113
+ 'wholl': "who'll",
114
+ 'whos': "who's",
115
+ 'whove': "who've",
116
+ 'whyll': "why'll",
117
+ 'whyre': "why're",
118
+ 'whys': "why's",
119
+ 'wont': "won't",
120
+ 'wouldve': "would've",
121
+ 'wouldnt': "wouldn't",
122
+ "wouldnt've": "wouldn't've",
123
+ "wouldn'tve": "wouldn't've",
124
+ 'yall': "y'all",
125
+ "yall'll": "y'all'll",
126
+ "y'allll": "y'all'll",
127
+ "yall'd've": "y'all'd've",
128
+ "y'alld've": "y'all'd've",
129
+ "y'all'dve": "y'all'd've",
130
+ 'youd': "you'd",
131
+ "youd've": "you'd've",
132
+ "you'dve": "you'd've",
133
+ 'youll': "you'll",
134
+ 'youre': "you're",
135
+ 'youve': "you've",
136
+ }
137
+
138
+ NUMBER_MAP = {
139
+ 'none': '0',
140
+ 'zero': '0',
141
+ 'one': '1',
142
+ 'two': '2',
143
+ 'three': '3',
144
+ 'four': '4',
145
+ 'five': '5',
146
+ 'six': '6',
147
+ 'seven': '7',
148
+ 'eight': '8',
149
+ 'nine': '9',
150
+ 'ten': '10',
151
+ }
152
+ ARTICLES = ['a', 'an', 'the']
153
+ PERIOD_STRIP = re.compile(r'(?!<=\d)(\.)(?!\d)')
154
+ COMMA_STRIP = re.compile(r'(?<=\d)(\,)+(?=\d)')
155
+ PUNCTUATIONS = [
156
+ ';',
157
+ r'/',
158
+ '[',
159
+ ']',
160
+ '"',
161
+ '{',
162
+ '}',
163
+ '(',
164
+ ')',
165
+ '=',
166
+ '+',
167
+ '\\',
168
+ '_',
169
+ '-',
170
+ '>',
171
+ '<',
172
+ '@',
173
+ '`',
174
+ ',',
175
+ '?',
176
+ '!',
177
+ ]
178
+
179
+ def __init__(self, *args, **kwargs):
180
+ pass
181
+
182
+ def word_tokenize(self, word):
183
+ word = word.lower()
184
+ word = word.replace(',', '').replace('?', '').replace("'s", " 's")
185
+ return word.strip()
186
+
187
+ def process_punctuation(self, in_text):
188
+ out_text = in_text
189
+ for p in self.PUNCTUATIONS:
190
+ if (p + ' ' in in_text or ' ' + p in in_text) or (
191
+ re.search(self.COMMA_STRIP, in_text) is not None
192
+ ):
193
+ out_text = out_text.replace(p, '')
194
+ else:
195
+ out_text = out_text.replace(p, ' ')
196
+ out_text = self.PERIOD_STRIP.sub('', out_text, re.UNICODE)
197
+ return out_text
198
+
199
+ def process_digit_article(self, in_text):
200
+ out_text = []
201
+ temp_text = in_text.lower().split()
202
+ for word in temp_text:
203
+ word = self.NUMBER_MAP.setdefault(word, word)
204
+ if word not in self.ARTICLES:
205
+ out_text.append(word)
206
+ else:
207
+ pass
208
+ for word_id, word in enumerate(out_text):
209
+ if word in self.CONTRACTIONS:
210
+ out_text[word_id] = self.CONTRACTIONS[word]
211
+ out_text = ' '.join(out_text)
212
+ return out_text
213
+
214
+ def __call__(self, item):
215
+ item = self.word_tokenize(item)
216
+ item = item.replace('\n', ' ').replace('\t', ' ').strip()
217
+ item = self.process_punctuation(item)
218
+ item = self.process_digit_article(item)
219
+ return item
220
+
221
+
222
+ class TextVQAAccuracyEvaluator:
223
+ def __init__(self):
224
+ self.answer_processor = EvalAIAnswerProcessor()
225
+
226
+ def _compute_answer_scores(self, raw_answers):
227
+ """
228
+ compute the accuracy (soft score) of human answers
229
+ """
230
+ answers = [self.answer_processor(a) for a in raw_answers]
231
+ assert len(answers) == 10
232
+ gt_answers = list(enumerate(answers))
233
+ unique_answers = set(answers)
234
+ unique_answer_scores = {}
235
+
236
+ for unique_answer in unique_answers:
237
+ accs = []
238
+ for gt_answer in gt_answers:
239
+ other_answers = [item for item in gt_answers if item != gt_answer]
240
+ matching_answers = [
241
+ item for item in other_answers if item[1] == unique_answer
242
+ ]
243
+ acc = min(1, float(len(matching_answers)) / 3)
244
+ accs.append(acc)
245
+ unique_answer_scores[unique_answer] = sum(accs) / len(accs)
246
+
247
+ return unique_answer_scores
248
+
249
+ def eval_pred_list(self, pred_list):
250
+ pred_scores = []
251
+ for entry in tqdm(pred_list):
252
+ pred_answer = self.answer_processor(entry['pred_answer'])
253
+ unique_answer_scores = self._compute_answer_scores(entry['gt_answers'])
254
+ score = unique_answer_scores.get(pred_answer, 0.0)
255
+ pred_scores.append(score)
256
+
257
+ accuracy = sum(pred_scores) / len(pred_scores)
258
+ return accuracy
259
+
260
+ def eval_pred_scores(self, pred_list):
261
+ pred_scores = []
262
+ for entry in tqdm(pred_list):
263
+ pred_answer = self.answer_processor(entry['pred_answer'])
264
+ unique_answer_scores = self._compute_answer_scores(entry['gt_answers'])
265
+ score = unique_answer_scores.get(pred_answer, 0.0)
266
+ pred_scores.append(score)
267
+
268
+ return pred_scores
269
+
270
+
271
+ class STVQAAccuracyEvaluator:
272
+ def __init__(self):
273
+ self.answer_processor = EvalAIAnswerProcessor()
274
+
275
+ def eval_pred_list(self, pred_list):
276
+ pred_scores = []
277
+ for entry in pred_list:
278
+ pred_answer = self.answer_processor(entry['pred_answer'])
279
+ gts = [self.answer_processor(a) for a in entry['gt_answers']]
280
+ score = 1.0 if pred_answer in gts else 0.0
281
+ pred_scores.append(score)
282
+
283
+ accuracy = sum(pred_scores) / len(pred_scores)
284
+ return accuracy
285
+
286
+
287
+ class STVQAANLSEvaluator:
288
+ def __init__(self):
289
+ import editdistance # install with `pip install editdistance`
290
+
291
+ self.get_edit_distance = editdistance.eval
292
+
293
+ def get_anls(self, s1, s2):
294
+ s1 = s1.lower().strip()
295
+ s2 = s2.lower().strip()
296
+ iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2))
297
+ anls = iou if iou >= 0.5 else 0.0
298
+ return anls
299
+
300
+ def eval_pred_list(self, pred_list):
301
+ pred_scores = []
302
+ for entry in pred_list:
303
+ anls = max(
304
+ self.get_anls(entry['pred_answer'], gt) for gt in entry['gt_answers']
305
+ )
306
+ pred_scores.append(anls)
307
+
308
+ accuracy = sum(pred_scores) / len(pred_scores)
309
+ return accuracy
310
+
311
+
312
+ class TextCapsBleu4Evaluator:
313
+ def __init__(self):
314
+ # The following script requires Java 1.8.0 and pycocotools installed.
315
+ # The pycocoevalcap can be installed with pip as
316
+ # pip install git+https://github.com/ronghanghu/coco-caption.git@python23
317
+ # Original pycocoevalcap code is at https://github.com/tylin/coco-caption
318
+ # but has no python3 support yet.
319
+ try:
320
+ from pycocoevalcap.bleu.bleu import Bleu
321
+ from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
322
+ except ModuleNotFoundError:
323
+ print(
324
+ 'Please install pycocoevalcap module using '
325
+ 'pip install git+https://github.com/ronghanghu/coco-caption.git@python23' # noqa
326
+ )
327
+ raise
328
+
329
+ self.tokenizer = PTBTokenizer()
330
+ self.scorer = Bleu(4)
331
+
332
+ def eval_pred_list(self, pred_list):
333
+ # Create reference and hypotheses captions.
334
+ gts = {}
335
+ res = {}
336
+ for idx, entry in enumerate(pred_list):
337
+ gts[idx] = [{'caption': a} for a in entry['gt_answers']]
338
+ res[idx] = [{'caption': entry['pred_answer']}]
339
+
340
+ gts = self.tokenizer.tokenize(gts)
341
+ res = self.tokenizer.tokenize(res)
342
+ score, _ = self.scorer.compute_score(gts, res)
343
+
344
+ bleu4 = score[3] # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4)
345
+ return bleu4
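
Note (added for readability): a tiny worked example of the two metrics defined in this file. The soft accuracy above is the standard VQA rule — the mean, over the ten leave-one-out answer subsets, of min(1, matches / 3) — and ANLS is 1 minus the normalized edit distance, zeroed below 0.5. The import path is an assumption (running from this directory), and `editdistance` must be installed for the ANLS part.

    from textvqa_eval import TextVQAAccuracyEvaluator, STVQAANLSEvaluator  # assumed import path

    evaluator = TextVQAAccuracyEvaluator()
    entry = {
        "pred_answer": "coca cola",
        "gt_answers": ["coca cola"] * 6 + ["pepsi"] * 4,  # exactly 10 human answers are required
    }
    # "coca cola" appears at least 3 times in every leave-one-out subset -> score 1.0
    print(evaluator.eval_pred_list([entry]))

    anls = STVQAANLSEvaluator()  # needs `pip install editdistance`
    # edit distance 1 over max length 9 -> 1 - 1/9 ~= 0.89, above the 0.5 cutoff
    print(anls.get_anls("coca cola", "coca-cola"))
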
isolated/sim_greedy/upstream_sgl/internvl/model/__init__.py ADDED
File without changes
isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/configuration_internlm2.py ADDED
@@ -0,0 +1,150 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/configuration_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ InternLM2 model configuration"""
17
+
18
+ from transformers.configuration_utils import PretrainedConfig
19
+ from transformers.utils import logging
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
24
+
25
+
26
+ # Modified from transformers.model.llama.configuration_llama.LlamaConfig
27
+ class InternLM2Config(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
30
+ an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 32000):
39
+ Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by the
40
+ `inputs_ids` passed when calling [`InternLM2Model`]
41
+ hidden_size (`int`, *optional*, defaults to 4096):
42
+ Dimension of the hidden representations.
43
+ intermediate_size (`int`, *optional*, defaults to 11008):
44
+ Dimension of the MLP representations.
45
+ num_hidden_layers (`int`, *optional*, defaults to 32):
46
+ Number of hidden layers in the Transformer encoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 32):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ num_key_value_heads (`int`, *optional*):
50
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
51
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
52
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
53
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
54
+ by meanpooling all the original heads within that group. For more details checkout [this
55
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
56
+ `num_attention_heads`.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
58
+ The non-linear activation function (function or string) in the decoder.
59
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
60
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
61
+ just in case (e.g., 512 or 1024 or 2048).
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ rms_norm_eps (`float`, *optional*, defaults to 1e-6):
65
+ The epsilon used by the rms normalization layers.
66
+ use_cache (`bool`, *optional*, defaults to `True`):
67
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
68
+ relevant if `config.is_decoder=True`.
69
+ tie_word_embeddings(`bool`, *optional*, defaults to `False`):
70
+ Whether to tie weight embeddings
71
+ Example:
72
+
73
+ """
74
+ model_type = 'internlm2'
75
+ _auto_class = 'AutoConfig'
76
+
77
+ def __init__( # pylint: disable=W0102
78
+ self,
79
+ vocab_size=103168,
80
+ hidden_size=4096,
81
+ intermediate_size=11008,
82
+ num_hidden_layers=32,
83
+ num_attention_heads=32,
84
+ num_key_value_heads=None,
85
+ hidden_act='silu',
86
+ max_position_embeddings=2048,
87
+ initializer_range=0.02,
88
+ rms_norm_eps=1e-6,
89
+ use_cache=True,
90
+ pad_token_id=0,
91
+ bos_token_id=1,
92
+ eos_token_id=2,
93
+ tie_word_embeddings=False,
94
+ bias=True,
95
+ rope_theta=10000,
96
+ rope_scaling=None,
97
+ attn_implementation='eager',
98
+ **kwargs,
99
+ ):
100
+ self.vocab_size = vocab_size
101
+ self.max_position_embeddings = max_position_embeddings
102
+ self.hidden_size = hidden_size
103
+ self.intermediate_size = intermediate_size
104
+ self.num_hidden_layers = num_hidden_layers
105
+ self.num_attention_heads = num_attention_heads
106
+ self.bias = bias
107
+
108
+ if num_key_value_heads is None:
109
+ num_key_value_heads = num_attention_heads
110
+ self.num_key_value_heads = num_key_value_heads
111
+
112
+ self.hidden_act = hidden_act
113
+ self.initializer_range = initializer_range
114
+ self.rms_norm_eps = rms_norm_eps
115
+ self.use_cache = use_cache
116
+ self.rope_theta = rope_theta
117
+ self.rope_scaling = rope_scaling
118
+ self._rope_scaling_validation()
119
+
120
+ self.attn_implementation = attn_implementation
121
+ if self.attn_implementation is None:
122
+ self.attn_implementation = 'eager'
123
+ super().__init__(
124
+ pad_token_id=pad_token_id,
125
+ bos_token_id=bos_token_id,
126
+ eos_token_id=eos_token_id,
127
+ tie_word_embeddings=tie_word_embeddings,
128
+ **kwargs,
129
+ )
130
+
131
+ def _rope_scaling_validation(self):
132
+ """
133
+ Validate the `rope_scaling` configuration.
134
+ """
135
+ if self.rope_scaling is None:
136
+ return
137
+
138
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
139
+ raise ValueError(
140
+ '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
141
+ f'got {self.rope_scaling}'
142
+ )
143
+ rope_scaling_type = self.rope_scaling.get('type', None)
144
+ rope_scaling_factor = self.rope_scaling.get('factor', None)
145
+ if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
146
+ raise ValueError(
147
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
148
+ )
149
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
150
+ raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
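
Note (added for readability): a minimal sketch of the `rope_scaling` contract enforced by `_rope_scaling_validation` above — the value must be either `None` or a dict with exactly the two keys `type` (one of 'linear' / 'dynamic') and `factor` (a float >= 1.0). The import path and the reduced layer count are assumptions for illustration only.

    from configuration_internlm2 import InternLM2Config  # assumed import path

    # valid: dynamic NTK scaling with a factor of 2.0
    cfg = InternLM2Config(num_hidden_layers=24, rope_scaling={"type": "dynamic", "factor": 2.0})
    print(cfg.rope_scaling)

    # invalid: missing the `factor` key -> _rope_scaling_validation raises ValueError
    try:
        InternLM2Config(rope_scaling={"type": "dynamic"})
    except ValueError as err:
        print(err)
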
isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/modeling_internlm2.py ADDED
@@ -0,0 +1,1709 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
31
+ CausalLMOutputWithPast,
32
+ SequenceClassifierOutputWithPast)
33
+ from transformers.modeling_utils import PreTrainedModel
34
+ from transformers.utils import (add_start_docstrings,
35
+ add_start_docstrings_to_model_forward, logging,
36
+ replace_return_docstrings)
37
+
38
+ from transformers import LogitsProcessorList, StoppingCriteriaList, GenerationConfig
39
+ from transformers.generation.utils import GenerateNonBeamOutput, GenerateDecoderOnlyOutput
40
+
41
+ try:
42
+ from transformers.generation.streamers import BaseStreamer
43
+ except: # noqa # pylint: disable=bare-except
44
+ BaseStreamer = None
45
+
46
+ from .configuration_internlm2 import InternLM2Config
47
+ from ..token_pruning import select_visual_token_indices
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CONFIG_FOR_DOC = 'InternLM2Config'
52
+
53
+ flash_attn_func, flash_attn_varlen_func = None, None
54
+ pad_input, index_first_axis, unpad_input = None, None, None
55
+ try:
56
+ from flash_attn import flash_attn_func as _flash_attn_func
57
+ from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
58
+ from flash_attn.bert_padding import index_first_axis as _index_first_axis
59
+ from flash_attn.bert_padding import pad_input as _pad_input
60
+ from flash_attn.bert_padding import unpad_input as _unpad_input
61
+
62
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
63
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
64
+ has_flash_attn = True
65
+ except:
66
+ has_flash_attn = False
67
+
68
+
69
+ def _import_flash_attn():
70
+ global flash_attn_func, flash_attn_varlen_func
71
+ global pad_input, index_first_axis, unpad_input
72
+ try:
73
+ from flash_attn import flash_attn_func as _flash_attn_func
74
+ from flash_attn import \
75
+ flash_attn_varlen_func as _flash_attn_varlen_func
76
+ from flash_attn.bert_padding import \
77
+ index_first_axis as _index_first_axis
78
+ from flash_attn.bert_padding import pad_input as _pad_input
79
+ from flash_attn.bert_padding import unpad_input as _unpad_input
80
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
81
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
82
+ except ImportError:
83
+ raise ImportError('flash_attn is not installed.')
84
+
85
+
86
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
87
+ def _get_unpad_data(attention_mask):
88
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
89
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
90
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
91
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
92
+ return (
93
+ indices,
94
+ cu_seqlens,
95
+ max_seqlen_in_batch,
96
+ )
97
+
98
+
99
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
100
+ def _make_causal_mask(
101
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
102
+ ):
103
+ """
104
+ Make causal mask used for bi-directional self-attention.
105
+ """
106
+ bsz, tgt_len = input_ids_shape
107
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
108
+ mask_cond = torch.arange(mask.size(-1), device=device)
109
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
110
+ mask = mask.to(dtype)
111
+
112
+ if past_key_values_length > 0:
113
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
114
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
115
+
116
+
117
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
118
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
119
+ """
120
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
121
+ """
122
+ bsz, src_len = mask.size()
123
+ tgt_len = tgt_len if tgt_len is not None else src_len
124
+
125
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
126
+
127
+ inverted_mask = 1.0 - expanded_mask
128
+
129
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
130
+
131
+
132
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
133
+ class InternLM2RMSNorm(nn.Module):
134
+ def __init__(self, hidden_size, eps=1e-6):
135
+ """
136
+ InternLM2RMSNorm is equivalent to T5LayerNorm
137
+ """
138
+ super().__init__()
139
+ self.weight = nn.Parameter(torch.ones(hidden_size))
140
+ self.variance_epsilon = eps
141
+
142
+ def forward(self, hidden_states):
143
+ input_dtype = hidden_states.dtype
144
+ hidden_states = hidden_states.to(torch.float32)
145
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
146
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
147
+ return self.weight * hidden_states.to(input_dtype)
148
+
149
+
150
+ try:
151
+ from functools import partial
152
+
153
+ from apex.normalization import FusedRMSNorm
154
+ InternLM2RMSNorm = partial(FusedRMSNorm, eps=1e-6) # noqa
155
+ print('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternLM2RMSNorm')
156
+ except ImportError:
157
+ # using the normal LlamaRMSNorm
158
+ pass
159
+ except Exception:
160
+ print('discovered apex but it failed to load, falling back to InternLM2RMSNorm')
161
+ pass
162
+
163
+
164
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
165
+ class InternLM2RotaryEmbedding(nn.Module):
166
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
167
+ super().__init__()
168
+
169
+ self.dim = dim
170
+ self.max_position_embeddings = max_position_embeddings
171
+ self.base = base
172
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
173
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
174
+
175
+ # Build here to make `torch.jit.trace` work.
176
+ self._set_cos_sin_cache(
177
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
178
+ )
179
+
180
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
181
+ self.max_seq_len_cached = seq_len
182
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
183
+
184
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
185
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
186
+ emb = torch.cat((freqs, freqs), dim=-1)
187
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
188
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
189
+
190
+ def forward(self, x, seq_len=None):
191
+ # x: [bs, num_attention_heads, seq_len, head_size]
192
+ if seq_len > self.max_seq_len_cached:
193
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)
194
+
195
+ return (
196
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
197
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
198
+ )
199
+
200
+
201
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
202
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
203
+ """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
204
+
205
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
206
+ self.scaling_factor = scaling_factor
207
+ super().__init__(dim, max_position_embeddings, base, device)
208
+
209
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
210
+ self.max_seq_len_cached = seq_len
211
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
212
+ t = t / self.scaling_factor
213
+
214
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
215
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
216
+ emb = torch.cat((freqs, freqs), dim=-1)
217
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
218
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
219
+
220
+
221
+ # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
222
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
223
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
224
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
225
+ """
226
+
227
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
228
+ self.scaling_factor = scaling_factor
229
+ super().__init__(dim, max_position_embeddings, base, device)
230
+
231
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
232
+ self.max_seq_len_cached = seq_len
233
+
234
+ if seq_len > self.max_position_embeddings:
235
+ base = self.base * (
236
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
237
+ ) ** (self.dim / (self.dim - 2))
238
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
239
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
240
+
241
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
242
+
243
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
244
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
245
+ emb = torch.cat((freqs, freqs), dim=-1)
246
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
247
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
248
+
249
+
250
+ # Copied from transformers.model.llama.modeling_llama.rotate_half
251
+ def rotate_half(x):
252
+ """Rotates half the hidden dims of the input."""
253
+ x1 = x[..., : x.shape[-1] // 2]
254
+ x2 = x[..., x.shape[-1] // 2:]
255
+ return torch.cat((-x2, x1), dim=-1)
256
+
257
+
258
+ # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
259
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
260
+ """Applies Rotary Position Embedding to the query and key tensors."""
261
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
262
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
263
+ q_embed = (q * cos) + (rotate_half(q) * sin)
264
+ k_embed = (k * cos) + (rotate_half(k) * sin)
265
+ return q_embed, k_embed
266
+
267
+
268
+ class InternLM2MLP(nn.Module):
269
+ def __init__(self, config):
270
+ super().__init__()
271
+ self.config = config
272
+ self.hidden_size = config.hidden_size
273
+ self.intermediate_size = config.intermediate_size
274
+ self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
275
+ self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
276
+ self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
277
+ self.act_fn = ACT2FN[config.hidden_act]
278
+
279
+ def forward(self, x):
280
+ down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))
281
+
282
+ return down_proj
283
+
284
+
285
+ # Copied from transformers.model.llama.modeling_llama.repeat_kv
286
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
287
+ """
288
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
289
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
290
+ """
291
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
292
+ if n_rep == 1:
293
+ return hidden_states
294
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
295
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
296
+
297
+
298
+ # Modified from transformers.model.llama.modeling_llama.LlamaAttention
299
+ class InternLM2Attention(nn.Module):
300
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
301
+
302
+ def __init__(self, config: InternLM2Config):
303
+ super().__init__()
304
+ self.config = config
305
+ self.hidden_size = config.hidden_size
306
+ self.num_heads = config.num_attention_heads
307
+ self.head_dim = self.hidden_size // self.num_heads
308
+ self.num_key_value_heads = config.num_key_value_heads
309
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
310
+ self.max_position_embeddings = config.max_position_embeddings
311
+ self.is_causal = True
312
+
313
+ if (self.head_dim * self.num_heads) != self.hidden_size:
314
+ raise ValueError(
315
+ f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
316
+ f' and `num_heads`: {self.num_heads}).'
317
+ )
318
+
319
+ self.wqkv = nn.Linear(
320
+ self.hidden_size,
321
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
322
+ bias=config.bias,
323
+ )
324
+
325
+ self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
326
+ self._init_rope()
327
+
328
+ def _init_rope(self):
329
+ if self.config.rope_scaling is None:
330
+ self.rotary_emb = InternLM2RotaryEmbedding(
331
+ self.head_dim,
332
+ max_position_embeddings=self.max_position_embeddings,
333
+ base=self.config.rope_theta,
334
+ )
335
+ else:
336
+ scaling_type = self.config.rope_scaling['type']
337
+ scaling_factor = self.config.rope_scaling['factor']
338
+ if scaling_type == 'dynamic':
339
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
340
+ self.head_dim,
341
+ max_position_embeddings=self.max_position_embeddings,
342
+ base=self.config.rope_theta,
343
+ scaling_factor=scaling_factor,
344
+ )
345
+ elif scaling_type == 'linear':
346
+ self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
347
+ self.head_dim,
348
+ max_position_embeddings=self.max_position_embeddings,
349
+ base=self.config.rope_theta,
350
+ scaling_factor=scaling_factor,
351
+ )
352
+ else:
353
+ raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
354
+ return self.rotary_emb
355
+
356
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
357
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
358
+
359
+ def forward(
360
+ self,
361
+ hidden_states: torch.Tensor,
362
+ attention_mask: Optional[torch.Tensor] = None,
363
+ position_ids: Optional[torch.LongTensor] = None,
364
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
365
+ output_attentions: bool = False,
366
+ use_cache: bool = False,
367
+ **kwargs,
368
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
369
+ if 'padding_mask' in kwargs:
370
+ warnings.warn(
371
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
372
+ 'Please make sure use `attention_mask` instead.`'
373
+ )
374
+
375
+ bsz, q_len, _ = hidden_states.size()
376
+
377
+ qkv_states = self.wqkv(hidden_states)
378
+
379
+ qkv_states = rearrange(
380
+ qkv_states,
381
+ 'b q (h gs d) -> b q h gs d',
382
+ gs=2 + self.num_key_value_groups,
383
+ d=self.head_dim,
384
+ )
385
+
386
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
387
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
388
+ key_states = qkv_states[..., -2, :]
389
+ value_states = qkv_states[..., -1, :]
390
+
391
+ query_states = query_states.transpose(1, 2)
392
+ key_states = key_states.transpose(1, 2)
393
+ value_states = value_states.transpose(1, 2)
394
+
395
+ kv_seq_len = key_states.shape[-2]
396
+ if past_key_value is not None:
397
+ kv_seq_len += past_key_value[0].shape[-2]
398
+
399
+ prunded_sequence_length = kwargs["prunded_sequence_length"]
400
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length)
401
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
402
+
403
+ if past_key_value is not None:
404
+ # reuse k, v, self_attention
405
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
406
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
407
+
408
+ past_key_value = (key_states, value_states) if use_cache else None
409
+
410
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
411
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
412
+
413
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
414
+
415
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
416
+ raise ValueError(
417
+ f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
418
+ f' {attn_weights.size()}'
419
+ )
420
+
421
+ if attention_mask is not None:
422
+ if attention_mask.size(-1) < kv_seq_len:
423
+ pad_width = kv_seq_len - attention_mask.size(-1)
424
+ attention_mask = F.pad(attention_mask, (0, pad_width), value=0)
425
+ elif attention_mask.size(-1) > kv_seq_len:
426
+ attention_mask = attention_mask[:, :, :, :kv_seq_len]
427
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
428
+ raise ValueError(
429
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
430
+ )
431
+ attn_weights = attn_weights + attention_mask
432
+
433
+ # upcast attention to fp32
434
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
435
+ attn_output = torch.matmul(attn_weights, value_states)
436
+
437
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
438
+ raise ValueError(
439
+ f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
440
+ f' {attn_output.size()}'
441
+ )
442
+
443
+ attn_output = attn_output.transpose(1, 2).contiguous()
444
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
445
+
446
+ attn_output = self.wo(attn_output)
447
+
448
+ if not output_attentions:
449
+ attn_weights = None
450
+
451
+ return attn_output, attn_weights, past_key_value
452
+
453
+
454
+ # Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
455
+ class InternLM2FlashAttention2(InternLM2Attention):
456
+ """
457
+ InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
458
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
459
+ flash attention and deal with padding tokens in case the input contains any of them.
460
+ """
461
+
462
+ def forward(
463
+ self,
464
+ hidden_states: torch.Tensor,
465
+ attention_mask: Optional[torch.LongTensor] = None,
466
+ position_ids: Optional[torch.LongTensor] = None,
467
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
468
+ output_attentions: bool = False,
469
+ use_cache: bool = False,
470
+ **kwargs,
471
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
472
+
473
+
474
+ # InternLM2FlashAttention2 attention does not support output_attentions
475
+ if 'padding_mask' in kwargs:
476
+ warnings.warn(
477
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
478
+ 'Please make sure use `attention_mask` instead.`'
479
+ )
480
+
481
+ # overwrite attention_mask with padding_mask
482
+ attention_mask = kwargs.pop('padding_mask')
483
+
484
+ output_attentions = False
485
+
486
+ bsz, q_len, _ = hidden_states.size()
487
+
488
+ qkv_states = self.wqkv(hidden_states)
489
+
490
+ qkv_states = rearrange(
491
+ qkv_states,
492
+ 'b q (h gs d) -> b q h gs d',
493
+ gs=2 + self.num_key_value_groups,
494
+ d=self.head_dim,
495
+ )
496
+
497
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
498
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
499
+ key_states = qkv_states[..., -2, :]
500
+ value_states = qkv_states[..., -1, :]
501
+
502
+ query_states = query_states.transpose(1, 2)
503
+ key_states = key_states.transpose(1, 2)
504
+ value_states = value_states.transpose(1, 2)
505
+
506
+ kv_seq_len = key_states.shape[-2]
507
+ if past_key_value is not None:
508
+ kv_seq_len += past_key_value[0].shape[-2]
509
+
510
+ prunded_sequence_length = kwargs["prunded_sequence_length"]
511
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length)
512
+
513
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
514
+
515
+ if past_key_value is not None:
516
+ # reuse k, v, self_attention
517
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
518
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
519
+
520
+ past_key_value = (key_states, value_states) if use_cache else None
521
+
522
+ query_states = query_states.transpose(1, 2)
523
+ key_states = key_states.transpose(1, 2)
524
+ value_states = value_states.transpose(1, 2)
525
+
526
+ attn_output = self._flash_attention_forward(
527
+ query_states, key_states, value_states, attention_mask, q_len
528
+ )
529
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
530
+ attn_output = self.wo(attn_output)
531
+
532
+ if not output_attentions:
533
+ attn_weights = None
534
+
535
+ return attn_output, attn_weights, past_key_value
536
+
537
+ def _flash_attention_forward(
538
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
539
+ ):
540
+ """
541
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
542
+ first unpad the input, then computes the attention scores and pad the final attention scores.
543
+
544
+ Args:
545
+ query_states (`torch.Tensor`):
546
+ Input query states to be passed to Flash Attention API
547
+ key_states (`torch.Tensor`):
548
+ Input key states to be passed to Flash Attention API
549
+ value_states (`torch.Tensor`):
550
+ Input value states to be passed to Flash Attention API
551
+ attention_mask (`torch.Tensor`):
552
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
553
+ position of padding tokens and 1 for the position of non-padding tokens.
554
+ dropout (`int`, *optional*):
555
+ Attention dropout
556
+ softmax_scale (`float`, *optional*):
557
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
558
+ """
559
+ # Contains at least one padding token in the sequence
560
+ causal = self.is_causal and query_length != 1
561
+ if attention_mask is not None:
562
+ batch_size = query_states.shape[0]
563
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
564
+ query_states, key_states, value_states, attention_mask, query_length
565
+ )
566
+
567
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
568
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
569
+
570
+ attn_output_unpad = flash_attn_varlen_func(
571
+ query_states,
572
+ key_states,
573
+ value_states,
574
+ cu_seqlens_q=cu_seqlens_q,
575
+ cu_seqlens_k=cu_seqlens_k,
576
+ max_seqlen_q=max_seqlen_in_batch_q,
577
+ max_seqlen_k=max_seqlen_in_batch_k,
578
+ dropout_p=dropout,
579
+ softmax_scale=softmax_scale,
580
+ causal=causal,
581
+ )
582
+
583
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
584
+ else:
585
+ attn_output = flash_attn_func(
586
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
587
+ )
588
+
589
+ return attn_output
590
+
591
+ def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
592
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
593
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
594
+
595
+ key_layer = index_first_axis(
596
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
597
+ )
598
+ value_layer = index_first_axis(
599
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
600
+ )
601
+
602
+ if query_length == kv_seq_len:
603
+ query_layer = index_first_axis(
604
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
605
+ )
606
+ cu_seqlens_q = cu_seqlens_k
607
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
608
+ indices_q = indices_k
609
+ elif query_length == 1:
610
+ max_seqlen_in_batch_q = 1
611
+ cu_seqlens_q = torch.arange(
612
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
613
+ ) # There is a memcpy here, that is very bad.
614
+ indices_q = cu_seqlens_q[:-1]
615
+ query_layer = query_layer.squeeze(1)
616
+ else:
617
+ # The -q_len: slice assumes left padding.
618
+ attention_mask = attention_mask[:, -query_length:]
619
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
620
+
621
+ return (
622
+ query_layer,
623
+ key_layer,
624
+ value_layer,
625
+ indices_q.to(torch.int64),
626
+ (cu_seqlens_q, cu_seqlens_k),
627
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
628
+ )
629
+
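Side note on the unpadding path above: the `indices_k`/`cu_seqlens_k` values consumed by `_unpad_input` come from a `_get_unpad_data`-style helper defined earlier in this file. A minimal, self-contained sketch of that computation (assuming only PyTorch; names are illustrative) looks like this:

import torch
import torch.nn.functional as F

def get_unpad_data_sketch(attention_mask: torch.Tensor):
    # attention_mask: (batch_size, seq_len), 1 = real token, 0 = padding.
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    # Positions of real tokens in the flattened (batch * seq_len) layout.
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = int(seqlens_in_batch.max())
    # Cumulative sequence lengths prefixed with 0, the layout flash_attn_varlen_func expects.
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch

mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
print(get_unpad_data_sketch(mask))  # indices [0,1,2,4,5], cu_seqlens [0,3,5], max 3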
630
+
631
+ INTERNLM2_ATTENTION_CLASSES = {
632
+ 'eager': InternLM2Attention,
633
+ 'flash_attention_2': InternLM2FlashAttention2,
634
+ }
635
+
636
+
637
+ # Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
638
+ class InternLM2DecoderLayer(nn.Module):
639
+ def __init__(self, config: InternLM2Config):
640
+ super().__init__()
641
+ self.hidden_size = config.hidden_size
642
+
643
+ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)
644
+
645
+ self.feed_forward = InternLM2MLP(config)
646
+ self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
647
+ self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
648
+
649
+ def forward(
650
+ self,
651
+ hidden_states: torch.Tensor,
652
+ attention_mask: Optional[torch.Tensor] = None,
653
+ position_ids: Optional[torch.LongTensor] = None,
654
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
655
+ output_attentions: Optional[bool] = False,
656
+ use_cache: Optional[bool] = False,
657
+ **kwargs,
658
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
659
+ """
660
+ Args:
661
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
662
+ attention_mask (`torch.FloatTensor`, *optional*):
663
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
664
+ query_sequence_length, key_sequence_length)` if default attention is used.
665
+ output_attentions (`bool`, *optional*):
666
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
667
+ returned tensors for more detail.
668
+ use_cache (`bool`, *optional*):
669
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
670
+ (see `past_key_values`).
671
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
672
+ """
673
+ if 'padding_mask' in kwargs:
674
+ warnings.warn(
675
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
676
+ 'Please make sure to use `attention_mask` instead.'
677
+ )
678
+
679
+ residual = hidden_states
680
+
681
+ hidden_states = self.attention_norm(hidden_states)
682
+
683
+ # Self Attention
684
+ hidden_states, self_attn_weights, present_key_value = self.attention(
685
+ hidden_states=hidden_states,
686
+ attention_mask=attention_mask,
687
+ position_ids=position_ids,
688
+ past_key_value=past_key_value,
689
+ output_attentions=output_attentions,
690
+ use_cache=use_cache,
691
+ **kwargs,
692
+ )
693
+ hidden_states = residual + hidden_states
694
+
695
+ # Fully Connected
696
+ residual = hidden_states
697
+ hidden_states = self.ffn_norm(hidden_states)
698
+ hidden_states = self.feed_forward(hidden_states)
699
+ hidden_states = residual + hidden_states
700
+
701
+ outputs = (hidden_states,)
702
+
703
+ if output_attentions:
704
+ outputs += (self_attn_weights,)
705
+
706
+ if use_cache:
707
+ outputs += (present_key_value,)
708
+
709
+ return outputs
710
+
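The decoder layer above follows the usual pre-norm residual pattern: normalize, transform, add the result back onto the residual stream. Stripped of the attention details, the data flow is roughly the following illustrative sketch (not the class itself):

def decoder_layer_flow(hidden_states, attention_norm, attention, ffn_norm, feed_forward):
    # Self-attention sub-block with a pre-norm residual connection.
    residual = hidden_states
    hidden_states = attention(attention_norm(hidden_states))
    hidden_states = residual + hidden_states

    # MLP sub-block, same pattern.
    residual = hidden_states
    hidden_states = feed_forward(ffn_norm(hidden_states))
    return residual + hidden_states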
711
+
712
+ InternLM2_START_DOCSTRING = r"""
713
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
714
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
715
+ etc.)
716
+
717
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
718
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
719
+ and behavior.
720
+
721
+ Parameters:
722
+ config ([`InternLM2Config`]):
723
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
724
+ load the weights associated with the model, only the configuration. Check out the
725
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
726
+ """
727
+
728
+
729
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
730
+ @add_start_docstrings(
731
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
732
+ InternLM2_START_DOCSTRING,
733
+ )
734
+ class InternLM2PreTrainedModel(PreTrainedModel):
735
+ config_class = InternLM2Config
736
+ base_model_prefix = 'model'
737
+ supports_gradient_checkpointing = True
738
+ _no_split_modules = ['InternLM2DecoderLayer']
739
+ _skip_keys_device_placement = 'past_key_values'
740
+ _supports_flash_attn_2 = True
741
+
742
+ def _init_weights(self, module):
743
+ std = self.config.initializer_range
744
+ if isinstance(module, nn.Linear):
745
+ module.weight.data.normal_(mean=0.0, std=std)
746
+ if module.bias is not None:
747
+ module.bias.data.zero_()
748
+ elif isinstance(module, nn.Embedding):
749
+ module.weight.data.normal_(mean=0.0, std=std)
750
+ if module.padding_idx is not None:
751
+ module.weight.data[module.padding_idx].zero_()
752
+
753
+
754
+ InternLM2_INPUTS_DOCSTRING = r"""
755
+ Args:
756
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
757
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
758
+ it.
759
+
760
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
761
+ [`PreTrainedTokenizer.__call__`] for details.
762
+
763
+ [What are input IDs?](../glossary#input-ids)
764
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
765
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
766
+
767
+ - 1 for tokens that are **not masked**,
768
+ - 0 for tokens that are **masked**.
769
+
770
+ [What are attention masks?](../glossary#attention-mask)
771
+
772
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
773
+ [`PreTrainedTokenizer.__call__`] for details.
774
+
775
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
776
+ `past_key_values`).
777
+
778
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
779
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
780
+ information on the default strategy.
781
+
782
+ - 1 indicates the head is **not masked**,
783
+ - 0 indicates the head is **masked**.
784
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
785
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
786
+ config.n_positions - 1]`.
787
+
788
+ [What are position IDs?](../glossary#position-ids)
789
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
790
+ when `config.use_cache=True`):
791
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
792
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
793
+ `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.
794
+
795
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
796
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
797
+
798
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
799
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
800
+ of shape `(batch_size, sequence_length)`.
801
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
802
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
803
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
804
+ model's internal embedding lookup matrix.
805
+ use_cache (`bool`, *optional*):
806
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
807
+ `past_key_values`).
808
+ output_attentions (`bool`, *optional*):
809
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
810
+ tensors for more detail.
811
+ output_hidden_states (`bool`, *optional*):
812
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
813
+ more detail.
814
+ return_dict (`bool`, *optional*):
815
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
816
+ """
817
+
818
+
819
+ # Modified from transformers.model.llama.modeling_llama.LlamaModel
820
+ @add_start_docstrings(
821
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
822
+ InternLM2_START_DOCSTRING,
823
+ )
824
+ class InternLM2Model(InternLM2PreTrainedModel):
825
+ """
826
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]
827
+
828
+ Args:
829
+ config: InternLM2Config
830
+ """
831
+
832
+ _auto_class = 'AutoModel'
833
+
834
+ def __init__(self, config: InternLM2Config):
835
+ super().__init__(config)
836
+ self.padding_idx = config.pad_token_id
837
+ self.vocab_size = config.vocab_size
838
+ self.config = config
839
+ if not has_flash_attn:
840
+ self.config.attn_implementation = 'eager'
841
+ print('Warning: Flash attention is not available, using eager attention instead.')
842
+
843
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
844
+
845
+ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
846
+ self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
847
+
848
+ self.gradient_checkpointing = False
849
+ # Initialize weights and apply final processing
850
+ self.post_init()
851
+
852
+ def get_input_embeddings(self):
853
+ return self.tok_embeddings
854
+
855
+ def set_input_embeddings(self, value):
856
+ self.tok_embeddings = value
857
+
858
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
859
+ # create causal mask
860
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
861
+ combined_attention_mask = None
862
+ if input_shape[-1] > 1:
863
+ combined_attention_mask = _make_causal_mask(
864
+ input_shape,
865
+ inputs_embeds.dtype,
866
+ device=inputs_embeds.device,
867
+ past_key_values_length=past_key_values_length,
868
+ )
869
+
870
+ if attention_mask is not None:
871
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
872
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
873
+ inputs_embeds.device
874
+ )
875
+ combined_attention_mask = (
876
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
877
+ )
878
+
879
+ return combined_attention_mask
880
+
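For readers unfamiliar with `_make_causal_mask`/`_expand_mask` (defined earlier in this file), the combined mask built above is an additive float mask in which disallowed positions hold a large negative value. A toy, hedged sketch of the same idea:

import torch

def toy_combined_mask(attention_mask, dtype=torch.float32):
    # attention_mask: (batch, seq_len) of 0/1 -> additive mask of shape (batch, 1, seq_len, seq_len).
    bsz, seq_len = attention_mask.shape
    causal = torch.ones(seq_len, seq_len, dtype=torch.bool).tril()            # query i may see keys <= i
    allowed = causal[None, None, :, :] & attention_mask[:, None, None, :].bool()
    mask = torch.zeros(bsz, 1, seq_len, seq_len, dtype=dtype)
    return mask.masked_fill(~allowed, torch.finfo(dtype).min)                  # block padding and future keys

print(toy_combined_mask(torch.tensor([[1, 1, 0]])))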
881
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
882
+ def forward(
883
+ self,
884
+ input_ids: torch.LongTensor = None,
885
+ attention_mask: Optional[torch.Tensor] = None,
886
+ position_ids: Optional[torch.LongTensor] = None,
887
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
888
+ inputs_embeds: Optional[torch.FloatTensor] = None,
889
+ use_cache: Optional[bool] = None,
890
+ output_attentions: Optional[bool] = None,
891
+ output_hidden_states: Optional[bool] = None,
892
+ return_dict: Optional[bool] = None,
893
+ visual_token_index: Optional[torch.Tensor] = None,
894
+ large_model_prune_layer: Optional[float] = None,
895
+ large_model_prune_ratio: Optional[float] = None,
896
+ large_model_prune_keep_count: Optional[int] = None,
897
+ large_model_prune_selection: Optional[str] = None,
898
+ large_model_similarity_target_coverage: Optional[float] = None,
899
+ large_model_similarity_min_gain: Optional[float] = None,
900
+ large_model_similarity_min_keep: Optional[int] = None,
901
+ large_model_similarity_max_keep_ratio: Optional[float] = None,
902
+ visual_token_importance: Optional[torch.Tensor] = None,
903
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
904
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
905
+ output_hidden_states = (
906
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
907
+ )
908
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
909
+
910
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
911
+
912
+ if self.config.attn_implementation == 'flash_attention_2':
913
+ _import_flash_attn()
914
+
915
+ # retrieve input_ids and inputs_embeds
916
+ if input_ids is not None and inputs_embeds is not None:
917
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
918
+ elif input_ids is not None:
919
+ batch_size, seq_length = input_ids.shape[:2]
920
+ elif inputs_embeds is not None:
921
+ batch_size, seq_length = inputs_embeds.shape[:2]
922
+ else:
923
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
924
+
925
+ seq_length_with_past = seq_length
926
+ past_key_values_length = 0
927
+ if past_key_values is not None:
928
+ past_key_values_length = past_key_values[0][0].shape[2]
929
+ seq_length_with_past = seq_length_with_past + past_key_values_length
930
+
931
+ if position_ids is None:
932
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
933
+ position_ids = torch.arange(
934
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
935
+ )
936
+ position_ids = position_ids.unsqueeze(0)
937
+
938
+ if inputs_embeds is None:
939
+ inputs_embeds = self.tok_embeddings(input_ids)
940
+
941
+ if self.config.attn_implementation == 'flash_attention_2':
942
+ # 2d mask is passed through the layers
943
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
944
+ else:
945
+ if attention_mask is None:
946
+ attention_mask = torch.ones(
947
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
948
+ )
949
+ attention_mask = self._prepare_decoder_attention_mask(
950
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
951
+ )
952
+
953
+ # embed positions
954
+ hidden_states = inputs_embeds
955
+
956
+ if self.gradient_checkpointing and self.training:
957
+ if use_cache:
958
+ logger.warning_once(
959
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
960
+ )
961
+ use_cache = False
962
+
963
+ # decoder layers
964
+ all_hidden_states = () if output_hidden_states else None
965
+ all_self_attns = () if output_attentions else None
966
+ next_decoder_cache = () if use_cache else None
967
+
968
+ if large_model_prune_layer is not None:
969
+ token_prune = True
970
+ K = int(len(self.layers) * large_model_prune_layer)
971
+ keep_ratio = large_model_prune_ratio
972
+ visual_token_length = int(visual_token_index[1] - visual_token_index[0] + 1)
973
+ else:
974
+ token_prune = False
975
+
976
+
977
+ aggregated_viusal_token_attention = 0 if output_attentions else None
978
+ prunded_sequence_length = 0
979
+ for idx, decoder_layer in enumerate(self.layers):
980
+ if output_hidden_states:
981
+ all_hidden_states += (hidden_states,)
982
+
983
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
984
+
985
+ if self.gradient_checkpointing and self.training:
986
+
987
+ def create_custom_forward(module):
988
+ def custom_forward(*inputs):
989
+ # None for past_key_value
990
+ return module(*inputs, output_attentions, None)
991
+
992
+ return custom_forward
993
+
994
+ layer_outputs = torch.utils.checkpoint.checkpoint(
995
+ create_custom_forward(decoder_layer),
996
+ hidden_states,
997
+ attention_mask,
998
+ position_ids,
999
+ None,
1000
+ )
1001
+ else:
1002
+
1003
+ ##### random pruning at a certain layer #########
1004
+ if token_prune:
1005
+ if hidden_states.shape[1] != 1:
1006
+ if idx == K:
1007
+ device = hidden_states.device
1008
+ selected_visual_index = select_visual_token_indices(
1009
+ hidden_states,
1010
+ visual_token_importance,
1011
+ visual_token_index,
1012
+ keep_ratio,
1013
+ large_model_prune_selection or "topk",
1014
+ similarity_target_coverage=large_model_similarity_target_coverage or 0.9,
1015
+ similarity_min_gain=large_model_similarity_min_gain or 0.0,
1016
+ similarity_min_keep=large_model_similarity_min_keep or 1,
1017
+ similarity_max_keep_ratio=large_model_similarity_max_keep_ratio or 1.0,
1018
+ ) + int(visual_token_index[0])
1019
+ keep_indexs = torch.cat((
1020
+ torch.arange(int(visual_token_index[0]), device=device),
1021
+ selected_visual_index.to(device),
1022
+ torch.arange(int(visual_token_index[1] + 1), seq_length, device=device),
1023
+ ))
1024
+ keep_indexs = keep_indexs.sort().values
1025
+ hidden_states = hidden_states[:, keep_indexs,:]
1026
+ if attention_mask is not None:
1027
+ attention_mask = attention_mask[:,:,:hidden_states.shape[1], :hidden_states.shape[1]]
1028
+ position_ids = keep_indexs.unsqueeze(0)
1029
+ prunded_sequence_length = visual_token_length - (
1030
+ large_model_prune_keep_count if large_model_prune_keep_count is not None else selected_visual_index.numel()
1031
+ )
1032
+
1033
+
1034
+ else:
1035
+ if idx == K:
1036
+ visual_token_length = visual_token_index[1] - visual_token_index[0] + 1
1037
+ kept_count = (
1038
+ large_model_prune_keep_count
1039
+ if large_model_prune_keep_count is not None
1040
+ else int(visual_token_length * keep_ratio)
1041
+ )
1042
+ prunded_sequence_length = visual_token_length - kept_count
1043
+ if attention_mask is not None:
1044
+ attention_mask = attention_mask[:, :, :, prunded_sequence_length:]
1045
+
1046
+
1047
+
1048
+
1049
+ layer_outputs = decoder_layer(
1050
+ hidden_states,
1051
+ attention_mask=attention_mask,
1052
+ position_ids=position_ids,
1053
+ past_key_value=past_key_value,
1054
+ output_attentions=output_attentions,
1055
+ use_cache=use_cache,
1056
+ prunded_sequence_length=prunded_sequence_length
1057
+ )
1058
+
1059
+ hidden_states = layer_outputs[0]
1060
+
1061
+ if use_cache:
1062
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
1063
+
1064
+ if output_attentions:
1065
+ # all_self_attns += (layer_outputs[1],)
1066
+ if layer_outputs[1].shape[2] != 1:
1067
+ aggregated_viusal_token_attention = aggregated_viusal_token_attention + layer_outputs[1][:, :, visual_token_index[1]:, visual_token_index[0]:visual_token_index[1]+1].sum(dim=(0, 1, 2))
1068
+ else:
1069
+ aggregated_viusal_token_attention = aggregated_viusal_token_attention + layer_outputs[1][:, :, :, visual_token_index[0]:visual_token_index[1]+1].sum(dim=(0, 1, 2))
1070
+
1071
+ hidden_states = self.norm(hidden_states)
1072
+
1073
+ # add hidden states from the last decoder layer
1074
+ if output_hidden_states:
1075
+ all_hidden_states += (hidden_states,)
1076
+
1077
+ next_cache = next_decoder_cache if use_cache else None
1078
+ if not return_dict:
1079
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1080
+
1081
+
1082
+ out_dict = BaseModelOutputWithPast(
1083
+ last_hidden_state=hidden_states,
1084
+ past_key_values=next_cache,
1085
+ hidden_states=all_hidden_states,
1086
+ attentions=all_self_attns,
1087
+ )
1088
+ out_dict.aggregated_viusal_token_attention = aggregated_viusal_token_attention
1089
+
1090
+ return out_dict
1091
+
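The visual-token pruning branch in `forward` above boils down to building a sorted index list that keeps the text prefix, a selected subset of the visual span, and the text suffix. A self-contained restatement of that index construction (with a hypothetical `selected_visual_index` already produced by `select_visual_token_indices`):

import torch

def build_keep_indices(visual_token_index, selected_visual_index, seq_length, device='cpu'):
    # visual_token_index: (start, end) of the visual span, inclusive.
    prefix = torch.arange(int(visual_token_index[0]), device=device)                  # tokens before the image
    suffix = torch.arange(int(visual_token_index[1]) + 1, seq_length, device=device)  # tokens after the image
    keep = torch.cat((prefix, selected_visual_index.to(device), suffix))
    return keep.sort().values

# e.g. visual span [3, 8], keep visual tokens 4 and 7, total sequence length 12
print(build_keep_indices((3, 8), torch.tensor([4, 7]), 12))
# tensor([ 0,  1,  2,  4,  7,  9, 10, 11])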
1092
+
1093
+ # Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
1094
+ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1095
+ _auto_class = 'AutoModelForCausalLM'
1096
+
1097
+ _tied_weights_keys = ['output.weight']
1098
+
1099
+ def __init__(self, config):
1100
+ super().__init__(config)
1101
+ self.model = InternLM2Model(config)
1102
+ self.vocab_size = config.vocab_size
1103
+ self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1104
+
1105
+ # Initialize weights and apply final processing
1106
+ self.post_init()
1107
+
1108
+ def get_input_embeddings(self):
1109
+ return self.model.tok_embeddings
1110
+
1111
+ def set_input_embeddings(self, value):
1112
+ self.model.tok_embeddings = value
1113
+
1114
+ def get_output_embeddings(self):
1115
+ return self.output
1116
+
1117
+ def set_output_embeddings(self, new_embeddings):
1118
+ self.output = new_embeddings
1119
+
1120
+ def set_decoder(self, decoder):
1121
+ self.model = decoder
1122
+
1123
+ def get_decoder(self):
1124
+ return self.model
1125
+
1126
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1127
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1128
+ def forward(
1129
+ self,
1130
+ input_ids: torch.LongTensor = None,
1131
+ attention_mask: Optional[torch.Tensor] = None,
1132
+ position_ids: Optional[torch.LongTensor] = None,
1133
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1134
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1135
+ labels: Optional[torch.LongTensor] = None,
1136
+ use_cache: Optional[bool] = None,
1137
+ output_attentions: Optional[bool] = None,
1138
+ output_hidden_states: Optional[bool] = None,
1139
+ return_dict: Optional[bool] = None,
1140
+ visual_token_index: Optional[torch.Tensor] = None,
1141
+ large_model_prune_layer: Optional[float] = None,
1142
+ large_model_prune_ratio: Optional[float] = None,
1143
+ large_model_prune_keep_count: Optional[int] = None,
1144
+ large_model_prune_selection: Optional[str] = None,
1145
+ large_model_similarity_target_coverage: Optional[float] = None,
1146
+ large_model_similarity_min_gain: Optional[float] = None,
1147
+ large_model_similarity_min_keep: Optional[int] = None,
1148
+ large_model_similarity_max_keep_ratio: Optional[float] = None,
1149
+ visual_token_importance: Optional[torch.Tensor] = None
1150
+
1151
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1152
+ r"""
1153
+ Args:
1154
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1155
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1156
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1157
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1158
+
1159
+ Returns:
1160
+
1161
+ Example:
1162
+
1163
+ ```python
1164
+ >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1165
+
1166
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1167
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1168
+
1169
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1170
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1171
+
1172
+ >>> # Generate
1173
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1174
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1175
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1176
+ ```"""
1177
+
1178
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1179
+ output_hidden_states = (
1180
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1181
+ )
1182
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1183
+
1184
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1185
+ outputs = self.model(
1186
+ input_ids=input_ids,
1187
+ attention_mask=attention_mask,
1188
+ position_ids=position_ids,
1189
+ past_key_values=past_key_values,
1190
+ inputs_embeds=inputs_embeds,
1191
+ use_cache=use_cache,
1192
+ output_attentions=output_attentions,
1193
+ output_hidden_states=output_hidden_states,
1194
+ return_dict=return_dict,
1195
+ visual_token_index=visual_token_index,
1196
+ large_model_prune_layer=large_model_prune_layer,
1197
+ large_model_prune_ratio=large_model_prune_ratio,
1198
+ large_model_prune_keep_count=large_model_prune_keep_count,
1199
+ large_model_prune_selection=large_model_prune_selection,
1200
+ large_model_similarity_target_coverage=large_model_similarity_target_coverage,
1201
+ large_model_similarity_min_gain=large_model_similarity_min_gain,
1202
+ large_model_similarity_min_keep=large_model_similarity_min_keep,
1203
+ large_model_similarity_max_keep_ratio=large_model_similarity_max_keep_ratio,
1204
+ visual_token_importance=visual_token_importance
1205
+ )
1206
+
1207
+ hidden_states = outputs[0]
1208
+ logits = self.output(hidden_states)
1209
+ logits = logits.float()
1210
+
1211
+ loss = None
1212
+ if labels is not None:
1213
+ # Shift so that tokens < n predict n
1214
+ shift_logits = logits[..., :-1, :].contiguous()
1215
+ shift_labels = labels[..., 1:].contiguous()
1216
+ # Flatten the tokens
1217
+ loss_fct = CrossEntropyLoss()
1218
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1219
+ shift_labels = shift_labels.view(-1)
1220
+ # Enable model parallelism
1221
+ shift_labels = shift_labels.to(shift_logits.device)
1222
+ loss = loss_fct(shift_logits, shift_labels)
1223
+
1224
+ if not return_dict:
1225
+ output = (logits,) + outputs[1:]
1226
+ return (loss,) + output if loss is not None else output
1227
+
1228
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1229
+ output = CausalLMOutputWithPast(
1230
+ loss=loss,
1231
+ logits=logits,
1232
+ past_key_values=outputs.past_key_values,
1233
+ hidden_states=outputs.hidden_states,
1234
+ attentions=outputs.attentions,
1235
+ )
1236
+ output['aggregated_viusal_token_attention'] = outputs.aggregated_viusal_token_attention
1237
+ output['logits'] = output['logits'].to(device)
1238
+ return output
1239
+
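The loss computation in `forward` above is the standard shift-by-one language-modeling objective: the logits at position t are scored against the label at position t+1. A minimal worked sketch with random tensors:

import torch
from torch.nn import CrossEntropyLoss

vocab_size = 8
logits = torch.randn(2, 5, vocab_size)            # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (2, 5))

shift_logits = logits[..., :-1, :].contiguous()   # predictions for positions 0..3
shift_labels = labels[..., 1:].contiguous()       # targets are the next tokens at positions 1..4
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss)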
1240
+
1241
+ def _sample(
1242
+ self,
1243
+ input_ids: torch.LongTensor,
1244
+ logits_processor: LogitsProcessorList,
1245
+ stopping_criteria: StoppingCriteriaList,
1246
+ generation_config: GenerationConfig,
1247
+ synced_gpus: bool,
1248
+ streamer: Optional["BaseStreamer"],
1249
+ logits_warper: Optional[LogitsProcessorList],
1250
+ **model_kwargs,
1251
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
1252
+ # init values
1253
+ pad_token_id = generation_config._pad_token_tensor
1254
+ output_attentions = generation_config.output_attentions
1255
+ output_hidden_states = generation_config.output_hidden_states
1256
+ output_scores = generation_config.output_scores
1257
+ output_logits = generation_config.output_logits
1258
+ return_dict_in_generate = generation_config.return_dict_in_generate
1259
+ max_length = generation_config.max_length
1260
+ has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria)
1261
+ do_sample = generation_config.do_sample
1262
+ if do_sample is True and not isinstance(logits_warper, LogitsProcessorList):
1263
+ raise ValueError(
1264
+ "`do_sample` is set to `True`, `logits_warper` must be a `LogitsProcessorList` instance (it is "
1265
+ f"{logits_warper})."
1266
+ )
1267
+
1268
+ # init attention / hidden states / scores tuples
1269
+ scores = () if (return_dict_in_generate and output_scores) else None
1270
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
1271
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
1272
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
1273
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
1274
+
1275
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
1276
+ if return_dict_in_generate and self.config.is_encoder_decoder:
1277
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
1278
+ encoder_hidden_states = (
1279
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
1280
+ )
1281
+
1282
+ # keep track of which sequences are already finished
1283
+ batch_size, cur_len = input_ids.shape
1284
+ this_peer_finished = False
1285
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
1286
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
1287
+
1288
+ aggregated_viusal_token_attention = 0 if output_attentions else None
1289
+ while self._has_unfinished_sequences(
1290
+ this_peer_finished, synced_gpus, device=input_ids.device, cur_len=cur_len, max_length=max_length
1291
+ ):
1292
+ # prepare model inputs
1293
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
1294
+
1295
+ # prepare variable output controls (note: some models won't accept all output controls)
1296
+ model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
1297
+ model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
1298
+
1299
+ # forward pass to get next token
1300
+ outputs = self(**model_inputs, return_dict=True)
1301
+ if output_attentions:
1302
+ aggregated_viusal_token_attention = aggregated_viusal_token_attention + outputs['aggregated_viusal_token_attention']
1303
+
1304
+ if synced_gpus and this_peer_finished:
1305
+ continue # don't waste resources running the code we don't need
1306
+
1307
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
1308
+ # (the clone itself is always small)
1309
+ next_token_logits = outputs.logits[:, -1, :].clone()
1310
+
1311
+ # pre-process distribution
1312
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1313
+ if do_sample:
1314
+ next_token_scores = logits_warper(input_ids, next_token_scores)
1315
+
1316
+ # Store scores, attentions and hidden_states when required
1317
+ if return_dict_in_generate:
1318
+ if output_scores:
1319
+ scores += (next_token_scores,)
1320
+ if output_logits:
1321
+ raw_logits += (next_token_logits,)
1322
+ if output_attentions:
1323
+ decoder_attentions += (
1324
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
1325
+ )
1326
+ if self.config.is_encoder_decoder:
1327
+ cross_attentions += (outputs.cross_attentions,)
1328
+
1329
+ if output_hidden_states:
1330
+ decoder_hidden_states += (
1331
+ (outputs.decoder_hidden_states,)
1332
+ if self.config.is_encoder_decoder
1333
+ else (outputs.hidden_states,)
1334
+ )
1335
+
1336
+ # token selection
1337
+ if do_sample:
1338
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1339
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
1340
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
1341
+ else:
1342
+ next_tokens = torch.argmax(next_token_scores, dim=-1)
1343
+
1344
+ # finished sentences should have their next token be a padding token
1345
+ if has_eos_stopping_criteria:
1346
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
1347
+
1348
+ # update generated ids, model inputs, and length for next step
1349
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1350
+ if streamer is not None:
1351
+ streamer.put(next_tokens.cpu())
1352
+ model_kwargs = self._update_model_kwargs_for_generation(
1353
+ outputs,
1354
+ model_kwargs,
1355
+ is_encoder_decoder=self.config.is_encoder_decoder,
1356
+ )
1357
+
1358
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
1359
+ this_peer_finished = unfinished_sequences.max() == 0
1360
+ cur_len += 1
1361
+
1362
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
1363
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
1364
+ del outputs
1365
+
1366
+ if streamer is not None:
1367
+ streamer.end()
1368
+
1369
+ if return_dict_in_generate:
1370
+ if self.config.is_encoder_decoder:
1371
+ return GenerateEncoderDecoderOutput(
1372
+ sequences=input_ids,
1373
+ scores=scores,
1374
+ logits=raw_logits,
1375
+ encoder_attentions=encoder_attentions,
1376
+ encoder_hidden_states=encoder_hidden_states,
1377
+ decoder_attentions=decoder_attentions,
1378
+ cross_attentions=cross_attentions,
1379
+ decoder_hidden_states=decoder_hidden_states,
1380
+ past_key_values=model_kwargs.get("past_key_values"),
1381
+ )
1382
+ else:
1383
+ out_dict = GenerateDecoderOnlyOutput(
1384
+ sequences=input_ids,
1385
+ scores=scores,
1386
+ logits=raw_logits,
1387
+ attentions=decoder_attentions,
1388
+ hidden_states=decoder_hidden_states,
1389
+ past_key_values=model_kwargs.get("past_key_values"),
1390
+ )
1391
+ out_dict["aggregated_viusal_token_attention"] = aggregated_viusal_token_attention
1392
+ return out_dict
1393
+ else:
1394
+ return input_ids
1395
+
1396
+ def prepare_inputs_for_generation(
1397
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1398
+ ):
1399
+ if past_key_values is not None:
1400
+ past_length = past_key_values[0][0].shape[2]
1401
+
1402
+ # Some generation methods already pass only the last input ID
1403
+ if input_ids.shape[1] > past_length:
1404
+ remove_prefix_length = past_length
1405
+ else:
1406
+ # Default to old behavior: keep only final ID
1407
+ remove_prefix_length = input_ids.shape[1] - 1
1408
+
1409
+ input_ids = input_ids[:, remove_prefix_length:]
1410
+
1411
+ position_ids = kwargs.get('position_ids', None)
1412
+ if attention_mask is not None and position_ids is None:
1413
+ # create position_ids on the fly for batch generation
1414
+ position_ids = attention_mask.long().cumsum(-1) - 1
1415
+ position_ids.masked_fill_(attention_mask == 0, 1)
1416
+ if past_key_values:
1417
+ position_ids = position_ids[:, -input_ids.shape[1]:]
1418
+
1419
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1420
+ if inputs_embeds is not None and past_key_values is None:
1421
+ model_inputs = {'inputs_embeds': inputs_embeds}
1422
+ else:
1423
+ model_inputs = {'input_ids': input_ids}
1424
+
1425
+ model_inputs.update(
1426
+ {
1427
+ 'position_ids': position_ids,
1428
+ 'past_key_values': past_key_values,
1429
+ 'use_cache': kwargs.get('use_cache'),
1430
+ 'attention_mask': attention_mask,
1431
+ 'visual_token_index': kwargs.get('visual_token_index'),
1432
+ 'large_model_prune_layer': kwargs.get('large_model_prune_layer'),
1433
+ 'large_model_prune_ratio': kwargs.get('large_model_prune_ratio'),
1434
+ 'large_model_prune_keep_count': kwargs.get('large_model_prune_keep_count'),
1435
+ 'large_model_prune_selection': kwargs.get('large_model_prune_selection'),
1436
+ 'large_model_similarity_target_coverage': kwargs.get('large_model_similarity_target_coverage'),
1437
+ 'large_model_similarity_min_gain': kwargs.get('large_model_similarity_min_gain'),
1438
+ 'large_model_similarity_min_keep': kwargs.get('large_model_similarity_min_keep'),
1439
+ 'large_model_similarity_max_keep_ratio': kwargs.get('large_model_similarity_max_keep_ratio'),
1440
+ 'visual_token_importance': kwargs.get('visual_token_importance')
1441
+ }
1442
+ )
1443
+ return model_inputs
1444
+
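The `position_ids` trick in `prepare_inputs_for_generation` makes left-padded batches line up: a cumulative sum over the attention mask gives each real token its position, and padded slots are filled with a dummy value that is never attended to. For example:

import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])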
1445
+ @staticmethod
1446
+ def _reorder_cache(past_key_values, beam_idx):
1447
+ reordered_past = ()
1448
+ for layer_past in past_key_values:
1449
+ reordered_past += (
1450
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1451
+ )
1452
+ return reordered_past
1453
+
1454
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''):
1455
+ if tokenizer.add_bos_token:
1456
+ prompt = ''
1457
+ else:
1458
+ prompt = tokenizer.bos_token
1459
+ if meta_instruction:
1460
+ prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
1461
+ for record in history:
1462
+ prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
1463
+ prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
1464
+ return tokenizer([prompt], return_tensors='pt')
1465
+
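For reference, `build_inputs` assembles a ChatML-style prompt. With an empty history, a system prompt, and the query "Hi", the string passed to the tokenizer looks roughly like this (illustrative layout, not an exact transcript):

prompt = (
    "<|im_start|>system\n{meta_instruction}<|im_end|>\n"
    "<|im_start|>user\nHi<|im_end|>\n"
    "<|im_start|>assistant\n"
)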
1466
+ @torch.no_grad()
1467
+ def chat(
1468
+ self,
1469
+ tokenizer,
1470
+ query: str,
1471
+ history: List[Tuple[str, str]] = [],
1472
+ streamer: Optional[BaseStreamer] = None,
1473
+ max_new_tokens: int = 1024,
1474
+ do_sample: bool = True,
1475
+ temperature: float = 0.8,
1476
+ top_p: float = 0.8,
1477
+ meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n'
1478
+ '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n'
1479
+ '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.',
1480
+ **kwargs,
1481
+ ):
1482
+ inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1483
+ inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1484
+ # also add end-of-assistant token in eos token id to avoid unnecessary generation
1485
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]]
1486
+ outputs = self.generate(
1487
+ **inputs,
1488
+ streamer=streamer,
1489
+ max_new_tokens=max_new_tokens,
1490
+ do_sample=do_sample,
1491
+ temperature=temperature,
1492
+ top_p=top_p,
1493
+ eos_token_id=eos_token_id,
1494
+ **kwargs,
1495
+ )
1496
+ outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]):]
1497
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
1498
+ response = response.split('<|im_end|>')[0]
1499
+ history = history + [(query, response)]
1500
+ return response, history
1501
+
1502
+ @torch.no_grad()
1503
+ def stream_chat(
1504
+ self,
1505
+ tokenizer,
1506
+ query: str,
1507
+ history: List[Tuple[str, str]] = [],
1508
+ max_new_tokens: int = 1024,
1509
+ do_sample: bool = True,
1510
+ temperature: float = 0.8,
1511
+ top_p: float = 0.8,
1512
+ **kwargs,
1513
+ ):
1514
+ """
1515
+ Return a generator that yields tuples in the format (response, history).
1516
+ E.g.:
1517
+ ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
1518
+ ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
1519
+ """
1520
+ if BaseStreamer is None:
1521
+ raise ModuleNotFoundError(
1522
+ 'The version of `transformers` is too low. Please make sure '
1523
+ 'that you have installed `transformers>=4.28.0`.'
1524
+ )
1525
+
1526
+ response_queue = queue.Queue(maxsize=20)
1527
+
1528
+ class ChatStreamer(BaseStreamer):
1529
+ def __init__(self, tokenizer) -> None:
1530
+ super().__init__()
1531
+ self.tokenizer = tokenizer
1532
+ self.queue = response_queue
1533
+ self.query = query
1534
+ self.history = history
1535
+ self.response = ''
1536
+ self.cache = []
1537
+ self.received_inputs = False
1538
+ self.queue.put((self.response, history + [(self.query, self.response)]))
1539
+
1540
+ def put(self, value):
1541
+ if len(value.shape) > 1 and value.shape[0] > 1:
1542
+ raise ValueError('ChatStreamer only supports batch size 1')
1543
+ elif len(value.shape) > 1:
1544
+ value = value[0]
1545
+
1546
+ if not self.received_inputs:
1547
+ # The first received value is input_ids, ignore here
1548
+ self.received_inputs = True
1549
+ return
1550
+
1551
+ self.cache.extend(value.tolist())
1552
+ token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
1553
+ if token.strip() != '<|im_end|>':
1554
+ self.response = self.response + token
1555
+ history = self.history + [(self.query, self.response)]
1556
+ self.queue.put((self.response, history))
1557
+ self.cache = []
1558
+ else:
1559
+ self.end()
1560
+
1561
+ def end(self):
1562
+ self.queue.put(None)
1563
+
1564
+ def stream_producer():
1565
+ return self.chat(
1566
+ tokenizer=tokenizer,
1567
+ query=query,
1568
+ streamer=ChatStreamer(tokenizer=tokenizer),
1569
+ history=history,
1570
+ max_new_tokens=max_new_tokens,
1571
+ do_sample=do_sample,
1572
+ temperature=temperature,
1573
+ top_p=top_p,
1574
+ **kwargs,
1575
+ )
1576
+
1577
+ def consumer():
1578
+ producer = threading.Thread(target=stream_producer)
1579
+ producer.start()
1580
+ while True:
1581
+ res = response_queue.get()
1582
+ if res is None:
1583
+ return
1584
+ yield res
1585
+
1586
+ return consumer()
1587
+
1588
+
1589
+ # Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
1590
+ @add_start_docstrings(
1591
+ """
1592
+ The InternLM2 Model transformer with a sequence classification head on top (linear layer).
1593
+
1594
+ [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
1595
+ as other causal models (e.g. GPT-2) do.
1596
+
1597
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1598
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1599
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1600
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1601
+ each row of the batch).
1602
+ """,
1603
+ InternLM2_START_DOCSTRING,
1604
+ )
1605
+ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1606
+ def __init__(self, config):
1607
+ super().__init__(config)
1608
+ self.num_labels = config.num_labels
1609
+ self.model = InternLM2Model(config)
1610
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1611
+
1612
+ # Initialize weights and apply final processing
1613
+ self.post_init()
1614
+
1615
+ def get_input_embeddings(self):
1616
+ return self.model.tok_embeddings
1617
+
1618
+ def set_input_embeddings(self, value):
1619
+ self.model.tok_embeddings = value
1620
+
1621
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1622
+ def forward(
1623
+ self,
1624
+ input_ids: torch.LongTensor = None,
1625
+ attention_mask: Optional[torch.Tensor] = None,
1626
+ position_ids: Optional[torch.LongTensor] = None,
1627
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1628
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1629
+ labels: Optional[torch.LongTensor] = None,
1630
+ use_cache: Optional[bool] = None,
1631
+ output_attentions: Optional[bool] = None,
1632
+ output_hidden_states: Optional[bool] = None,
1633
+ return_dict: Optional[bool] = None,
1634
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1635
+ r"""
1636
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1637
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1638
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1639
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1640
+ """
1641
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1642
+
1643
+ transformer_outputs = self.model(
1644
+ input_ids,
1645
+ attention_mask=attention_mask,
1646
+ position_ids=position_ids,
1647
+ past_key_values=past_key_values,
1648
+ inputs_embeds=inputs_embeds,
1649
+ use_cache=use_cache,
1650
+ output_attentions=output_attentions,
1651
+ output_hidden_states=output_hidden_states,
1652
+ return_dict=return_dict,
1653
+ )
1654
+ hidden_states = transformer_outputs[0]
1655
+ logits = self.score(hidden_states)
1656
+
1657
+ if input_ids is not None:
1658
+ batch_size = input_ids.shape[0]
1659
+ else:
1660
+ batch_size = inputs_embeds.shape[0]
1661
+
1662
+ if self.config.pad_token_id is None and batch_size != 1:
1663
+ raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
1664
+ if self.config.pad_token_id is None:
1665
+ sequence_lengths = -1
1666
+ else:
1667
+ if input_ids is not None:
1668
+ sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
1669
+ logits.device
1670
+ )
1671
+ else:
1672
+ sequence_lengths = -1
1673
+
1674
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1675
+
1676
+ loss = None
1677
+ if labels is not None:
1678
+ labels = labels.to(logits.device)
1679
+ if self.config.problem_type is None:
1680
+ if self.num_labels == 1:
1681
+ self.config.problem_type = 'regression'
1682
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1683
+ self.config.problem_type = 'single_label_classification'
1684
+ else:
1685
+ self.config.problem_type = 'multi_label_classification'
1686
+
1687
+ if self.config.problem_type == 'regression':
1688
+ loss_fct = MSELoss()
1689
+ if self.num_labels == 1:
1690
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1691
+ else:
1692
+ loss = loss_fct(pooled_logits, labels)
1693
+ elif self.config.problem_type == 'single_label_classification':
1694
+ loss_fct = CrossEntropyLoss()
1695
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1696
+ elif self.config.problem_type == 'multi_label_classification':
1697
+ loss_fct = BCEWithLogitsLoss()
1698
+ loss = loss_fct(pooled_logits, labels)
1699
+ if not return_dict:
1700
+ output = (pooled_logits,) + transformer_outputs[1:]
1701
+ return ((loss,) + output) if loss is not None else output
1702
+
1703
+ return SequenceClassifierOutputWithPast(
1704
+ loss=loss,
1705
+ logits=pooled_logits,
1706
+ past_key_values=transformer_outputs.past_key_values,
1707
+ hidden_states=transformer_outputs.hidden_states,
1708
+ attentions=transformer_outputs.attentions,
1709
+ )
isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2.py ADDED
@@ -0,0 +1,235 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Tokenization classes for InternLM."""
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+ from transformers.tokenization_utils import PreTrainedTokenizer
24
+ from transformers.utils import logging
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
29
+
30
+ PRETRAINED_VOCAB_FILES_MAP = {}
31
+
32
+
33
+ # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
34
+ class InternLM2Tokenizer(PreTrainedTokenizer):
35
+ """
36
+ Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
37
+
38
+ Args:
39
+ vocab_file (`str`):
40
+ Path to the vocabulary file.
41
+ """
42
+
43
+ vocab_files_names = VOCAB_FILES_NAMES
44
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
45
+ model_input_names = ['input_ids', 'attention_mask']
46
+ _auto_class = 'AutoTokenizer'
47
+
48
+ def __init__(
49
+ self,
50
+ vocab_file,
51
+ unk_token='<unk>',
52
+ bos_token='<s>',
53
+ eos_token='</s>',
54
+ pad_token='</s>',
55
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
56
+ add_bos_token=True,
57
+ add_eos_token=False,
58
+ decode_with_prefix_space=False,
59
+ clean_up_tokenization_spaces=False,
60
+ **kwargs,
61
+ ):
62
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
63
+ self.vocab_file = vocab_file
64
+ self.add_bos_token = add_bos_token
65
+ self.add_eos_token = add_eos_token
66
+ self.decode_with_prefix_space = decode_with_prefix_space
67
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
68
+ self.sp_model.Load(vocab_file)
69
+ self._no_prefix_space_tokens = None
70
+ super().__init__(
71
+ bos_token=bos_token,
72
+ eos_token=eos_token,
73
+ unk_token=unk_token,
74
+ pad_token=pad_token,
75
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
76
+ **kwargs,
77
+ )
78
+
79
+ @property
80
+ def no_prefix_space_tokens(self):
81
+ if self._no_prefix_space_tokens is None:
82
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
83
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
84
+ return self._no_prefix_space_tokens
85
+
86
+ @property
87
+ def vocab_size(self):
88
+ """Returns vocab size"""
89
+ return self.sp_model.get_piece_size()
90
+
91
+ @property
92
+ def bos_token_id(self) -> Optional[int]:
93
+ return self.sp_model.bos_id()
94
+
95
+ @property
96
+ def eos_token_id(self) -> Optional[int]:
97
+ return self.sp_model.eos_id()
98
+
99
+ def get_vocab(self):
100
+ """Returns vocab as a dict"""
101
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
102
+ vocab.update(self.added_tokens_encoder)
103
+ return vocab
104
+
105
+ def _tokenize(self, text):
106
+ """Returns a tokenized string."""
107
+ return self.sp_model.encode(text, out_type=str)
108
+
109
+ def _convert_token_to_id(self, token):
110
+ """Converts a token (str) in an id using the vocab."""
111
+ return self.sp_model.piece_to_id(token)
112
+
113
+ def _convert_id_to_token(self, index):
114
+ """Converts an index (integer) in a token (str) using the vocab."""
115
+ token = self.sp_model.IdToPiece(index)
116
+ return token
117
+
118
+ def _maybe_add_prefix_space(self, tokens, decoded):
119
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
120
+ return ' ' + decoded
121
+ else:
122
+ return decoded
123
+
124
+ def convert_tokens_to_string(self, tokens):
125
+ """Converts a sequence of tokens (string) in a single string."""
126
+ current_sub_tokens = []
127
+ out_string = ''
128
+ prev_is_special = False
129
+ for token in tokens:
130
+ # make sure that special tokens are not decoded using sentencepiece model
131
+ if token in self.all_special_tokens:
132
+ if not prev_is_special:
133
+ out_string += ' '
134
+ out_string += self.sp_model.decode(current_sub_tokens) + token
135
+ prev_is_special = True
136
+ current_sub_tokens = []
137
+ else:
138
+ current_sub_tokens.append(token)
139
+ prev_is_special = False
140
+ out_string += self.sp_model.decode(current_sub_tokens)
141
+ out_string = self.clean_up_tokenization(out_string)
142
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
143
+ return out_string[1:]
144
+
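Conceptually, the decode step above just glues SentencePiece pieces back together, turning the '▁' word-boundary marker into a space (the real implementation delegates this to `sp_model.decode`). A toy illustration:

tokens = ['▁Hello', ',', '▁world', '!']
text = ''.join(tokens).replace('▁', ' ').lstrip()
print(text)  # "Hello, world!"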
145
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
146
+ """
147
+ Save the vocabulary and special tokens file to a directory.
148
+
149
+ Args:
150
+ save_directory (`str`):
151
+ The directory in which to save the vocabulary.
152
+
153
+ Returns:
154
+ `Tuple(str)`: Paths to the files saved.
155
+ """
156
+ if not os.path.isdir(save_directory):
157
+ logger.error(f'Vocabulary path ({save_directory}) should be a directory')
158
+ return
159
+ out_vocab_file = os.path.join(
160
+ save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
161
+ )
162
+
163
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
164
+ copyfile(self.vocab_file, out_vocab_file)
165
+ elif not os.path.isfile(self.vocab_file):
166
+ with open(out_vocab_file, 'wb') as fi:
167
+ content_spiece_model = self.sp_model.serialized_model_proto()
168
+ fi.write(content_spiece_model)
169
+
170
+ return (out_vocab_file,)
171
+
172
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
173
+ if self.add_bos_token:
174
+ bos_token_ids = [self.bos_token_id]
175
+ else:
176
+ bos_token_ids = []
177
+
178
+ output = bos_token_ids + token_ids_0
179
+
180
+ if token_ids_1 is not None:
181
+ output = output + token_ids_1
182
+
183
+ if self.add_eos_token:
184
+ output = output + [self.eos_token_id]
185
+
186
+ return output
187
+
188
+ def get_special_tokens_mask(
189
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
190
+ ) -> List[int]:
191
+ """
192
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
193
+ special tokens using the tokenizer `prepare_for_model` method.
194
+
195
+ Args:
196
+ token_ids_0 (`List[int]`):
197
+ List of IDs.
198
+ token_ids_1 (`List[int]`, *optional*):
199
+ Optional second list of IDs for sequence pairs.
200
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
201
+ Whether or not the token list is already formatted with special tokens for the model.
202
+
203
+ Returns:
204
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
205
+ """
206
+ if already_has_special_tokens:
207
+ return super().get_special_tokens_mask(
208
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
209
+ )
210
+
211
+ if token_ids_1 is None:
212
+ return [1] + ([0] * len(token_ids_0)) + [1]
213
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
214
+
215
+ def create_token_type_ids_from_sequences(
216
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
217
+ ) -> List[int]:
218
+ """
219
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM2 does not make
220
+ use of token type ids, therefore a list of zeros is returned.
221
+
222
+ Args:
223
+ token_ids_0 (`List[int]`):
224
+ List of IDs.
225
+ token_ids_1 (`List[int]`, *optional*):
226
+ Optional second list of IDs for sequence pairs.
227
+
228
+ Returns:
229
+ `List[int]`: List of zeros.
230
+ """
231
+ eos = [self.eos_token_id]
232
+
233
+ if token_ids_1 is None:
234
+ return len(token_ids_0 + eos) * [0]
235
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
isolated/sim_greedy/upstream_sgl/internvl/model/internlm2/tokenization_internlm2_fast.py ADDED
@@ -0,0 +1,211 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama_fast.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Tokenization Fast class for InternLM."""
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, Optional, Tuple
21
+
22
+ from tokenizers import Tokenizer, decoders, normalizers, processors
23
+ from tokenizers.models import BPE
24
+ from transformers.convert_slow_tokenizer import (SLOW_TO_FAST_CONVERTERS,
25
+ SentencePieceExtractor,
26
+ SpmConverter)
27
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
28
+ from transformers.utils import logging
29
+
30
+ from .tokenization_internlm2 import InternLM2Tokenizer
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
35
+
36
+
37
+ # Modified from transformers.convert_slow_tokenizer.LlamaConverter
38
+ class InternLM2Converter(SpmConverter):
39
+ handle_byte_fallback = True
40
+
41
+ def vocab(self, proto):
42
+ vocab = [
43
+ ('<unk>', 0.0),
44
+ ('<s>', 0.0),
45
+ ('</s>', 0.0),
46
+ ]
47
+ vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
48
+ return vocab
49
+
50
+ def unk_id(self, proto):
51
+ unk_id = 0
52
+ return unk_id
53
+
54
+ def decoder(self, replacement, add_prefix_space):
55
+ return decoders.Sequence(
56
+ [
57
+ decoders.Replace('▁', ' '),
58
+ decoders.ByteFallback(),
59
+ decoders.Fuse(),
60
+ decoders.Strip(content=' ', left=1),
61
+ ]
62
+ )
63
+
64
+ def tokenizer(self, proto):
65
+ model_type = proto.trainer_spec.model_type
66
+ vocab_scores = self.vocab(proto)
67
+ # special tokens
68
+ added_tokens = self.original_tokenizer.added_tokens_decoder
69
+ for i in range(len(vocab_scores)):
70
+ piece, score = vocab_scores[i]
71
+ if i in added_tokens:
72
+ vocab_scores[i] = (added_tokens[i].content, score)
73
+ if model_type == 1:
74
+ raise RuntimeError('InternLM2 is supposed to be a BPE model!')
75
+
76
+ elif model_type == 2:
77
+ _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
78
+ bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
79
+ tokenizer = Tokenizer(
80
+ BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True)
81
+ )
82
+ tokenizer.add_special_tokens(
83
+ [ added_token for index, added_token in added_tokens.items()]
84
+ )
85
+ else:
86
+ raise Exception(
87
+ "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
88
+ )
89
+
90
+ return tokenizer
91
+
92
+ def normalizer(self, proto):
93
+ normalizers_list = []
94
+ if proto.normalizer_spec.add_dummy_prefix:
95
+ normalizers_list.append(normalizers.Prepend(prepend='▁'))
96
+ normalizers_list.append(normalizers.Replace(pattern=' ', content='▁'))
97
+ return normalizers.Sequence(normalizers_list)
98
+
99
+ def pre_tokenizer(self, replacement, add_prefix_space):
100
+ return None
101
+
102
+
103
+ SLOW_TO_FAST_CONVERTERS['InternLM2Tokenizer'] = InternLM2Converter
104
+
105
+
106
+ # Modified from transformers.model.llama.tokenization_llama_fast.LlamaTokenizerFast -> InternLM2TokenizerFast
107
+ class InternLM2TokenizerFast(PreTrainedTokenizerFast):
108
+ vocab_files_names = VOCAB_FILES_NAMES
109
+ slow_tokenizer_class = InternLM2Tokenizer
110
+ padding_side = 'left'
111
+ model_input_names = ['input_ids', 'attention_mask']
112
+ _auto_class = 'AutoTokenizer'
113
+
114
+ def __init__(
115
+ self,
116
+ vocab_file,
117
+ unk_token='<unk>',
118
+ bos_token='<s>',
119
+ eos_token='</s>',
120
+ pad_token='</s>',
121
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
122
+ add_bos_token=True,
123
+ add_eos_token=False,
124
+ decode_with_prefix_space=False,
125
+ clean_up_tokenization_spaces=False,
126
+ **kwargs,
127
+ ):
128
+ super().__init__(
129
+ vocab_file=vocab_file,
130
+ unk_token=unk_token,
131
+ bos_token=bos_token,
132
+ eos_token=eos_token,
133
+ pad_token=pad_token,
134
+ sp_model_kwargs=sp_model_kwargs,
135
+ add_bos_token=add_bos_token,
136
+ add_eos_token=add_eos_token,
137
+ decode_with_prefix_space=decode_with_prefix_space,
138
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
139
+ **kwargs,
140
+ )
141
+ self._add_bos_token = add_bos_token
142
+ self._add_eos_token = add_eos_token
143
+ self.update_post_processor()
144
+ self.vocab_file = vocab_file
145
+
146
+ @property
147
+ def can_save_slow_tokenizer(self) -> bool:
148
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
149
+
150
+ def update_post_processor(self):
151
+ """
152
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
153
+ """
154
+ bos = self.bos_token
155
+ bos_token_id = self.bos_token_id
156
+ if bos is None and self.add_bos_token:
157
+ raise ValueError('add_bos_token = True but bos_token = None')
158
+
159
+ eos = self.eos_token
160
+ eos_token_id = self.eos_token_id
161
+ if eos is None and self.add_eos_token:
162
+ raise ValueError('add_eos_token = True but eos_token = None')
163
+
164
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
165
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
166
+
167
+ special_tokens = []
168
+ if self.add_bos_token:
169
+ special_tokens.append((bos, bos_token_id))
170
+ if self.add_eos_token:
171
+ special_tokens.append((eos, eos_token_id))
172
+ self._tokenizer.post_processor = processors.TemplateProcessing(
173
+ single=single, pair=pair, special_tokens=special_tokens
174
+ )
175
+
176
+ @property
177
+ def add_eos_token(self):
178
+ return self._add_eos_token
179
+
180
+ @property
181
+ def add_bos_token(self):
182
+ return self._add_bos_token
183
+
184
+ @add_eos_token.setter
185
+ def add_eos_token(self, value):
186
+ self._add_eos_token = value
187
+ self.update_post_processor()
188
+
189
+ @add_bos_token.setter
190
+ def add_bos_token(self, value):
191
+ self._add_bos_token = value
192
+ self.update_post_processor()
193
+
194
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
195
+ if not self.can_save_slow_tokenizer:
196
+ raise ValueError(
197
+ 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
198
+ 'tokenizer.'
199
+ )
200
+
201
+ if not os.path.isdir(save_directory):
202
+ logger.error(f'Vocabulary path ({save_directory}) should be a directory')
203
+ return
204
+ out_vocab_file = os.path.join(
205
+ save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
206
+ )
207
+
208
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
209
+ copyfile(self.vocab_file, out_vocab_file)
210
+
211
+ return (out_vocab_file,)
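A minimal usage sketch of the `add_eos_token` setter defined above (the checkpoint path is a placeholder; in practice the class is loaded through `AutoTokenizer` with `trust_remote_code=True`):

from transformers import AutoTokenizer

# Placeholder path: any InternLM2-based checkpoint that ships this fast tokenizer.
tok = AutoTokenizer.from_pretrained('path/to/checkpoint', trust_remote_code=True, use_fast=True)
ids = tok('hello world')['input_ids']        # starts with bos_token_id (add_bos_token=True by default)

tok.add_eos_token = True                     # setter rebuilds the TemplateProcessing post-processor
ids_eos = tok('hello world')['input_ids']    # now also ends with eos_token_id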
isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ from .configuration_intern_vit import InternVisionConfig
8
+ from .configuration_internvl_chat import InternVLChatConfig
9
+ from .modeling_intern_vit import InternVisionModel
10
+ from .modeling_internvl_chat import InternVLChatModel
11
+
12
+ __all__ = ['InternVisionConfig', 'InternVisionModel',
13
+ 'InternVLChatConfig', 'InternVLChatModel']
isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_intern_vit.py ADDED
@@ -0,0 +1,119 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+ import os
7
+ from typing import Union
8
+
9
+ from transformers.configuration_utils import PretrainedConfig
10
+ from transformers.utils import logging
11
+
12
+ logger = logging.get_logger(__name__)
13
+
14
+
15
+ class InternVisionConfig(PretrainedConfig):
16
+ r"""
17
+ This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
18
+ instantiate a vision encoder according to the specified arguments, defining the model architecture.
19
+
20
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
21
+ documentation from [`PretrainedConfig`] for more information.
22
+
23
+ Args:
24
+ num_channels (`int`, *optional*, defaults to 3):
25
+ Number of color channels in the input images (e.g., 3 for RGB).
26
+ patch_size (`int`, *optional*, defaults to 14):
27
+ The size (resolution) of each patch.
28
+ image_size (`int`, *optional*, defaults to 224):
29
+ The size (resolution) of each image.
30
+ qkv_bias (`bool`, *optional*, defaults to `False`):
31
+ Whether to add a bias to the queries and values in the self-attention layers.
32
+ hidden_size (`int`, *optional*, defaults to 3200):
33
+ Dimensionality of the encoder layers and the pooler layer.
34
+ num_attention_heads (`int`, *optional*, defaults to 25):
35
+ Number of attention heads for each attention layer in the Transformer encoder.
36
+ intermediate_size (`int`, *optional*, defaults to 12800):
37
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
38
+ qk_normalization (`bool`, *optional*, defaults to `True`):
39
+ Whether to normalize the queries and keys in the self-attention layers.
40
+ num_hidden_layers (`int`, *optional*, defaults to 48):
41
+ Number of hidden layers in the Transformer encoder.
42
+ use_flash_attn (`bool`, *optional*, defaults to `True`):
43
+ Whether to use flash attention mechanism.
44
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
45
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
46
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
47
+ layer_norm_eps (`float`, *optional*, defaults to 1e-6):
48
+ The epsilon used by the layer normalization layers.
49
+ dropout (`float`, *optional*, defaults to 0.0):
50
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
51
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
52
+ Dropout rate for stochastic depth.
53
+ attention_dropout (`float`, *optional*, defaults to 0.0):
54
+ The dropout ratio for the attention probabilities.
55
+ initializer_range (`float`, *optional*, defaults to 0.02):
56
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
57
+ initializer_factor (`float`, *optional*, defaults to 0.1):
58
+ A factor for layer scale.
59
+ """
60
+
61
+ model_type = 'intern_vit_6b'
62
+
63
+ def __init__(
64
+ self,
65
+ num_channels=3,
66
+ patch_size=14,
67
+ image_size=224,
68
+ qkv_bias=False,
69
+ hidden_size=3200,
70
+ num_attention_heads=25,
71
+ intermediate_size=12800,
72
+ qk_normalization=True,
73
+ num_hidden_layers=48,
74
+ use_flash_attn=True,
75
+ hidden_act='gelu',
76
+ norm_type='rms_norm',
77
+ layer_norm_eps=1e-6,
78
+ dropout=0.0,
79
+ drop_path_rate=0.0,
80
+ attention_dropout=0.0,
81
+ initializer_range=0.02,
82
+ initializer_factor=0.1,
83
+ **kwargs,
84
+ ):
85
+ super().__init__(**kwargs)
86
+
87
+ self.hidden_size = hidden_size
88
+ self.intermediate_size = intermediate_size
89
+ self.dropout = dropout
90
+ self.drop_path_rate = drop_path_rate
91
+ self.num_hidden_layers = num_hidden_layers
92
+ self.num_attention_heads = num_attention_heads
93
+ self.num_channels = num_channels
94
+ self.patch_size = patch_size
95
+ self.image_size = image_size
96
+ self.initializer_range = initializer_range
97
+ self.initializer_factor = initializer_factor
98
+ self.attention_dropout = attention_dropout
99
+ self.layer_norm_eps = layer_norm_eps
100
+ self.hidden_act = hidden_act
101
+ self.norm_type = norm_type
102
+ self.qkv_bias = qkv_bias
103
+ self.qk_normalization = qk_normalization
104
+ self.use_flash_attn = use_flash_attn
105
+
106
+ @classmethod
107
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
108
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
109
+
110
+ if 'vision_config' in config_dict:
111
+ config_dict = config_dict['vision_config']
112
+
113
+ if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
114
+ logger.warning(
115
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
116
+ f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
117
+ )
118
+
119
+ return cls.from_dict(config_dict, **kwargs)
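For orientation, the patch grid implied by this config follows directly from its fields; a small sketch (the 448px image size is a hypothetical example, not a value taken from this file):

cfg = InternVisionConfig(image_size=448, patch_size=14)
num_patches = (cfg.image_size // cfg.patch_size) ** 2    # 32 * 32 = 1024 patch tokens
seq_len = num_patches + 1                                # plus one CLS token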
isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/configuration_internvl_chat.py ADDED
@@ -0,0 +1,106 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import copy
8
+
9
+ from internvl.model.internlm2.configuration_internlm2 import InternLM2Config
10
+ from internvl.model.phi3.configuration_phi3 import Phi3Config
11
+ from internvl.model.llama.configuration_llama import LlamaConfig
12
+ from transformers import AutoConfig, Qwen2Config
13
+
14
+ from transformers.configuration_utils import PretrainedConfig
15
+ from transformers.utils import logging
16
+
17
+ from .configuration_intern_vit import InternVisionConfig
18
+
19
+ logger = logging.get_logger(__name__)
20
+
21
+
22
+ class InternVLChatConfig(PretrainedConfig):
23
+ model_type = 'internvl_chat'
24
+ is_composition = True
25
+
26
+ def __init__(
27
+ self,
28
+ vision_config=None,
29
+ llm_config=None,
30
+ use_backbone_lora=0,
31
+ use_llm_lora=0,
32
+ pad2square=False,
33
+ select_layer=-1,
34
+ force_image_size=None,
35
+ downsample_ratio=0.5,
36
+ template=None,
37
+ dynamic_image_size=False,
38
+ use_thumbnail=False,
39
+ ps_version='v1',
40
+ min_dynamic_patch=1,
41
+ max_dynamic_patch=6,
42
+ **kwargs):
43
+ super().__init__(**kwargs)
44
+
45
+ if vision_config is None:
46
+ vision_config = {}
47
+ logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
48
+
49
+ if llm_config is None:
50
+ llm_config = {}
51
+ logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
52
+
53
+ self.vision_config = InternVisionConfig(**vision_config)
54
+ if llm_config['architectures'][0] == 'LlamaForCausalLM':
55
+ self.llm_config = LlamaConfig(**llm_config)
56
+ elif llm_config['architectures'][0] == 'InternLM2ForCausalLM':
57
+ self.llm_config = InternLM2Config(**llm_config)
58
+ elif llm_config['architectures'][0] == 'Phi3ForCausalLM':
59
+ self.llm_config = Phi3Config(**llm_config)
60
+ elif llm_config['architectures'][0] == 'Qwen2ForCausalLM':
61
+ self.llm_config = Qwen2Config(**llm_config)
62
+ else:
63
+ raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
64
+ self.use_backbone_lora = use_backbone_lora
65
+ self.use_llm_lora = use_llm_lora
66
+ self.pad2square = pad2square
67
+ self.select_layer = select_layer
68
+ self.force_image_size = force_image_size
69
+ self.downsample_ratio = downsample_ratio
70
+ self.template = template
71
+ self.dynamic_image_size = dynamic_image_size
72
+ self.use_thumbnail = use_thumbnail
73
+ self.ps_version = ps_version # pixel shuffle version
74
+ self.min_dynamic_patch = min_dynamic_patch
75
+ self.max_dynamic_patch = max_dynamic_patch
76
+
77
+ logger.info(f'vision_select_layer: {self.select_layer}')
78
+ logger.info(f'ps_version: {self.ps_version}')
79
+ logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
80
+ logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')
81
+
82
+ def to_dict(self):
83
+ """
84
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
85
+
86
+ Returns:
87
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
88
+ """
89
+ output = copy.deepcopy(self.__dict__)
90
+ output['vision_config'] = self.vision_config.to_dict()
91
+ output['llm_config'] = self.llm_config.to_dict()
92
+ output['model_type'] = self.__class__.model_type
93
+ output['use_backbone_lora'] = self.use_backbone_lora
94
+ output['use_llm_lora'] = self.use_llm_lora
95
+ output['pad2square'] = self.pad2square
96
+ output['select_layer'] = self.select_layer
97
+ output['force_image_size'] = self.force_image_size
98
+ output['downsample_ratio'] = self.downsample_ratio
99
+ output['template'] = self.template
100
+ output['dynamic_image_size'] = self.dynamic_image_size
101
+ output['use_thumbnail'] = self.use_thumbnail
102
+ output['ps_version'] = self.ps_version
103
+ output['min_dynamic_patch'] = self.min_dynamic_patch
104
+ output['max_dynamic_patch'] = self.max_dynamic_patch
105
+
106
+ return output
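Note that even when `llm_config` is None, the constructor above still ends up indexing `llm_config['architectures']`, so an `architectures` entry must in practice always be supplied. A minimal sketch (the nested dicts are deliberately abbreviated; real checkpoint configs carry many more fields):

# Hypothetical, abbreviated config dicts for illustration only.
llm_cfg = {'architectures': ['Qwen2ForCausalLM']}
chat_cfg = InternVLChatConfig(vision_config={}, llm_config=llm_cfg)
print(type(chat_cfg.llm_config).__name__)    # Qwen2Config, chosen from the 'architectures' entry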
isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/flash_attention.py ADDED
@@ -0,0 +1,76 @@
1
+ # https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py
2
+ import torch
3
+ import torch.nn as nn
4
+ from einops import rearrange
5
+
6
+ try: # v1
7
+ from flash_attn.flash_attn_interface import \
8
+ flash_attn_unpadded_qkvpacked_func
9
+ except: # v2
10
+ from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
11
+
12
+ from flash_attn.bert_padding import pad_input, unpad_input
13
+
14
+
15
+ class FlashAttention(nn.Module):
16
+ """Implement the scaled dot product attention with softmax.
17
+ Arguments
18
+ ---------
19
+ softmax_scale: The temperature to use for the softmax attention.
20
+ (default: 1/sqrt(d_keys) where d_keys is computed at
21
+ runtime)
22
+ attention_dropout: The dropout rate to apply to the attention
23
+ (default: 0.0)
24
+ """
25
+
26
+ def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
27
+ super().__init__()
28
+ self.softmax_scale = softmax_scale
29
+ self.dropout_p = attention_dropout
30
+
31
+ def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
32
+ max_s=None, need_weights=False):
33
+ """Implements the multihead softmax attention.
34
+ Arguments
35
+ ---------
36
+ qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
37
+ if unpadded: (nnz, 3, h, d)
38
+ key_padding_mask: a bool tensor of shape (B, S)
39
+ """
40
+ assert not need_weights
41
+ assert qkv.dtype in [torch.float16, torch.bfloat16]
42
+ assert qkv.is_cuda
43
+
44
+ if cu_seqlens is None:
45
+ batch_size = qkv.shape[0]
46
+ seqlen = qkv.shape[1]
47
+ if key_padding_mask is None:
48
+ qkv = rearrange(qkv, 'b s ... -> (b s) ...')
49
+ max_s = seqlen
50
+ cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
51
+ device=qkv.device)
52
+ output = flash_attn_unpadded_qkvpacked_func(
53
+ qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
54
+ softmax_scale=self.softmax_scale, causal=causal
55
+ )
56
+ output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
57
+ else:
58
+ nheads = qkv.shape[-2]
59
+ x = rearrange(qkv, 'b s three h d -> b s (three h d)')
60
+ x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
61
+ x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
62
+ output_unpad = flash_attn_unpadded_qkvpacked_func(
63
+ x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
64
+ softmax_scale=self.softmax_scale, causal=causal
65
+ )
66
+ output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
67
+ indices, batch_size, seqlen),
68
+ 'b s (h d) -> b s h d', h=nheads)
69
+ else:
70
+ assert max_s is not None
71
+ output = flash_attn_unpadded_qkvpacked_func(
72
+ qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
73
+ softmax_scale=self.softmax_scale, causal=causal
74
+ )
75
+
76
+ return output, None
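A shape-level sketch of how this wrapper is invoked (assumes a CUDA device with flash-attn installed; the dimensions are arbitrary examples, not values from this repository):

import torch

attn = FlashAttention(attention_dropout=0.0)
qkv = torch.randn(2, 1025, 3, 16, 64, dtype=torch.float16, device='cuda')  # (B, S, 3, heads, head_dim)
out, _ = attn(qkv, causal=False)                                            # out: (B, S, heads, head_dim)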
isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_intern_vit.py ADDED
@@ -0,0 +1,362 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+ from typing import Optional, Tuple, Union
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ import torch.utils.checkpoint
11
+ from einops import rearrange
12
+ from timm.models.layers import DropPath
13
+ from torch import nn
14
+ from transformers.activations import ACT2FN
15
+ from transformers.modeling_outputs import (BaseModelOutput,
16
+ BaseModelOutputWithPooling)
17
+ from transformers.modeling_utils import PreTrainedModel
18
+ from transformers.utils import logging
19
+
20
+ from .configuration_intern_vit import InternVisionConfig
21
+
22
+ try:
23
+ from .flash_attention import FlashAttention
24
+ has_flash_attn = True
25
+ except:
26
+ print('FlashAttention is not installed.')
27
+ has_flash_attn = False
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+
32
+ class InternRMSNorm(nn.Module):
33
+ def __init__(self, hidden_size, eps=1e-6):
34
+ super().__init__()
35
+ self.weight = nn.Parameter(torch.ones(hidden_size))
36
+ self.variance_epsilon = eps
37
+
38
+ def forward(self, hidden_states):
39
+ input_dtype = hidden_states.dtype
40
+ hidden_states = hidden_states.to(torch.float32)
41
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
42
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
43
+ return self.weight * hidden_states.to(input_dtype)
44
+
45
+
46
+ try:
47
+ from apex.normalization import FusedRMSNorm
48
+
49
+ InternRMSNorm = FusedRMSNorm # noqa
50
+
51
+ logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
52
+ except ImportError:
53
+ # using the normal InternRMSNorm
54
+ pass
55
+ except Exception:
56
+ logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
57
+ pass
58
+
59
+
60
+ NORM2FN = {
61
+ 'rms_norm': InternRMSNorm,
62
+ 'layer_norm': nn.LayerNorm,
63
+ }
64
+
65
+
66
+ class InternVisionEmbeddings(nn.Module):
67
+ def __init__(self, config: InternVisionConfig):
68
+ super().__init__()
69
+ self.config = config
70
+ self.embed_dim = config.hidden_size
71
+ self.image_size = config.image_size
72
+ self.patch_size = config.patch_size
73
+
74
+ self.class_embedding = nn.Parameter(
75
+ torch.randn(1, 1, self.embed_dim),
76
+ )
77
+
78
+ self.patch_embedding = nn.Conv2d(
79
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
80
+ )
81
+
82
+ self.num_patches = (self.image_size // self.patch_size) ** 2
83
+ self.num_positions = self.num_patches + 1
84
+
85
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
86
+
87
+ def _get_pos_embed(self, pos_embed, H, W):
88
+ target_dtype = pos_embed.dtype
89
+ pos_embed = pos_embed.float().reshape(
90
+ 1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
91
+ pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
92
+ reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
93
+ return pos_embed
94
+
95
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
96
+ target_dtype = self.patch_embedding.weight.dtype
97
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [*, channel, width, height]
98
+ batch_size, _, height, width = patch_embeds.shape
99
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
100
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
101
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
102
+ position_embedding = torch.cat([
103
+ self.position_embedding[:, :1, :],
104
+ self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
105
+ ], dim=1)
106
+ embeddings = embeddings + position_embedding.to(target_dtype)
107
+ return embeddings
108
+
109
+
110
+ class InternAttention(nn.Module):
111
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
112
+
113
+ def __init__(self, config: InternVisionConfig):
114
+ super().__init__()
115
+ self.config = config
116
+ self.embed_dim = config.hidden_size
117
+ self.num_heads = config.num_attention_heads
118
+ self.use_flash_attn = config.use_flash_attn and has_flash_attn
119
+ if config.use_flash_attn and not has_flash_attn:
120
+ print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
121
+ self.head_dim = self.embed_dim // self.num_heads
122
+ if self.head_dim * self.num_heads != self.embed_dim:
123
+ raise ValueError(
124
+ f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
125
+ f' {self.num_heads}).'
126
+ )
127
+
128
+ self.scale = self.head_dim ** -0.5
129
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
130
+ self.attn_drop = nn.Dropout(config.attention_dropout)
131
+ self.proj_drop = nn.Dropout(config.dropout)
132
+
133
+ self.qk_normalization = config.qk_normalization
134
+
135
+ if self.qk_normalization:
136
+ self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
137
+ self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
138
+
139
+ if self.use_flash_attn:
140
+ self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
141
+ self.proj = nn.Linear(self.embed_dim, self.embed_dim)
142
+
143
+ def _naive_attn(self, x):
144
+ B, N, C = x.shape
145
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
146
+ q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
147
+
148
+ if self.qk_normalization:
149
+ B_, H_, N_, D_ = q.shape
150
+ q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
151
+ k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
152
+
153
+ attn = ((q * self.scale) @ k.transpose(-2, -1))
154
+ attn = attn.softmax(dim=-1)
155
+ attn = self.attn_drop(attn)
156
+
157
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
158
+ x = self.proj(x)
159
+ x = self.proj_drop(x)
160
+ return x
161
+
162
+ def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
163
+ qkv = self.qkv(x)
164
+ qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
165
+
166
+ if self.qk_normalization:
167
+ q, k, v = qkv.unbind(2)
168
+ q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
169
+ k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
170
+ qkv = torch.stack([q, k, v], dim=2)
171
+
172
+ context, _ = self.inner_attn(
173
+ qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
174
+ )
175
+ outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
176
+ outs = self.proj_drop(outs)
177
+ return outs
178
+
179
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
180
+ x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
181
+ return x
182
+
183
+
184
+ class InternMLP(nn.Module):
185
+ def __init__(self, config: InternVisionConfig):
186
+ super().__init__()
187
+ self.config = config
188
+ self.act = ACT2FN[config.hidden_act]
189
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
190
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
191
+
192
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
193
+ hidden_states = self.fc1(hidden_states)
194
+ hidden_states = self.act(hidden_states)
195
+ hidden_states = self.fc2(hidden_states)
196
+ return hidden_states
197
+
198
+
199
+ class InternVisionEncoderLayer(nn.Module):
200
+ def __init__(self, config: InternVisionConfig, drop_path_rate: float):
201
+ super().__init__()
202
+ self.embed_dim = config.hidden_size
203
+ self.intermediate_size = config.intermediate_size
204
+ self.norm_type = config.norm_type
205
+
206
+ self.attn = InternAttention(config)
207
+ self.mlp = InternMLP(config)
208
+ self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
209
+ self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
210
+
211
+ self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
212
+ self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
213
+ self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
214
+ self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
215
+
216
+ def forward(
217
+ self,
218
+ hidden_states: torch.Tensor,
219
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
220
+ """
221
+ Args:
222
+ hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
223
+ """
224
+ hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)
225
+
226
+ hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)
227
+
228
+ return hidden_states
229
+
230
+
231
+ class InternVisionEncoder(nn.Module):
232
+ """
233
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
234
+ [`InternEncoderLayer`].
235
+
236
+ Args:
237
+ config (`InternConfig`):
238
+ The corresponding vision configuration for the `InternEncoder`.
239
+ """
240
+
241
+ def __init__(self, config: InternVisionConfig):
242
+ super().__init__()
243
+ self.config = config
244
+ # stochastic depth decay rule
245
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
246
+ self.layers = nn.ModuleList([
247
+ InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
248
+ self.gradient_checkpointing = True
249
+
250
+ def forward(
251
+ self,
252
+ inputs_embeds,
253
+ output_hidden_states: Optional[bool] = None,
254
+ return_dict: Optional[bool] = None,
255
+ ) -> Union[Tuple, BaseModelOutput]:
256
+ r"""
257
+ Args:
258
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
259
+ Embedded representation of the inputs. Should be float, not int tokens.
260
+ output_hidden_states (`bool`, *optional*):
261
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
262
+ for more detail.
263
+ return_dict (`bool`, *optional*):
264
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
265
+ """
266
+ output_hidden_states = (
267
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
268
+ )
269
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
270
+
271
+ encoder_states = () if output_hidden_states else None
272
+ hidden_states = inputs_embeds
273
+
274
+ for idx, encoder_layer in enumerate(self.layers):
275
+ if output_hidden_states:
276
+ encoder_states = encoder_states + (hidden_states,)
277
+ if self.gradient_checkpointing and self.training:
278
+ layer_outputs = torch.utils.checkpoint.checkpoint(
279
+ encoder_layer,
280
+ hidden_states)
281
+ else:
282
+ layer_outputs = encoder_layer(
283
+ hidden_states,
284
+ )
285
+ hidden_states = layer_outputs
286
+
287
+ if output_hidden_states:
288
+ encoder_states = encoder_states + (hidden_states,)
289
+
290
+ if not return_dict:
291
+ return tuple(v for v in [hidden_states, encoder_states] if v is not None)
292
+ return BaseModelOutput(
293
+ last_hidden_state=hidden_states, hidden_states=encoder_states
294
+ )
295
+
296
+
297
+ class InternVisionModel(PreTrainedModel):
298
+ main_input_name = 'pixel_values'
299
+ config_class = InternVisionConfig
300
+ _no_split_modules = ['InternVisionEncoderLayer']
301
+
302
+ def __init__(self, config: InternVisionConfig):
303
+ super().__init__(config)
304
+ self.config = config
305
+
306
+ self.embeddings = InternVisionEmbeddings(config)
307
+ self.encoder = InternVisionEncoder(config)
308
+
309
+ def resize_pos_embeddings(self, old_size, new_size, patch_size):
310
+ pos_emb = self.embeddings.position_embedding
311
+ _, num_positions, embed_dim = pos_emb.shape
312
+ cls_emb = pos_emb[:, :1, :]
313
+ pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
314
+ pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
315
+ pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
316
+ pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
317
+ self.embeddings.position_embedding = nn.Parameter(pos_emb)
318
+ self.embeddings.image_size = new_size
319
+ logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
320
+
321
+ def get_input_embeddings(self):
322
+ return self.embeddings
323
+
324
+ def forward(
325
+ self,
326
+ pixel_values: Optional[torch.FloatTensor] = None,
327
+ output_hidden_states: Optional[bool] = None,
328
+ return_dict: Optional[bool] = None,
329
+ pixel_embeds: Optional[torch.FloatTensor] = None,
330
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
331
+ output_hidden_states = (
332
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
333
+ )
334
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
335
+
336
+ if pixel_values is None and pixel_embeds is None:
337
+ raise ValueError('You have to specify pixel_values or pixel_embeds')
338
+
339
+ if pixel_embeds is not None:
340
+ hidden_states = pixel_embeds
341
+ else:
342
+ if len(pixel_values.shape) == 4:
343
+ hidden_states = self.embeddings(pixel_values)
344
+ else:
345
+ raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
346
+ encoder_outputs = self.encoder(
347
+ inputs_embeds=hidden_states,
348
+ output_hidden_states=output_hidden_states,
349
+ return_dict=return_dict,
350
+ )
351
+ last_hidden_state = encoder_outputs.last_hidden_state
352
+ pooled_output = last_hidden_state[:, 0, :]
353
+
354
+ if not return_dict:
355
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
356
+
357
+ return BaseModelOutputWithPooling(
358
+ last_hidden_state=last_hidden_state,
359
+ pooler_output=pooled_output,
360
+ hidden_states=encoder_outputs.hidden_states,
361
+ attentions=encoder_outputs.attentions,
362
+ )
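A runnable sketch of the encoder's forward pass with a deliberately tiny, hypothetical config (real checkpoints use the defaults in configuration_intern_vit.py; flash attention is disabled so this runs on CPU):

import torch

cfg = InternVisionConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128,
                         num_hidden_layers=2, image_size=56, patch_size=14, use_flash_attn=False)
model = InternVisionModel(cfg).eval()
pixels = torch.randn(1, 3, 56, 56)
with torch.no_grad():
    out = model(pixel_values=pixels)
print(out.last_hidden_state.shape)    # (1, 17, 64): (56 // 14) ** 2 = 16 patch tokens + 1 CLS token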
isolated/sim_greedy/upstream_sgl/internvl/model/internvl_chat/modeling_internvl_chat.py ADDED
@@ -0,0 +1,506 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+ import warnings
7
+ from typing import Any, List, Optional, Tuple, Union
8
+
9
+ import torch.distributed as dist
10
+ import torch.utils.checkpoint
11
+ import transformers
12
+ from internvl.conversation import get_conv_template
13
+ from internvl.model.internlm2.modeling_internlm2 import InternLM2ForCausalLM
14
+ from internvl.model.phi3.modeling_phi3 import Phi3ForCausalLM
15
+ from internvl.model.llama.modeling_llama import LlamaForCausalLM
16
+ from internvl.model.qwen2.modeling_qwen2 import Qwen2ForCausalLM
17
+
18
+ from peft import LoraConfig, get_peft_model
19
+ from torch import nn
20
+ from torch.nn import CrossEntropyLoss
21
+ from transformers import (AutoModel, GenerationConfig)
22
+ from transformers.modeling_outputs import CausalLMOutputWithPast
23
+ from transformers.modeling_utils import PreTrainedModel
24
+ from transformers.utils import ModelOutput, logging
25
+
26
+ from .configuration_internvl_chat import InternVLChatConfig
27
+ from .modeling_intern_vit import InternVisionModel
28
+ import time
29
+ import torch
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def version_cmp(v1, v2, op='eq'):
36
+ import operator
37
+
38
+ from packaging import version
39
+ op_func = getattr(operator, op)
40
+ return op_func(version.parse(v1), version.parse(v2))
41
+
42
+
43
+ def get_attention_rank(visual_token_index, attentions):
44
+
45
+ # assert visual_token_index.shape[0] == 1 # batchsize = 1
46
+ # visual_token_index = visual_token_index.view(-1).nonzero()
47
+ visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1]
48
+
49
+ attentions = [torch.stack(attention, dim=1) for attention in attentions] # [n l heads tokens, tokens]
50
+
51
+
52
+ visual_token_importance = 0.0
53
+ for i, attn in enumerate(attentions):
54
+ if i == 0:
55
+ visual_token_importance += attn[0].sum(dim=0).sum(dim=0)[visual_end_index+1:, visual_start_index:visual_end_index+1].sum(dim=0)
56
+ else:
57
+ visual_token_importance += attn[0].sum(dim=0).sum(dim=0)[0:1, visual_start_index:visual_end_index+1].sum(dim=0)
58
+
59
+ return visual_token_importance
60
+
61
+
62
+
63
+ class InternVLChatModel(PreTrainedModel):
64
+ config_class = InternVLChatConfig
65
+ main_input_name = 'pixel_values'
66
+ _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'InternLM2DecoderLayer',
67
+ 'Phi3DecoderLayer', 'Qwen2DecoderLayer']
68
+ _supports_flash_attn_2 = True
69
+
70
+ def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None):
71
+ super().__init__(config)
72
+
73
+ assert version_cmp(transformers.__version__, '4.37.0', 'ge')
74
+ image_size = config.force_image_size or config.vision_config.image_size
75
+ patch_size = config.vision_config.patch_size
76
+ self.patch_size = patch_size
77
+ self.select_layer = config.select_layer
78
+ self.template = config.template
79
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
80
+ self.downsample_ratio = config.downsample_ratio
81
+ self.ps_version = config.ps_version
82
+ self.llm_arch_name = config.llm_config.architectures[0]
83
+
84
+ logger.info(f'num_image_token: {self.num_image_token}')
85
+ logger.info(f'ps_version: {self.ps_version}')
86
+ if vision_model is not None:
87
+ self.vision_model = vision_model
88
+ else:
89
+ self.vision_model = InternVisionModel(config.vision_config)
90
+ if language_model is not None:
91
+ self.language_model = language_model
92
+ else:
93
+ if config.llm_config.architectures[0] == 'LlamaForCausalLM':
94
+ self.language_model = LlamaForCausalLM(config.llm_config)
95
+ elif config.llm_config.architectures[0] == 'InternLM2ForCausalLM':
96
+ self.language_model = InternLM2ForCausalLM(config.llm_config)
97
+ elif config.llm_config.architectures[0] == 'Phi3ForCausalLM':
98
+ self.language_model = Phi3ForCausalLM(config.llm_config)
99
+ elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
100
+ self.language_model = Qwen2ForCausalLM(config.llm_config)
101
+ else:
102
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
103
+
104
+ vit_hidden_size = config.vision_config.hidden_size
105
+ llm_hidden_size = config.llm_config.hidden_size
106
+
107
+ self.mlp1 = nn.Sequential(
108
+ nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
109
+ nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
110
+ nn.GELU(),
111
+ nn.Linear(llm_hidden_size, llm_hidden_size)
112
+ )
113
+
114
+ self.img_context_token_id = None
115
+ self.conv_template = get_conv_template(self.template)
116
+ self.system_message = self.conv_template.system_message
117
+ self.num_samples = 0
118
+
119
+ if config.use_backbone_lora:
120
+ self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora)
121
+
122
+ if config.use_llm_lora:
123
+ self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora)
124
+
125
+ def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
126
+ lora_config = LoraConfig(
127
+ r=r,
128
+ target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'],
129
+ lora_alpha=lora_alpha,
130
+ lora_dropout=lora_dropout,
131
+ )
132
+ self.vision_model = get_peft_model(self.vision_model, lora_config)
133
+ self.vision_model.print_trainable_parameters()
134
+
135
+ def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
136
+ # Determine the target modules based on the architecture of the language model
137
+ if self.llm_arch_name == 'InternLM2ForCausalLM':
138
+ target_modules = ['attention.wqkv', 'attention.wo', 'feed_forward.w1', 'feed_forward.w2', 'feed_forward.w3']
139
+ elif self.llm_arch_name == 'Phi3ForCausalLM':
140
+ target_modules = ['mlp.down_proj', 'mlp.gate_up_proj', 'self_attn.o_proj', 'self_attn.qkv_proj']
141
+ elif self.llm_arch_name in ['Qwen2ForCausalLM', 'LlamaForCausalLM']:
142
+ target_modules = ['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj',
143
+ 'mlp.gate_proj', 'mlp.down_proj', 'mlp.up_proj']
144
+ else:
145
+ raise NotImplementedError
146
+ lora_config = LoraConfig(
147
+ r=r,
148
+ target_modules=target_modules,
149
+ lora_alpha=lora_alpha,
150
+ lora_dropout=lora_dropout,
151
+ task_type='CAUSAL_LM'
152
+ )
153
+ self.language_model = get_peft_model(self.language_model, lora_config)
154
+ self.language_model.enable_input_require_grads()
155
+ self.language_model.print_trainable_parameters()
156
+
157
+ def forward(
158
+ self,
159
+ pixel_values: torch.FloatTensor,
160
+ input_ids: torch.LongTensor = None,
161
+ attention_mask: Optional[torch.Tensor] = None,
162
+ position_ids: Optional[torch.LongTensor] = None,
163
+ image_flags: Optional[torch.LongTensor] = None,
164
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
165
+ labels: Optional[torch.LongTensor] = None,
166
+ use_cache: Optional[bool] = None,
167
+ output_attentions: Optional[bool] = None,
168
+ output_hidden_states: Optional[bool] = None,
169
+ return_dict: Optional[bool] = None,
170
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
171
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
172
+
173
+ image_flags = image_flags.squeeze(-1)
174
+ input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()
175
+
176
+ vit_embeds = self.extract_feature(pixel_values)
177
+ vit_embeds = vit_embeds[image_flags == 1]
178
+ vit_batch_size = pixel_values.shape[0]
179
+
180
+ B, N, C = input_embeds.shape
181
+ input_embeds = input_embeds.reshape(B * N, C)
182
+
183
+ if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
184
+ print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')
185
+
186
+ input_ids = input_ids.reshape(B * N)
187
+ selected = (input_ids == self.img_context_token_id)
188
+ try:
189
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
190
+ ignore_flag = False
191
+ except Exception as e:
192
+ vit_embeds = vit_embeds.reshape(-1, C)
193
+ print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
194
+ f'vit_embeds.shape={vit_embeds.shape}')
195
+ n_token = selected.sum()
196
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]
197
+ ignore_flag = True
198
+
199
+ input_embeds = input_embeds.reshape(B, N, C)
200
+
201
+ outputs = self.language_model(
202
+ inputs_embeds=input_embeds,
203
+ attention_mask=attention_mask,
204
+ position_ids=position_ids,
205
+ past_key_values=past_key_values,
206
+ use_cache=use_cache,
207
+ output_attentions=output_attentions,
208
+ output_hidden_states=output_hidden_states,
209
+ return_dict=return_dict,
210
+ )
211
+ logits = outputs.logits
212
+
213
+ loss = None
214
+ if labels is not None:
215
+ # Shift so that tokens < n predict n
216
+ shift_logits = logits[..., :-1, :].contiguous()
217
+ shift_labels = labels[..., 1:].contiguous()
218
+ # Flatten the tokens
219
+ loss_fct = CrossEntropyLoss()
220
+ shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
221
+ shift_labels = shift_labels.view(-1)
222
+ # Enable model parallelism
223
+ shift_labels = shift_labels.to(shift_logits.device)
224
+ loss = loss_fct(shift_logits, shift_labels)
225
+ if ignore_flag:
226
+ loss = loss * 0.0
227
+
228
+ if not return_dict:
229
+ output = (logits,) + outputs[1:]
230
+ return (loss,) + output if loss is not None else output
231
+
232
+ return CausalLMOutputWithPast(
233
+ loss=loss,
234
+ logits=logits,
235
+ past_key_values=outputs.past_key_values,
236
+ hidden_states=outputs.hidden_states,
237
+ attentions=outputs.attentions,
238
+ )
239
+
240
+ def pixel_shuffle(self, x, scale_factor=0.5):
241
+ n, w, h, c = x.size()
242
+ # N, W, H, C --> N, W, H * scale, C // scale
243
+ x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
244
+ # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
245
+ x = x.permute(0, 2, 1, 3).contiguous()
246
+ # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
247
+ x = x.view(n, int(h * scale_factor), int(w * scale_factor),
248
+ int(c / (scale_factor * scale_factor)))
249
+ if self.ps_version == 'v1':
250
+ warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
251
+ 'which results in a transposed image.')
252
+ else:
253
+ x = x.permute(0, 2, 1, 3).contiguous()
254
+ return x
255
+
256
+ def extract_feature(self, pixel_values):
257
+ if self.select_layer == -1:
258
+ vit_embeds = self.vision_model(
259
+ pixel_values=pixel_values,
260
+ output_hidden_states=False,
261
+ return_dict=True).last_hidden_state
262
+ else:
263
+ vit_embeds = self.vision_model(
264
+ pixel_values=pixel_values,
265
+ output_hidden_states=True,
266
+ return_dict=True).hidden_states[self.select_layer]
267
+ vit_embeds = vit_embeds[:, 1:, :]
268
+
269
+ h = w = int(vit_embeds.shape[1] ** 0.5)
270
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
271
+ vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
272
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
273
+ vit_embeds = self.mlp1(vit_embeds)
274
+ return vit_embeds
275
+
276
+ def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
277
+ history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
278
+ IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
279
+ if history is not None or return_history:
280
+ print('Now multi-turn chat is not supported in batch_chat.')
281
+ raise NotImplementedError
282
+
283
+ if image_counts is not None:
284
+ num_patches_list = image_counts
285
+ print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')
286
+
287
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
288
+ self.img_context_token_id = img_context_token_id
289
+
290
+ if verbose and pixel_values is not None:
291
+ image_bs = pixel_values.shape[0]
292
+ print(f'dynamic ViT batch size: {image_bs}')
293
+
294
+ queries = []
295
+ for idx, num_patches in enumerate(num_patches_list):
296
+ question = questions[idx]
297
+ if pixel_values is not None and '<image>' not in question:
298
+ question = '<image>\n' + question
299
+ template = get_conv_template(self.template)
300
+ template.append_message(template.roles[0], question)
301
+ template.append_message(template.roles[1], None)
302
+ query = template.get_prompt()
303
+
304
+ image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
305
+ query = query.replace('<image>', image_tokens, 1)
306
+ queries.append(query)
307
+
308
+ tokenizer.padding_side = 'left'
309
+ model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
310
+ input_ids = model_inputs['input_ids'].cuda()
311
+ attention_mask = model_inputs['attention_mask'].cuda()
312
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
313
+ generation_config['eos_token_id'] = eos_token_id
314
+ generation_output = self.generate(
315
+ pixel_values=pixel_values,
316
+ input_ids=input_ids,
317
+ attention_mask=attention_mask,
318
+ **generation_config
319
+ )
320
+ responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
321
+ responses = [response.split(template.sep)[0].strip() for response in responses]
322
+ return responses
323
+
324
+ def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
325
+ num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
326
+ verbose=False, large_model=False):
327
+
328
+ if history is None and pixel_values is not None and '<image>' not in question:
329
+ question = '<image>\n' + question
330
+
331
+ if num_patches_list is None:
332
+ num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
333
+ assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
334
+
335
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
336
+ self.img_context_token_id = img_context_token_id
337
+
338
+ template = get_conv_template(self.template)
339
+ template.system_message = self.system_message
340
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
341
+
342
+ history = [] if history is None else history
343
+ for (old_question, old_answer) in history:
344
+ template.append_message(template.roles[0], old_question)
345
+ template.append_message(template.roles[1], old_answer)
346
+ template.append_message(template.roles[0], question)
347
+ template.append_message(template.roles[1], None)
348
+ query = template.get_prompt()
349
+
350
+ if verbose and pixel_values is not None:
351
+ image_bs = pixel_values.shape[0]
352
+ print(f'dynamic ViT batch size: {image_bs}')
353
+
354
+ for num_patches in num_patches_list:
355
+ image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
356
+ query = query.replace('<image>', image_tokens, 1)
357
+
358
+ model_inputs = tokenizer(query, return_tensors='pt')
359
+ input_ids = model_inputs['input_ids'].cuda()
360
+ attention_mask = model_inputs['attention_mask'].cuda()
361
+ generation_config['eos_token_id'] = eos_token_id
362
+
363
+
364
+ visual_token_index = (input_ids == self.img_context_token_id)
365
+
366
+ visual_token_index = visual_token_index.view(-1).nonzero()
367
+ visual_start_index, visual_end_index = visual_token_index[0], visual_token_index[-1]
368
+
369
+
370
+ if large_model:
371
+ generation_config["visual_token_index"] = (visual_start_index, visual_end_index)
372
+ assert (visual_end_index - visual_start_index + 1) == generation_config["visual_token_importance"].shape[0]
373
+ else:
374
+        generation_config['consistency_config']["visual_token_index"] = (visual_start_index, visual_end_index)
+
+
+        if not large_model:
+            generation_output, consistency_score, visual_token_importance = self.generate(
+                pixel_values=pixel_values,
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                large_model=large_model,
+                **generation_config
+            )
+
+            response = tokenizer.batch_decode(generation_output['sequences'], skip_special_tokens=True)[0]
+            response = response.split(template.sep)[0].strip()
+            history.append((question, response))
+
+            if return_history:
+                return response, history
+            else:
+                query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
+                query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
+                if verbose:
+                    print(query_to_print, response)
+                return response, generation_output.scores, consistency_score, visual_token_importance
+
+
+        else:
+            generation_output = self.generate(
+                pixel_values=pixel_values,
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                large_model=large_model,
+                **generation_config
+            )
+
+            response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
+            response = response.split(template.sep)[0].strip()
+            history.append((question, response))
+            if return_history:
+                return response, history
+            else:
+                query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
+                query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
+                if verbose:
+                    print(query_to_print, response)
+                return response
+
+
+
+    @torch.no_grad()
+    def generate(
+            self,
+            pixel_values: Optional[torch.FloatTensor] = None,
+            input_ids: Optional[torch.FloatTensor] = None,
+            attention_mask: Optional[torch.LongTensor] = None,
+            visual_features: Optional[torch.FloatTensor] = None,
+            generation_config: Optional[GenerationConfig] = None,
+            output_hidden_states: Optional[bool] = None,
+            return_dict: Optional[bool] = None,
+            large_model: Optional[bool] = False,
+            **generate_kwargs,
+    ) -> torch.LongTensor:
+
+        assert self.img_context_token_id is not None
+        if pixel_values is not None:
+            if visual_features is not None:
+                vit_embeds = visual_features
+            else:
+                vit_embeds = self.extract_feature(pixel_values)
+            input_embeds = self.language_model.get_input_embeddings()(input_ids)
+            B, N, C = input_embeds.shape
+            input_embeds = input_embeds.reshape(B * N, C)
+
+            input_ids = input_ids.reshape(B * N)
+            selected = (input_ids == self.img_context_token_id)
+            assert selected.sum() != 0
+            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
+
+            input_embeds = input_embeds.reshape(B, N, C)
+        else:
+            input_embeds = self.language_model.get_input_embeddings()(input_ids)
+
+
+
+        if not large_model:
+            consistency_generate_kwargs = generate_kwargs.pop('consistency_config')
+            generate_kwargs['visual_token_index'] = consistency_generate_kwargs['visual_token_index']
+            outputs = self.language_model.generate(
+                inputs_embeds=input_embeds,
+                attention_mask=attention_mask,
+                generation_config=generation_config,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+                use_cache=True,
+                **generate_kwargs,
+            )
+
+            visual_token_importance = outputs.aggregated_viusal_token_attention
+            consistency_generate_kwargs['visual_token_importance'] = visual_token_importance
+
+            new_input_ids_ = outputs['sequences'][0]
+            new_token_num = outputs['sequences'].shape[-1]
+            new_input_embedding = torch.concatenate((input_embeds, self.language_model.get_input_embeddings()(new_input_ids_).unsqueeze(0)), dim=1)
+            new_attention_mask = torch.concatenate((attention_mask, torch.ones((1, new_input_ids_.shape[0]), device=attention_mask.device, dtype=attention_mask.dtype)), dim=-1)
+            new_input_ids = torch.concatenate((input_ids, new_input_ids_), dim=-1)
+            consistency_generate_kwargs['inputs_embeds'] = new_input_embedding
+            consistency_generate_kwargs['attention_mask'] = new_attention_mask
+            consistency_generate_kwargs['output_scores'] = False
+            consistency_generate_kwargs['output_attentions'] = False
+            consistency_generate_kwargs = self.language_model._get_initial_cache_position(new_input_ids, consistency_generate_kwargs)
+
+            model_inputs = self.language_model.prepare_inputs_for_generation(new_input_ids, **consistency_generate_kwargs)
+            consistency_output = self.language_model.forward(**model_inputs, return_dict=True)
+            consistency_score = torch.gather(consistency_output['logits'][:, -new_token_num-1:-1, :].softmax(dim=-1), index=new_input_ids_[None, :, None], dim=-1)
+
+            consistency_score = torch.prod(consistency_score)
+
+
+
+            return outputs, consistency_score, visual_token_importance
+
+
+
+        else:
+            return self.language_model.generate(
+                inputs_embeds=input_embeds,
+                attention_mask=attention_mask,
+                generation_config=generation_config,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+                use_cache=True,
+                **generate_kwargs,
+            )
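The small-model branch above scores its own answer by running the prompt plus the freshly generated tokens through the language model a second time, gathering each generated token's probability from the shifted logits, and multiplying them into a single consistency score. Below is a minimal standalone sketch of that scoring step using a plain Hugging Face causal LM; the checkpoint name and the toy prompt/answer are placeholders for illustration, not part of the commit above.

```python
# Hypothetical sketch of the consistency-score computation (not part of the commit).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "sshleifer/tiny-gpt2"  # placeholder test checkpoint (assumption)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).eval()

prompt_ids = tokenizer("Question: what colour is the sky? Answer:", return_tensors="pt").input_ids
answer_ids = tokenizer(" blue", return_tensors="pt", add_special_tokens=False).input_ids

full_ids = torch.cat([prompt_ids, answer_ids], dim=-1)
with torch.no_grad():
    logits = model(full_ids).logits  # (1, seq_len, vocab_size)

# Logits at position t predict token t + 1, so the slice that ends one step before the
# answer tokens scores them; this mirrors logits[:, -n-1:-1, :] in the diff above.
n_ans = answer_ids.shape[-1]
probs = logits[:, -n_ans - 1:-1, :].softmax(dim=-1)
token_probs = torch.gather(probs, dim=-1, index=answer_ids.unsqueeze(-1)).squeeze(-1)
consistency_score = token_probs.prod()  # product of per-token answer probabilities
print(float(consistency_score))
```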
isolated/sim_greedy/upstream_sgl/internvl/model/llama/__init__.py ADDED
@@ -0,0 +1,116 @@
+# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from transformers.utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_flax_available,
+    is_sentencepiece_available,
+    is_tokenizers_available,
+    is_torch_available,
+)
+
+
+_import_structure = {
+    "configuration_llama": ["LlamaConfig"],
+}
+
+try:
+    if not is_sentencepiece_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
+
+try:
+    if not is_tokenizers_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_llama"] = [
+        "LlamaForCausalLM",
+        "LlamaModel",
+        "LlamaPreTrainedModel",
+        "LlamaForSequenceClassification",
+        "LlamaForQuestionAnswering",
+        "LlamaForTokenClassification",
+    ]
+
+try:
+    if not is_flax_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_flax_llama"] = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"]
+
+
+if TYPE_CHECKING:
+    from .configuration_llama import LlamaConfig
+
+    try:
+        if not is_sentencepiece_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .tokenization_llama import LlamaTokenizer
+
+    try:
+        if not is_tokenizers_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .tokenization_llama_fast import LlamaTokenizerFast
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_llama import (
+            LlamaForCausalLM,
+            LlamaForQuestionAnswering,
+            LlamaForSequenceClassification,
+            LlamaForTokenClassification,
+            LlamaModel,
+            LlamaPreTrainedModel,
+        )
+
+    try:
+        if not is_flax_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_flax_llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
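The `__init__.py` above follows the `_LazyModule` pattern used throughout `transformers`: importing the package itself is cheap, and heavy submodules such as `modeling_llama` are only loaded when one of their attributes is first accessed. A minimal usage sketch, assuming the repository root is on `PYTHONPATH` and the optional dependencies (torch, sentencepiece, tokenizers) are installed:

```python
# Hypothetical usage sketch; the dotted package path is taken from the file listing above.
import importlib

llama_pkg = importlib.import_module("isolated.sim_greedy.upstream_sgl.internvl.model.llama")

# The import above is cheap: _LazyModule defers loading the submodules.
LlamaConfig = llama_pkg.LlamaConfig  # first attribute access triggers the lazy import
print(LlamaConfig(num_hidden_layers=2, hidden_size=64))
```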
isolated/sim_greedy/upstream_sgl/internvl/model/llama/configuration_llama.py ADDED
@@ -0,0 +1,203 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """LLaMA model configuration"""
21
+
22
+ from transformers.configuration_utils import PretrainedConfig
23
+ from transformers.modeling_rope_utils import rope_config_validation
24
+
25
+
26
+ class LlamaConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
29
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
30
+ defaults will yield a similar configuration to that of the LLaMA-7B.
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+
36
+ Args:
37
+ vocab_size (`int`, *optional*, defaults to 32000):
38
+ Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
39
+ `inputs_ids` passed when calling [`LlamaModel`]
40
+ hidden_size (`int`, *optional*, defaults to 4096):
41
+ Dimension of the hidden representations.
42
+ intermediate_size (`int`, *optional*, defaults to 11008):
43
+ Dimension of the MLP representations.
44
+ num_hidden_layers (`int`, *optional*, defaults to 32):
45
+ Number of hidden layers in the Transformer decoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 32):
47
+ Number of attention heads for each attention layer in the Transformer decoder.
48
+ num_key_value_heads (`int`, *optional*):
49
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
50
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
51
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
52
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
53
+ by meanpooling all the original heads within that group. For more details checkout [this
54
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
55
+ `num_attention_heads`.
56
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
57
+ The non-linear activation function (function or string) in the decoder.
58
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
59
+ The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
60
+ Llama 2 up to 4096, CodeLlama up to 16384.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ pad_token_id (`int`, *optional*):
69
+ Padding token id.
70
+ bos_token_id (`int`, *optional*, defaults to 1):
71
+ Beginning of stream token id.
72
+ eos_token_id (`int`, *optional*, defaults to 2):
73
+ End of stream token id.
74
+ pretraining_tp (`int`, *optional*, defaults to 1):
75
+ Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
76
+ document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
77
+ understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
78
+ results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
79
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
80
+ Whether to tie weight embeddings
81
+ rope_theta (`float`, *optional*, defaults to 10000.0):
82
+ The base period of the RoPE embeddings.
83
+ rope_scaling (`Dict`, *optional*):
84
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
85
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
86
+ accordingly.
87
+ Expected contents:
88
+ `rope_type` (`str`):
89
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
90
+ 'llama3'], with 'default' being the original RoPE implementation.
91
+ `factor` (`float`, *optional*):
92
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
93
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
94
+ original maximum pre-trained length.
95
+ `original_max_position_embeddings` (`int`, *optional*):
96
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
97
+ pretraining.
98
+ `attention_factor` (`float`, *optional*):
99
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
100
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
101
+ `factor` field to infer the suggested value.
102
+ `beta_fast` (`float`, *optional*):
103
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
104
+ ramp function. If unspecified, it defaults to 32.
105
+ `beta_slow` (`float`, *optional*):
106
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
107
+ ramp function. If unspecified, it defaults to 1.
108
+ `short_factor` (`List[float]`, *optional*):
109
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
110
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
111
+ size divided by the number of attention heads divided by 2
112
+ `long_factor` (`List[float]`, *optional*):
113
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (<
114
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
115
+ size divided by the number of attention heads divided by 2
116
+ `low_freq_factor` (`float`, *optional*):
117
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
118
+ `high_freq_factor` (`float`, *optional*):
119
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
120
+ attention_bias (`bool`, *optional*, defaults to `False`):
121
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
122
+ attention_dropout (`float`, *optional*, defaults to 0.0):
123
+ The dropout ratio for the attention probabilities.
124
+ mlp_bias (`bool`, *optional*, defaults to `False`):
125
+ Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
126
+
127
+ ```python
128
+ >>> from transformers import LlamaModel, LlamaConfig
129
+
130
+ >>> # Initializing a LLaMA llama-7b style configuration
131
+ >>> configuration = LlamaConfig()
132
+
133
+ >>> # Initializing a model from the llama-7b style configuration
134
+ >>> model = LlamaModel(configuration)
135
+
136
+ >>> # Accessing the model configuration
137
+ >>> configuration = model.config
138
+ ```"""
139
+
140
+ model_type = "llama"
141
+ keys_to_ignore_at_inference = ["past_key_values"]
142
+
143
+ def __init__(
144
+ self,
145
+ vocab_size=32000,
146
+ hidden_size=4096,
147
+ intermediate_size=11008,
148
+ num_hidden_layers=32,
149
+ num_attention_heads=32,
150
+ num_key_value_heads=None,
151
+ hidden_act="silu",
152
+ max_position_embeddings=2048,
153
+ initializer_range=0.02,
154
+ rms_norm_eps=1e-6,
155
+ use_cache=True,
156
+ pad_token_id=None,
157
+ bos_token_id=1,
158
+ eos_token_id=2,
159
+ pretraining_tp=1,
160
+ tie_word_embeddings=False,
161
+ rope_theta=10000.0,
162
+ rope_scaling=None,
163
+ attention_bias=False,
164
+ attention_dropout=0.0,
165
+ mlp_bias=False,
166
+ **kwargs,
167
+ ):
168
+ self.vocab_size = vocab_size
169
+ self.max_position_embeddings = max_position_embeddings
170
+ self.hidden_size = hidden_size
171
+ self.intermediate_size = intermediate_size
172
+ self.num_hidden_layers = num_hidden_layers
173
+ self.num_attention_heads = num_attention_heads
174
+
175
+ # for backward compatibility
176
+ if num_key_value_heads is None:
177
+ num_key_value_heads = num_attention_heads
178
+
179
+ self.num_key_value_heads = num_key_value_heads
180
+ self.hidden_act = hidden_act
181
+ self.initializer_range = initializer_range
182
+ self.rms_norm_eps = rms_norm_eps
183
+ self.pretraining_tp = pretraining_tp
184
+ self.use_cache = use_cache
185
+ self.rope_theta = rope_theta
186
+ self.rope_scaling = rope_scaling
187
+ self.attention_bias = attention_bias
188
+ self.attention_dropout = attention_dropout
189
+ self.mlp_bias = mlp_bias
190
+
191
+ # Validate the correctness of rotary position embeddings parameters
192
+ # BC: if there is a 'type' field, move it to 'rope_type'.
193
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
194
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
195
+ rope_config_validation(self)
196
+
197
+ super().__init__(
198
+ pad_token_id=pad_token_id,
199
+ bos_token_id=bos_token_id,
200
+ eos_token_id=eos_token_id,
201
+ tie_word_embeddings=tie_word_embeddings,
202
+ **kwargs,
203
+ )
isolated/sim_greedy/upstream_sgl/internvl/model/llama/convert_llama_weights_to_hf.py ADDED
@@ -0,0 +1,479 @@
1
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import gc
16
+ import json
17
+ import os
18
+ import shutil
19
+ import warnings
20
+ from typing import List
21
+
22
+ import torch
23
+
24
+ from transformers import GenerationConfig, LlamaConfig, LlamaForCausalLM, LlamaTokenizer, PreTrainedTokenizerFast
25
+ from transformers.convert_slow_tokenizer import TikTokenConverter
26
+
27
+
28
+ try:
29
+ from transformers import LlamaTokenizerFast
30
+ except ImportError as e:
31
+ warnings.warn(e)
32
+ warnings.warn(
33
+ "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
34
+ )
35
+ LlamaTokenizerFast = None
36
+
37
+ """
38
+ Sample usage:
39
+
40
+ ```
41
+ python src/transformers/models/llama/convert_llama_weights_to_hf.py \
42
+ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
43
+ ```
44
+
45
+ Thereafter, models can be loaded via:
46
+
47
+ ```py
48
+ from transformers import LlamaForCausalLM, LlamaTokenizer
49
+
50
+ model = LlamaForCausalLM.from_pretrained("/output/path")
51
+ tokenizer = LlamaTokenizer.from_pretrained("/output/path")
52
+ ```
53
+
54
+ Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
55
+ come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
56
+
57
+ If you want you tokenizer to add a bos automatically you should update the tokenizer._tokenizers.post_processor:
58
+
59
+ ```py
60
+ from tokenizers import processors
61
+ bos = "<|begin_of_text|>"
62
+ tokenizer._tokenizers.post_processor = processors.Sequence(
63
+ [
64
+ processors.ByteLevel(trim_offsets=False),
65
+ processors.TemplateProcessing(
66
+ single=f"{bos}:0 $A:0",
67
+ pair=f"{bos}:0 $A:0 {bos}:1 $B:1",
68
+ special_tokens=[
69
+ (bos, tokenizer.encode(bos)),
70
+ ],
71
+ ),
72
+ ]
73
+ )
74
+ ```
75
+ """
76
+
77
+ NUM_SHARDS = {
78
+ "7B": 1,
79
+ "8B": 1,
80
+ "8Bf": 1,
81
+ "7Bf": 1,
82
+ "13B": 2,
83
+ "13Bf": 2,
84
+ "34B": 4,
85
+ "30B": 4,
86
+ "65B": 8,
87
+ "70B": 8,
88
+ "70Bf": 8,
89
+ "405B": 8,
90
+ "405B-MP16": 16,
91
+ }
92
+
93
+ CONTEXT_LENGTH_FOR_VERSION = {"3.1": 131072, "3": 8192, "2": 4096, "1": 2048}
94
+
95
+
96
+ def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
97
+ return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
98
+
99
+
100
+ def read_json(path):
101
+ with open(path, "r") as f:
102
+ return json.load(f)
103
+
104
+
105
+ def write_json(text, path):
106
+ with open(path, "w") as f:
107
+ json.dump(text, f)
108
+
109
+
110
+ def write_model(
111
+ model_path,
112
+ input_base_path,
113
+ model_size=None,
114
+ safe_serialization=True,
115
+ llama_version="1",
116
+ vocab_size=None,
117
+ num_shards=None,
118
+ instruct=False,
119
+ ):
120
+ os.makedirs(model_path, exist_ok=True)
121
+ tmp_model_path = os.path.join(model_path, "tmp")
122
+ os.makedirs(tmp_model_path, exist_ok=True)
123
+
124
+ params = read_json(os.path.join(input_base_path, "params.json"))
125
+ num_shards = NUM_SHARDS[model_size] if num_shards is None else num_shards
126
+ params = params.get("model", params)
127
+ n_layers = params["n_layers"]
128
+ n_heads = params["n_heads"]
129
+ n_heads_per_shard = n_heads // num_shards
130
+ dim = params["dim"]
131
+ dims_per_head = dim // n_heads
132
+ base = params.get("rope_theta", 10000.0)
133
+ inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
134
+ if base > 10000.0 and float(llama_version) < 3:
135
+ max_position_embeddings = 16384
136
+ else:
137
+ max_position_embeddings = CONTEXT_LENGTH_FOR_VERSION[llama_version]
138
+
139
+ if params.get("n_kv_heads", None) is not None:
140
+ num_key_value_heads = params["n_kv_heads"] # for GQA / MQA
141
+ num_key_value_heads_per_shard = num_key_value_heads // num_shards
142
+ key_value_dim = dims_per_head * num_key_value_heads
143
+ else: # compatibility with other checkpoints
144
+ num_key_value_heads = n_heads
145
+ num_key_value_heads_per_shard = n_heads_per_shard
146
+ key_value_dim = dim
147
+
148
+ # permute for sliced rotary
149
+ def permute(w, n_heads, dim1=dim, dim2=dim):
150
+ return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
151
+
152
+ print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
153
+ # Load weights
154
+ if num_shards == 1:
155
+ # Not sharded
156
+ # (The sharded implementation would also work, but this is simpler.)
157
+ loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
158
+ else:
159
+ # Sharded
160
+ checkpoint_list = sorted([file for file in os.listdir(input_base_path) if file.endswith(".pth")])
161
+ print("Loading in order:", checkpoint_list)
162
+ loaded = [torch.load(os.path.join(input_base_path, file), map_location="cpu") for file in checkpoint_list]
163
+ param_count = 0
164
+ index_dict = {"weight_map": {}}
165
+ for layer_i in range(n_layers):
166
+ filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
167
+ if num_shards == 1:
168
+ # Unsharded
169
+ state_dict = {
170
+ f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
171
+ loaded[f"layers.{layer_i}.attention.wq.weight"], n_heads=n_heads
172
+ ),
173
+ f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
174
+ loaded[f"layers.{layer_i}.attention.wk.weight"],
175
+ n_heads=num_key_value_heads,
176
+ dim1=key_value_dim,
177
+ ),
178
+ f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
179
+ f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
180
+ f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
181
+ f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
182
+ f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
183
+ f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
184
+ f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
185
+ }
186
+ else:
187
+ # Sharded
188
+ # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
189
+ # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
190
+ # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
191
+
192
+ state_dict = {
193
+ f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
194
+ f"layers.{layer_i}.attention_norm.weight"
195
+ ].clone(),
196
+ f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
197
+ f"layers.{layer_i}.ffn_norm.weight"
198
+ ].clone(),
199
+ }
200
+ state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
201
+ torch.cat(
202
+ [
203
+ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
204
+ for i in range(len(loaded))
205
+ ],
206
+ dim=0,
207
+ ).reshape(dim, dim),
208
+ n_heads=n_heads,
209
+ )
210
+ state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
211
+ torch.cat(
212
+ [
213
+ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
214
+ num_key_value_heads_per_shard, dims_per_head, dim
215
+ )
216
+ for i in range(len(loaded))
217
+ ],
218
+ dim=0,
219
+ ).reshape(key_value_dim, dim),
220
+ num_key_value_heads,
221
+ key_value_dim,
222
+ dim,
223
+ )
224
+ state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
225
+ [
226
+ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
227
+ num_key_value_heads_per_shard, dims_per_head, dim
228
+ )
229
+ for i in range(len(loaded))
230
+ ],
231
+ dim=0,
232
+ ).reshape(key_value_dim, dim)
233
+
234
+ state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
235
+ [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(len(loaded))], dim=1
236
+ )
237
+ state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
238
+ [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(len(loaded))], dim=0
239
+ )
240
+ state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
241
+ [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(len(loaded))], dim=1
242
+ )
243
+ state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
244
+ [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(len(loaded))], dim=0
245
+ )
246
+
247
+ state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
248
+ for k, v in state_dict.items():
249
+ index_dict["weight_map"][k] = filename
250
+ param_count += v.numel()
251
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
252
+
253
+ filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
254
+ if num_shards == 1:
255
+ # Unsharded
256
+ state_dict = {
257
+ "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
258
+ "model.norm.weight": loaded["norm.weight"],
259
+ "lm_head.weight": loaded["output.weight"],
260
+ }
261
+ else:
262
+ concat_dim = 0 if llama_version in ["3", "3.1"] else 1
263
+ state_dict = {
264
+ "model.norm.weight": loaded[0]["norm.weight"],
265
+ "model.embed_tokens.weight": torch.cat(
266
+ [loaded[i]["tok_embeddings.weight"] for i in range(len(loaded))], dim=concat_dim
267
+ ),
268
+ "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(len(loaded))], dim=0),
269
+ }
270
+
271
+ for k, v in state_dict.items():
272
+ index_dict["weight_map"][k] = filename
273
+ param_count += v.numel()
274
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
275
+
276
+ # Write configs
277
+ index_dict["metadata"] = {"total_size": param_count * 2}
278
+ write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
279
+ ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
280
+ multiple_of = params["multiple_of"] if "multiple_of" in params else 256
281
+
282
+ if llama_version in ["3", "3.1"]:
283
+ bos_token_id = 128000
284
+
285
+ if instruct:
286
+ eos_token_id = [128001, 128008, 128009]
287
+ else:
288
+ eos_token_id = 128001
289
+ else:
290
+ bos_token_id = 1
291
+ eos_token_id = 2
292
+
293
+ config = LlamaConfig(
294
+ hidden_size=dim,
295
+ intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
296
+ num_attention_heads=params["n_heads"],
297
+ num_hidden_layers=params["n_layers"],
298
+ rms_norm_eps=params["norm_eps"],
299
+ num_key_value_heads=num_key_value_heads,
300
+ vocab_size=vocab_size,
301
+ rope_theta=base,
302
+ max_position_embeddings=max_position_embeddings,
303
+ bos_token_id=bos_token_id,
304
+ eos_token_id=eos_token_id,
305
+ )
306
+ config.save_pretrained(tmp_model_path)
307
+
308
+ if instruct:
309
+ generation_config = GenerationConfig(
310
+ do_sample=True,
311
+ temperature=0.6,
312
+ top_p=0.9,
313
+ bos_token_id=bos_token_id,
314
+ eos_token_id=eos_token_id,
315
+ )
316
+ generation_config.save_pretrained(tmp_model_path)
317
+
318
+ # Make space so we can load the model properly now.
319
+ del state_dict
320
+ del loaded
321
+ gc.collect()
322
+
323
+ print("Loading the checkpoint in a Llama model.")
324
+ model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True)
325
+ # Avoid saving this as part of the config.
326
+ del model.config._name_or_path
327
+ model.config.torch_dtype = torch.float16
328
+ print("Saving in the Transformers format.")
329
+ model.save_pretrained(model_path, safe_serialization=safe_serialization)
330
+ shutil.rmtree(tmp_model_path, ignore_errors=True)
331
+
332
+
333
+ class Llama3Converter(TikTokenConverter):
334
+ def __init__(self, vocab_file, special_tokens=None, instruct=False, model_max_length=None, **kwargs):
335
+ super().__init__(vocab_file, **kwargs)
336
+ tokenizer = self.converted()
337
+ chat_template = (
338
+ "{% set loop_messages = messages %}"
339
+ "{% for message in loop_messages %}"
340
+ "{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}"
341
+ "{% if loop.index0 == 0 %}"
342
+ "{% set content = bos_token + content %}"
343
+ "{% endif %}"
344
+ "{{ content }}"
345
+ "{% endfor %}"
346
+ "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"
347
+ )
348
+ tokenizer.add_special_tokens(special_tokens)
349
+
350
+ self.tokenizer = PreTrainedTokenizerFast(
351
+ tokenizer_object=tokenizer,
352
+ bos_token="<|begin_of_text|>",
353
+ eos_token="<|end_of_text|>" if not instruct else "<|eot_id|>",
354
+ chat_template=chat_template if instruct else None,
355
+ model_input_names=["input_ids", "attention_mask"],
356
+ model_max_length=model_max_length,
357
+ )
358
+
359
+
360
+ def write_tokenizer(tokenizer_path, input_tokenizer_path, llama_version="2", special_tokens=None, instruct=False):
361
+ tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
362
+ if llama_version in ["3", "3.1"]:
363
+ tokenizer = Llama3Converter(
364
+ input_tokenizer_path, special_tokens, instruct, model_max_length=CONTEXT_LENGTH_FOR_VERSION[llama_version]
365
+ ).tokenizer
366
+ else:
367
+ tokenizer = tokenizer_class(input_tokenizer_path)
368
+ print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
369
+ tokenizer.save_pretrained(tokenizer_path)
370
+ return tokenizer
371
+
372
+
373
+ DEFAULT_LLAMA_SPECIAL_TOKENS = {
374
+ "3": [
375
+ "<|begin_of_text|>",
376
+ "<|end_of_text|>",
377
+ "<|reserved_special_token_0|>",
378
+ "<|reserved_special_token_1|>",
379
+ "<|reserved_special_token_2|>",
380
+ "<|reserved_special_token_3|>",
381
+ "<|start_header_id|>",
382
+ "<|end_header_id|>",
383
+ "<|reserved_special_token_4|>",
384
+ "<|eot_id|>", # end of turn
385
+ ]
386
+ + [f"<|reserved_special_token_{i}|>" for i in range(5, 256 - 5)],
387
+ "3.1": [
388
+ "<|begin_of_text|>",
389
+ "<|end_of_text|>",
390
+ "<|reserved_special_token_0|>",
391
+ "<|reserved_special_token_1|>",
392
+ "<|finetune_right_pad_id|>",
393
+ "<|reserved_special_token_2|>",
394
+ "<|start_header_id|>",
395
+ "<|end_header_id|>",
396
+ "<|eom_id|>", # end of message
397
+ "<|eot_id|>", # end of turn
398
+ "<|python_tag|>",
399
+ ]
400
+ + [f"<|reserved_special_token_{i}|>" for i in range(3, 256 - 8)],
401
+ }
402
+
403
+
404
+ def main():
405
+ parser = argparse.ArgumentParser()
406
+ parser.add_argument(
407
+ "--input_dir",
408
+ help="Location of LLaMA weights, which contains tokenizer.model and model folders",
409
+ )
410
+ parser.add_argument(
411
+ "--model_size",
412
+ default=None,
413
+ help="'f' Deprecated in favor of `num_shards`: models correspond to the finetuned versions, and are specific to the Llama2 official release. For more details on Llama2, checkout the original repo: https://huggingface.co/meta-llama",
414
+ )
415
+ parser.add_argument(
416
+ "--output_dir",
417
+ help="Location to write HF model and tokenizer",
418
+ )
419
+ parser.add_argument(
420
+ "--safe_serialization", default=True, type=bool, help="Whether or not to save using `safetensors`."
421
+ )
422
+ # Different Llama versions used different default values for max_position_embeddings, hence the need to be able to specify which version is being used.
423
+ parser.add_argument(
424
+ "--llama_version",
425
+ choices=["1", "2", "3", "3.1"],
426
+ default="1",
427
+ type=str,
428
+ help="Version of the Llama model to convert. Currently supports Llama1 and Llama2. Controls the context size",
429
+ )
430
+ parser.add_argument(
431
+ "--num_shards",
432
+ default=None,
433
+ type=int,
434
+ help="The number of individual shards used for the model. Does not have to be the same as the number of consolidated_xx.pth",
435
+ )
436
+ parser.add_argument(
437
+ "--special_tokens",
438
+ default=None,
439
+ type=List[str],
440
+ help="The list of special tokens that should be added to the model.",
441
+ )
442
+ parser.add_argument(
443
+ "--instruct",
444
+ default=False,
445
+ type=bool,
446
+ help="Whether the model is an instruct model or not. Will affect special tokens for llama 3.1.",
447
+ )
448
+ args = parser.parse_args()
449
+ if args.model_size is None and args.num_shards is None:
450
+ raise ValueError("You have to set at least `num_shards` if you are not giving the `model_size`")
451
+ if args.special_tokens is None:
452
+ # no special tokens by default
453
+ args.special_tokens = DEFAULT_LLAMA_SPECIAL_TOKENS.get(str(args.llama_version), [])
454
+
455
+ spm_path = os.path.join(args.input_dir, "tokenizer.model")
456
+ vocab_size = len(
457
+ write_tokenizer(
458
+ args.output_dir,
459
+ spm_path,
460
+ llama_version=args.llama_version,
461
+ special_tokens=args.special_tokens,
462
+ instruct=args.instruct,
463
+ )
464
+ )
465
+ if args.model_size != "tokenizer_only":
466
+ write_model(
467
+ model_path=args.output_dir,
468
+ input_base_path=args.input_dir,
469
+ model_size=args.model_size,
470
+ safe_serialization=args.safe_serialization,
471
+ llama_version=args.llama_version,
472
+ vocab_size=vocab_size,
473
+ num_shards=args.num_shards,
474
+ instruct=args.instruct,
475
+ )
476
+
477
+
478
+ if __name__ == "__main__":
479
+ main()
isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_flax_llama.py ADDED
@@ -0,0 +1,750 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta AI, EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """Flax LLaMA model."""
21
+
22
+ from functools import partial
23
+ from typing import Optional, Tuple
24
+
25
+ import flax.linen as nn
26
+ import jax
27
+ import jax.numpy as jnp
28
+ import numpy as np
29
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
30
+ from flax.linen import combine_masks, make_causal_mask
31
+ from flax.linen.attention import dot_product_attention_weights
32
+ from flax.traverse_util import flatten_dict, unflatten_dict
33
+ from jax import lax
34
+
35
+ from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
36
+ from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
37
+ from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
38
+ from transformers.configuration_llama import LlamaConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CONFIG_FOR_DOC = "LlamaConfig"
44
+ _CHECKPOINT_FOR_DOC = "afmck/testing-llama-tiny"
45
+ _REAL_CHECKPOINT_FOR_DOC = "openlm-research/open_llama_3b_v2"
46
+
47
+ LLAMA_START_DOCSTRING = r"""
48
+
49
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
50
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
51
+ etc.)
52
+
53
+ This model is also a Flax Linen
54
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
55
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
56
+
57
+ Finally, this model supports inherent JAX features such as:
58
+
59
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
60
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
61
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
62
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
63
+
64
+ Parameters:
65
+ config ([`LlamaConfig`]): Model configuration class with all the parameters of the model.
66
+ Initializing with a config file does not load the weights associated with the model, only the
67
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
68
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
69
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`, or
70
+ `jax.numpy.bfloat16`.
71
+
72
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
73
+ specified all the computation will be performed with the given `dtype`.
74
+
75
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
76
+ parameters.**
77
+
78
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
79
+ [`~FlaxPreTrainedModel.to_bf16`].
80
+ """
81
+
82
+ LLAMA_INPUTS_DOCSTRING = r"""
83
+ Args:
84
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
85
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
86
+ it.
87
+
88
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
89
+ [`PreTrainedTokenizer.__call__`] for details.
90
+
91
+ [What are input IDs?](../glossary#input-ids)
92
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
93
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
94
+
95
+ - 1 for tokens that are **not masked**,
96
+ - 0 for tokens that are **masked**.
97
+
98
+ [What are attention masks?](../glossary#attention-mask)
99
+
100
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
101
+ [`PreTrainedTokenizer.__call__`] for details.
102
+
103
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
104
+ `past_key_values`).
105
+
106
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
107
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
108
+ information on the default strategy.
109
+
110
+ - 1 indicates the head is **not masked**,
111
+ - 0 indicates the head is **masked**.
112
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
113
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
114
+ config.n_positions - 1]`.
115
+
116
+ [What are position IDs?](../glossary#position-ids)
117
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
118
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
119
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
120
+ output_attentions (`bool`, *optional*):
121
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
122
+ tensors for more detail.
123
+ output_hidden_states (`bool`, *optional*):
124
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
125
+ more detail.
126
+ return_dict (`bool`, *optional*):
127
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
128
+ """
129
+
130
+
131
+ def create_sinusoidal_positions(num_pos, dim):
132
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
133
+ freqs = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
134
+
135
+ emb = np.concatenate((freqs, freqs), axis=-1)
136
+ out = np.concatenate((np.sin(emb)[:, None, :], np.cos(emb)[:, None, :]), axis=-1)
137
+ return jnp.array(out[:, :, :num_pos])
138
+
139
+
140
+ def rotate_half(tensor):
141
+ """Rotates half the hidden dims of the input."""
142
+ rotate_half_tensor = jnp.concatenate(
143
+ (-tensor[..., tensor.shape[-1] // 2 :], tensor[..., : tensor.shape[-1] // 2]), axis=-1
144
+ )
145
+ return rotate_half_tensor
146
+
147
+
148
+ def apply_rotary_pos_emb(tensor, sin_pos, cos_pos):
149
+ return (tensor * cos_pos) + (rotate_half(tensor) * sin_pos)
150
+
151
+
152
+ class FlaxLlamaRMSNorm(nn.Module):
153
+ config: LlamaConfig
154
+ dtype: jnp.dtype = jnp.float32
155
+
156
+ def setup(self):
157
+ self.epsilon = self.config.rms_norm_eps
158
+ self.weight = self.param("weight", lambda _, shape: jnp.ones(shape), self.config.hidden_size)
159
+
160
+ def __call__(self, hidden_states):
161
+ variance = jnp.asarray(hidden_states, dtype=jnp.float32)
162
+ variance = jnp.power(variance, 2)
163
+ variance = variance.mean(-1, keepdims=True)
164
+ # use `jax.numpy.sqrt` as `jax.lax.rsqrt` does not match `torch.rsqrt`
165
+ hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon)
166
+
167
+ return self.weight * jnp.asarray(hidden_states, dtype=self.dtype)
168
+
169
+
170
+ class FlaxLlamaRotaryEmbedding(nn.Module):
171
+ config: LlamaConfig
172
+ dtype: jnp.dtype = jnp.float32
173
+
174
+ def setup(self):
175
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
176
+ self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim)
177
+
178
+ def __call__(self, key, query, position_ids):
179
+ sincos = self.sincos[position_ids]
180
+ sin_pos, cos_pos = jnp.split(sincos, 2, axis=-1)
181
+
182
+ key = apply_rotary_pos_emb(key, sin_pos, cos_pos)
183
+ query = apply_rotary_pos_emb(query, sin_pos, cos_pos)
184
+
185
+ key = jnp.asarray(key, dtype=self.dtype)
186
+ query = jnp.asarray(query, dtype=self.dtype)
187
+
188
+ return key, query
189
+
190
+
191
+ class FlaxLlamaAttention(nn.Module):
192
+ config: LlamaConfig
193
+ dtype: jnp.dtype = jnp.float32
194
+ causal: bool = True
195
+ is_cross_attention: bool = False
196
+
197
+ def setup(self):
198
+ config = self.config
199
+ self.embed_dim = config.hidden_size
200
+ self.num_heads = config.num_attention_heads
201
+ self.head_dim = self.embed_dim // self.num_heads
202
+ self.num_key_value_heads = config.num_key_value_heads
203
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
204
+ self.attention_softmax_in_fp32 = self.dtype is not jnp.float32
205
+
206
+ dense = partial(
207
+ nn.Dense,
208
+ use_bias=config.attention_bias,
209
+ dtype=self.dtype,
210
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
211
+ )
212
+
213
+ self.q_proj = dense(self.num_heads * self.head_dim)
214
+ self.k_proj = dense(self.num_key_value_heads * self.head_dim)
215
+ self.v_proj = dense(self.num_key_value_heads * self.head_dim)
216
+ self.o_proj = dense(self.embed_dim)
217
+ if (self.head_dim * self.num_heads) != self.embed_dim:
218
+ raise ValueError(
219
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.embed_dim}"
220
+ f" and `num_heads`: {self.num_heads})."
221
+ )
222
+
223
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
224
+ self.rotary_emb = FlaxLlamaRotaryEmbedding(config, dtype=self.dtype)
225
+
226
+ def _split_heads(self, hidden_states, num_heads):
227
+ return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))
228
+
229
+ def _merge_heads(self, hidden_states):
230
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
231
+
232
+ @nn.compact
233
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoSelfAttention._concatenate_to_cache
234
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
235
+ """
236
+ This function takes projected key, value states from a single input token and concatenates the states to cached
237
+ states from previous steps. This function is slighly adapted from the official Flax repository:
238
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
239
+ """
240
+ # detect if we're initializing by absence of existing cache data.
241
+ is_initialized = self.has_variable("cache", "cached_key")
242
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
243
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
244
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
245
+
246
+ if is_initialized:
247
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
248
+ # update key, value caches with our new 1d spatial slices
249
+ cur_index = cache_index.value
250
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
251
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
252
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
253
+ cached_key.value = key
254
+ cached_value.value = value
255
+ num_updated_cache_vectors = query.shape[1]
256
+ cache_index.value = cache_index.value + num_updated_cache_vectors
257
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
258
+ pad_mask = jnp.broadcast_to(
259
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
260
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
261
+ )
262
+ attention_mask = combine_masks(pad_mask, attention_mask)
263
+ return key, value, attention_mask
264
+
265
+ def __call__(
266
+ self,
267
+ hidden_states,
268
+ attention_mask,
269
+ position_ids,
270
+ deterministic: bool = True,
271
+ init_cache: bool = False,
272
+ output_attentions: bool = False,
273
+ ):
274
+ query = self.q_proj(hidden_states)
275
+ key = self.k_proj(hidden_states)
276
+ value = self.v_proj(hidden_states)
277
+
278
+ query = self._split_heads(query, self.num_heads)
279
+ key = self._split_heads(key, self.num_key_value_heads)
280
+ value = self._split_heads(value, self.num_key_value_heads)
281
+
282
+ key, query = self.rotary_emb(key, query, position_ids)
283
+
284
+ query_length, key_length = query.shape[1], key.shape[1]
285
+
286
+ if self.has_variable("cache", "cached_key"):
287
+ mask_shift = self.variables["cache"]["cache_index"]
288
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
289
+ causal_mask = lax.dynamic_slice(
290
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
291
+ )
292
+ else:
293
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
294
+
295
+ batch_size = hidden_states.shape[0]
296
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
297
+
298
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
299
+ attention_mask = combine_masks(attention_mask, causal_mask)
300
+
301
+ dropout_rng = None
302
+ if not deterministic and self.config.attention_dropout > 0.0:
303
+ dropout_rng = self.make_rng("dropout")
304
+
305
+ # During fast autoregressive decoding, we feed one position at a time,
306
+ # and cache the keys and values step by step.
307
+ if self.has_variable("cache", "cached_key") or init_cache:
308
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
309
+
310
+ key = jnp.repeat(key, self.num_key_value_groups, axis=2)
311
+ value = jnp.repeat(value, self.num_key_value_groups, axis=2)
312
+
313
+ # transform boolean mask into float mask
314
+ attention_bias = lax.select(
315
+ attention_mask > 0,
316
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
317
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
318
+ )
319
+
320
+ # usual dot product attention
321
+ attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype
322
+ attn_weights = dot_product_attention_weights(
323
+ query,
324
+ key,
325
+ bias=attention_bias,
326
+ dropout_rng=dropout_rng,
327
+ dropout_rate=self.config.attention_dropout,
328
+ deterministic=deterministic,
329
+ dtype=attention_dtype,
330
+ )
331
+
332
+ if self.attention_softmax_in_fp32:
333
+ attn_weights = attn_weights.astype(self.dtype)
334
+
335
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
336
+ attn_output = self._merge_heads(attn_output)
337
+ attn_output = self.o_proj(attn_output)
338
+
339
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
340
+ return outputs
341
+
342
+
343
+ class FlaxLlamaMLP(nn.Module):
344
+ config: LlamaConfig
345
+ dtype: jnp.dtype = jnp.float32
346
+
347
+ def setup(self):
348
+ embed_dim = self.config.hidden_size
349
+ inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim
350
+
351
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
352
+ self.act = ACT2FN[self.config.hidden_act]
353
+
354
+ self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
355
+ self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
356
+ self.up_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
357
+
358
+ def __call__(self, hidden_states):
359
+ up_proj_states = self.up_proj(hidden_states)
360
+ gate_states = self.act(self.gate_proj(hidden_states))
361
+
362
+ hidden_states = self.down_proj(up_proj_states * gate_states)
363
+ return hidden_states
364
+
365
+
366
+ class FlaxLlamaDecoderLayer(nn.Module):
367
+ config: LlamaConfig
368
+ dtype: jnp.dtype = jnp.float32
369
+
370
+ def setup(self):
371
+ self.input_layernorm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
372
+ self.self_attn = FlaxLlamaAttention(self.config, dtype=self.dtype)
373
+ self.post_attention_layernorm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
374
+ self.mlp = FlaxLlamaMLP(self.config, dtype=self.dtype)
375
+
376
+ def __call__(
377
+ self,
378
+ hidden_states,
379
+ attention_mask=None,
380
+ position_ids=None,
381
+ deterministic: bool = True,
382
+ init_cache: bool = False,
383
+ output_attentions: bool = False,
384
+ ):
385
+ residual = hidden_states
386
+ hidden_states = self.input_layernorm(hidden_states)
387
+ outputs = self.self_attn(
388
+ hidden_states,
389
+ attention_mask=attention_mask,
390
+ position_ids=position_ids,
391
+ deterministic=deterministic,
392
+ init_cache=init_cache,
393
+ output_attentions=output_attentions,
394
+ )
395
+ # residual connection
396
+ attn_output = outputs[0]
397
+ hidden_states = residual + attn_output
398
+
399
+ residual = hidden_states
400
+ hidden_states = self.post_attention_layernorm(hidden_states)
401
+ hidden_states = self.mlp(hidden_states)
402
+ # residual connection
403
+ hidden_states = residual + hidden_states
404
+
405
+ return (hidden_states,) + outputs[1:]
406
+
407
+
408
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoPreTrainedModel with GPTNeo->Llama, GPT_NEO->LLAMA, transformer->model
409
+ class FlaxLlamaPreTrainedModel(FlaxPreTrainedModel):
410
+ """
411
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
412
+ models.
413
+ """
414
+
415
+ config_class = LlamaConfig
416
+ base_model_prefix = "model"
417
+ module_class: nn.Module = None
418
+
419
+ def __init__(
420
+ self,
421
+ config: LlamaConfig,
422
+ input_shape: Tuple = (1, 1),
423
+ seed: int = 0,
424
+ dtype: jnp.dtype = jnp.float32,
425
+ _do_init: bool = True,
426
+ **kwargs,
427
+ ):
428
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
429
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
430
+
431
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
432
+ # init input tensors
433
+ input_ids = jnp.zeros(input_shape, dtype="i4")
434
+ attention_mask = jnp.ones_like(input_ids)
435
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
436
+ params_rng, dropout_rng = jax.random.split(rng)
437
+ rngs = {"params": params_rng, "dropout": dropout_rng}
438
+
439
+ random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"]
440
+
441
+ if params is not None:
442
+ random_params = flatten_dict(unfreeze(random_params))
443
+ params = flatten_dict(unfreeze(params))
444
+ for missing_key in self._missing_keys:
445
+ params[missing_key] = random_params[missing_key]
446
+ self._missing_keys = set()
447
+ return freeze(unflatten_dict(params))
448
+ else:
449
+ return random_params
450
+
451
+ def init_cache(self, batch_size, max_length):
452
+ r"""
453
+ Args:
454
+ batch_size (`int`):
455
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
456
+ max_length (`int`):
457
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
458
+ cache.
459
+ """
460
+ # init input variables to retrieve cache
461
+ input_ids = jnp.ones((batch_size, max_length))
462
+ attention_mask = jnp.ones_like(input_ids)
463
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
464
+
465
+ init_variables = self.module.init(
466
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
467
+ )
468
+ return unfreeze(init_variables["cache"])
469
+
470
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
471
+ def __call__(
472
+ self,
473
+ input_ids,
474
+ attention_mask=None,
475
+ position_ids=None,
476
+ params: dict = None,
477
+ past_key_values: dict = None,
478
+ dropout_rng: jax.random.PRNGKey = None,
479
+ train: bool = False,
480
+ output_attentions: Optional[bool] = None,
481
+ output_hidden_states: Optional[bool] = None,
482
+ return_dict: Optional[bool] = None,
483
+ ):
484
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
485
+ output_hidden_states = (
486
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
487
+ )
488
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
489
+
490
+ batch_size, sequence_length = input_ids.shape
491
+
492
+ if position_ids is None:
493
+ if past_key_values is not None:
494
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
495
+
496
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
497
+
498
+ if attention_mask is None:
499
+ attention_mask = jnp.ones((batch_size, sequence_length))
500
+
501
+ # Handle any PRNG if needed
502
+ rngs = {}
503
+ if dropout_rng is not None:
504
+ rngs["dropout"] = dropout_rng
505
+
506
+ inputs = {"params": params or self.params}
507
+
508
+        # If past_key_values are passed, the cache is already initialized; a private flag init_cache has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be updated by the FlaxLlamaAttention module.
509
+ if past_key_values:
510
+ inputs["cache"] = past_key_values
511
+ mutable = ["cache"]
512
+ else:
513
+ mutable = False
514
+
515
+ outputs = self.module.apply(
516
+ inputs,
517
+ jnp.array(input_ids, dtype="i4"),
518
+ jnp.array(attention_mask, dtype="i4"),
519
+ jnp.array(position_ids, dtype="i4"),
520
+ not train,
521
+ False,
522
+ output_attentions,
523
+ output_hidden_states,
524
+ return_dict,
525
+ rngs=rngs,
526
+ mutable=mutable,
527
+ )
528
+
529
+ # add updated cache to model output
530
+ if past_key_values is not None and return_dict:
531
+ outputs, past_key_values = outputs
532
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
533
+ return outputs
534
+ elif past_key_values is not None and not return_dict:
535
+ outputs, past_key_values = outputs
536
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
537
+
538
+ return outputs
539
+
540
+
541
+ class FlaxLlamaLayerCollection(nn.Module):
542
+ config: LlamaConfig
543
+ dtype: jnp.dtype = jnp.float32
544
+
545
+ def setup(self):
546
+ self.blocks = [
547
+ FlaxLlamaDecoderLayer(self.config, dtype=self.dtype, name=str(i))
548
+ for i in range(self.config.num_hidden_layers)
549
+ ]
550
+
551
+ def __call__(
552
+ self,
553
+ hidden_states,
554
+ attention_mask=None,
555
+ position_ids=None,
556
+ deterministic: bool = True,
557
+ init_cache: bool = False,
558
+ output_attentions: bool = False,
559
+ output_hidden_states: bool = False,
560
+ return_dict: bool = False,
561
+ ):
562
+ all_attentions = () if output_attentions else None
563
+ all_hidden_states = () if output_hidden_states else None
564
+
565
+ for block in self.blocks:
566
+ if output_hidden_states:
567
+ all_hidden_states += (hidden_states,)
568
+ layer_outputs = block(
569
+ hidden_states,
570
+ attention_mask=attention_mask,
571
+ position_ids=position_ids,
572
+ deterministic=deterministic,
573
+ init_cache=init_cache,
574
+ output_attentions=output_attentions,
575
+ )
576
+ hidden_states = layer_outputs[0]
577
+
578
+ if output_attentions:
579
+ all_attentions += (layer_outputs[1],)
580
+
581
+ # this contains possible `None` values - `FlaxLlamaModule` will filter them out
582
+ outputs = (hidden_states, all_hidden_states, all_attentions)
583
+
584
+ return outputs
585
+
586
+
587
+ class FlaxLlamaModule(nn.Module):
588
+ config: LlamaConfig
589
+ dtype: jnp.dtype = jnp.float32
590
+
591
+ def setup(self):
592
+ self.hidden_size = self.config.hidden_size
593
+ embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
594
+ self.embed_tokens = nn.Embed(
595
+ self.config.vocab_size,
596
+ self.hidden_size,
597
+ embedding_init=embedding_init,
598
+ dtype=self.dtype,
599
+ )
600
+ self.layers = FlaxLlamaLayerCollection(self.config, dtype=self.dtype)
601
+ self.norm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
602
+
603
+ def __call__(
604
+ self,
605
+ input_ids,
606
+ attention_mask=None,
607
+ position_ids=None,
608
+ deterministic=True,
609
+ init_cache: bool = False,
610
+ output_attentions: bool = False,
611
+ output_hidden_states: bool = False,
612
+ return_dict: bool = True,
613
+ ):
614
+ input_embeds = self.embed_tokens(input_ids.astype("i4"))
615
+
616
+ outputs = self.layers(
617
+ input_embeds,
618
+ position_ids=position_ids,
619
+ attention_mask=attention_mask,
620
+ deterministic=deterministic,
621
+ init_cache=init_cache,
622
+ output_attentions=output_attentions,
623
+ output_hidden_states=output_hidden_states,
624
+ return_dict=return_dict,
625
+ )
626
+
627
+ hidden_states = outputs[0]
628
+ hidden_states = self.norm(hidden_states)
629
+
630
+ if output_hidden_states:
631
+ all_hidden_states = outputs[1] + (hidden_states,)
632
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
633
+ else:
634
+ outputs = (hidden_states,) + outputs[1:]
635
+
636
+ if not return_dict:
637
+ return tuple(v for v in outputs if v is not None)
638
+
639
+ return FlaxBaseModelOutput(
640
+ last_hidden_state=hidden_states,
641
+ hidden_states=outputs[1],
642
+ attentions=outputs[-1],
643
+ )
644
+
645
+
646
+ @add_start_docstrings(
647
+ "The bare Llama Model transformer outputting raw hidden-states without any specific head on top.",
648
+ LLAMA_START_DOCSTRING,
649
+ )
650
+ class FlaxLlamaModel(FlaxLlamaPreTrainedModel):
651
+ module_class = FlaxLlamaModule
652
+
653
+
654
+ append_call_sample_docstring(
655
+ FlaxLlamaModel,
656
+ _CHECKPOINT_FOR_DOC,
657
+ FlaxBaseModelOutput,
658
+ _CONFIG_FOR_DOC,
659
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
660
+ )
661
+
662
+
663
+ class FlaxLlamaForCausalLMModule(nn.Module):
664
+ config: LlamaConfig
665
+ dtype: jnp.dtype = jnp.float32
666
+
667
+ def setup(self):
668
+ self.model = FlaxLlamaModule(self.config, dtype=self.dtype)
669
+ self.lm_head = nn.Dense(
670
+ self.config.vocab_size,
671
+ use_bias=False,
672
+ dtype=self.dtype,
673
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
674
+ )
675
+
676
+ def __call__(
677
+ self,
678
+ input_ids,
679
+ attention_mask=None,
680
+ position_ids=None,
681
+ deterministic: bool = True,
682
+ init_cache: bool = False,
683
+ output_attentions: bool = False,
684
+ output_hidden_states: bool = False,
685
+ return_dict: bool = True,
686
+ ):
687
+ outputs = self.model(
688
+ input_ids,
689
+ position_ids=position_ids,
690
+ attention_mask=attention_mask,
691
+ deterministic=deterministic,
692
+ init_cache=init_cache,
693
+ output_attentions=output_attentions,
694
+ output_hidden_states=output_hidden_states,
695
+ return_dict=return_dict,
696
+ )
697
+
698
+ hidden_states = outputs[0]
699
+ lm_logits = self.lm_head(hidden_states)
700
+
701
+ if not return_dict:
702
+ return (lm_logits,) + outputs[1:]
703
+
704
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
705
+
706
+
707
+ @add_start_docstrings(
708
+ """
709
+ The Llama Model transformer with a language modeling head (linear layer) on top.
710
+ """,
711
+ LLAMA_START_DOCSTRING,
712
+ )
713
+ # Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJForCausalLM with GPTJ->Llama
714
+ class FlaxLlamaForCausalLM(FlaxLlamaPreTrainedModel):
715
+ module_class = FlaxLlamaForCausalLMModule
716
+
717
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
718
+ # initializing the cache
719
+ batch_size, seq_length = input_ids.shape
720
+
721
+ past_key_values = self.init_cache(batch_size, max_length)
722
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
723
+        # But since Llama uses a causal mask, those positions are masked anyway.
724
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
725
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
726
+ if attention_mask is not None:
727
+ position_ids = attention_mask.cumsum(axis=-1) - 1
728
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
729
+ else:
730
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
731
+
732
+ return {
733
+ "past_key_values": past_key_values,
734
+ "attention_mask": extended_attention_mask,
735
+ "position_ids": position_ids,
736
+ }
737
+
738
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
739
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
740
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
741
+ return model_kwargs
742
+
743
+
744
+ append_call_sample_docstring(
745
+ FlaxLlamaForCausalLM,
746
+ _CHECKPOINT_FOR_DOC,
747
+ FlaxCausalLMOutput,
748
+ _CONFIG_FOR_DOC,
749
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
750
+ )
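A minimal sketch of how the Flax causal LM defined above is typically driven for greedy decoding; the checkpoint name is only a placeholder and the generation arguments are illustrative:

from transformers import AutoTokenizer, FlaxLlamaForCausalLM

model_id = "openlm-research/open_llama_3b_v2"   # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = FlaxLlamaForCausalLM.from_pretrained(model_id)

inputs = tokenizer("The capital of France is", return_tensors="np")
# generate() drives prepare_inputs_for_generation / update_inputs_for_generation above,
# pre-allocating a static cache of length max_length (via init_cache) for efficient compilation.
outputs = model.generate(inputs["input_ids"], max_length=20, do_sample=False)
print(tokenizer.decode(outputs.sequences[0], skip_special_tokens=True))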
isolated/sim_greedy/upstream_sgl/internvl/model/llama/modeling_llama.py ADDED
@@ -0,0 +1,1872 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ import math
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
31
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
32
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
33
+ from transformers.modeling_outputs import (
34
+ BaseModelOutputWithPast,
35
+ CausalLMOutputWithPast,
36
+ QuestionAnsweringModelOutput,
37
+ SequenceClassifierOutputWithPast,
38
+ TokenClassifierOutput,
39
+ )
40
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
41
+ from transformers.modeling_utils import PreTrainedModel
42
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
43
+ from transformers.utils import (
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ is_flash_attn_greater_or_equal_2_10,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from .configuration_llama import LlamaConfig
51
+ from transformers import LogitsProcessorList, StoppingCriteriaList, GenerationConfig
52
+ from transformers.generation.utils import GenerateNonBeamOutput, GenerateDecoderOnlyOutput
53
+ from ..token_pruning import select_visual_token_indices
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ _CONFIG_FOR_DOC = "LlamaConfig"
58
+
59
+
60
+ def _prepare_4d_causal_attention_mask_with_cache_position(
61
+ attention_mask: torch.Tensor,
62
+ sequence_length: int,
63
+ target_length: int,
64
+ dtype: torch.dtype,
65
+ device: torch.device,
66
+ min_dtype: float,
67
+ cache_position: torch.Tensor,
68
+ batch_size: int,
69
+ ):
70
+ """
71
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
72
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
73
+
74
+ Args:
75
+ attention_mask (`torch.Tensor`):
76
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
77
+ sequence_length (`int`):
78
+ The sequence length being processed.
79
+ target_length (`int`):
80
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
81
+ dtype (`torch.dtype`):
82
+ The dtype to use for the 4D attention mask.
83
+ device (`torch.device`):
84
+            The device to place the 4D attention mask on.
85
+ min_dtype (`float`):
86
+ The minimum value representable with the dtype `dtype`.
87
+ cache_position (`torch.Tensor`):
88
+ Indices depicting the position of the input sequence tokens in the sequence.
89
+        batch_size (`int`):
90
+ Batch size.
91
+ """
92
+ if attention_mask is not None and attention_mask.dim() == 4:
93
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
94
+ causal_mask = attention_mask
95
+ else:
96
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
97
+ if sequence_length != 1:
98
+ causal_mask = torch.triu(causal_mask, diagonal=1)
99
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
100
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
101
+ if attention_mask is not None:
102
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
103
+ mask_length = attention_mask.shape[-1]
104
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
105
+ padding_mask = padding_mask == 0
106
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
107
+ padding_mask, min_dtype
108
+ )
109
+
110
+ return causal_mask
111
+
112
+
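For intuition, during prefill with no padding the helper above reduces to an upper-triangular block filled with the dtype's minimum value; a simplified sketch for a 3-token sequence (cache_position and padding handling omitted):

import torch

min_dtype = torch.finfo(torch.float32).min
mask = torch.triu(torch.full((3, 3), min_dtype), diagonal=1)   # future positions are masked
mask = mask[None, None, :, :]                                  # (batch=1, 1, query_len, key_len)
# row i permits attention to keys 0..i and blocks keys > i
print(mask[0, 0])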
113
+ class LlamaRMSNorm(nn.Module):
114
+ def __init__(self, hidden_size, eps=1e-6):
115
+ """
116
+ LlamaRMSNorm is equivalent to T5LayerNorm
117
+ """
118
+ super().__init__()
119
+ self.weight = nn.Parameter(torch.ones(hidden_size))
120
+ self.variance_epsilon = eps
121
+
122
+ def forward(self, hidden_states):
123
+ input_dtype = hidden_states.dtype
124
+ hidden_states = hidden_states.to(torch.float32)
125
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
126
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
127
+ return self.weight * hidden_states.to(input_dtype)
128
+
129
+ def extra_repr(self):
130
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
131
+
132
+
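A small sketch checking the normalization above against a manual computation; the class is imported from the upstream transformers implementation purely for illustration:

import torch
from transformers.models.llama.modeling_llama import LlamaRMSNorm

norm = LlamaRMSNorm(hidden_size=16, eps=1e-6)
x = torch.randn(2, 3, 16)
manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6) * norm.weight
assert torch.allclose(norm(x), manual, atol=1e-6)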
133
+ ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)
134
+
135
+
136
+ class LlamaRotaryEmbedding(nn.Module):
137
+ def __init__(
138
+ self,
139
+ dim=None,
140
+ max_position_embeddings=2048,
141
+ base=10000,
142
+ device=None,
143
+ scaling_factor=1.0,
144
+ rope_type="default",
145
+ config: Optional[LlamaConfig] = None,
146
+ ):
147
+ super().__init__()
148
+ # TODO (joao): remove the `if` below, only used for BC
149
+ self.rope_kwargs = {}
150
+ if config is None:
151
+ logger.warning_once(
152
+ "`LlamaRotaryEmbedding` can now be fully parameterized by passing the model config through the "
153
+ "`config` argument. All other arguments will be removed in v4.45"
154
+ )
155
+ self.rope_kwargs = {
156
+ "rope_type": rope_type,
157
+ "factor": scaling_factor,
158
+ "dim": dim,
159
+ "base": base,
160
+ "max_position_embeddings": max_position_embeddings,
161
+ }
162
+ self.rope_type = rope_type
163
+ self.max_seq_len_cached = max_position_embeddings
164
+ self.original_max_seq_len = max_position_embeddings
165
+ else:
166
+ # BC: "rope_type" was originally "type"
167
+ if config.rope_scaling is not None:
168
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
169
+ else:
170
+ self.rope_type = "default"
171
+ self.max_seq_len_cached = config.max_position_embeddings
172
+ self.original_max_seq_len = config.max_position_embeddings
173
+
174
+ self.config = config
175
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
176
+
177
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
178
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
179
+ self.original_inv_freq = self.inv_freq
180
+
181
+ def _dynamic_frequency_update(self, position_ids, device):
182
+ """
183
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
184
+ 1 - growing beyond the cached sequence length (allow scaling)
185
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
186
+ """
187
+ seq_len = torch.max(position_ids) + 1
188
+ if seq_len > self.max_seq_len_cached: # growth
189
+ inv_freq, self.attention_scaling = self.rope_init_fn(
190
+ self.config, device, seq_len=seq_len, **self.rope_kwargs
191
+ )
192
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
193
+ self.max_seq_len_cached = seq_len
194
+
195
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
196
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
197
+ self.max_seq_len_cached = self.original_max_seq_len
198
+
199
+ @torch.no_grad()
200
+ def forward(self, x, position_ids):
201
+ if "dynamic" in self.rope_type:
202
+ self._dynamic_frequency_update(position_ids, device=x.device)
203
+
204
+ # Core RoPE block
205
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
206
+ position_ids_expanded = position_ids[:, None, :].float()
207
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
208
+ device_type = x.device.type
209
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
210
+ with torch.autocast(device_type=device_type, enabled=False):
211
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
212
+ emb = torch.cat((freqs, freqs), dim=-1)
213
+ cos = emb.cos()
214
+ sin = emb.sin()
215
+
216
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
217
+ cos = cos * self.attention_scaling
218
+ sin = sin * self.attention_scaling
219
+
220
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
221
+
222
+
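A shape-level sketch of the rotary module's output, using the upstream transformers class for illustration; the config values are arbitrary:

import torch
from transformers import LlamaConfig
from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding

config = LlamaConfig(hidden_size=256, num_attention_heads=8)   # head_dim = 32
rope = LlamaRotaryEmbedding(config=config)
x = torch.randn(2, 5, 256)                                     # only dtype/device are read from x
position_ids = torch.arange(5)[None, :].expand(2, -1)
cos, sin = rope(x, position_ids)
assert cos.shape == (2, 5, 32) and sin.shape == (2, 5, 32)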
223
+ class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
224
+ """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
225
+
226
+ def __init__(self, *args, **kwargs):
227
+ logger.warning_once(
228
+            "`LlamaLinearScalingRotaryEmbedding` is deprecated and will be removed in v4.45. Please use "
229
+ "`LlamaRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__)."
230
+ )
231
+ kwargs["rope_type"] = "linear"
232
+ super().__init__(*args, **kwargs)
233
+
234
+
235
+ class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
236
+ """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
237
+
238
+ def __init__(self, *args, **kwargs):
239
+ logger.warning_once(
240
+            "`LlamaDynamicNTKScalingRotaryEmbedding` is deprecated and will be removed in v4.45. Please use "
241
+ "`LlamaRotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to "
242
+ "__init__)."
243
+ )
244
+ kwargs["rope_type"] = "dynamic"
245
+ super().__init__(*args, **kwargs)
246
+
247
+
248
+ def rotate_half(x):
249
+ """Rotates half the hidden dims of the input."""
250
+ x1 = x[..., : x.shape[-1] // 2]
251
+ x2 = x[..., x.shape[-1] // 2 :]
252
+ return torch.cat((-x2, x1), dim=-1)
253
+
254
+
255
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
256
+ """Applies Rotary Position Embedding to the query and key tensors.
257
+
258
+ Args:
259
+ q (`torch.Tensor`): The query tensor.
260
+ k (`torch.Tensor`): The key tensor.
261
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
262
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
263
+ position_ids (`torch.Tensor`, *optional*):
264
+ Deprecated and unused.
265
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
266
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
267
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
268
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
269
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
270
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
271
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
272
+ Returns:
273
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
274
+ """
275
+ cos = cos.unsqueeze(unsqueeze_dim)
276
+ sin = sin.unsqueeze(unsqueeze_dim)
277
+ q_embed = (q * cos) + (rotate_half(q) * sin)
278
+ k_embed = (k * cos) + (rotate_half(k) * sin)
279
+ return q_embed, k_embed
280
+
281
+
282
+ class LlamaMLP(nn.Module):
283
+ def __init__(self, config):
284
+ super().__init__()
285
+ self.config = config
286
+ self.hidden_size = config.hidden_size
287
+ self.intermediate_size = config.intermediate_size
288
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
289
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
290
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
291
+ self.act_fn = ACT2FN[config.hidden_act]
292
+
293
+ def forward(self, x):
294
+ if self.config.pretraining_tp > 1:
295
+ slice = self.intermediate_size // self.config.pretraining_tp
296
+ gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
297
+ up_proj_slices = self.up_proj.weight.split(slice, dim=0)
298
+ down_proj_slices = self.down_proj.weight.split(slice, dim=1)
299
+
300
+ gate_proj = torch.cat(
301
+ [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
302
+ )
303
+ up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
304
+
305
+ intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
306
+ down_proj = [
307
+ F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
308
+ ]
309
+ down_proj = sum(down_proj)
310
+ else:
311
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
312
+
313
+ return down_proj
314
+
315
+
316
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
317
+ """
318
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
319
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
320
+ """
321
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
322
+ if n_rep == 1:
323
+ return hidden_states
324
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
325
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
326
+
327
+
328
+ class LlamaAttention(nn.Module):
329
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
330
+
331
+ def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):
332
+ super().__init__()
333
+ self.config = config
334
+ self.layer_idx = layer_idx
335
+ if layer_idx is None:
336
+ logger.warning_once(
337
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
338
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
339
+ "when creating this class."
340
+ )
341
+
342
+ self.attention_dropout = config.attention_dropout
343
+ self.hidden_size = config.hidden_size
344
+ self.num_heads = config.num_attention_heads
345
+ self.head_dim = self.hidden_size // self.num_heads
346
+ self.num_key_value_heads = config.num_key_value_heads
347
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
348
+ self.max_position_embeddings = config.max_position_embeddings
349
+ self.rope_theta = config.rope_theta
350
+ self.is_causal = True
351
+
352
+ if (self.head_dim * self.num_heads) != self.hidden_size:
353
+ raise ValueError(
354
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
355
+ f" and `num_heads`: {self.num_heads})."
356
+ )
357
+
358
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
359
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
360
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
361
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
362
+
363
+ # TODO (joao): remove in v4.45 (RoPE is computed in the model, not in the decoder layers)
364
+ self.rotary_emb = LlamaRotaryEmbedding(config=self.config)
365
+
366
+ def forward(
367
+ self,
368
+ hidden_states: torch.Tensor,
369
+ attention_mask: Optional[torch.Tensor] = None,
370
+ position_ids: Optional[torch.LongTensor] = None,
371
+ past_key_value: Optional[Cache] = None,
372
+ output_attentions: bool = False,
373
+ use_cache: bool = False,
374
+ cache_position: Optional[torch.LongTensor] = None,
375
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45
376
+ **kwargs,
377
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
378
+ bsz, q_len, _ = hidden_states.size()
379
+
380
+ if self.config.pretraining_tp > 1:
381
+ key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
382
+ query_slices = self.q_proj.weight.split(
383
+ (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
384
+ )
385
+ key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
386
+ value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
387
+
388
+ query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
389
+ query_states = torch.cat(query_states, dim=-1)
390
+
391
+ key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
392
+ key_states = torch.cat(key_states, dim=-1)
393
+
394
+ value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
395
+ value_states = torch.cat(value_states, dim=-1)
396
+
397
+ else:
398
+ query_states = self.q_proj(hidden_states)
399
+ key_states = self.k_proj(hidden_states)
400
+ value_states = self.v_proj(hidden_states)
401
+
402
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
403
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
404
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
405
+
406
+ if position_embeddings is None:
407
+ logger.warning_once(
408
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
409
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
410
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
411
+ "removed and `position_embeddings` will be mandatory."
412
+ )
413
+ cos, sin = self.rotary_emb(value_states, position_ids)
414
+ else:
415
+ cos, sin = position_embeddings
416
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
417
+
418
+ if past_key_value is not None:
419
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
420
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
421
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
422
+
423
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
424
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
425
+
426
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
427
+
428
+ if attention_mask is not None: # no matter the length, we just slice it
429
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
430
+ attn_weights = attn_weights + causal_mask
431
+
432
+ # upcast attention to fp32
433
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
434
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
435
+ attn_output = torch.matmul(attn_weights, value_states)
436
+
437
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
438
+ raise ValueError(
439
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
440
+ f" {attn_output.size()}"
441
+ )
442
+
443
+ attn_output = attn_output.transpose(1, 2).contiguous()
444
+
445
+ attn_output = attn_output.reshape(bsz, q_len, -1)
446
+
447
+ if self.config.pretraining_tp > 1:
448
+ attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
449
+ o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
450
+ attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
451
+ else:
452
+ attn_output = self.o_proj(attn_output)
453
+
454
+ if not output_attentions:
455
+ attn_weights = None
456
+
457
+ return attn_output, attn_weights, past_key_value
458
+
459
+
460
+ class LlamaFlashAttention2(LlamaAttention):
461
+ """
462
+    Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stay
463
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
464
+ flash attention and deal with padding tokens in case the input contains any of them.
465
+ """
466
+
467
+ def __init__(self, *args, **kwargs):
468
+ super().__init__(*args, **kwargs)
469
+
470
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
471
+        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
472
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
473
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
474
+
475
+ def forward(
476
+ self,
477
+ hidden_states: torch.Tensor,
478
+ attention_mask: Optional[torch.LongTensor] = None,
479
+ position_ids: Optional[torch.LongTensor] = None,
480
+ past_key_value: Optional[Cache] = None,
481
+ output_attentions: bool = False,
482
+ use_cache: bool = False,
483
+ cache_position: Optional[torch.LongTensor] = None,
484
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45
485
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
486
+ if isinstance(past_key_value, StaticCache):
487
+ raise ValueError(
488
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
489
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
490
+ )
491
+
492
+ output_attentions = False
493
+
494
+ bsz, q_len, _ = hidden_states.size()
495
+
496
+ query_states = self.q_proj(hidden_states)
497
+ key_states = self.k_proj(hidden_states)
498
+ value_states = self.v_proj(hidden_states)
499
+
500
+ # Flash attention requires the input to have the shape
501
+        # batch_size x seq_length x num_heads x head_dim
502
+ # therefore we just need to keep the original shape
503
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
504
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
505
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
506
+
507
+ if position_embeddings is None:
508
+ logger.warning_once(
509
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
510
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
511
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
512
+ "removed and `position_embeddings` will be mandatory."
513
+ )
514
+ cos, sin = self.rotary_emb(value_states, position_ids)
515
+ else:
516
+ cos, sin = position_embeddings
517
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
518
+
519
+ if past_key_value is not None:
520
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
521
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
522
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
523
+
524
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
525
+ # to be able to avoid many of these transpose/reshape/view.
526
+ query_states = query_states.transpose(1, 2)
527
+ key_states = key_states.transpose(1, 2)
528
+ value_states = value_states.transpose(1, 2)
529
+
530
+ dropout_rate = self.attention_dropout if self.training else 0.0
531
+
532
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
533
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
534
+ # cast them back in the correct dtype just to be sure everything works as expected.
535
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
536
+ # in fp32. (LlamaRMSNorm handles it correctly)
537
+
538
+ input_dtype = query_states.dtype
539
+ if input_dtype == torch.float32:
540
+ if torch.is_autocast_enabled():
541
+ target_dtype = torch.get_autocast_gpu_dtype()
542
+ # Handle the case where the model is quantized
543
+ elif hasattr(self.config, "_pre_quantization_dtype"):
544
+ target_dtype = self.config._pre_quantization_dtype
545
+ else:
546
+ target_dtype = self.q_proj.weight.dtype
547
+
548
+ logger.warning_once(
549
+                f"The input hidden states seem to be silently cast in float32; this might be related to"
550
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
551
+ f" {target_dtype}."
552
+ )
553
+
554
+ query_states = query_states.to(target_dtype)
555
+ key_states = key_states.to(target_dtype)
556
+ value_states = value_states.to(target_dtype)
557
+
558
+ attn_output = _flash_attention_forward(
559
+ query_states,
560
+ key_states,
561
+ value_states,
562
+ attention_mask,
563
+ q_len,
564
+ position_ids=position_ids,
565
+ dropout=dropout_rate,
566
+ sliding_window=getattr(self, "sliding_window", None),
567
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
568
+ is_causal=self.is_causal,
569
+ )
570
+
571
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
572
+ attn_output = self.o_proj(attn_output)
573
+
574
+ if not output_attentions:
575
+ attn_weights = None
576
+
577
+ return attn_output, attn_weights, past_key_value
578
+
579
+
580
+ class LlamaSdpaAttention(LlamaAttention):
581
+ """
582
+ Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
583
+    `LlamaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
584
+ SDPA API.
585
+ """
586
+
587
+ # Adapted from LlamaAttention.forward
588
+ def forward(
589
+ self,
590
+ hidden_states: torch.Tensor,
591
+ attention_mask: Optional[torch.Tensor] = None,
592
+ position_ids: Optional[torch.LongTensor] = None,
593
+ past_key_value: Optional[Cache] = None,
594
+ output_attentions: bool = False,
595
+ use_cache: bool = False,
596
+ cache_position: Optional[torch.LongTensor] = None,
597
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45
598
+ **kwargs,
599
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
600
+ if output_attentions:
601
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
602
+ logger.warning_once(
603
+ "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
604
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
605
+ )
606
+ return super().forward(
607
+ hidden_states=hidden_states,
608
+ attention_mask=attention_mask,
609
+ position_ids=position_ids,
610
+ past_key_value=past_key_value,
611
+ output_attentions=output_attentions,
612
+ use_cache=use_cache,
613
+ cache_position=cache_position,
614
+ position_embeddings=position_embeddings,
615
+ )
616
+
617
+ bsz, q_len, _ = hidden_states.size()
618
+
619
+ query_states = self.q_proj(hidden_states)
620
+ key_states = self.k_proj(hidden_states)
621
+ value_states = self.v_proj(hidden_states)
622
+
623
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
624
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
625
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
626
+
627
+ if position_embeddings is None:
628
+ logger.warning_once(
629
+ "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
630
+ "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
631
+ "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
632
+ "removed and `position_embeddings` will be mandatory."
633
+ )
634
+ cos, sin = self.rotary_emb(value_states, position_ids)
635
+ else:
636
+ cos, sin = position_embeddings
637
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
638
+
639
+ if past_key_value is not None:
640
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
641
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
642
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
643
+
644
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
645
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
646
+
647
+ causal_mask = attention_mask
648
+ if attention_mask is not None:
649
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
650
+
651
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
652
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
653
+ if query_states.device.type == "cuda" and causal_mask is not None:
654
+ query_states = query_states.contiguous()
655
+ key_states = key_states.contiguous()
656
+ value_states = value_states.contiguous()
657
+
658
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
659
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
660
+ is_causal = True if causal_mask is None and q_len > 1 else False
661
+
662
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
663
+ query_states,
664
+ key_states,
665
+ value_states,
666
+ attn_mask=causal_mask,
667
+ dropout_p=self.attention_dropout if self.training else 0.0,
668
+ is_causal=is_causal,
669
+ )
670
+
671
+ attn_output = attn_output.transpose(1, 2).contiguous()
672
+ attn_output = attn_output.view(bsz, q_len, -1)
673
+
674
+ attn_output = self.o_proj(attn_output)
675
+
676
+ return attn_output, None, past_key_value
677
+
678
+
679
+ LLAMA_ATTENTION_CLASSES = {
680
+ "eager": LlamaAttention,
681
+ "flash_attention_2": LlamaFlashAttention2,
682
+ "sdpa": LlamaSdpaAttention,
683
+ }
684
+
685
+
686
+ class LlamaDecoderLayer(nn.Module):
687
+ def __init__(self, config: LlamaConfig, layer_idx: int):
688
+ super().__init__()
689
+ self.hidden_size = config.hidden_size
690
+
691
+ self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
692
+
693
+ self.mlp = LlamaMLP(config)
694
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
695
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
696
+
697
+ def forward(
698
+ self,
699
+ hidden_states: torch.Tensor,
700
+ attention_mask: Optional[torch.Tensor] = None,
701
+ position_ids: Optional[torch.LongTensor] = None,
702
+ past_key_value: Optional[Cache] = None,
703
+ output_attentions: Optional[bool] = False,
704
+ use_cache: Optional[bool] = False,
705
+ cache_position: Optional[torch.LongTensor] = None,
706
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45
707
+ **kwargs,
708
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
709
+ """
710
+ Args:
711
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
712
+ attention_mask (`torch.FloatTensor`, *optional*):
713
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
714
+ query_sequence_length, key_sequence_length)` if default attention is used.
715
+ output_attentions (`bool`, *optional*):
716
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
717
+ returned tensors for more detail.
718
+ use_cache (`bool`, *optional*):
719
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
720
+ (see `past_key_values`).
721
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
722
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
723
+ Indices depicting the position of the input sequence tokens in the sequence
724
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
725
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
726
+ with `head_dim` being the embedding dimension of each attention head.
727
+ kwargs (`dict`, *optional*):
728
+                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
729
+ into the model
730
+ """
731
+ residual = hidden_states
732
+
733
+ hidden_states = self.input_layernorm(hidden_states)
734
+
735
+ # Self Attention
736
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
737
+ hidden_states=hidden_states,
738
+ attention_mask=attention_mask,
739
+ position_ids=position_ids,
740
+ past_key_value=past_key_value,
741
+ output_attentions=output_attentions,
742
+ use_cache=use_cache,
743
+ cache_position=cache_position,
744
+ position_embeddings=position_embeddings,
745
+ **kwargs,
746
+ )
747
+ hidden_states = residual + hidden_states
748
+
749
+ # Fully Connected
750
+ residual = hidden_states
751
+ hidden_states = self.post_attention_layernorm(hidden_states)
752
+ hidden_states = self.mlp(hidden_states)
753
+ hidden_states = residual + hidden_states
754
+
755
+ outputs = (hidden_states,)
756
+
757
+ if output_attentions:
758
+ outputs += (self_attn_weights,)
759
+
760
+ if use_cache:
761
+ outputs += (present_key_value,)
762
+
763
+ return outputs
764
+
765
+
766
+ LLAMA_START_DOCSTRING = r"""
767
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
768
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
769
+ etc.)
770
+
771
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
772
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
773
+ and behavior.
774
+
775
+ Parameters:
776
+ config ([`LlamaConfig`]):
777
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
778
+ load the weights associated with the model, only the configuration. Check out the
779
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
780
+ """
781
+
782
+
783
+ @add_start_docstrings(
784
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
785
+ LLAMA_START_DOCSTRING,
786
+ )
787
+ class LlamaPreTrainedModel(PreTrainedModel):
788
+ config_class = LlamaConfig
789
+ base_model_prefix = "model"
790
+ supports_gradient_checkpointing = True
791
+ _no_split_modules = ["LlamaDecoderLayer"]
792
+ _skip_keys_device_placement = ["past_key_values"]
793
+ _supports_flash_attn_2 = True
794
+ _supports_sdpa = True
795
+ _supports_cache_class = True
796
+ _supports_quantized_cache = True
797
+ _supports_static_cache = True
798
+
799
+ def _init_weights(self, module):
800
+ std = self.config.initializer_range
801
+ if isinstance(module, nn.Linear):
802
+ module.weight.data.normal_(mean=0.0, std=std)
803
+ if module.bias is not None:
804
+ module.bias.data.zero_()
805
+ elif isinstance(module, nn.Embedding):
806
+ module.weight.data.normal_(mean=0.0, std=std)
807
+ if module.padding_idx is not None:
808
+ module.weight.data[module.padding_idx].zero_()
809
+
810
+
811
+ LLAMA_INPUTS_DOCSTRING = r"""
812
+ Args:
813
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
814
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
815
+ it.
816
+
817
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
818
+ [`PreTrainedTokenizer.__call__`] for details.
819
+
820
+ [What are input IDs?](../glossary#input-ids)
821
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
822
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
823
+
824
+ - 1 for tokens that are **not masked**,
825
+ - 0 for tokens that are **masked**.
826
+
827
+ [What are attention masks?](../glossary#attention-mask)
828
+
829
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
830
+ [`PreTrainedTokenizer.__call__`] for details.
831
+
832
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
833
+ `past_key_values`).
834
+
835
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
836
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
837
+ information on the default strategy.
838
+
839
+ - 1 indicates the head is **not masked**,
840
+ - 0 indicates the head is **masked**.
841
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
842
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
843
+ config.n_positions - 1]`.
844
+
845
+ [What are position IDs?](../glossary#position-ids)
846
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
847
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
848
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
849
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
850
+
851
+ Two formats are allowed:
852
+ - a [`~cache_utils.Cache`] instance;
853
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
854
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
855
+ cache format.
856
+
857
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
858
+ legacy cache format will be returned.
859
+
860
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
861
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
862
+ of shape `(batch_size, sequence_length)`.
863
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
864
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
865
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
866
+ model's internal embedding lookup matrix.
867
+ use_cache (`bool`, *optional*):
868
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
869
+ `past_key_values`).
870
+ output_attentions (`bool`, *optional*):
871
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
872
+ tensors for more detail.
873
+ output_hidden_states (`bool`, *optional*):
874
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
875
+ more detail.
876
+ return_dict (`bool`, *optional*):
877
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
878
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
879
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
880
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
881
+ the complete sequence length.
882
+ """
883
+
884
+
885
+ @add_start_docstrings(
886
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
887
+ LLAMA_START_DOCSTRING,
888
+ )
889
+ class LlamaModel(LlamaPreTrainedModel):
890
+ """
891
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
892
+
893
+ Args:
894
+ config: LlamaConfig
895
+ """
896
+
897
+ def __init__(self, config: LlamaConfig):
898
+ super().__init__(config)
899
+ self.padding_idx = config.pad_token_id
900
+ self.vocab_size = config.vocab_size
901
+
902
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
903
+ self.layers = nn.ModuleList(
904
+ [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
905
+ )
906
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
907
+ self.rotary_emb = LlamaRotaryEmbedding(config=config)
908
+ self.gradient_checkpointing = False
909
+
910
+ # Initialize weights and apply final processing
911
+ self.post_init()
912
+
913
+ def get_input_embeddings(self):
914
+ return self.embed_tokens
915
+
916
+ def set_input_embeddings(self, value):
917
+ self.embed_tokens = value
918
+
919
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
920
+ def forward(
921
+ self,
922
+ input_ids: torch.LongTensor = None,
923
+ attention_mask: Optional[torch.Tensor] = None,
924
+ position_ids: Optional[torch.LongTensor] = None,
925
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
926
+ inputs_embeds: Optional[torch.FloatTensor] = None,
927
+ use_cache: Optional[bool] = None,
928
+ output_attentions: Optional[bool] = None,
929
+ output_hidden_states: Optional[bool] = None,
930
+ return_dict: Optional[bool] = None,
931
+ cache_position: Optional[torch.LongTensor] = None,
932
+ visual_token_index: Optional[torch.Tensor] = None,
933
+ large_model_prune_layer: Optional[float] = None,
934
+ large_model_prune_ratio: Optional[float] = None,
935
+ large_model_prune_selection: Optional[str] = None,
936
+ large_model_similarity_target_coverage: Optional[float] = None,
937
+ large_model_similarity_min_gain: Optional[float] = None,
938
+ large_model_similarity_min_keep: Optional[int] = None,
939
+ large_model_similarity_max_keep_ratio: Optional[float] = None,
940
+ visual_token_importance: Optional[torch.Tensor] = None,
941
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
942
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
943
+ output_hidden_states = (
944
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
945
+ )
946
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
947
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
948
+
949
+ if (input_ids is None) ^ (inputs_embeds is not None):
950
+ raise ValueError(
951
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
952
+ )
953
+
954
+ if self.gradient_checkpointing and self.training and use_cache:
955
+ logger.warning_once(
956
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
957
+ )
958
+ use_cache = False
959
+
960
+ # retrieve input_ids and inputs_embeds
961
+ if input_ids is not None and inputs_embeds is not None:
962
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
963
+ elif input_ids is not None:
964
+ batch_size, seq_length = input_ids.shape[:2]
965
+ elif inputs_embeds is not None:
966
+ batch_size, seq_length = inputs_embeds.shape[:2]
967
+ else:
968
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
969
+
970
+
971
+
972
+ if inputs_embeds is None:
973
+ inputs_embeds = self.embed_tokens(input_ids)
974
+
975
+ return_legacy_cache = False
976
+ if (
977
+ use_cache and not isinstance(past_key_values, Cache) and not self.training
978
+ ): # kept for BC (non `Cache` `past_key_values` inputs)
979
+ return_legacy_cache = True
980
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
981
+ logger.warning_once(
982
+ "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. "
983
+ "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
984
+ )
985
+
986
+ if cache_position is None:
987
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
988
+ cache_position = torch.arange(
989
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
990
+ )
991
+ if position_ids is None:
992
+ position_ids = cache_position.unsqueeze(0)
993
+
994
+ causal_mask = self._update_causal_mask(
995
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
996
+ )
997
+ hidden_states = inputs_embeds
998
+
999
+ # create position embeddings to be shared across the decoder layers
1000
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
1001
+
1002
+ # decoder layers
1003
+ all_hidden_states = () if output_hidden_states else None
1004
+ all_self_attns = () if output_attentions else None
1005
+ next_decoder_cache = None
1006
+
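+ # Running sum of the attention mass that query tokens place on the visual tokens,
+ # accumulated across all decoder layers (only tracked when output_attentions=True).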
1007
+ aggregated_viusal_token_attention = 0 if output_attentions else None
1008
+ for idx, decoder_layer in enumerate(self.layers):
1009
+ if output_hidden_states:
1010
+ all_hidden_states += (hidden_states,)
1011
+
1012
+ if self.gradient_checkpointing and self.training:
1013
+ layer_outputs = self._gradient_checkpointing_func(
1014
+ decoder_layer.__call__,
1015
+ hidden_states,
1016
+ causal_mask,
1017
+ position_ids,
1018
+ past_key_values,
1019
+ output_attentions,
1020
+ use_cache,
1021
+ cache_position,
1022
+ position_embeddings,
1023
+ )
1024
+ else:
1025
+
1026
+ ##### Prune visual tokens at a chosen decoder layer #####
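+ # K is the decoder layer at which pruning happens, given as a fraction of the depth
+ # (e.g. with 32 layers, large_model_prune_layer=0.5 prunes at layer 16); keep_ratio is the
+ # fraction of visual tokens to keep, and visual_token_index holds the (start, end) positions
+ # of the visual-token span. This block runs on every forward call, so these arguments are
+ # expected to always be provided for this modified model.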
1027
+ K = int(len(self.layers) * large_model_prune_layer)
1028
+ keep_ratio = large_model_prune_ratio
1029
+ visual_token_length = int(visual_token_index[1] - visual_token_index[0] + 1)
1030
+
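+ # Prefill pass (more than one token in this step): once layer K is reached, physically drop
+ # the pruned visual tokens from the hidden states, rotary embeddings, mask and position ids.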
1031
+ if hidden_states.shape[1] != 1:
1032
+ if idx == K:
1033
+ device = hidden_states.device
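+ # select_visual_token_indices (defined outside this hunk) picks which visual tokens to keep,
+ # using top-k importance by default or a similarity/coverage-based criterion; the result is
+ # shifted by the span start so the indices address the full sequence.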
1034
+ selected_visual_index = select_visual_token_indices(
1035
+ hidden_states,
1036
+ visual_token_importance,
1037
+ visual_token_index,
1038
+ keep_ratio,
1039
+ large_model_prune_selection or "topk",
1040
+ similarity_target_coverage=large_model_similarity_target_coverage or 0.9,
1041
+ similarity_min_gain=large_model_similarity_min_gain or 0.0,
1042
+ similarity_min_keep=large_model_similarity_min_keep or 1,
1043
+ similarity_max_keep_ratio=large_model_similarity_max_keep_ratio or 1.0,
1044
+ ) + int(visual_token_index[0])
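+ # Keep every token before the visual span, the selected visual tokens, and every token after
+ # the span, preserving the original ordering.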
1045
+ keep_indexs = torch.cat((
1046
+ torch.arange(int(visual_token_index[0]), device=device),
1047
+ selected_visual_index.to(device),
1048
+ torch.arange(int(visual_token_index[1] + 1), seq_length, device=device),
1049
+ ))
1050
+ keep_indexs = keep_indexs.sort().values
1051
+ hidden_states = hidden_states[:, keep_indexs, :]
1052
+ position_embeddings = (position_embeddings[0].to(device)[:, keep_indexs, :], position_embeddings[1].to(device)[:, keep_indexs, :])
1053
+
1054
+ if causal_mask is not None:
1055
+ causal_mask = causal_mask[:, :, :hidden_states.shape[1], :hidden_states.shape[1]]
1056
+ position_ids = keep_indexs.unsqueeze(0)
1057
+ prunded_sequence_length = visual_token_length - selected_visual_index.numel()
1058
+
1059
+
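+ # Decode pass (a single new token): the KV cache for layers >= K was built from the pruned
+ # sequence, so only the attention-mask width needs to be trimmed to match.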
1060
+ else:
1061
+ if idx == K:
1062
+ visual_token_length = visual_token_index[1] - visual_token_index[0] + 1
1063
+ prunded_sequence_length = visual_token_length - int(visual_token_length * keep_ratio)
1064
+ if causal_mask is not None:
1065
+ causal_mask = causal_mask[:, :, :, prunded_sequence_length:]
1066
+
1067
+
1068
+
1069
+ layer_outputs = decoder_layer(
1070
+ hidden_states,
1071
+ attention_mask=causal_mask,
1072
+ position_ids=position_ids,
1073
+ past_key_value=past_key_values,
1074
+ output_attentions=output_attentions,
1075
+ use_cache=use_cache,
1076
+ cache_position=cache_position,
1077
+ position_embeddings=position_embeddings
1078
+ )
1079
+
1080
+ hidden_states = layer_outputs[0]
1081
+
1082
+ if use_cache:
1083
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1084
+
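+ # Rather than storing full per-layer attention maps, only accumulate the attention that the
+ # query tokens pay to the visual-token columns; this aggregate is exposed on the output and
+ # can serve as a visual-token importance signal.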
1085
+ if output_attentions:
1086
+ # all_self_attns += (layer_outputs[1],)
1087
+ if layer_outputs[1].shape[2] != 1:
1088
+ aggregated_viusal_token_attention = aggregated_viusal_token_attention + layer_outputs[1][:, :, visual_token_index[1]:, visual_token_index[0]:visual_token_index[1]+1].sum(dim=(0, 1, 2))
1089
+ else:
1090
+ aggregated_viusal_token_attention = aggregated_viusal_token_attention + layer_outputs[1][:, :, :, visual_token_index[0]:visual_token_index[1]+1].sum(dim=(0, 1, 2))
1091
+
1092
+
1093
+ hidden_states = self.norm(hidden_states)
1094
+
1095
+ # add hidden states from the last decoder layer
1096
+ if output_hidden_states:
1097
+ all_hidden_states += (hidden_states,)
1098
+
1099
+ next_cache = next_decoder_cache if use_cache else None
1100
+ if return_legacy_cache:
1101
+ next_cache = next_cache.to_legacy_cache()
1102
+
1103
+ if not return_dict:
1104
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1105
+
1106
+ out_dict = BaseModelOutputWithPast(
1107
+ last_hidden_state=hidden_states,
1108
+ past_key_values=next_cache,
1109
+ hidden_states=all_hidden_states,
1110
+ attentions=all_self_attns,
1111
+ )
1112
+ out_dict.aggregated_viusal_token_attention = aggregated_viusal_token_attention
1113
+ return out_dict
1114
+
1115
+ def _update_causal_mask(
1116
+ self,
1117
+ attention_mask: torch.Tensor,
1118
+ input_tensor: torch.Tensor,
1119
+ cache_position: torch.Tensor,
1120
+ past_key_values: Cache,
1121
+ output_attentions: bool,
1122
+ ):
1123
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
1124
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
1125
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
1126
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
1127
+
1128
+ if self.config._attn_implementation == "flash_attention_2":
1129
+ if attention_mask is not None and 0.0 in attention_mask:
1130
+ return attention_mask
1131
+ return None
1132
+
1133
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1134
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1135
+ # to infer the attention mask.
1136
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1137
+ using_static_cache = isinstance(past_key_values, StaticCache)
1138
+
1139
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1140
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
1141
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1142
+ attention_mask,
1143
+ inputs_embeds=input_tensor,
1144
+ past_key_values_length=past_seen_tokens,
1145
+ is_training=self.training,
1146
+ ):
1147
+ return None
1148
+
1149
+ dtype, device = input_tensor.dtype, input_tensor.device
1150
+ min_dtype = torch.finfo(dtype).min
1151
+ sequence_length = input_tensor.shape[1]
1152
+ if using_static_cache:
1153
+ target_length = past_key_values.get_max_length()
1154
+ else:
1155
+ target_length = (
1156
+ attention_mask.shape[-1]
1157
+ if isinstance(attention_mask, torch.Tensor)
1158
+ else past_seen_tokens + sequence_length + 1
1159
+ )
1160
+
1161
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
1162
+ causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1163
+ attention_mask,
1164
+ sequence_length=sequence_length,
1165
+ target_length=target_length,
1166
+ dtype=dtype,
1167
+ device=device,
1168
+ min_dtype=min_dtype,
1169
+ cache_position=cache_position,
1170
+ batch_size=input_tensor.shape[0],
1171
+ )
1172
+
1173
+ if (
1174
+ self.config._attn_implementation == "sdpa"
1175
+ and attention_mask is not None
1176
+ and attention_mask.device.type == "cuda"
1177
+ and not output_attentions
1178
+ ):
1179
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1180
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1181
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1182
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1183
+
1184
+ return causal_mask
1185
+
1186
+
1187
+ class LlamaForCausalLM(LlamaPreTrainedModel):
1188
+ _tied_weights_keys = ["lm_head.weight"]
1189
+
1190
+ def __init__(self, config):
1191
+ super().__init__(config)
1192
+ self.model = LlamaModel(config)
1193
+ self.vocab_size = config.vocab_size
1194
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1195
+
1196
+ # Initialize weights and apply final processing
1197
+ self.post_init()
1198
+
1199
+ def get_input_embeddings(self):
1200
+ return self.model.embed_tokens
1201
+
1202
+ def set_input_embeddings(self, value):
1203
+ self.model.embed_tokens = value
1204
+
1205
+ def get_output_embeddings(self):
1206
+ return self.lm_head
1207
+
1208
+ def set_output_embeddings(self, new_embeddings):
1209
+ self.lm_head = new_embeddings
1210
+
1211
+ def set_decoder(self, decoder):
1212
+ self.model = decoder
1213
+
1214
+ def get_decoder(self):
1215
+ return self.model
1216
+
1217
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1218
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1219
+ def forward(
1220
+ self,
1221
+ input_ids: torch.LongTensor = None,
1222
+ attention_mask: Optional[torch.Tensor] = None,
1223
+ position_ids: Optional[torch.LongTensor] = None,
1224
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1225
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1226
+ labels: Optional[torch.LongTensor] = None,
1227
+ use_cache: Optional[bool] = None,
1228
+ output_attentions: Optional[bool] = None,
1229
+ output_hidden_states: Optional[bool] = None,
1230
+ return_dict: Optional[bool] = None,
1231
+ cache_position: Optional[torch.LongTensor] = None,
1232
+ visual_token_index: Optional[torch.Tensor] = None,
1233
+ large_model_prune_layer: Optional[float] = None,
1234
+ large_model_prune_ratio: Optional[float] = None,
1235
+ large_model_prune_selection: Optional[str] = None,
1236
+ large_model_similarity_target_coverage: Optional[float] = None,
1237
+ large_model_similarity_min_gain: Optional[float] = None,
1238
+ large_model_similarity_min_keep: Optional[int] = None,
1239
+ large_model_similarity_max_keep_ratio: Optional[float] = None,
1240
+ visual_token_importance: Optional[torch.Tensor] = None,
1241
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1242
+ r"""
1243
+ Args:
1244
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1245
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1246
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1247
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1248
+
1249
+ Returns:
1250
+
1251
+ Example:
1252
+
1253
+ ```python
1254
+ >>> from transformers import AutoTokenizer, LlamaForCausalLM
1255
+
1256
+ >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
1257
+ >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
1258
+
1259
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1260
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1261
+
1262
+ >>> # Generate
1263
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1264
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1265
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1266
+ ```"""
1267
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1268
+ output_hidden_states = (
1269
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1270
+ )
1271
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1272
+
1273
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1274
+ outputs = self.model(
1275
+ input_ids=input_ids,
1276
+ attention_mask=attention_mask,
1277
+ position_ids=position_ids,
1278
+ past_key_values=past_key_values,
1279
+ inputs_embeds=inputs_embeds,
1280
+ use_cache=use_cache,
1281
+ output_attentions=output_attentions,
1282
+ output_hidden_states=output_hidden_states,
1283
+ return_dict=return_dict,
1284
+ cache_position=cache_position,
1285
+ visual_token_index=visual_token_index,
1286
+ large_model_prune_layer=large_model_prune_layer,
1287
+ large_model_prune_ratio=large_model_prune_ratio,
1288
+ large_model_prune_selection=large_model_prune_selection,
1289
+ large_model_similarity_target_coverage=large_model_similarity_target_coverage,
1290
+ large_model_similarity_min_gain=large_model_similarity_min_gain,
1291
+ large_model_similarity_min_keep=large_model_similarity_min_keep,
1292
+ large_model_similarity_max_keep_ratio=large_model_similarity_max_keep_ratio,
1293
+ visual_token_importance=visual_token_importance
1294
+ )
1295
+
1296
+ hidden_states = outputs[0]
1297
+ if self.config.pretraining_tp > 1:
1298
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
1299
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
1300
+ logits = torch.cat(logits, dim=-1)
1301
+ else:
1302
+ logits = self.lm_head(hidden_states)
1303
+ logits = logits.float()
1304
+
1305
+ loss = None
1306
+ if labels is not None:
1307
+ # Shift so that tokens < n predict n
1308
+ shift_logits = logits[..., :-1, :].contiguous()
1309
+ shift_labels = labels[..., 1:].contiguous()
1310
+ # Flatten the tokens
1311
+ loss_fct = CrossEntropyLoss()
1312
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1313
+ shift_labels = shift_labels.view(-1)
1314
+ # Enable model parallelism
1315
+ shift_labels = shift_labels.to(shift_logits.device)
1316
+ loss = loss_fct(shift_logits, shift_labels)
1317
+
1318
+ if not return_dict:
1319
+ output = (logits,) + outputs[1:]
1320
+ return (loss,) + output if loss is not None else output
1321
+
1322
+
1323
+ output = CausalLMOutputWithPast(
1324
+ loss=loss,
1325
+ logits=logits,
1326
+ past_key_values=outputs.past_key_values,
1327
+ hidden_states=outputs.hidden_states,
1328
+ attentions=outputs.attentions,
1329
+ )
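+ # Attach the accumulated visual-token attention as an extra field so callers can read it
+ # alongside the standard CausalLMOutputWithPast fields.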
1330
+ output['aggregated_viusal_token_attention'] = outputs.aggregated_viusal_token_attention
1331
+ return output
1332
+
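+ # Largely the stock sampling loop, extended so that the visual-token attention returned by
+ # each forward pass is accumulated and attached to the generate() output.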
1333
+ def _sample(
1334
+ self,
1335
+ input_ids: torch.LongTensor,
1336
+ logits_processor: LogitsProcessorList,
1337
+ stopping_criteria: StoppingCriteriaList,
1338
+ generation_config: GenerationConfig,
1339
+ synced_gpus: bool,
1340
+ streamer: Optional["BaseStreamer"],
1341
+ logits_warper: Optional[LogitsProcessorList],
1342
+ **model_kwargs,
1343
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
1344
+ # init values
1345
+ pad_token_id = generation_config._pad_token_tensor
1346
+ output_attentions = generation_config.output_attentions
1347
+ output_hidden_states = generation_config.output_hidden_states
1348
+ output_scores = generation_config.output_scores
1349
+ output_logits = generation_config.output_logits
1350
+ return_dict_in_generate = generation_config.return_dict_in_generate
1351
+ max_length = generation_config.max_length
1352
+ has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria)
1353
+ do_sample = generation_config.do_sample
1354
+ if do_sample is True and not isinstance(logits_warper, LogitsProcessorList):
1355
+ raise ValueError(
1356
+ "`do_sample` is set to `True`, `logits_warper` must be a `LogitsProcessorList` instance (it is "
1357
+ f"{logits_warper})."
1358
+ )
1359
+
1360
+ # init attention / hidden states / scores tuples
1361
+ scores = () if (return_dict_in_generate and output_scores) else None
1362
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
1363
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
1364
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
1365
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
1366
+
1367
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
1368
+ if return_dict_in_generate and self.config.is_encoder_decoder:
1369
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
1370
+ encoder_hidden_states = (
1371
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
1372
+ )
1373
+
1374
+ # keep track of which sequences are already finished
1375
+ batch_size, cur_len = input_ids.shape
1376
+ this_peer_finished = False
1377
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
1378
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
1379
+
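+ # Sum the per-step visual-token attention over the whole generation loop.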
1380
+ aggregated_viusal_token_attention = 0 if output_attentions else None
1381
+ while self._has_unfinished_sequences(
1382
+ this_peer_finished, synced_gpus, device=input_ids.device, cur_len=cur_len, max_length=max_length
1383
+ ):
1384
+ # prepare model inputs
1385
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
1386
+
1387
+ # prepare variable output controls (note: some models won't accept all output controls)
1388
+ model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
1389
+ model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
1390
+
1391
+ # forward pass to get next token
1392
+ outputs = self(**model_inputs, return_dict=True)
1393
+ if output_attentions:
1394
+ aggregated_viusal_token_attention = aggregated_viusal_token_attention + outputs['aggregated_viusal_token_attention']
1395
+
1396
+ if synced_gpus and this_peer_finished:
1397
+ continue # don't waste resources running the code we don't need
1398
+
1399
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
1400
+ # (the clone itself is always small)
1401
+ next_token_logits = outputs.logits[:, -1, :].clone()
1402
+
1403
+ # pre-process distribution
1404
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1405
+ if do_sample:
1406
+ next_token_scores = logits_warper(input_ids, next_token_scores)
1407
+
1408
+ # Store scores, attentions and hidden_states when required
1409
+ if return_dict_in_generate:
1410
+ if output_scores:
1411
+ scores += (next_token_scores,)
1412
+ if output_logits:
1413
+ raw_logits += (next_token_logits,)
1414
+ if output_attentions:
1415
+ decoder_attentions += (
1416
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
1417
+ )
1418
+ if self.config.is_encoder_decoder:
1419
+ cross_attentions += (outputs.cross_attentions,)
1420
+
1421
+ if output_hidden_states:
1422
+ decoder_hidden_states += (
1423
+ (outputs.decoder_hidden_states,)
1424
+ if self.config.is_encoder_decoder
1425
+ else (outputs.hidden_states,)
1426
+ )
1427
+
1428
+ # token selection
1429
+ if do_sample:
1430
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1431
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
1432
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
1433
+ else:
1434
+ next_tokens = torch.argmax(next_token_scores, dim=-1)
1435
+
1436
+ # finished sentences should have their next token be a padding token
1437
+ if has_eos_stopping_criteria:
1438
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
1439
+
1440
+ # update generated ids, model inputs, and length for next step
1441
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1442
+ if streamer is not None:
1443
+ streamer.put(next_tokens.cpu())
1444
+ model_kwargs = self._update_model_kwargs_for_generation(
1445
+ outputs,
1446
+ model_kwargs,
1447
+ is_encoder_decoder=self.config.is_encoder_decoder,
1448
+ )
1449
+
1450
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
1451
+ this_peer_finished = unfinished_sequences.max() == 0
1452
+ cur_len += 1
1453
+
1454
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
1455
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
1456
+ del outputs
1457
+
1458
+ if streamer is not None:
1459
+ streamer.end()
1460
+
1461
+ if return_dict_in_generate:
1462
+ if self.config.is_encoder_decoder:
1463
+ return GenerateEncoderDecoderOutput(
1464
+ sequences=input_ids,
1465
+ scores=scores,
1466
+ logits=raw_logits,
1467
+ encoder_attentions=encoder_attentions,
1468
+ encoder_hidden_states=encoder_hidden_states,
1469
+ decoder_attentions=decoder_attentions,
1470
+ cross_attentions=cross_attentions,
1471
+ decoder_hidden_states=decoder_hidden_states,
1472
+ past_key_values=model_kwargs.get("past_key_values"),
1473
+ )
1474
+ else:
1475
+ out_dict = GenerateDecoderOnlyOutput(
1476
+ sequences=input_ids,
1477
+ scores=scores,
1478
+ logits=raw_logits,
1479
+ attentions=decoder_attentions,
1480
+ hidden_states=decoder_hidden_states,
1481
+ past_key_values=model_kwargs.get("past_key_values"),
1482
+ )
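+ # Also expose the accumulated visual-token attention on the decoder-only generate output.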
1483
+ out_dict["aggregated_viusal_token_attention"] = aggregated_viusal_token_attention
1484
+ return out_dict
1485
+ else:
1486
+ return input_ids
1487
+
1488
+
1489
+ def prepare_inputs_for_generation(
1490
+ self,
1491
+ input_ids,
1492
+ past_key_values=None,
1493
+ attention_mask=None,
1494
+ inputs_embeds=None,
1495
+ cache_position=None,
1496
+ position_ids=None,
1497
+ use_cache=True,
1498
+ **kwargs,
1499
+ ):
1500
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1501
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1502
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1503
+ if past_key_values is not None:
1504
+ if inputs_embeds is not None: # Exception 1
1505
+ input_ids = input_ids[:, -cache_position.shape[0] :]
1506
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1507
+ input_ids = input_ids[:, cache_position]
1508
+
1509
+ if attention_mask is not None and position_ids is None:
1510
+ # create position_ids on the fly for batch generation
1511
+ position_ids = attention_mask.long().cumsum(-1) - 1
1512
+ position_ids.masked_fill_(attention_mask == 0, 1)
1513
+ if past_key_values:
1514
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1515
+
1516
+ # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s `mode="reduce-overhead`, as otherwise the input `position_ids` would have various stride during the decoding. Here, simply using `.contiguous()` is not sufficient as in the batch size = 1 case, `position_ids` is already contiguous but with varying stride which retriggers a capture.
1517
+ position_ids = position_ids.clone(memory_format=torch.contiguous_format)
1518
+
1519
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1520
+ if inputs_embeds is not None and cache_position[0] == 0:
1521
+ model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
1522
+ else:
1523
+ # The clone here is for the same reason as for `position_ids`.
1524
+ model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}
1525
+
1526
+ if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
1527
+ if model_inputs["inputs_embeds"] is not None:
1528
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
1529
+ device = model_inputs["inputs_embeds"].device
1530
+ else:
1531
+ batch_size, sequence_length = model_inputs["input_ids"].shape
1532
+ device = model_inputs["input_ids"].device
1533
+
1534
+ dtype = self.lm_head.weight.dtype
1535
+ min_dtype = torch.finfo(dtype).min
1536
+
1537
+ attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1538
+ attention_mask,
1539
+ sequence_length=sequence_length,
1540
+ target_length=past_key_values.get_max_length(),
1541
+ dtype=dtype,
1542
+ device=device,
1543
+ min_dtype=min_dtype,
1544
+ cache_position=cache_position,
1545
+ batch_size=batch_size,
1546
+ )
1547
+
1548
+ model_inputs.update(
1549
+ {
1550
+ "position_ids": position_ids,
1551
+ "cache_position": cache_position,
1552
+ "past_key_values": past_key_values,
1553
+ "use_cache": use_cache,
1554
+ "attention_mask": attention_mask,
1555
+ 'visual_token_index': kwargs.get('visual_token_index'),
1556
+ 'large_model_prune_layer': kwargs.get('large_model_prune_layer'),
1557
+ 'large_model_prune_ratio': kwargs.get('large_model_prune_ratio'),
1558
+ 'large_model_prune_selection': kwargs.get('large_model_prune_selection'),
1559
+ 'large_model_similarity_target_coverage': kwargs.get('large_model_similarity_target_coverage'),
1560
+ 'large_model_similarity_min_gain': kwargs.get('large_model_similarity_min_gain'),
1561
+ 'large_model_similarity_min_keep': kwargs.get('large_model_similarity_min_keep'),
1562
+ 'large_model_similarity_max_keep_ratio': kwargs.get('large_model_similarity_max_keep_ratio'),
1563
+ 'visual_token_importance': kwargs.get('visual_token_importance')
1564
+ }
1565
+ )
1566
+ return model_inputs
1567
+
1568
+
1569
+ @add_start_docstrings(
1570
+ """
1571
+ The LLaMa Model transformer with a sequence classification head on top (linear layer).
1572
+
1573
+ [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1574
+ (e.g. GPT-2) do.
1575
+
1576
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1577
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1578
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1579
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1580
+ each row of the batch).
1581
+ """,
1582
+ LLAMA_START_DOCSTRING,
1583
+ )
1584
+ class LlamaForSequenceClassification(LlamaPreTrainedModel):
1585
+ def __init__(self, config):
1586
+ super().__init__(config)
1587
+ self.num_labels = config.num_labels
1588
+ self.model = LlamaModel(config)
1589
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1590
+
1591
+ # Initialize weights and apply final processing
1592
+ self.post_init()
1593
+
1594
+ def get_input_embeddings(self):
1595
+ return self.model.embed_tokens
1596
+
1597
+ def set_input_embeddings(self, value):
1598
+ self.model.embed_tokens = value
1599
+
1600
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1601
+ def forward(
1602
+ self,
1603
+ input_ids: Optional[torch.LongTensor] = None,
1604
+ attention_mask: Optional[torch.Tensor] = None,
1605
+ position_ids: Optional[torch.LongTensor] = None,
1606
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1607
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1608
+ labels: Optional[torch.LongTensor] = None,
1609
+ use_cache: Optional[bool] = None,
1610
+ output_attentions: Optional[bool] = None,
1611
+ output_hidden_states: Optional[bool] = None,
1612
+ return_dict: Optional[bool] = None,
1613
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1614
+ r"""
1615
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1616
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1617
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1618
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1619
+ """
1620
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1621
+
1622
+ transformer_outputs = self.model(
1623
+ input_ids,
1624
+ attention_mask=attention_mask,
1625
+ position_ids=position_ids,
1626
+ past_key_values=past_key_values,
1627
+ inputs_embeds=inputs_embeds,
1628
+ use_cache=use_cache,
1629
+ output_attentions=output_attentions,
1630
+ output_hidden_states=output_hidden_states,
1631
+ return_dict=return_dict,
1632
+ )
1633
+ hidden_states = transformer_outputs[0]
1634
+ logits = self.score(hidden_states)
1635
+
1636
+ if input_ids is not None:
1637
+ batch_size = input_ids.shape[0]
1638
+ else:
1639
+ batch_size = inputs_embeds.shape[0]
1640
+
1641
+ if self.config.pad_token_id is None and batch_size != 1:
1642
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1643
+ if self.config.pad_token_id is None:
1644
+ sequence_lengths = -1
1645
+ else:
1646
+ if input_ids is not None:
1647
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1648
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1649
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1650
+ sequence_lengths = sequence_lengths.to(logits.device)
1651
+ else:
1652
+ sequence_lengths = -1
1653
+
1654
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1655
+
1656
+ loss = None
1657
+ if labels is not None:
1658
+ labels = labels.to(logits.device)
1659
+ if self.config.problem_type is None:
1660
+ if self.num_labels == 1:
1661
+ self.config.problem_type = "regression"
1662
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1663
+ self.config.problem_type = "single_label_classification"
1664
+ else:
1665
+ self.config.problem_type = "multi_label_classification"
1666
+
1667
+ if self.config.problem_type == "regression":
1668
+ loss_fct = MSELoss()
1669
+ if self.num_labels == 1:
1670
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1671
+ else:
1672
+ loss = loss_fct(pooled_logits, labels)
1673
+ elif self.config.problem_type == "single_label_classification":
1674
+ loss_fct = CrossEntropyLoss()
1675
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1676
+ elif self.config.problem_type == "multi_label_classification":
1677
+ loss_fct = BCEWithLogitsLoss()
1678
+ loss = loss_fct(pooled_logits, labels)
1679
+ if not return_dict:
1680
+ output = (pooled_logits,) + transformer_outputs[1:]
1681
+ return ((loss,) + output) if loss is not None else output
1682
+
1683
+ return SequenceClassifierOutputWithPast(
1684
+ loss=loss,
1685
+ logits=pooled_logits,
1686
+ past_key_values=transformer_outputs.past_key_values,
1687
+ hidden_states=transformer_outputs.hidden_states,
1688
+ attentions=transformer_outputs.attentions,
1689
+ )
1690
+
1691
+
1692
+ @add_start_docstrings(
1693
+ """
1694
+ The Llama Model transformer with a span classification head on top for extractive question-answering tasks like
1695
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1696
+ """,
1697
+ LLAMA_START_DOCSTRING,
1698
+ )
1699
+ class LlamaForQuestionAnswering(LlamaPreTrainedModel):
1700
+ base_model_prefix = "transformer"
1701
+
1702
+ # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama
1703
+ def __init__(self, config):
1704
+ super().__init__(config)
1705
+ self.transformer = LlamaModel(config)
1706
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1707
+
1708
+ # Initialize weights and apply final processing
1709
+ self.post_init()
1710
+
1711
+ def get_input_embeddings(self):
1712
+ return self.transformer.embed_tokens
1713
+
1714
+ def set_input_embeddings(self, value):
1715
+ self.transformer.embed_tokens = value
1716
+
1717
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1718
+ def forward(
1719
+ self,
1720
+ input_ids: Optional[torch.LongTensor] = None,
1721
+ attention_mask: Optional[torch.FloatTensor] = None,
1722
+ position_ids: Optional[torch.LongTensor] = None,
1723
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1724
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1725
+ start_positions: Optional[torch.LongTensor] = None,
1726
+ end_positions: Optional[torch.LongTensor] = None,
1727
+ output_attentions: Optional[bool] = None,
1728
+ output_hidden_states: Optional[bool] = None,
1729
+ return_dict: Optional[bool] = None,
1730
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1731
+ r"""
1732
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1733
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1734
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1735
+ are not taken into account for computing the loss.
1736
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1737
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1738
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1739
+ are not taken into account for computing the loss.
1740
+ """
1741
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1742
+
1743
+ outputs = self.transformer(
1744
+ input_ids,
1745
+ attention_mask=attention_mask,
1746
+ position_ids=position_ids,
1747
+ past_key_values=past_key_values,
1748
+ inputs_embeds=inputs_embeds,
1749
+ output_attentions=output_attentions,
1750
+ output_hidden_states=output_hidden_states,
1751
+ return_dict=return_dict,
1752
+ )
1753
+
1754
+ sequence_output = outputs[0]
1755
+
1756
+ logits = self.qa_outputs(sequence_output)
1757
+ start_logits, end_logits = logits.split(1, dim=-1)
1758
+ start_logits = start_logits.squeeze(-1).contiguous()
1759
+ end_logits = end_logits.squeeze(-1).contiguous()
1760
+
1761
+ total_loss = None
1762
+ if start_positions is not None and end_positions is not None:
1763
+ # If we are on multi-GPU, split add a dimension
1764
+ if len(start_positions.size()) > 1:
1765
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1766
+ if len(end_positions.size()) > 1:
1767
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1768
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1769
+ ignored_index = start_logits.size(1)
1770
+ start_positions = start_positions.clamp(0, ignored_index)
1771
+ end_positions = end_positions.clamp(0, ignored_index)
1772
+
1773
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1774
+ start_loss = loss_fct(start_logits, start_positions)
1775
+ end_loss = loss_fct(end_logits, end_positions)
1776
+ total_loss = (start_loss + end_loss) / 2
1777
+
1778
+ if not return_dict:
1779
+ output = (start_logits, end_logits) + outputs[2:]
1780
+ return ((total_loss,) + output) if total_loss is not None else output
1781
+
1782
+ return QuestionAnsweringModelOutput(
1783
+ loss=total_loss,
1784
+ start_logits=start_logits,
1785
+ end_logits=end_logits,
1786
+ hidden_states=outputs.hidden_states,
1787
+ attentions=outputs.attentions,
1788
+ )
1789
+
1790
+
1791
+ @add_start_docstrings(
1792
+ """
1793
+ The Llama Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1794
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1795
+ """,
1796
+ LLAMA_START_DOCSTRING,
1797
+ )
1798
+ class LlamaForTokenClassification(LlamaPreTrainedModel):
1799
+ def __init__(self, config):
1800
+ super().__init__(config)
1801
+ self.num_labels = config.num_labels
1802
+ self.model = LlamaModel(config)
1803
+ if getattr(config, "classifier_dropout", None) is not None:
1804
+ classifier_dropout = config.classifier_dropout
1805
+ elif getattr(config, "hidden_dropout", None) is not None:
1806
+ classifier_dropout = config.hidden_dropout
1807
+ else:
1808
+ classifier_dropout = 0.1
1809
+ self.dropout = nn.Dropout(classifier_dropout)
1810
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1811
+
1812
+ # Initialize weights and apply final processing
1813
+ self.post_init()
1814
+
1815
+ def get_input_embeddings(self):
1816
+ return self.model.embed_tokens
1817
+
1818
+ def set_input_embeddings(self, value):
1819
+ self.model.embed_tokens = value
1820
+
1821
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1822
+ def forward(
1823
+ self,
1824
+ input_ids: Optional[torch.LongTensor] = None,
1825
+ attention_mask: Optional[torch.Tensor] = None,
1826
+ position_ids: Optional[torch.LongTensor] = None,
1827
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1828
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1829
+ labels: Optional[torch.LongTensor] = None,
1830
+ use_cache: Optional[bool] = None,
1831
+ output_attentions: Optional[bool] = None,
1832
+ output_hidden_states: Optional[bool] = None,
1833
+ return_dict: Optional[bool] = None,
1834
+ ) -> Union[Tuple, TokenClassifierOutput]:
1835
+ r"""
1836
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1837
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1838
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1839
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1840
+ """
1841
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1842
+
1843
+ outputs = self.model(
1844
+ input_ids,
1845
+ attention_mask=attention_mask,
1846
+ position_ids=position_ids,
1847
+ past_key_values=past_key_values,
1848
+ inputs_embeds=inputs_embeds,
1849
+ use_cache=use_cache,
1850
+ output_attentions=output_attentions,
1851
+ output_hidden_states=output_hidden_states,
1852
+ return_dict=return_dict,
1853
+ )
1854
+ sequence_output = outputs[0]
1855
+ sequence_output = self.dropout(sequence_output)
1856
+ logits = self.score(sequence_output)
1857
+
1858
+ loss = None
1859
+ if labels is not None:
1860
+ loss_fct = CrossEntropyLoss()
1861
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1862
+
1863
+ if not return_dict:
1864
+ output = (logits,) + outputs[2:]
1865
+ return ((loss,) + output) if loss is not None else output
1866
+
1867
+ return TokenClassifierOutput(
1868
+ loss=loss,
1869
+ logits=logits,
1870
+ hidden_states=outputs.hidden_states,
1871
+ attentions=outputs.attentions,
1872
+ )
isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama.py ADDED
@@ -0,0 +1,412 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ """Tokenization classes for LLaMA."""
22
+
23
+ import os
24
+ from shutil import copyfile
25
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
26
+
27
+ import sentencepiece as spm
28
+
29
+ from transformers.convert_slow_tokenizer import import_protobuf
30
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
31
+ from transformers.utils import logging
32
+
33
+
34
+ if TYPE_CHECKING:
35
+ from transformers.tokenization_utils_base import TextInput
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
40
+
41
+ SPIECE_UNDERLINE = "▁"
42
+
43
+ B_INST, E_INST = "[INST]", "[/INST]"
44
+ B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
45
+
46
+ # fmt: off
47
+ DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
48
+ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\
49
+ that your responses are socially unbiased and positive in nature.
50
+
51
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
52
+ correct. If you don't know the answer to a question, please don't share false information."""
53
+ # fmt: on
54
+
55
+
56
+ class LlamaTokenizer(PreTrainedTokenizer):
57
+ """
58
+ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
59
+ no padding token in the original model.
60
+
61
+ Args:
62
+ vocab_file (`str`):
63
+ Path to the vocabulary file.
64
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
65
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
66
+ token instead.
67
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
68
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
69
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
70
+ The end of sequence token.
71
+ pad_token (`str` or `tokenizers.AddedToken`, *optional*):
72
+ A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
73
+ attention mechanisms or loss computation.
74
+ sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):
75
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
76
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
77
+ to set:
78
+
79
+ - `enable_sampling`: Enable subword regularization.
80
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
81
+
82
+ - `nbest_size = {0,1}`: No sampling is performed.
83
+ - `nbest_size > 1`: samples from the nbest_size results.
84
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
85
+ using forward-filtering-and-backward-sampling algorithm.
86
+
87
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
88
+ BPE-dropout.
89
+
90
+ add_bos_token (`bool`, *optional*, defaults to `True`):
91
+ Whether or not to add an `bos_token` at the start of sequences.
92
+ add_eos_token (`bool`, *optional*, defaults to `False`):
93
+ Whether or not to add an `eos_token` at the end of sequences.
94
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
95
+ Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
96
+ extra spaces.
97
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
98
+ Whether or not the default system prompt for Llama should be used.
99
+ spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
100
+ Whether or not to add spaces between special tokens.
101
+ legacy (`bool`, *optional*):
102
+ Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
103
+ and #25224 which includes fixes to properly handle tokens that appear after special tokens.
104
+ Make sure to also set `from_slow` to `True`.
105
+ A simple example:
106
+
107
+ - `legacy=True`:
108
+ ```python
109
+ >>> from transformers import LlamaTokenizerFast
110
+
111
+ >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
112
+ >>> tokenizer.encode("Hello <s>.") # 869 is '▁.'
113
+ [1, 15043, 29871, 1, 869]
114
+ ```
115
+ - `legacy=False`:
116
+ ```python
117
+ >>> from transformers import LlamaTokenizerFast
118
+
119
+ >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
120
+ >>> tokenizer.encode("Hello <s>.") # 29889 is '.'
121
+ [1, 15043, 29871, 1, 29889]
122
+ ```
123
+ Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
124
+ add_prefix_space (`bool`, *optional*, defaults to `True`):
125
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
126
+ other word. Again, this should be set with `from_slow=True` to make sure it's taken into account.
127
+ """
128
+
129
+ vocab_files_names = VOCAB_FILES_NAMES
130
+ model_input_names = ["input_ids", "attention_mask"]
131
+
132
+ def __init__(
133
+ self,
134
+ vocab_file,
135
+ unk_token="<unk>",
136
+ bos_token="<s>",
137
+ eos_token="</s>",
138
+ pad_token=None,
139
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
140
+ add_bos_token=True,
141
+ add_eos_token=False,
142
+ clean_up_tokenization_spaces=False,
143
+ use_default_system_prompt=False,
144
+ spaces_between_special_tokens=False,
145
+ legacy=None,
146
+ add_prefix_space=True,
147
+ **kwargs,
148
+ ):
149
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
150
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
151
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
152
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
153
+ pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
154
+
155
+ if legacy is None:
156
+ logger.warning_once(
157
+ f"You are using the default legacy behaviour of the {self.__class__}. This is"
158
+ " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
159
+ " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
160
+ " means, and thoroughly read the reason why this was added as explained in"
161
+ " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file"
162
+ " you can ignore this message"
163
+ )
164
+ legacy = True
165
+
166
+ self.legacy = legacy
167
+ self.vocab_file = vocab_file
168
+ self.add_bos_token = add_bos_token
169
+ self.add_eos_token = add_eos_token
170
+ self.use_default_system_prompt = use_default_system_prompt
171
+ self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
172
+ self.add_prefix_space = add_prefix_space
173
+
174
+ super().__init__(
175
+ bos_token=bos_token,
176
+ eos_token=eos_token,
177
+ unk_token=unk_token,
178
+ pad_token=pad_token,
179
+ add_bos_token=add_bos_token,
180
+ add_eos_token=add_eos_token,
181
+ sp_model_kwargs=self.sp_model_kwargs,
182
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
183
+ use_default_system_prompt=use_default_system_prompt,
184
+ spaces_between_special_tokens=spaces_between_special_tokens,
185
+ legacy=legacy,
186
+ add_prefix_space=add_prefix_space,
187
+ **kwargs,
188
+ )
189
+
190
+ @property
191
+ def unk_token_length(self):
192
+ return len(self.sp_model.encode(str(self.unk_token)))
193
+
194
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
195
+ def get_spm_processor(self, from_slow=False):
196
+ tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
197
+ if self.legacy or from_slow: # no dependency on protobuf
198
+ tokenizer.Load(self.vocab_file)
199
+ return tokenizer
200
+
201
+ with open(self.vocab_file, "rb") as f:
202
+ sp_model = f.read()
203
+ model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
204
+ model = model_pb2.ModelProto.FromString(sp_model)
205
+ normalizer_spec = model_pb2.NormalizerSpec()
206
+ normalizer_spec.add_dummy_prefix = False
207
+ model.normalizer_spec.MergeFrom(normalizer_spec)
208
+ sp_model = model.SerializeToString()
209
+ tokenizer.LoadFromSerializedProto(sp_model)
210
+ return tokenizer
211
+
212
+ def __getstate__(self):
213
+ state = self.__dict__.copy()
214
+ state["sp_model"] = None
215
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
216
+ return state
217
+
218
+ def __setstate__(self, d):
219
+ self.__dict__ = d
220
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
221
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
222
+
223
+ @property
224
+ def vocab_size(self):
225
+ """Returns vocab size"""
226
+ return self.sp_model.get_piece_size()
227
+
228
+ def get_vocab(self):
229
+ """Returns vocab as a dict"""
230
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
231
+ vocab.update(self.added_tokens_encoder)
232
+ return vocab
233
+
234
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
235
+ def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
236
+ """
237
+ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
238
+ first token is special.
239
+ """
240
+ if self.legacy or len(text) == 0:
241
+ return super().tokenize(text, **kwargs)
242
+
243
+ text = text.replace(SPIECE_UNDERLINE, " ")
244
+ if self.add_prefix_space:
245
+ text = SPIECE_UNDERLINE + text
246
+
247
+ tokens = super().tokenize(text, **kwargs)
248
+
249
+ if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
250
+ tokens = tokens[1:]
251
+ return tokens
252
+
253
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
254
+ def _tokenize(self, text, **kwargs):
255
+ """
256
+ Returns a tokenized string.
257
+
258
+ We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
259
+ SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
260
+ `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
261
+ `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
262
+ `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
263
+ """
264
+ if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
265
+ return self.sp_model.encode(text, out_type=str)
266
+
267
+ # 1. Encode string + prefix ex: "<unk> Hey"
268
+ tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
269
+ # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
270
+ return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
271
+
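A minimal sketch of the `<unk>`-prefix trick described in the docstring above, assuming the stock `transformers.LlamaTokenizer` (which this file mirrors) and a SentencePiece LLaMA checkpoint such as `huggyllama/llama-7b`; the exact pieces printed depend on the vocabulary:

```python
from transformers import LlamaTokenizer

# Assumed checkpoint; any LLaMA SentencePiece checkpoint should behave similarly.
tok = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False)

# Encode "<unk>" + text together, then drop the unk pieces: this sidesteps the
# dummy prefix that sentencepiece would otherwise insert in front of the text.
pieces = tok.sp_model.encode(tok.unk_token + "Hey", out_type=str)
print(pieces[tok.unk_token_length:])  # e.g. ['H', 'e', 'y'] with add_dummy_prefix off
```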
272
+ def _convert_token_to_id(self, token):
273
+ """Converts a token (str) in an id using the vocab."""
274
+ return self.sp_model.piece_to_id(token)
275
+
276
+ def _convert_id_to_token(self, index):
277
+ """Converts an index (integer) in a token (str) using the vocab."""
278
+ token = self.sp_model.IdToPiece(index)
279
+ return token
280
+
281
+ def convert_tokens_to_string(self, tokens):
282
+ """Converts a sequence of tokens (string) in a single string."""
283
+ # since we manually add the prefix space, we have to remove it when decoding
284
+ if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
285
+ tokens[0] = tokens[0][1:]
286
+
287
+ current_sub_tokens = []
288
+ out_string = ""
289
+ prev_is_special = False
290
+ for i, token in enumerate(tokens):
291
+ # make sure that special tokens are not decoded using sentencepiece model
292
+ if token in self.all_special_tokens:
293
+ if not prev_is_special and i != 0 and self.legacy:
294
+ out_string += " "
295
+ out_string += self.sp_model.decode(current_sub_tokens) + token
296
+ prev_is_special = True
297
+ current_sub_tokens = []
298
+ else:
299
+ if prev_is_special and i == 1 and self.add_prefix_space and not token.startswith(SPIECE_UNDERLINE):
300
+ out_string += " "
301
+ current_sub_tokens.append(token)
302
+ prev_is_special = False
303
+ out_string += self.sp_model.decode(current_sub_tokens)
304
+ return out_string
305
+
306
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
307
+ """
308
+ Save the vocabulary and special tokens file to a directory.
309
+
310
+ Args:
311
+ save_directory (`str`):
312
+ The directory in which to save the vocabulary.
313
+
314
+ Returns:
315
+ `Tuple(str)`: Paths to the files saved.
316
+ """
317
+ if not os.path.isdir(save_directory):
318
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
319
+ return
320
+ out_vocab_file = os.path.join(
321
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
322
+ )
323
+
324
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
325
+ copyfile(self.vocab_file, out_vocab_file)
326
+ elif not os.path.isfile(self.vocab_file):
327
+ with open(out_vocab_file, "wb") as fi:
328
+ content_spiece_model = self.sp_model.serialized_model_proto()
329
+ fi.write(content_spiece_model)
330
+
331
+ return (out_vocab_file,)
332
+
333
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
334
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
335
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
336
+
337
+ output = bos_token_id + token_ids_0 + eos_token_id
338
+
339
+ if token_ids_1 is not None:
340
+ output = output + bos_token_id + token_ids_1 + eos_token_id
341
+
342
+ return output
343
+
344
+ def get_special_tokens_mask(
345
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
346
+ ) -> List[int]:
347
+ """
348
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
349
+ special tokens using the tokenizer `prepare_for_model` method.
350
+
351
+ Args:
352
+ token_ids_0 (`List[int]`):
353
+ List of IDs.
354
+ token_ids_1 (`List[int]`, *optional*):
355
+ Optional second list of IDs for sequence pairs.
356
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
357
+ Whether or not the token list is already formatted with special tokens for the model.
358
+
359
+ Returns:
360
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
361
+ """
362
+ if already_has_special_tokens:
363
+ return super().get_special_tokens_mask(
364
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
365
+ )
366
+
367
+ bos_token_id = [1] if self.add_bos_token else []
368
+ eos_token_id = [1] if self.add_eos_token else []
369
+
370
+ if token_ids_1 is None:
371
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
372
+ return (
373
+ bos_token_id
374
+ + ([0] * len(token_ids_0))
375
+ + eos_token_id
376
+ + bos_token_id
377
+ + ([0] * len(token_ids_1))
378
+ + eos_token_id
379
+ )
380
+
381
+ def create_token_type_ids_from_sequences(
382
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
383
+ ) -> List[int]:
384
+ """
385
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
386
+ sequence pair mask has the following format:
387
+
388
+ ```
389
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
390
+ | first sequence | second sequence |
391
+ ```
392
+
393
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
394
+
395
+ Args:
396
+ token_ids_0 (`List[int]`):
397
+ List of ids.
398
+ token_ids_1 (`List[int]`, *optional*):
399
+ Optional second list of IDs for sequence pairs.
400
+
401
+ Returns:
402
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
403
+ """
404
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
405
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
406
+
407
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
408
+
409
+ if token_ids_1 is not None:
410
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
411
+
412
+ return output
isolated/sim_greedy/upstream_sgl/internvl/model/llama/tokenization_llama_fast.py ADDED
@@ -0,0 +1,255 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import os
16
+ from shutil import copyfile
17
+ from typing import Optional, Tuple
18
+
19
+ from tokenizers import processors
20
+
21
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from transformers.utils import is_sentencepiece_available, logging
23
+ from transformers.utils.versions import require_version
24
+
25
+
26
+ require_version("tokenizers>=0.13.3")
27
+
28
+ if is_sentencepiece_available():
29
+ from .tokenization_llama import LlamaTokenizer
30
+ else:
31
+ LlamaTokenizer = None
32
+
33
+ logger = logging.get_logger(__name__)
34
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}
35
+
36
+ B_INST, E_INST = "[INST]", "[/INST]"
37
+ B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
38
+
39
+ # fmt: off
40
+ DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
41
+ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
42
+ that your responses are socially unbiased and positive in nature.
43
+
44
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
45
+ correct. If you don't know the answer to a question, please don't share false information."""
46
+ # fmt: on
47
+
48
+
49
+ class LlamaTokenizerFast(PreTrainedTokenizerFast):
50
+ """
51
+ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.
52
+
53
+ This uses notably ByteFallback and no normalization.
54
+
55
+ ```python
56
+ >>> from transformers import LlamaTokenizerFast
57
+
58
+ >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
59
+ >>> tokenizer.encode("Hello this is a test")
60
+ [1, 15043, 445, 338, 263, 1243]
61
+ ```
62
+
63
+ If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
64
+ call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
65
+ values of the first token and final token of an encoded sequence will not be correct). For more details, check out the
66
+ [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
67
+
68
+
69
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
70
+ refer to this superclass for more information regarding those methods.
71
+
72
+ Args:
73
+ vocab_file (`str`, *optional*):
74
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
75
+ contains the vocabulary necessary to instantiate a tokenizer.
76
+ tokenizer_file (`str`, *optional*):
77
+ [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
78
+ contains everything needed to load the tokenizer.
79
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
80
+ Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
81
+ extra spaces.
82
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
83
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
84
+ token instead.
85
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
86
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
87
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
88
+ The end of sequence token.
89
+ add_bos_token (`bool`, *optional*, defaults to `True`):
90
+ Whether or not to add an `bos_token` at the start of sequences.
91
+ add_eos_token (`bool`, *optional*, defaults to `False`):
92
+ Whether or not to add an `eos_token` at the end of sequences.
93
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
94
+ Whether or not the default system prompt for Llama should be used
95
+ legacy (`bool`, *optional*):
96
+ Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
97
+ and #25224 which includes fixes to properly handle tokens that appear after special tokens.
98
+ Make sure to also set `from_slow` to `True`.
99
+ A simple example:
100
+
101
+ - `legacy=True`:
102
+ ```python
103
+ >>> from transformers import LlamaTokenizerFast
104
+
105
+ >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
106
+ >>> tokenizer.encode("Hello <s>.") # 869 is '▁.'
107
+ [1, 15043, 29871, 1, 869]
108
+ ```
109
+ - `legacy=False`:
110
+ ```python
111
+ >>> from transformers import LlamaTokenizerFast
112
+
113
+ >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
114
+ >>> tokenizer.encode("Hello <s>.") # 29889 is '.'
115
+ [1, 15043, 29871, 1, 29889]
116
+ ```
117
+ Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
118
+ add_prefix_space (`bool`, *optional*):
119
+ Whether or not the tokenizer should automatically add a prefix space
120
+ """
121
+
122
+ vocab_files_names = VOCAB_FILES_NAMES
123
+ slow_tokenizer_class = LlamaTokenizer
124
+ padding_side = "left"
125
+ model_input_names = ["input_ids", "attention_mask"]
126
+
127
+ def __init__(
128
+ self,
129
+ vocab_file=None,
130
+ tokenizer_file=None,
131
+ clean_up_tokenization_spaces=False,
132
+ unk_token="<unk>",
133
+ bos_token="<s>",
134
+ eos_token="</s>",
135
+ add_bos_token=True,
136
+ add_eos_token=False,
137
+ use_default_system_prompt=False,
138
+ legacy=None,
139
+ add_prefix_space=None,
140
+ **kwargs,
141
+ ):
142
+ if legacy is None:
143
+ logger.warning_once(
144
+ f"You are using the default legacy behaviour of the {self.__class__}. This is"
145
+ " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
146
+ " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
147
+ " means, and thoroughly read the reason why this was added as explained in"
148
+ " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file"
149
+ " you can ignore this message."
150
+ )
151
+ legacy = True
152
+ self.legacy = legacy
153
+
154
+ if add_prefix_space is not None:
155
+ kwargs["from_slow"] = True
156
+
157
+ super().__init__(
158
+ vocab_file=vocab_file,
159
+ tokenizer_file=tokenizer_file,
160
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
161
+ unk_token=unk_token,
162
+ bos_token=bos_token,
163
+ eos_token=eos_token,
164
+ add_bos_token=add_bos_token,
165
+ add_eos_token=add_eos_token,
166
+ use_default_system_prompt=use_default_system_prompt,
167
+ add_prefix_space=add_prefix_space,
168
+ legacy=legacy,
169
+ **kwargs,
170
+ )
171
+ self._add_bos_token = add_bos_token
172
+ self._add_eos_token = add_eos_token
173
+ self.update_post_processor()
174
+ self.use_default_system_prompt = use_default_system_prompt
175
+ self.vocab_file = vocab_file
176
+
177
+ @property
178
+ def can_save_slow_tokenizer(self) -> bool:
179
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
180
+
181
+ def update_post_processor(self):
182
+ """
183
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
184
+ """
185
+ bos = self.bos_token
186
+ bos_token_id = self.bos_token_id
187
+ if bos is None and self.add_bos_token:
188
+ raise ValueError("add_bos_token = True but bos_token = None")
189
+
190
+ eos = self.eos_token
191
+ eos_token_id = self.eos_token_id
192
+ if eos is None and self.add_eos_token:
193
+ raise ValueError("add_eos_token = True but eos_token = None")
194
+
195
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
196
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
197
+
198
+ special_tokens = []
199
+ if self.add_bos_token:
200
+ special_tokens.append((bos, bos_token_id))
201
+ if self.add_eos_token:
202
+ special_tokens.append((eos, eos_token_id))
203
+ self._tokenizer.post_processor = processors.TemplateProcessing(
204
+ single=single, pair=pair, special_tokens=special_tokens
205
+ )
206
+
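As a rough illustration of the template strings assembled above (values assume the default `add_bos_token=True`, `add_eos_token=False` and the standard `<s>`/`</s>` tokens):

```python
bos, eos = "<s>", "</s>"
add_bos_token, add_eos_token = True, False

single = f"{(bos + ':0 ') if add_bos_token else ''}$A:0{(' ' + eos + ':0') if add_eos_token else ''}"
pair = f"{single}{(' ' + bos + ':1') if add_bos_token else ''} $B:1{(' ' + eos + ':1') if add_eos_token else ''}"

print(single)  # "<s>:0 $A:0"            -> BOS prepended to a single sequence
print(pair)    # "<s>:0 $A:0 <s>:1 $B:1" -> each segment gets its own BOS
```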
207
+ @property
208
+ def add_eos_token(self):
209
+ return self._add_eos_token
210
+
211
+ @property
212
+ def add_bos_token(self):
213
+ return self._add_bos_token
214
+
215
+ @add_eos_token.setter
216
+ def add_eos_token(self, value):
217
+ self._add_eos_token = value
218
+ self.update_post_processor()
219
+
220
+ @add_bos_token.setter
221
+ def add_bos_token(self, value):
222
+ self._add_bos_token = value
223
+ self.update_post_processor()
224
+
225
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
226
+ if not self.can_save_slow_tokenizer:
227
+ raise ValueError(
228
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
229
+ "tokenizer."
230
+ )
231
+
232
+ if not os.path.isdir(save_directory):
233
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
234
+ return
235
+ out_vocab_file = os.path.join(
236
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
237
+ )
238
+
239
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
240
+ copyfile(self.vocab_file, out_vocab_file)
241
+
242
+ return (out_vocab_file,)
243
+
244
+ # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers
245
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
246
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
247
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
248
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
249
+
250
+ output = bos_token_id + token_ids_0 + eos_token_id
251
+
252
+ if token_ids_1 is not None:
253
+ output = output + bos_token_id + token_ids_1 + eos_token_id
254
+
255
+ return output
isolated/sim_greedy/upstream_sgl/internvl/model/phi3/configuration_phi3.py ADDED
@@ -0,0 +1,211 @@
1
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """ Phi-3 model configuration"""
16
+
17
+
18
+ from transformers.configuration_utils import PretrainedConfig
19
+ from transformers.utils import logging
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ 'microsoft/Phi-3-mini-4k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json',
25
+ 'microsoft/Phi-3-mini-128k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json',
26
+ }
27
+
28
+
29
+ class Phi3Config(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
32
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
33
+ defaults will yield a similar configuration to that of the
34
+ [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 32064):
41
+ Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`Phi3Model`].
43
+ hidden_size (`int`, *optional*, defaults to 3072):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 8192):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 32):
48
+ Number of hidden layers in the Transformer decoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 32):
50
+ Number of attention heads for each attention layer in the Transformer decoder.
51
+ num_key_value_heads (`int`, *optional*):
52
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
53
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
54
+ `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
55
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
56
+ by meanpooling all the original heads within that group. For more details checkout [this
57
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
58
+ `num_attention_heads`.
59
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
60
+ Dropout probability for mlp outputs.
61
+ embd_pdrop (`int`, *optional*, defaults to 0.0):
62
+ The dropout ratio for the embeddings.
63
+ attention_dropout (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio after computing the attention scores.
65
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
66
+ The non-linear activation function (function or string) in the decoder.
67
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
68
+ The maximum sequence length that this model might ever be used with.
69
+ original_max_position_embeddings (`int`, *optional*, defaults to 4096):
70
+ The maximum sequence length that this model was trained with. This is used to determine the size of the
71
+ original RoPE embeddings when using long scaling.
72
+ initializer_range (`float`, *optional*, defaults to 0.02):
73
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
74
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
75
+ The epsilon value used for the RMSNorm.
76
+ use_cache (`bool`, *optional*, defaults to `True`):
77
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
78
+ relevant if `config.is_decoder=True`.
79
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
80
+ Whether to tie weight embeddings
81
+ rope_theta (`float`, *optional*, defaults to 10000.0):
82
+ The base period of the RoPE embeddings.
83
+ rope_scaling (`dict`, *optional*):
84
+ The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
85
+ contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or `yarn` and
86
+ the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
87
+ divided by the number of attention heads divided by 2.
88
+ bos_token_id (`int`, *optional*, defaults to 1):
89
+ The id of the "beginning-of-sequence" token.
90
+ eos_token_id (`int`, *optional*, defaults to 32000):
91
+ The id of the "end-of-sequence" token.
92
+ pad_token_id (`int`, *optional*, defaults to 32000):
93
+ The id of the padding token.
94
+ sliding_window (`int`, *optional*):
95
+ Sliding window attention window size. If `None`, no sliding window is applied.
96
+
97
+ Example:
98
+
99
+ ```python
100
+ >>> from transformers import Phi3Model, Phi3Config
101
+
102
+ >>> # Initializing a Phi-3 style configuration
103
+ >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
104
+
105
+ >>> # Initializing a model from the configuration
106
+ >>> model = Phi3Model(configuration)
107
+
108
+ >>> # Accessing the model configuration
109
+ >>> configuration = model.config
110
+ ```"""
111
+
112
+ model_type = 'phi3'
113
+ keys_to_ignore_at_inference = ['past_key_values']
114
+
115
+ def __init__(
116
+ self,
117
+ vocab_size=32064,
118
+ hidden_size=3072,
119
+ intermediate_size=8192,
120
+ num_hidden_layers=32,
121
+ num_attention_heads=32,
122
+ num_key_value_heads=None,
123
+ resid_pdrop=0.0,
124
+ embd_pdrop=0.0,
125
+ attention_dropout=0.0,
126
+ hidden_act='silu',
127
+ max_position_embeddings=4096,
128
+ original_max_position_embeddings=4096,
129
+ initializer_range=0.02,
130
+ rms_norm_eps=1e-5,
131
+ use_cache=True,
132
+ tie_word_embeddings=False,
133
+ rope_theta=10000.0,
134
+ rope_scaling=None,
135
+ bos_token_id=1,
136
+ eos_token_id=32000,
137
+ pad_token_id=32000,
138
+ sliding_window=None,
139
+ **kwargs,
140
+ ):
141
+ self.vocab_size = vocab_size
142
+ self.hidden_size = hidden_size
143
+ self.intermediate_size = intermediate_size
144
+ self.num_hidden_layers = num_hidden_layers
145
+ self.num_attention_heads = num_attention_heads
146
+
147
+ if num_key_value_heads is None:
148
+ num_key_value_heads = num_attention_heads
149
+
150
+ self.num_key_value_heads = num_key_value_heads
151
+ self.resid_pdrop = resid_pdrop
152
+ self.embd_pdrop = embd_pdrop
153
+ self.attention_dropout = attention_dropout
154
+ self.hidden_act = hidden_act
155
+ self.max_position_embeddings = max_position_embeddings
156
+ self.original_max_position_embeddings = original_max_position_embeddings
157
+ self.initializer_range = initializer_range
158
+ self.rms_norm_eps = rms_norm_eps
159
+ self.use_cache = use_cache
160
+ self.rope_theta = rope_theta
161
+ self.rope_scaling = rope_scaling
162
+ self._rope_scaling_validation()
163
+ self.sliding_window = sliding_window
164
+
165
+ super().__init__(
166
+ bos_token_id=bos_token_id,
167
+ eos_token_id=eos_token_id,
168
+ pad_token_id=pad_token_id,
169
+ tie_word_embeddings=tie_word_embeddings,
170
+ **kwargs,
171
+ )
172
+
173
+ def _rope_scaling_validation(self):
174
+ """
175
+ Validate the `rope_scaling` configuration.
176
+ """
177
+ if self.rope_scaling is None:
178
+ return
179
+
180
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
181
+ raise ValueError(
182
+ '`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, '
183
+ f'got {self.rope_scaling}'
184
+ )
185
+ rope_scaling_type = self.rope_scaling.get('type', None)
186
+ rope_scaling_short_factor = self.rope_scaling.get('short_factor', None)
187
+ rope_scaling_long_factor = self.rope_scaling.get('long_factor', None)
188
+ if rope_scaling_type is None or rope_scaling_type not in ['su', 'yarn']:
189
+ raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
190
+ if not (
191
+ isinstance(rope_scaling_short_factor, list)
192
+ and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
193
+ ):
194
+ raise ValueError(
195
+ f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
196
+ )
197
+ if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
198
+ raise ValueError(
199
+ f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
200
+ )
201
+ if not (
202
+ isinstance(rope_scaling_long_factor, list)
203
+ and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
204
+ ):
205
+ raise ValueError(
206
+ f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
207
+ )
208
+ if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
209
+ raise ValueError(
210
+ f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
211
+ )
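A hedged sketch of a `rope_scaling` dict that satisfies the validation above, assuming the default geometry (`hidden_size=3072`, `num_attention_heads=32`, hence 48 factors per list); the factor values and the import path are placeholders:

```python
# The import path is an assumption; adjust to wherever Phi3Config lives in your setup.
from internvl.model.phi3.configuration_phi3 import Phi3Config

n_factors = 3072 // 32 // 2  # hidden_size // num_attention_heads // 2 == 48
config = Phi3Config(
    max_position_embeddings=131072,
    original_max_position_embeddings=4096,
    rope_scaling={
        "type": "su",
        "short_factor": [1.0] * n_factors,  # placeholder values
        "long_factor": [1.5] * n_factors,   # placeholder values
    },
)
print(len(config.rope_scaling["short_factor"]))  # 48
```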
isolated/sim_greedy/upstream_sgl/internvl/model/phi3/modeling_phi3.py ADDED
@@ -0,0 +1,1601 @@
1
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """ PyTorch Phi-3 model."""
16
+
17
+ import inspect
18
+ import math
19
+ import warnings
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+ from transformers.activations import ACT2FN
28
+ from transformers.cache_utils import Cache, DynamicCache
29
+ from transformers.modeling_attn_mask_utils import \
30
+ _prepare_4d_causal_attention_mask
31
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
32
+ CausalLMOutputWithPast,
33
+ SequenceClassifierOutputWithPast,
34
+ TokenClassifierOutput)
35
+ from transformers.modeling_utils import PreTrainedModel
36
+ from transformers.utils import (add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_flash_attn_2_available,
40
+ is_flash_attn_greater_or_equal_2_10, logging,
41
+ replace_return_docstrings)
42
+
43
+ from .configuration_phi3 import Phi3Config
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ # Transformers scans dependencies in the modeling file, causing issues on conditional loading. The regex only ignores try/catch blocks, but not if statements
48
+ # if is_flash_attn_2_available():
49
+ _flash_supports_window_size = False
50
+ try:
51
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
52
+ from flash_attn.bert_padding import (index_first_axis, pad_input, # noqa
53
+ unpad_input)
54
+
55
+ _flash_supports_window_size = 'window_size' in list(inspect.signature(flash_attn_func).parameters)
56
+ except ImportError as error:
57
+ logger.warning(
58
+ f'`flash-attention` package not found, consider installing for better performance: {error}.'
59
+ )
60
+ if not _flash_supports_window_size:
61
+ logger.warning(
62
+ "Current `flash-attenton` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`."
63
+ )
64
+
65
+ _CHECKPOINT_FOR_DOC = 'microsoft/Phi-3-mini-4k-instruct'
66
+ _CONFIG_FOR_DOC = 'Phi3Config'
67
+
68
+ PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [
69
+ 'microsoft/Phi-3-mini-4k-instruct',
70
+ 'microsoft/Phi-3-mini-128k-instruct',
71
+ # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3
72
+ ]
73
+
74
+
75
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3
76
+ class Phi3RMSNorm(nn.Module):
77
+ def __init__(self, hidden_size, eps=1e-6):
78
+ """
79
+ Phi3RMSNorm is equivalent to T5LayerNorm
80
+ """
81
+ super().__init__()
82
+ self.weight = nn.Parameter(torch.ones(hidden_size))
83
+ self.variance_epsilon = eps
84
+
85
+ def forward(self, hidden_states):
86
+ input_dtype = hidden_states.dtype
87
+ hidden_states = hidden_states.to(torch.float32)
88
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
89
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
90
+ return self.weight * hidden_states.to(input_dtype)
91
+
92
+
93
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
94
+ def _get_unpad_data(attention_mask):
95
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
96
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
97
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
98
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
99
+ return (
100
+ indices,
101
+ cu_seqlens,
102
+ max_seqlen_in_batch,
103
+ )
104
+
105
+
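A small worked example of what `_get_unpad_data` returns for a toy padding mask (a sketch assuming the helper above is in scope):

```python
import torch

mask = torch.tensor([[1, 1, 0],
                     [1, 1, 1]])            # two sequences of lengths 2 and 3
indices, cu_seqlens, max_len = _get_unpad_data(mask)
print(indices)     # tensor([0, 1, 3, 4, 5]) -- flat positions of non-padding tokens
print(cu_seqlens)  # tensor([0, 2, 5], dtype=torch.int32) -- cumulative sequence lengths
print(max_len)     # 3
```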
106
+ # Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3
107
+ class Phi3RotaryEmbedding(nn.Module):
108
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
109
+ super().__init__()
110
+
111
+ self.dim = dim
112
+ self.max_position_embeddings = max_position_embeddings
113
+ self.base = base
114
+ self.register_buffer('inv_freq', None, persistent=False)
115
+
116
+ @torch.no_grad()
117
+ def forward(self, x, position_ids, seq_len=None):
118
+ # x: [bs, num_attention_heads, seq_len, head_size]
119
+ if self.inv_freq is None:
120
+ self.inv_freq = 1.0 / (
121
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
122
+ )
123
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
124
+ position_ids_expanded = position_ids[:, None, :].float()
125
+ # Force float32 since bfloat16 loses precision on long contexts
126
+ # See https://github.com/huggingface/transformers/pull/29285
127
+ device_type = x.device.type
128
+ device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
129
+ with torch.autocast(device_type=device_type, enabled=False):
130
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
131
+ emb = torch.cat((freqs, freqs), dim=-1)
132
+ cos = emb.cos()
133
+ sin = emb.sin()
134
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
135
+
136
+
137
+ class Phi3SuScaledRotaryEmbedding(Phi3RotaryEmbedding):
138
+ def __init__(self, dim, config, device=None):
139
+ super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)
140
+
141
+ self.short_factor = config.rope_scaling['short_factor']
142
+ self.long_factor = config.rope_scaling['long_factor']
143
+ self.original_max_position_embeddings = config.original_max_position_embeddings
144
+
145
+ @torch.no_grad()
146
+ def forward(self, x, position_ids, seq_len=None):
147
+ seq_len = torch.max(position_ids) + 1
148
+ if seq_len > self.original_max_position_embeddings:
149
+ ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
150
+ else:
151
+ ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)
152
+
153
+ inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
154
+ self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)
155
+
156
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
157
+ position_ids_expanded = position_ids[:, None, :].float()
158
+
159
+ # Force float32 since bfloat16 loses precision on long contexts
160
+ # See https://github.com/huggingface/transformers/pull/29285
161
+ device_type = x.device.type
162
+ device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
163
+ with torch.autocast(device_type=device_type, enabled=False):
164
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
165
+ emb = torch.cat((freqs, freqs), dim=-1)
166
+
167
+ scale = self.max_position_embeddings / self.original_max_position_embeddings
168
+ if scale <= 1.0:
169
+ scaling_factor = 1.0
170
+ else:
171
+ scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))
172
+
173
+ cos = emb.cos() * scaling_factor
174
+ sin = emb.sin() * scaling_factor
175
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
176
+
177
+
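A worked example of the `su` attention-scaling factor computed above, assuming a long-context configuration with `max_position_embeddings=131072` and `original_max_position_embeddings=4096`; the numbers are illustrative:

```python
import math

max_pos, original_max_pos = 131072, 4096
scale = max_pos / original_max_pos                                  # 32.0
scaling_factor = math.sqrt(1 + math.log(scale) / math.log(original_max_pos))
print(round(scaling_factor, 4))                                     # ~1.1902
```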
178
+ class Phi3YarnScaledRotaryEmbedding(Phi3RotaryEmbedding):
179
+ def __init__(self, dim, config, device=None):
180
+ super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)
181
+
182
+ self.short_factor = config.rope_scaling['short_factor']
183
+ self.long_factor = config.rope_scaling['long_factor']
184
+ self.original_max_position_embeddings = config.original_max_position_embeddings
185
+
186
+ @torch.no_grad()
187
+ def forward(self, x, position_ids, seq_len=None):
188
+ seq_len = torch.max(position_ids) + 1
189
+ if seq_len > self.original_max_position_embeddings:
190
+ ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
191
+ else:
192
+ ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)
193
+
194
+ inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
195
+ self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)
196
+
197
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
198
+ position_ids_expanded = position_ids[:, None, :].float()
199
+
200
+ # Force float32 since bfloat16 loses precision on long contexts
201
+ # See https://github.com/huggingface/transformers/pull/29285
202
+ device_type = x.device.type
203
+ device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
204
+ with torch.autocast(device_type=device_type, enabled=False):
205
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
206
+ emb = torch.cat((freqs, freqs), dim=-1)
207
+
208
+ scale = self.max_position_embeddings / self.original_max_position_embeddings
209
+ if scale <= 1.0:
210
+ scaling_factor = 1.0
211
+ else:
212
+ scaling_factor = 0.1 * math.log(scale) + 1.0
213
+
214
+ cos = emb.cos() * scaling_factor
215
+ sin = emb.sin() * scaling_factor
216
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
217
+
218
+
219
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
220
+ def rotate_half(x):
221
+ """Rotates half the hidden dims of the input."""
222
+ x1 = x[..., : x.shape[-1] // 2]
223
+ x2 = x[..., x.shape[-1] // 2 :]
224
+ return torch.cat((-x2, x1), dim=-1)
225
+
226
+
227
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
228
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
229
+ """Applies Rotary Position Embedding to the query and key tensors.
230
+
231
+ Args:
232
+ q (`torch.Tensor`): The query tensor.
233
+ k (`torch.Tensor`): The key tensor.
234
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
235
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
236
+ position_ids (`torch.Tensor`, *optional*):
237
+ Deprecated and unused.
238
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
239
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
240
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
241
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
242
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
243
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
244
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
245
+ Returns:
246
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
247
+ """
248
+ cos = cos.unsqueeze(unsqueeze_dim)
249
+ sin = sin.unsqueeze(unsqueeze_dim)
250
+ q_embed = (q * cos) + (rotate_half(q) * sin)
251
+ k_embed = (k * cos) + (rotate_half(k) * sin)
252
+ return q_embed, k_embed
253
+
254
+
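A minimal shape check for `rotate_half` / `apply_rotary_pos_emb` above (a sketch assuming both helpers are in scope; the tensor sizes are arbitrary):

```python
import torch

bsz, heads, seq, head_dim = 1, 2, 4, 8
q = torch.randn(bsz, heads, seq, head_dim)
k = torch.randn(bsz, heads, seq, head_dim)
cos = torch.randn(bsz, seq, head_dim)   # as produced by the rotary embedding classes
sin = torch.randn(bsz, seq, head_dim)

q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 broadcasts over heads
assert q_rot.shape == q.shape and k_rot.shape == k.shape
```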
255
+ class Phi3MLP(nn.Module):
256
+ def __init__(self, config):
257
+ super().__init__()
258
+
259
+ self.config = config
260
+ self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
261
+ self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
262
+
263
+ self.activation_fn = ACT2FN[config.hidden_act]
264
+
265
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
266
+ up_states = self.gate_up_proj(hidden_states)
267
+
268
+ gate, up_states = up_states.chunk(2, dim=-1)
269
+ up_states = up_states * self.activation_fn(gate)
270
+
271
+ return self.down_proj(up_states)
272
+
273
+
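A shape-level sketch of the fused gate/up projection used in `Phi3MLP` above (toy sizes; the real model uses `hidden_size=3072` and `intermediate_size=8192`):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden_size, intermediate_size = 8, 16          # toy sizes
gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

x = torch.randn(2, 3, hidden_size)
gate, up = gate_up_proj(x).chunk(2, dim=-1)     # each (2, 3, intermediate_size)
y = down_proj(up * F.silu(gate))                # SiLU gating matches hidden_act="silu"
print(y.shape)                                  # torch.Size([2, 3, 8])
```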
274
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
275
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
276
+ """
277
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
278
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
279
+ """
280
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
281
+ if n_rep == 1:
282
+ return hidden_states
283
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
284
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
285
+
286
+
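A quick equivalence check for `repeat_kv` above, mirroring the docstring's claim that it matches `torch.repeat_interleave` along the head dimension (a sketch assuming the function is in scope):

```python
import torch

x = torch.randn(2, 4, 5, 16)                 # (batch, num_key_value_heads, seqlen, head_dim)
out = repeat_kv(x, n_rep=3)                  # -> (2, 12, 5, 16)
ref = torch.repeat_interleave(x, repeats=3, dim=1)
assert torch.equal(out, ref)
```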
287
+ class Phi3Attention(nn.Module):
288
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
289
+
290
+ def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
291
+ super().__init__()
292
+ self.config = config
293
+ self.layer_idx = layer_idx
294
+ if layer_idx is None:
295
+ logger.warning_once(
296
+ f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will '
297
+ 'lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` '
298
+ 'when creating this class.'
299
+ )
300
+
301
+ self.attention_dropout = config.attention_dropout
302
+ self.hidden_size = config.hidden_size
303
+ self.num_heads = config.num_attention_heads
304
+ self.head_dim = self.hidden_size // self.num_heads
305
+ self.num_key_value_heads = config.num_key_value_heads
306
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
307
+ self.max_position_embeddings = config.max_position_embeddings
308
+ self.original_max_position_embeddings = config.original_max_position_embeddings
309
+ self.rope_theta = config.rope_theta
310
+ self.rope_scaling = config.rope_scaling
311
+ self.is_causal = True
312
+
313
+ if (self.head_dim * self.num_heads) != self.hidden_size:
314
+ raise ValueError(
315
+ f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
316
+ f' and `num_heads`: {self.num_heads}).'
317
+ )
318
+
319
+ op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim)
320
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
321
+ self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False)
322
+ self._init_rope()
323
+
324
+ def _init_rope(self):
325
+ if self.rope_scaling is None:
326
+ self.rotary_emb = Phi3RotaryEmbedding(
327
+ self.head_dim,
328
+ max_position_embeddings=self.max_position_embeddings,
329
+ base=self.rope_theta,
330
+ )
331
+ else:
332
+ scaling_type = self.config.rope_scaling['type']
333
+ if scaling_type == 'su':
334
+ self.rotary_emb = Phi3SuScaledRotaryEmbedding(self.head_dim, self.config)
335
+ elif scaling_type == 'yarn':
336
+ self.rotary_emb = Phi3YarnScaledRotaryEmbedding(self.head_dim, self.config)
337
+ else:
338
+ raise ValueError(f'Unknown RoPE scaling type {scaling_type}')
339
+
340
+ def forward(
341
+ self,
342
+ hidden_states: torch.Tensor,
343
+ attention_mask: Optional[torch.Tensor] = None,
344
+ position_ids: Optional[torch.LongTensor] = None,
345
+ past_key_value: Optional[Cache] = None,
346
+ output_attentions: bool = False,
347
+ use_cache: bool = False,
348
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
349
+ logger.warning_once('You are not running the flash-attention implementation, expect numerical differences.')
350
+
351
+ bsz, q_len, _ = hidden_states.size()
352
+
353
+ qkv = self.qkv_proj(hidden_states)
354
+ query_pos = self.num_heads * self.head_dim
355
+ query_states = qkv[..., :query_pos]
356
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
357
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
358
+
359
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
360
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
361
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
362
+
363
+ kv_seq_len = key_states.shape[-2]
364
+ if past_key_value is not None:
365
+ if self.layer_idx is None:
366
+ raise ValueError(
367
+ f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
368
+ 'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
369
+ 'with a layer index.'
370
+ )
371
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
372
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)
373
+
374
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
375
+
376
+ if past_key_value is not None:
377
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
378
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
379
+
380
+ # repeat k/v heads if n_kv_heads < n_heads
381
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
382
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
383
+
384
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
385
+
386
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
387
+ raise ValueError(
388
+ f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
389
+ f' {attn_weights.size()}'
390
+ )
391
+
392
+ if attention_mask is not None:
393
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
394
+ raise ValueError(
395
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
396
+ )
397
+ attn_weights = attn_weights + attention_mask
398
+
399
+ # upcast attention to fp32
400
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
401
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
402
+
403
+ attn_output = torch.matmul(attn_weights, value_states)
404
+
405
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
406
+ raise ValueError(
407
+ f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
408
+ f' {attn_output.size()}'
409
+ )
410
+
411
+ attn_output = attn_output.transpose(1, 2).contiguous()
412
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
413
+
414
+ attn_output = self.o_proj(attn_output)
415
+
416
+ if not output_attentions:
417
+ attn_weights = None
418
+
419
+ return attn_output, attn_weights, past_key_value
420
+
421
+
422
+ class Phi3FlashAttention2(Phi3Attention):
423
+ """
424
+ Phi-3 flash attention module. This module inherits from `Phi3Attention` as the weights of the module stay
425
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
426
+ flash attention and deal with padding tokens in case the input contains any of them.
427
+ """
428
+
429
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
430
+ def __init__(self, *args, **kwargs):
431
+ super().__init__(*args, **kwargs)
432
+
433
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
434
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
435
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
436
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
437
+
438
+ def forward(
439
+ self,
440
+ hidden_states: torch.Tensor,
441
+ attention_mask: Optional[torch.LongTensor] = None,
442
+ position_ids: Optional[torch.LongTensor] = None,
443
+ past_key_value: Optional[Cache] = None,
444
+ output_attentions: bool = False,
445
+ use_cache: bool = False,
446
+ **kwargs,
447
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
448
+ # Phi3FlashAttention2 attention does not support output_attentions
449
+
450
+ if not _flash_supports_window_size:
451
+ logger.warning_once(
452
+ "The current flash attention version does not support sliding window attention. Please use `attn_implementation='eager'` or upgrade flash-attn library."
453
+ )
454
+ raise ValueError('The current flash attention version does not support sliding window attention.')
455
+
456
+ output_attentions = False
457
+
458
+ if 'padding_mask' in kwargs:
459
+ warnings.warn(
460
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
461
+ )
462
+
463
+ # overwrite attention_mask with padding_mask
464
+ attention_mask = kwargs.pop('padding_mask')
465
+
466
+ bsz, q_len, _ = hidden_states.size()
467
+
468
+ qkv = self.qkv_proj(hidden_states)
469
+ query_pos = self.num_heads * self.head_dim
470
+ query_states = qkv[..., :query_pos]
471
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
472
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
473
+
474
+ # Flash attention requires the input to have the shape
475
+ # batch_size x seq_length x num_heads x head_dim
476
+ # therefore we just need to keep the original shape
477
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
478
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
479
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
480
+
481
+ kv_seq_len = key_states.shape[-2]
482
+ if past_key_value is not None:
483
+ if self.layer_idx is None:
484
+ raise ValueError(
485
+ f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
486
+ 'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
487
+ 'with a layer index.'
488
+ )
489
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
490
+
491
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
492
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
493
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len)
494
+
495
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
496
+
497
+ use_sliding_windows = (
498
+ _flash_supports_window_size
499
+ and getattr(self.config, 'sliding_window', None) is not None
500
+ and kv_seq_len > self.config.sliding_window
501
+ )
502
+
503
+ if past_key_value is not None:
504
+ # Activate cache slicing only if the config has a `sliding_window` value
505
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
506
+ if (
507
+ getattr(self.config, 'sliding_window', None) is not None
508
+ and kv_seq_len > self.config.sliding_window
509
+ and cache_has_contents
510
+ ):
511
+ slicing_tokens = 1 - self.config.sliding_window
512
+
513
+ past_key = past_key_value[self.layer_idx][0]
514
+ past_value = past_key_value[self.layer_idx][1]
515
+
516
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
517
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
518
+
519
+ if past_key.shape[-2] != self.config.sliding_window - 1:
520
+ raise ValueError(
521
+ f'past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got'
522
+ f' {past_key.shape}'
523
+ )
524
+
525
+ if attention_mask is not None:
526
+ attention_mask = attention_mask[:, slicing_tokens:]
527
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
528
+
529
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
530
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
531
+
532
+ # repeat k/v heads if n_kv_heads < n_heads
533
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
534
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
535
+
536
+ attn_dropout = self.attention_dropout if self.training else 0.0
537
+
538
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
539
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
540
+ # cast them back to the correct dtype just to be sure everything works as expected.
541
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
542
+ # in fp32.
543
+
544
+ if query_states.dtype == torch.float32:
545
+ if torch.is_autocast_enabled():
546
+ target_dtype = torch.get_autocast_gpu_dtype()
547
+ # Handle the case where the model is quantized
548
+ elif hasattr(self.config, '_pre_quantization_dtype'):
549
+ target_dtype = self.config._pre_quantization_dtype
550
+ else:
551
+ target_dtype = self.qkv_proj.weight.dtype
552
+
553
+ logger.warning_once(
554
+ f'The input hidden states seems to be silently casted in float32, this might be related to'
555
+ f' the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in'
556
+ f' {target_dtype}.'
557
+ )
558
+
559
+ query_states = query_states.to(target_dtype)
560
+ key_states = key_states.to(target_dtype)
561
+ value_states = value_states.to(target_dtype)
562
+
563
+ # Reshape to the expected shape for Flash Attention
564
+ query_states = query_states.transpose(1, 2)
565
+ key_states = key_states.transpose(1, 2)
566
+ value_states = value_states.transpose(1, 2)
567
+
568
+ attn_output = self._flash_attention_forward(
569
+ query_states,
570
+ key_states,
571
+ value_states,
572
+ attention_mask,
573
+ q_len,
574
+ dropout=attn_dropout,
575
+ use_sliding_windows=use_sliding_windows,
576
+ )
577
+
578
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
579
+ attn_output = self.o_proj(attn_output)
580
+
581
+ if not output_attentions:
582
+ attn_weights = None
583
+
584
+ return attn_output, attn_weights, past_key_value
585
+
586
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward
587
+ def _flash_attention_forward(
588
+ self,
589
+ query_states,
590
+ key_states,
591
+ value_states,
592
+ attention_mask,
593
+ query_length,
594
+ dropout=0.0,
595
+ softmax_scale=None,
596
+ use_sliding_windows=False,
597
+ ):
598
+ """
599
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
600
+ it first unpads the input, then computes the attention scores, and finally re-pads the attention output.
601
+
602
+ Args:
603
+ query_states (`torch.Tensor`):
604
+ Input query states to be passed to Flash Attention API
605
+ key_states (`torch.Tensor`):
606
+ Input key states to be passed to Flash Attention API
607
+ value_states (`torch.Tensor`):
608
+ Input value states to be passed to Flash Attention API
609
+ attention_mask (`torch.Tensor`):
610
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
611
+ position of padding tokens and 1 for the position of non-padding tokens.
612
+ dropout (`float`):
613
+ Attention dropout
614
+ softmax_scale (`float`, *optional*):
615
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
616
+ use_sliding_windows (`bool`, *optional*):
617
+ Whether to activate sliding window attention.
618
+ """
619
+ if not self._flash_attn_uses_top_left_mask:
620
+ causal = self.is_causal
621
+ else:
622
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
623
+ causal = self.is_causal and query_length != 1
624
+
625
+ # Contains at least one padding token in the sequence
626
+ if attention_mask is not None:
627
+ batch_size = query_states.shape[0]
628
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
629
+ query_states, key_states, value_states, attention_mask, query_length
630
+ )
631
+
632
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
633
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
634
+
635
+ if not use_sliding_windows:
636
+ attn_output_unpad = flash_attn_varlen_func(
637
+ query_states,
638
+ key_states,
639
+ value_states,
640
+ cu_seqlens_q=cu_seqlens_q,
641
+ cu_seqlens_k=cu_seqlens_k,
642
+ max_seqlen_q=max_seqlen_in_batch_q,
643
+ max_seqlen_k=max_seqlen_in_batch_k,
644
+ dropout_p=dropout,
645
+ softmax_scale=softmax_scale,
646
+ causal=causal,
647
+ )
648
+ else:
649
+ attn_output_unpad = flash_attn_varlen_func(
650
+ query_states,
651
+ key_states,
652
+ value_states,
653
+ cu_seqlens_q=cu_seqlens_q,
654
+ cu_seqlens_k=cu_seqlens_k,
655
+ max_seqlen_q=max_seqlen_in_batch_q,
656
+ max_seqlen_k=max_seqlen_in_batch_k,
657
+ dropout_p=dropout,
658
+ softmax_scale=softmax_scale,
659
+ causal=causal,
660
+ window_size=(self.config.sliding_window, self.config.sliding_window),
661
+ )
662
+
663
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
664
+ else:
665
+ if not use_sliding_windows:
666
+ attn_output = flash_attn_func(
667
+ query_states,
668
+ key_states,
669
+ value_states,
670
+ dropout,
671
+ softmax_scale=softmax_scale,
672
+ causal=causal,
673
+ )
674
+ else:
675
+ attn_output = flash_attn_func(
676
+ query_states,
677
+ key_states,
678
+ value_states,
679
+ dropout,
680
+ softmax_scale=softmax_scale,
681
+ causal=causal,
682
+ window_size=(self.config.sliding_window, self.config.sliding_window),
683
+ )
684
+
685
+ return attn_output
686
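# Illustrative sketch (placeholder code, not part of the file above): the `window_size`
# argument passed to flash_attn_varlen_func/flash_attn_func above asks flash-attn to apply
# a banded causal mask implicitly. Roughly, token i may attend to token j only if
# i - window < j <= i. A minimal pure-torch rendering of that mask:
import torch

def sliding_window_causal_mask(seq_len: int, window: int) -> torch.Tensor:
    i = torch.arange(seq_len)[:, None]
    j = torch.arange(seq_len)[None, :]
    return (j <= i) & (j > i - window)   # causal AND within the last `window` positions

print(sliding_window_causal_mask(5, 3).int())
# tensor([[1, 0, 0, 0, 0],
#         [1, 1, 0, 0, 0],
#         [1, 1, 1, 0, 0],
#         [0, 1, 1, 1, 0],
#         [0, 0, 1, 1, 1]], dtype=torch.int32)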
+
687
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
688
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
689
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
690
+
691
+ # On the first iteration we need to properly re-create the padding mask
692
+ # by slicing it on the proper place
693
+ if kv_seq_len != attention_mask.shape[-1]:
694
+ attention_mask_num_tokens = attention_mask.shape[-1]
695
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
696
+
697
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
698
+
699
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
700
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
701
+
702
+ if query_length == kv_seq_len:
703
+ query_layer = index_first_axis(
704
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
705
+ )
706
+ cu_seqlens_q = cu_seqlens_k
707
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
708
+ indices_q = indices_k
709
+ elif query_length == 1:
710
+ max_seqlen_in_batch_q = 1
711
+ cu_seqlens_q = torch.arange(
712
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
713
+ ) # There is a memcpy here, that is very bad.
714
+ indices_q = cu_seqlens_q[:-1]
715
+ query_layer = query_layer.squeeze(1)
716
+ else:
717
+ # The -q_len: slice assumes left padding.
718
+ attention_mask = attention_mask[:, -query_length:]
719
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
720
+
721
+ return (
722
+ query_layer,
723
+ key_layer,
724
+ value_layer,
725
+ indices_q,
726
+ (cu_seqlens_q, cu_seqlens_k),
727
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
728
+ )
729
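# Illustrative sketch (placeholder code, not part of the file above): `_upad_input` relies on
# `_get_unpad_data` plus flash-attn's `index_first_axis`/`unpad_input` to turn a 2-D padding
# mask into the flat token indices and cumulative sequence lengths expected by
# flash_attn_varlen_func. The helper name below is hypothetical; it only shows the idea.
import torch
import torch.nn.functional as F

def get_unpad_data_sketch(attention_mask: torch.Tensor):
    # attention_mask: (batch, seq_len) with 1 for real tokens, 0 for padding
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)                # real tokens per row
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()     # flat positions of real tokens
    max_seqlen_in_batch = int(seqlens_in_batch.max())
    # cumulative lengths [0, len0, len0+len1, ...] used as cu_seqlens_q / cu_seqlens_k
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch

mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
print(get_unpad_data_sketch(mask))  # indices=[0,1,2,4,5], cu_seqlens=[0,3,5], max_seqlen=3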
+
730
+
731
+ # copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3
732
+ # TODO @Arthur no longer copied from LLama after static cache
733
+ class Phi3SdpaAttention(Phi3Attention):
734
+ """
735
+ Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
736
+ `Phi3Attention`, as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
737
+ SDPA API.
738
+ """
739
+
740
+ # Adapted from Phi3Attention.forward
741
+ def forward(
742
+ self,
743
+ hidden_states: torch.Tensor,
744
+ attention_mask: Optional[torch.Tensor] = None,
745
+ position_ids: Optional[torch.LongTensor] = None,
746
+ past_key_value: Optional[Cache] = None,
747
+ output_attentions: bool = False,
748
+ use_cache: bool = False,
749
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
750
+ if output_attentions:
751
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
752
+ logger.warning_once(
753
+ 'Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, '
754
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
755
+ )
756
+ return super().forward(
757
+ hidden_states=hidden_states,
758
+ attention_mask=attention_mask,
759
+ position_ids=position_ids,
760
+ past_key_value=past_key_value,
761
+ output_attentions=output_attentions,
762
+ use_cache=use_cache,
763
+ )
764
+
765
+ bsz, q_len, _ = hidden_states.size()
766
+
767
+ qkv = self.qkv_proj(hidden_states)
768
+ query_pos = self.num_heads * self.head_dim
769
+ query_states = qkv[..., :query_pos]
770
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
771
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
772
+
773
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
774
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
775
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
776
+
777
+ kv_seq_len = key_states.shape[-2]
778
+ if past_key_value is not None:
779
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
780
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)
781
+
782
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
783
+
784
+ if past_key_value is not None:
785
+ cache_kwargs = {'sin': sin, 'cos': cos} # Specific to RoPE models
786
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
787
+
788
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
789
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
790
+
791
+ if attention_mask is not None:
792
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
793
+ raise ValueError(
794
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
795
+ )
796
+
797
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
798
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
799
+ if query_states.device.type == 'cuda' and attention_mask is not None:
800
+ query_states = query_states.contiguous()
801
+ key_states = key_states.contiguous()
802
+ value_states = value_states.contiguous()
803
+
804
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
805
+ query_states,
806
+ key_states,
807
+ value_states,
808
+ attn_mask=attention_mask,
809
+ dropout_p=self.attention_dropout if self.training else 0.0,
810
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
811
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
812
+ )
813
+
814
+ attn_output = attn_output.transpose(1, 2).contiguous()
815
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
816
+
817
+ attn_output = self.o_proj(attn_output)
818
+
819
+ return attn_output, None, past_key_value
820
+
821
+
822
+ PHI3_ATTENTION_CLASSES = {
823
+ 'eager': Phi3Attention,
824
+ 'flash_attention_2': Phi3FlashAttention2,
825
+ 'sdpa': Phi3SdpaAttention,
826
+ }
827
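# Illustrative sketch (placeholder code, not part of the file above): all three attention
# classes registered here expand the grouped K/V heads to the number of query heads via
# `repeat_kv` before the score computation. A minimal stand-in for that expansion together
# with an SDPA call (shapes are toy values):
import torch
import torch.nn.functional as F

def repeat_kv_sketch(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim)
    if n_rep == 1:
        return x
    b, kv, s, d = x.shape
    return x[:, :, None, :, :].expand(b, kv, n_rep, s, d).reshape(b, kv * n_rep, s, d)

q = torch.randn(2, 32, 16, 64)   # 32 query heads
k = torch.randn(2, 8, 16, 64)    # 8 KV heads -> n_rep = 4
v = torch.randn(2, 8, 16, 64)
out = F.scaled_dot_product_attention(q, repeat_kv_sketch(k, 4), repeat_kv_sketch(v, 4), is_causal=True)
print(out.shape)  # torch.Size([2, 32, 16, 64])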
+
828
+
829
+ class Phi3DecoderLayer(nn.Module):
830
+ def __init__(self, config: Phi3Config, layer_idx: int):
831
+ super().__init__()
832
+
833
+ self.config = config
834
+ self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
835
+
836
+ self.mlp = Phi3MLP(config)
837
+ self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
838
+
839
+ self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
840
+ self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
841
+ self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
842
+
843
+ def forward(
844
+ self,
845
+ hidden_states: torch.Tensor,
846
+ attention_mask: Optional[torch.Tensor] = None,
847
+ position_ids: Optional[torch.LongTensor] = None,
848
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
849
+ output_attentions: Optional[bool] = False,
850
+ use_cache: Optional[bool] = False,
851
+ **kwargs,
852
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
853
+ if 'padding_mask' in kwargs:
854
+ warnings.warn(
855
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
856
+ )
857
+ """
858
+ Args:
859
+ hidden_states (`torch.FloatTensor`):
860
+ input to the layer of shape `(batch, seq_len, embed_dim)`
861
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
862
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
863
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
864
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
865
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
866
+ output_attentions (`bool`, *optional*):
867
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
868
+ returned tensors for more detail.
869
+ use_cache (`bool`, *optional*):
870
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
871
+ (see `past_key_values`).
872
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
873
+ """
874
+
875
+ residual = hidden_states
876
+
877
+ hidden_states = self.input_layernorm(hidden_states)
878
+
879
+ # Self Attention
880
+ attn_outputs, self_attn_weights, present_key_value = self.self_attn(
881
+ hidden_states=hidden_states,
882
+ attention_mask=attention_mask,
883
+ position_ids=position_ids,
884
+ past_key_value=past_key_value,
885
+ output_attentions=output_attentions,
886
+ use_cache=use_cache,
887
+ )
888
+
889
+ hidden_states = residual + self.resid_attn_dropout(attn_outputs)
890
+
891
+ residual = hidden_states
892
+ hidden_states = self.post_attention_layernorm(hidden_states)
893
+ hidden_states = self.mlp(hidden_states)
894
+ hidden_states = residual + self.resid_mlp_dropout(hidden_states)
895
+
896
+ outputs = (hidden_states,)
897
+
898
+ if output_attentions:
899
+ outputs += (self_attn_weights,)
900
+
901
+ if use_cache:
902
+ outputs += (present_key_value,)
903
+
904
+ return outputs
905
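# Illustrative sketch (placeholder code, not part of the file above): the decoder layer uses
# the pre-norm residual pattern, normalizing before each sub-block and adding the result back
# to the running stream. LayerNorm stands in for RMSNorm here, and dropout/caching are omitted.
import torch
import torch.nn as nn

class PreNormBlockSketch(nn.Module):
    def __init__(self, dim, mixer, mlp):
        super().__init__()
        self.norm1, self.norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
        self.mixer, self.mlp = mixer, mlp

    def forward(self, x):
        x = x + self.mixer(self.norm1(x))   # attention sub-block with residual connection
        x = x + self.mlp(self.norm2(x))     # MLP sub-block with residual connection
        return x

blk = PreNormBlockSketch(16, nn.Linear(16, 16), nn.Linear(16, 16))
print(blk(torch.randn(2, 4, 16)).shape)  # torch.Size([2, 4, 16])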
+
906
+
907
+ PHI3_START_DOCSTRING = r"""
908
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
909
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
910
+ etc.)
911
+
912
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
913
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
914
+ and behavior.
915
+
916
+ Parameters:
917
+ config ([`Phi3Config`]):
918
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
919
+ load the weights associated with the model, only the configuration. Check out the
920
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
921
+ """
922
+
923
+
924
+ @add_start_docstrings(
925
+ 'The bare Phi-3 model outputting raw hidden-states without any specific head on top.',
926
+ PHI3_START_DOCSTRING,
927
+ )
928
+ class Phi3PreTrainedModel(PreTrainedModel):
929
+ config_class = Phi3Config
930
+ base_model_prefix = 'model'
931
+ supports_gradient_checkpointing = True
932
+ _no_split_modules = ['Phi3DecoderLayer']
933
+ _skip_keys_device_placement = 'past_key_values'
934
+ _supports_flash_attn_2 = True
935
+ _supports_sdpa = False
936
+ _supports_cache_class = True
937
+
938
+ _version = '0.0.5'
939
+
940
+ def _init_weights(self, module):
941
+ std = self.config.initializer_range
942
+ if isinstance(module, nn.Linear):
943
+ module.weight.data.normal_(mean=0.0, std=std)
944
+ if module.bias is not None:
945
+ module.bias.data.zero_()
946
+ elif isinstance(module, nn.Embedding):
947
+ module.weight.data.normal_(mean=0.0, std=std)
948
+ if module.padding_idx is not None:
949
+ module.weight.data[module.padding_idx].zero_()
950
+
951
+
952
+ PHI3_INPUTS_DOCSTRING = r"""
953
+ Args:
954
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
955
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
956
+ it.
957
+
958
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
959
+ [`PreTrainedTokenizer.__call__`] for details.
960
+
961
+ [What are input IDs?](../glossary#input-ids)
962
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
963
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
964
+
965
+ - 1 for tokens that are **not masked**,
966
+ - 0 for tokens that are **masked**.
967
+
968
+ [What are attention masks?](../glossary#attention-mask)
969
+
970
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
971
+ [`PreTrainedTokenizer.__call__`] for details.
972
+
973
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
974
+ `past_key_values`).
975
+
976
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
977
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
978
+ information on the default strategy.
979
+
980
+ - 1 indicates the head is **not masked**,
981
+ - 0 indicates the head is **masked**.
982
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
983
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
984
+ config.n_positions - 1]`.
985
+
986
+ [What are position IDs?](../glossary#position-ids)
987
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
988
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
989
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
990
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
991
+
992
+ Two formats are allowed:
993
+ - a [`~cache_utils.Cache`] instance;
994
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
995
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
996
+ cache format.
997
+
998
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
999
+ legacy cache format will be returned.
1000
+
1001
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1002
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1003
+ of shape `(batch_size, sequence_length)`.
1004
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1005
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1006
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1007
+ model's internal embedding lookup matrix.
1008
+ use_cache (`bool`, *optional*):
1009
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1010
+ `past_key_values`).
1011
+ output_attentions (`bool`, *optional*):
1012
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1013
+ tensors for more detail.
1014
+ output_hidden_states (`bool`, *optional*):
1015
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1016
+ more detail.
1017
+ return_dict (`bool`, *optional*):
1018
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1019
+ """
1020
+
1021
+
1022
+ @add_start_docstrings(
1023
+ 'The bare Phi-3 model outputting raw hidden-states without any specific head on top.',
1024
+ PHI3_START_DOCSTRING,
1025
+ )
1026
+ class Phi3Model(Phi3PreTrainedModel):
1027
+ """
1028
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
1029
+
1030
+ Args:
1031
+ config: Phi3Config
1032
+ """
1033
+
1034
+ def __init__(self, config: Phi3Config):
1035
+ super().__init__(config)
1036
+ self.padding_idx = config.pad_token_id
1037
+ self.vocab_size = config.vocab_size
1038
+
1039
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1040
+ self.embed_dropout = nn.Dropout(config.embd_pdrop)
1041
+ self.layers = nn.ModuleList(
1042
+ [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
1043
+ )
1044
+ self._attn_implementation = config._attn_implementation
1045
+ self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1046
+
1047
+ self.gradient_checkpointing = False
1048
+ # Initialize weights and apply final processing
1049
+ self.post_init()
1050
+
1051
+ def get_input_embeddings(self):
1052
+ return self.embed_tokens
1053
+
1054
+ def set_input_embeddings(self, value):
1055
+ self.embed_tokens = value
1056
+
1057
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1058
+ def forward(
1059
+ self,
1060
+ input_ids: torch.LongTensor = None,
1061
+ attention_mask: Optional[torch.Tensor] = None,
1062
+ position_ids: Optional[torch.LongTensor] = None,
1063
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1064
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1065
+ use_cache: Optional[bool] = None,
1066
+ output_attentions: Optional[bool] = None,
1067
+ output_hidden_states: Optional[bool] = None,
1068
+ return_dict: Optional[bool] = None,
1069
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1070
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1071
+ output_hidden_states = (
1072
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1073
+ )
1074
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1075
+
1076
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1077
+
1078
+ # retrieve input_ids and inputs_embeds
1079
+ if input_ids is not None and inputs_embeds is not None:
1080
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
1081
+ elif input_ids is not None:
1082
+ batch_size, seq_length = input_ids.shape[:2]
1083
+ elif inputs_embeds is not None:
1084
+ batch_size, seq_length = inputs_embeds.shape[:2]
1085
+ else:
1086
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
1087
+
1088
+ past_key_values_length = 0
1089
+
1090
+ if self.gradient_checkpointing and self.training:
1091
+ if use_cache:
1092
+ logger.warning_once(
1093
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
1094
+ )
1095
+ use_cache = False
1096
+
1097
+ if use_cache:
1098
+ use_legacy_cache = not isinstance(past_key_values, Cache)
1099
+ if use_legacy_cache:
1100
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1101
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
1102
+
1103
+ if position_ids is None:
1104
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1105
+ position_ids = torch.arange(
1106
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1107
+ )
1108
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
1109
+ else:
1110
+ position_ids = position_ids.view(-1, seq_length).long()
1111
+
1112
+ if inputs_embeds is None:
1113
+ inputs_embeds = self.embed_tokens(input_ids)
1114
+
1115
+ if attention_mask is not None and self._attn_implementation == 'flash_attention_2' and use_cache:
1116
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
1117
+ if is_padding_right:
1118
+ raise ValueError(
1119
+ "You are attempting to perform batched generation with padding_side='right'"
1120
+ ' this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to '
1121
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1122
+ )
1123
+
1124
+ if self._attn_implementation == 'flash_attention_2':
1125
+ # 2d mask is passed through the layers
1126
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1127
+ else:
1128
+ # 4d mask is passed through the layers
1129
+ attention_mask = _prepare_4d_causal_attention_mask(
1130
+ attention_mask,
1131
+ (batch_size, seq_length),
1132
+ inputs_embeds,
1133
+ past_key_values_length,
1134
+ sliding_window=self.config.sliding_window,
1135
+ )
1136
+
1137
+ hidden_states = inputs_embeds
1138
+
1139
+ # decoder layers
1140
+ all_hidden_states = () if output_hidden_states else None
1141
+ all_self_attns = () if output_attentions else None
1142
+ next_decoder_cache = None
1143
+
1144
+ for decoder_layer in self.layers:
1145
+ if output_hidden_states:
1146
+ all_hidden_states += (hidden_states,)
1147
+
1148
+ if self.gradient_checkpointing and self.training:
1149
+ layer_outputs = self._gradient_checkpointing_func(
1150
+ decoder_layer.__call__,
1151
+ hidden_states,
1152
+ attention_mask,
1153
+ position_ids,
1154
+ past_key_values,
1155
+ output_attentions,
1156
+ use_cache,
1157
+ )
1158
+ else:
1159
+ layer_outputs = decoder_layer(
1160
+ hidden_states,
1161
+ attention_mask=attention_mask,
1162
+ position_ids=position_ids,
1163
+ past_key_value=past_key_values,
1164
+ output_attentions=output_attentions,
1165
+ use_cache=use_cache,
1166
+ )
1167
+
1168
+ hidden_states = layer_outputs[0]
1169
+
1170
+ if use_cache:
1171
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1172
+
1173
+ if output_attentions:
1174
+ all_self_attns += (layer_outputs[1],)
1175
+
1176
+ hidden_states = self.norm(hidden_states)
1177
+
1178
+ # add hidden states from the last decoder layer
1179
+ if output_hidden_states:
1180
+ all_hidden_states += (hidden_states,)
1181
+
1182
+ next_cache = None
1183
+ if use_cache:
1184
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1185
+ if not return_dict:
1186
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1187
+ return BaseModelOutputWithPast(
1188
+ last_hidden_state=hidden_states,
1189
+ past_key_values=next_cache,
1190
+ hidden_states=all_hidden_states,
1191
+ attentions=all_self_attns,
1192
+ )
1193
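# Illustrative sketch (placeholder code, not part of the file above): the forward pass accepts
# either the legacy tuple-of-tuples cache or a `Cache` object, and converts between them with
# DynamicCache. Assumes a transformers version that ships `cache_utils` (>= 4.36).
import torch
from transformers.cache_utils import DynamicCache

legacy = tuple(
    (torch.randn(1, 8, 5, 64), torch.randn(1, 8, 5, 64))  # (key, value) per layer: (batch, kv_heads, seq, head_dim)
    for _ in range(2)
)
cache = DynamicCache.from_legacy_cache(legacy)
print(cache.get_seq_length())        # 5 tokens already cached
print(len(cache.to_legacy_cache()))  # 2 layers, back in the legacy tuple format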
+
1194
+
1195
+ class Phi3ForCausalLM(Phi3PreTrainedModel):
1196
+ _tied_weights_keys = ['lm_head.weight']
1197
+
1198
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
1199
+ def __init__(self, config):
1200
+ super().__init__(config)
1201
+ self.model = Phi3Model(config)
1202
+ self.vocab_size = config.vocab_size
1203
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1204
+
1205
+ # Initialize weights and apply final processing
1206
+ self.post_init()
1207
+
1208
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
1209
+ def get_input_embeddings(self):
1210
+ return self.model.embed_tokens
1211
+
1212
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
1213
+ def set_input_embeddings(self, value):
1214
+ self.model.embed_tokens = value
1215
+
1216
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
1217
+ def get_output_embeddings(self):
1218
+ return self.lm_head
1219
+
1220
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
1221
+ def set_output_embeddings(self, new_embeddings):
1222
+ self.lm_head = new_embeddings
1223
+
1224
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
1225
+ def set_decoder(self, decoder):
1226
+ self.model = decoder
1227
+
1228
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
1229
+ def get_decoder(self):
1230
+ return self.model
1231
+
1232
+ # Ignore copy
1233
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1234
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1235
+ def forward(
1236
+ self,
1237
+ input_ids: torch.LongTensor = None,
1238
+ attention_mask: Optional[torch.Tensor] = None,
1239
+ position_ids: Optional[torch.LongTensor] = None,
1240
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1241
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1242
+ labels: Optional[torch.LongTensor] = None,
1243
+ use_cache: Optional[bool] = None,
1244
+ output_attentions: Optional[bool] = None,
1245
+ output_hidden_states: Optional[bool] = None,
1246
+ return_dict: Optional[bool] = None,
1247
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1248
+ r"""
1249
+ Args:
1250
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1251
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1252
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1253
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1254
+
1255
+ Returns:
1256
+
1257
+ Example:
1258
+
1259
+ ```python
1260
+ >>> from transformers import AutoTokenizer, Phi3ForCausalLM
1261
+
1262
+ >>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1263
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
1264
+
1265
+ >>> prompt = "This is an example script ."
1266
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1267
+
1268
+ >>> # Generate
1269
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1270
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1271
+ 'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
1272
+ ```"""
1273
+
1274
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1275
+ output_hidden_states = (
1276
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1277
+ )
1278
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1279
+
1280
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1281
+ outputs = self.model(
1282
+ input_ids=input_ids,
1283
+ attention_mask=attention_mask,
1284
+ position_ids=position_ids,
1285
+ past_key_values=past_key_values,
1286
+ inputs_embeds=inputs_embeds,
1287
+ use_cache=use_cache,
1288
+ output_attentions=output_attentions,
1289
+ output_hidden_states=output_hidden_states,
1290
+ return_dict=return_dict,
1291
+ )
1292
+
1293
+ hidden_states = outputs[0]
1294
+ logits = self.lm_head(hidden_states)
1295
+ logits = logits.float()
1296
+
1297
+ loss = None
1298
+ if labels is not None:
1299
+ # Shift so that tokens < n predict n
1300
+ shift_logits = logits[..., :-1, :].contiguous()
1301
+ shift_labels = labels[..., 1:].contiguous()
1302
+ # Flatten the tokens
1303
+ loss_fct = CrossEntropyLoss()
1304
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1305
+ shift_labels = shift_labels.view(-1)
1306
+ # Enable model parallelism
1307
+ shift_labels = shift_labels.to(shift_logits.device)
1308
+ loss = loss_fct(shift_logits, shift_labels)
1309
+
1310
+ if not return_dict:
1311
+ output = (logits,) + outputs[1:]
1312
+ return (loss,) + output if loss is not None else output
1313
+
1314
+ return CausalLMOutputWithPast(
1315
+ loss=loss,
1316
+ logits=logits,
1317
+ past_key_values=outputs.past_key_values,
1318
+ hidden_states=outputs.hidden_states,
1319
+ attentions=outputs.attentions,
1320
+ )
1321
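# Illustrative sketch (placeholder code, not part of the file above): the loss above is the
# standard shift-by-one causal-LM cross entropy. Token t is predicted from positions < t, so
# the last logit and the first label are dropped before flattening. Toy shapes below.
import torch
import torch.nn.functional as F

logits = torch.randn(2, 6, 100)                      # (batch, seq_len, vocab)
labels = torch.randint(0, 100, (2, 6))               # targets aligned with the input tokens
shift_logits = logits[:, :-1, :].reshape(-1, 100)    # drop the prediction after the last token
shift_labels = labels[:, 1:].reshape(-1)             # drop the first label (nothing predicts it)
loss = F.cross_entropy(shift_logits, shift_labels)   # -100 labels are ignored by default
print(loss.item())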
+
1322
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
1323
+ def prepare_inputs_for_generation(
1324
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1325
+ ):
1326
+ if past_key_values is not None:
1327
+ if isinstance(past_key_values, Cache):
1328
+ cache_length = past_key_values.get_seq_length()
1329
+ past_length = past_key_values.seen_tokens
1330
+ max_cache_length = past_key_values.get_max_length()
1331
+ else:
1332
+ cache_length = past_length = past_key_values[0][0].shape[2]
1333
+ max_cache_length = None
1334
+
1335
+ # Keep only the unprocessed tokens:
1336
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1337
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1338
+ # input)
1339
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1340
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1341
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1342
+ # input_ids based on the past_length.
1343
+ elif past_length < input_ids.shape[1]:
1344
+ input_ids = input_ids[:, past_length:]
1345
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1346
+
1347
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1348
+ if (
1349
+ max_cache_length is not None
1350
+ and attention_mask is not None
1351
+ and cache_length + input_ids.shape[1] > max_cache_length
1352
+ ):
1353
+ attention_mask = attention_mask[:, -max_cache_length:]
1354
+
1355
+ position_ids = kwargs.get('position_ids', None)
1356
+ if attention_mask is not None and position_ids is None:
1357
+ # create position_ids on the fly for batch generation
1358
+ position_ids = attention_mask.long().cumsum(-1) - 1
1359
+ position_ids.masked_fill_(attention_mask == 0, 1)
1360
+ if past_key_values:
1361
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1362
+
1363
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1364
+ if inputs_embeds is not None and past_key_values is None:
1365
+ model_inputs = {'inputs_embeds': inputs_embeds}
1366
+ else:
1367
+ model_inputs = {'input_ids': input_ids}
1368
+
1369
+ model_inputs.update(
1370
+ {
1371
+ 'position_ids': position_ids,
1372
+ 'past_key_values': past_key_values,
1373
+ 'use_cache': kwargs.get('use_cache'),
1374
+ 'attention_mask': attention_mask,
1375
+ }
1376
+ )
1377
+ return model_inputs
1378
+
1379
+ @staticmethod
1380
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
1381
+ def _reorder_cache(past_key_values, beam_idx):
1382
+ reordered_past = ()
1383
+ for layer_past in past_key_values:
1384
+ reordered_past += (
1385
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1386
+ )
1387
+ return reordered_past
1388
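# Illustrative sketch (placeholder code, not part of the file above): during batched generation
# with left padding, position ids are rebuilt from the attention mask with the cumsum/masked_fill
# trick used in `prepare_inputs_for_generation`, so real tokens count up from 0 per row.
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])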
+
1389
+
1390
+ @add_start_docstrings(
1391
+ """
1392
+ The [`Phi3Model`] with a sequence classification head on top (linear layer).
1393
+
1394
+ [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1395
+ (e.g. GPT-2) do.
1396
+
1397
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1398
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1399
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1400
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1401
+ each row of the batch).
1402
+ """,
1403
+ PHI3_START_DOCSTRING,
1404
+ )
1405
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs
1406
+ class Phi3ForSequenceClassification(Phi3PreTrainedModel):
1407
+ def __init__(self, config):
1408
+ super().__init__(config)
1409
+ self.num_labels = config.num_labels
1410
+ self.model = Phi3Model(config)
1411
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1412
+
1413
+ # Initialize weights and apply final processing
1414
+ self.post_init()
1415
+
1416
+ def get_input_embeddings(self):
1417
+ return self.model.embed_tokens
1418
+
1419
+ def set_input_embeddings(self, value):
1420
+ self.model.embed_tokens = value
1421
+
1422
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1423
+ def forward(
1424
+ self,
1425
+ input_ids: torch.LongTensor = None,
1426
+ attention_mask: Optional[torch.Tensor] = None,
1427
+ position_ids: Optional[torch.LongTensor] = None,
1428
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1429
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1430
+ labels: Optional[torch.LongTensor] = None,
1431
+ use_cache: Optional[bool] = None,
1432
+ output_attentions: Optional[bool] = None,
1433
+ output_hidden_states: Optional[bool] = None,
1434
+ return_dict: Optional[bool] = None,
1435
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1436
+ r"""
1437
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1438
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1439
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1440
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1441
+ """
1442
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1443
+
1444
+ model_outputs = self.model(
1445
+ input_ids,
1446
+ attention_mask=attention_mask,
1447
+ position_ids=position_ids,
1448
+ past_key_values=past_key_values,
1449
+ inputs_embeds=inputs_embeds,
1450
+ use_cache=use_cache,
1451
+ output_attentions=output_attentions,
1452
+ output_hidden_states=output_hidden_states,
1453
+ return_dict=return_dict,
1454
+ )
1455
+ hidden_states = model_outputs[0]
1456
+ logits = self.score(hidden_states)
1457
+
1458
+ if input_ids is not None:
1459
+ batch_size = input_ids.shape[0]
1460
+ else:
1461
+ batch_size = inputs_embeds.shape[0]
1462
+
1463
+ if self.config.pad_token_id is None and batch_size != 1:
1464
+ raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
1465
+ if self.config.pad_token_id is None:
1466
+ sequence_lengths = -1
1467
+ else:
1468
+ if input_ids is not None:
1469
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1470
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1471
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1472
+ sequence_lengths = sequence_lengths.to(logits.device)
1473
+ else:
1474
+ sequence_lengths = -1
1475
+
1476
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1477
+
1478
+ loss = None
1479
+ if labels is not None:
1480
+ labels = labels.to(logits.device)
1481
+ if self.config.problem_type is None:
1482
+ if self.num_labels == 1:
1483
+ self.config.problem_type = 'regression'
1484
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1485
+ self.config.problem_type = 'single_label_classification'
1486
+ else:
1487
+ self.config.problem_type = 'multi_label_classification'
1488
+
1489
+ if self.config.problem_type == 'regression':
1490
+ loss_fct = MSELoss()
1491
+ if self.num_labels == 1:
1492
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1493
+ else:
1494
+ loss = loss_fct(pooled_logits, labels)
1495
+ elif self.config.problem_type == 'single_label_classification':
1496
+ loss_fct = CrossEntropyLoss()
1497
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1498
+ elif self.config.problem_type == 'multi_label_classification':
1499
+ loss_fct = BCEWithLogitsLoss()
1500
+ loss = loss_fct(pooled_logits, labels)
1501
+ if not return_dict:
1502
+ output = (pooled_logits,) + model_outputs[1:]
1503
+ return ((loss,) + output) if loss is not None else output
1504
+
1505
+ return SequenceClassifierOutputWithPast(
1506
+ loss=loss,
1507
+ logits=pooled_logits,
1508
+ past_key_values=model_outputs.past_key_values,
1509
+ hidden_states=model_outputs.hidden_states,
1510
+ attentions=model_outputs.attentions,
1511
+ )
1512
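# Illustrative sketch (placeholder code, not part of the file above): sequence classification
# pools the logit of the last non-padding token per row. The argmax-of-pad trick below mirrors
# the lookup in the forward pass, including the modulo that handles rows without any padding.
import torch

pad_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],    # padded after 3 tokens
                          [8, 9, 3, 4, 2]])   # no padding
seq_lengths = torch.eq(input_ids, pad_id).int().argmax(-1) - 1
seq_lengths = seq_lengths % input_ids.shape[-1]
print(seq_lengths)  # tensor([2, 4]) -> index of the last non-pad token in each row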
+
1513
+
1514
+ @add_start_docstrings(
1515
+ """
1516
+ [`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1517
+ Named-Entity-Recognition (NER) tasks.
1518
+ """,
1519
+ PHI3_START_DOCSTRING,
1520
+ )
1521
+ # Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs
1522
+ class Phi3ForTokenClassification(Phi3PreTrainedModel):
1523
+ def __init__(self, config: Phi3Config):
1524
+ super().__init__(config)
1525
+ self.num_labels = config.num_labels
1526
+
1527
+ self.model = Phi3Model(config)
1528
+ if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None:
1529
+ classifier_dropout = config.classifier_dropout
1530
+ elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None:
1531
+ classifier_dropout = config.hidden_dropout
1532
+ else:
1533
+ classifier_dropout = 0.1
1534
+ self.dropout = nn.Dropout(classifier_dropout)
1535
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1536
+
1537
+ # Initialize weights and apply final processing
1538
+ self.post_init()
1539
+
1540
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1541
+ @add_code_sample_docstrings(
1542
+ checkpoint=_CHECKPOINT_FOR_DOC,
1543
+ output_type=TokenClassifierOutput,
1544
+ config_class=_CONFIG_FOR_DOC,
1545
+ )
1546
+ def forward(
1547
+ self,
1548
+ input_ids: Optional[torch.LongTensor] = None,
1549
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1550
+ attention_mask: Optional[torch.Tensor] = None,
1551
+ inputs_embeds: Optional[torch.Tensor] = None,
1552
+ labels: Optional[torch.Tensor] = None,
1553
+ use_cache: Optional[bool] = None,
1554
+ output_attentions: Optional[bool] = None,
1555
+ output_hidden_states: Optional[bool] = None,
1556
+ return_dict: Optional[bool] = None,
1557
+ **deprecated_arguments,
1558
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1559
+ r"""
1560
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1561
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1562
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1563
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1564
+ """
1565
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1566
+
1567
+ model_outputs = self.model(
1568
+ input_ids,
1569
+ past_key_values=past_key_values,
1570
+ attention_mask=attention_mask,
1571
+ inputs_embeds=inputs_embeds,
1572
+ use_cache=use_cache,
1573
+ output_attentions=output_attentions,
1574
+ output_hidden_states=output_hidden_states,
1575
+ return_dict=return_dict,
1576
+ )
1577
+
1578
+ hidden_states = model_outputs[0]
1579
+ hidden_states = self.dropout(hidden_states)
1580
+ logits = self.classifier(hidden_states)
1581
+
1582
+ loss = None
1583
+ if labels is not None:
1584
+ # move labels to correct device to enable model parallelism
1585
+ labels = labels.to(logits.device)
1586
+ batch_size, seq_length = labels.shape
1587
+ loss_fct = CrossEntropyLoss()
1588
+ loss = loss_fct(
1589
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
1590
+ )
1591
+
1592
+ if not return_dict:
1593
+ output = (logits,) + model_outputs[2:]
1594
+ return ((loss,) + output) if loss is not None else output
1595
+
1596
+ return TokenClassifierOutput(
1597
+ loss=loss,
1598
+ logits=logits,
1599
+ hidden_states=model_outputs.hidden_states,
1600
+ attentions=model_outputs.attentions,
1601
+ )
isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/__init__.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from transformers.utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tokenizers_available,
20
+ is_torch_available,
21
+ )
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_qwen2": ["Qwen2Config"],
26
+ "tokenization_qwen2": ["Qwen2Tokenizer"],
27
+ }
28
+
29
+ try:
30
+ if not is_tokenizers_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_qwen2_fast"] = ["Qwen2TokenizerFast"]
36
+
37
+ try:
38
+ if not is_torch_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_qwen2"] = [
44
+ "Qwen2ForCausalLM",
45
+ "Qwen2Model",
46
+ "Qwen2PreTrainedModel",
47
+ "Qwen2ForSequenceClassification",
48
+ "Qwen2ForTokenClassification",
49
+ ]
50
+
51
+
52
+ if TYPE_CHECKING:
53
+ from .configuration_qwen2 import Qwen2Config
54
+ from .tokenization_qwen2 import Qwen2Tokenizer
55
+
56
+ try:
57
+ if not is_tokenizers_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .tokenization_qwen2_fast import Qwen2TokenizerFast
63
+
64
+ try:
65
+ if not is_torch_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .modeling_qwen2 import (
71
+ Qwen2ForCausalLM,
72
+ Qwen2ForSequenceClassification,
73
+ Qwen2ForTokenClassification,
74
+ Qwen2Model,
75
+ Qwen2PreTrainedModel,
76
+ )
77
+
78
+
79
+ else:
80
+ import sys
81
+
82
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
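# Illustrative sketch (placeholder code, not part of the file above): without the transformers
# `_LazyModule` machinery, the same optional-dependency guard can be written by probing for the
# extra packages and only then registering the symbols that need them.
import importlib.util

_import_structure_sketch = {"configuration_qwen2": ["Qwen2Config"]}
if importlib.util.find_spec("tokenizers") is not None:
    _import_structure_sketch["tokenization_qwen2_fast"] = ["Qwen2TokenizerFast"]
if importlib.util.find_spec("torch") is not None:
    _import_structure_sketch["modeling_qwen2"] = ["Qwen2Model", "Qwen2ForCausalLM"]
print(_import_structure_sketch)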
isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/configuration_qwen2.py ADDED
@@ -0,0 +1,140 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Qwen2 model configuration"""
16
+
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers.utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ class Qwen2Config(PretrainedConfig):
25
+ r"""
26
+ This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
27
+ Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
28
+ with the defaults will yield a similar configuration to that of
29
+ Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+
35
+ Args:
36
+ vocab_size (`int`, *optional*, defaults to 151936):
37
+ Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
38
+ `inputs_ids` passed when calling [`Qwen2Model`]
39
+ hidden_size (`int`, *optional*, defaults to 4096):
40
+ Dimension of the hidden representations.
41
+ intermediate_size (`int`, *optional*, defaults to 22016):
42
+ Dimension of the MLP representations.
43
+ num_hidden_layers (`int`, *optional*, defaults to 32):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 32):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ num_key_value_heads (`int`, *optional*, defaults to 32):
48
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
49
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
50
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
51
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
52
+ by meanpooling all the original heads within that group. For more details checkout [this
53
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
54
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
55
+ The non-linear activation function (function or string) in the decoder.
56
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
57
+ The maximum sequence length that this model might ever be used with.
58
+ initializer_range (`float`, *optional*, defaults to 0.02):
59
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
60
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
61
+ The epsilon used by the rms normalization layers.
62
+ use_cache (`bool`, *optional*, defaults to `True`):
63
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
64
+ relevant if `config.is_decoder=True`.
65
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
66
+ Whether the model's input and output word embeddings should be tied.
67
+ rope_theta (`float`, *optional*, defaults to 10000.0):
68
+ The base period of the RoPE embeddings.
69
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
70
+ Whether to use sliding window attention.
71
+ sliding_window (`int`, *optional*, defaults to 4096):
72
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
73
+ max_window_layers (`int`, *optional*, defaults to 28):
74
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
75
+ attention_dropout (`float`, *optional*, defaults to 0.0):
76
+ The dropout ratio for the attention probabilities.
77
+
78
+ ```python
79
+ >>> from transformers import Qwen2Model, Qwen2Config
80
+
81
+ >>> # Initializing a Qwen2 style configuration
82
+ >>> configuration = Qwen2Config()
83
+
84
+ >>> # Initializing a model from the Qwen2-7B style configuration
85
+ >>> model = Qwen2Model(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "qwen2"
92
+ keys_to_ignore_at_inference = ["past_key_values"]
93
+
94
+ def __init__(
95
+ self,
96
+ vocab_size=151936,
97
+ hidden_size=4096,
98
+ intermediate_size=22016,
99
+ num_hidden_layers=32,
100
+ num_attention_heads=32,
101
+ num_key_value_heads=32,
102
+ hidden_act="silu",
103
+ max_position_embeddings=32768,
104
+ initializer_range=0.02,
105
+ rms_norm_eps=1e-6,
106
+ use_cache=True,
107
+ tie_word_embeddings=False,
108
+ rope_theta=10000.0,
109
+ use_sliding_window=False,
110
+ sliding_window=4096,
111
+ max_window_layers=28,
112
+ attention_dropout=0.0,
113
+ **kwargs,
114
+ ):
115
+ self.vocab_size = vocab_size
116
+ self.max_position_embeddings = max_position_embeddings
117
+ self.hidden_size = hidden_size
118
+ self.intermediate_size = intermediate_size
119
+ self.num_hidden_layers = num_hidden_layers
120
+ self.num_attention_heads = num_attention_heads
121
+ self.use_sliding_window = use_sliding_window
122
+ self.sliding_window = sliding_window if use_sliding_window else None
123
+ self.max_window_layers = max_window_layers
124
+
125
+ # for backward compatibility
126
+ if num_key_value_heads is None:
127
+ num_key_value_heads = num_attention_heads
128
+
129
+ self.num_key_value_heads = num_key_value_heads
130
+ self.hidden_act = hidden_act
131
+ self.initializer_range = initializer_range
132
+ self.rms_norm_eps = rms_norm_eps
133
+ self.use_cache = use_cache
134
+ self.rope_theta = rope_theta
135
+ self.attention_dropout = attention_dropout
136
+
137
+ super().__init__(
138
+ tie_word_embeddings=tie_word_embeddings,
139
+ **kwargs,
140
+ )
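# Illustrative usage sketch (placeholder code, not part of the file above), assuming this file is
# importable as `configuration_qwen2`: `sliding_window` is only kept when `use_sliding_window` is
# True, otherwise it is stored as None, which downstream attention code treats as "no SWA".
from configuration_qwen2 import Qwen2Config  # hypothetical local import path

cfg = Qwen2Config(hidden_size=1024, num_hidden_layers=4, num_attention_heads=16,
                  num_key_value_heads=4, use_sliding_window=False, sliding_window=4096)
print(cfg.sliding_window)       # None - sliding-window attention disabled
print(cfg.num_key_value_heads)  # 4    - grouped-query attention (16 query heads share 4 KV heads)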
isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/modeling_qwen2.py ADDED
@@ -0,0 +1,1551 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """PyTorch Qwen2 model."""
21
+
22
+ import math
23
+ from typing import List, Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+
30
+ from transformers.activations import ACT2FN
31
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
32
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
33
+ from transformers.modeling_outputs import (
34
+ BaseModelOutputWithPast,
35
+ CausalLMOutputWithPast,
36
+ SequenceClassifierOutputWithPast,
37
+ TokenClassifierOutput,
38
+ )
39
+ from transformers.modeling_utils import PreTrainedModel
40
+ from transformers.utils import (
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ is_flash_attn_2_available,
44
+ is_flash_attn_greater_or_equal_2_10,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from .configuration_qwen2 import Qwen2Config
49
+ from ..token_pruning import select_visual_token_indices
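+ # Note: `select_visual_token_indices` is this repo's token-pruning helper (not part of upstream
+ # transformers). Judging by the call site in Qwen2Model.forward below, it supports a "topk"
+ # default plus similarity/coverage-based selection controlled by the `similarity_*` kwargs.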
50
+
51
+
52
+ if is_flash_attn_2_available():
53
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
54
+
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+
59
+ _CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
60
+ _CONFIG_FOR_DOC = "Qwen2Config"
61
+
62
+
63
+ # Copied from transformers.models.llama.modeling_llama._prepare_4d_causal_attention_mask_with_cache_position
64
+ def _prepare_4d_causal_attention_mask_with_cache_position(
65
+ attention_mask: torch.Tensor,
66
+ sequence_length: int,
67
+ target_length: int,
68
+ dtype: torch.dtype,
69
+ device: torch.device,
70
+ min_dtype: float,
71
+ cache_position: torch.Tensor,
72
+ batch_size: int,
73
+ ):
74
+ """
75
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
76
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
77
+
78
+ Args:
79
+ attention_mask (`torch.Tensor`):
80
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
81
+ sequence_length (`int`):
82
+ The sequence length being processed.
83
+ target_length (`int`):
84
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
85
+ dtype (`torch.dtype`):
86
+ The dtype to use for the 4D attention mask.
87
+ device (`torch.device`):
88
+ The device to place the 4D attention mask on.
89
+ min_dtype (`float`):
90
+ The minimum value representable with the dtype `dtype`.
91
+ cache_position (`torch.Tensor`):
92
+ Indices depicting the position of the input sequence tokens in the sequence.
93
+ batch_size (`torch.Tensor`):
94
+ Batch size.
95
+ """
96
+ if attention_mask is not None and attention_mask.dim() == 4:
97
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
98
+ causal_mask = attention_mask
99
+ else:
100
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
101
+ if sequence_length != 1:
102
+ causal_mask = torch.triu(causal_mask, diagonal=1)
103
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
104
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
105
+ if attention_mask is not None:
106
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
107
+ mask_length = attention_mask.shape[-1]
108
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
109
+ padding_mask = padding_mask == 0
110
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
111
+ padding_mask, min_dtype
112
+ )
113
+
114
+ return causal_mask
115
+
116
+
117
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
118
+ class Qwen2RMSNorm(nn.Module):
119
+ def __init__(self, hidden_size, eps=1e-6):
120
+ """
121
+ Qwen2RMSNorm is equivalent to T5LayerNorm
122
+ """
123
+ super().__init__()
124
+ self.weight = nn.Parameter(torch.ones(hidden_size))
125
+ self.variance_epsilon = eps
126
+
127
+ def forward(self, hidden_states):
128
+ input_dtype = hidden_states.dtype
129
+ hidden_states = hidden_states.to(torch.float32)
130
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
131
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
132
+ return self.weight * hidden_states.to(input_dtype)
133
+
134
+ def extra_repr(self):
135
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
136
+
137
+
138
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2
139
+ class Qwen2RotaryEmbedding(nn.Module):
140
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
141
+ super().__init__()
142
+
143
+ self.dim = dim
144
+ self.max_position_embeddings = max_position_embeddings
145
+ self.base = base
146
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
147
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
148
+
149
+ # Build here to make `torch.jit.trace` work.
150
+ self._set_cos_sin_cache(
151
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
152
+ )
153
+
154
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
155
+ self.max_seq_len_cached = seq_len
156
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
157
+
158
+ freqs = torch.outer(t, self.inv_freq)
159
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
160
+ emb = torch.cat((freqs, freqs), dim=-1)
161
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
162
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
163
+
164
+ def forward(self, x, seq_len=None):
165
+ # x: [bs, num_attention_heads, seq_len, head_size]
166
+ if seq_len > self.max_seq_len_cached:
167
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
168
+
169
+ return (
170
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
171
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
172
+ )
173
+
174
+
175
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
176
+ def rotate_half(x):
177
+ """Rotates half the hidden dims of the input."""
178
+ x1 = x[..., : x.shape[-1] // 2]
179
+ x2 = x[..., x.shape[-1] // 2 :]
180
+ return torch.cat((-x2, x1), dim=-1)
181
+
182
+
183
+ # Copied from transformers.models.mixtral.modeling_mixtral.apply_rotary_pos_emb
184
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
185
+ """Applies Rotary Position Embedding to the query and key tensors.
186
+
187
+ Args:
188
+ q (`torch.Tensor`): The query tensor.
189
+ k (`torch.Tensor`): The key tensor.
190
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
191
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
192
+ position_ids (`torch.Tensor`):
193
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
194
+ used to pass offsetted position ids when working with a KV-cache.
195
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
196
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
197
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
198
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
199
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
200
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
201
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
202
+ Returns:
203
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
204
+ """
205
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
206
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
207
+ q_embed = (q * cos) + (rotate_half(q) * sin)
208
+ k_embed = (k * cos) + (rotate_half(k) * sin)
209
+ return q_embed, k_embed
210
+
211
+
212
+ # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
213
+ class Qwen2MLP(nn.Module):
214
+ def __init__(self, config):
215
+ super().__init__()
216
+ self.hidden_size = config.hidden_size
217
+ self.intermediate_size = config.intermediate_size
218
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
219
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
220
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
221
+ self.act_fn = ACT2FN[config.hidden_act]
222
+
223
+ def forward(self, hidden_state):
224
+ return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
225
+
226
+
227
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
228
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
229
+ """
230
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
231
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
232
+ """
233
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
234
+ if n_rep == 1:
235
+ return hidden_states
236
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
237
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
238
+
239
+
240
+ class Qwen2Attention(nn.Module):
241
+ """
242
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
243
+ and "Generating Long Sequences with Sparse Transformers".
244
+ """
245
+
246
+ def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
247
+ super().__init__()
248
+ self.config = config
249
+ self.layer_idx = layer_idx
250
+ if layer_idx is None:
251
+ logger.warning_once(
252
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
253
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
254
+ "when creating this class."
255
+ )
256
+
257
+ self.hidden_size = config.hidden_size
258
+ self.num_heads = config.num_attention_heads
259
+ self.head_dim = self.hidden_size // self.num_heads
260
+ self.num_key_value_heads = config.num_key_value_heads
261
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
262
+ self.max_position_embeddings = config.max_position_embeddings
263
+ self.rope_theta = config.rope_theta
264
+ self.is_causal = True
265
+ self.attention_dropout = config.attention_dropout
266
+
267
+ if (self.head_dim * self.num_heads) != self.hidden_size:
268
+ raise ValueError(
269
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
270
+ f" and `num_heads`: {self.num_heads})."
271
+ )
272
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
273
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
274
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
275
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
276
+
277
+ self.rotary_emb = Qwen2RotaryEmbedding(
278
+ self.head_dim,
279
+ max_position_embeddings=self.max_position_embeddings,
280
+ base=self.rope_theta,
281
+ )
282
+
283
+ def forward(
284
+ self,
285
+ hidden_states: torch.Tensor,
286
+ attention_mask: Optional[torch.Tensor] = None,
287
+ position_ids: Optional[torch.LongTensor] = None,
288
+ past_key_value: Optional[Cache] = None,
289
+ output_attentions: bool = False,
290
+ use_cache: bool = False,
291
+ cache_position: Optional[torch.LongTensor] = None,
292
+ **kwargs,
293
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
294
+ bsz, q_len, _ = hidden_states.size()
295
+
296
+ query_states = self.q_proj(hidden_states)
297
+ key_states = self.k_proj(hidden_states)
298
+ value_states = self.v_proj(hidden_states)
299
+
300
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
301
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
302
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
303
+
304
+ kv_seq_len = key_states.shape[-2]
305
+ if past_key_value is not None:
306
+ if self.layer_idx is None:
307
+ raise ValueError(
308
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
309
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
310
+ "with a layer index."
311
+ )
312
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
313
+
314
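+ # Custom extension: the model-level pruning pass (see Qwen2Model.forward) drops visual tokens
+ # but keeps the surviving tokens' original position ids, so the rotary cos/sin cache must
+ # cover kv_seq_len plus the number of pruned tokens.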
+ prunded_sequence_length = kwargs["prunded_sequence_length"]
315
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length)
316
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
317
+
318
+ if past_key_value is not None:
319
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
320
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
321
+
322
+ # repeat k/v heads if n_kv_heads < n_heads
323
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
324
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
325
+
326
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
327
+
328
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
329
+ raise ValueError(
330
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
331
+ f" {attn_weights.size()}"
332
+ )
333
+
334
+ if attention_mask is not None: # no matter the length, we just slice it
335
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
336
+ attn_weights = attn_weights + causal_mask
337
+
338
+ # upcast attention to fp32
339
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
340
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
341
+ attn_output = torch.matmul(attn_weights, value_states)
342
+
343
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
344
+ raise ValueError(
345
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
346
+ f" {attn_output.size()}"
347
+ )
348
+
349
+ attn_output = attn_output.transpose(1, 2).contiguous()
350
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
351
+
352
+ attn_output = self.o_proj(attn_output)
353
+
354
+ if not output_attentions:
355
+ attn_weights = None
356
+
357
+ return attn_output, attn_weights, past_key_value
358
+
359
+
360
+ class Qwen2FlashAttention2(Qwen2Attention):
361
+ """
362
+ Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
363
+ as the weights of the module stay untouched. The only required change would be on the forward pass
364
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
365
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
366
+ config.max_window_layers layers.
367
+ """
368
+
369
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
370
+ def __init__(self, *args, **kwargs):
371
+ super().__init__(*args, **kwargs)
372
+
373
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
374
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
375
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
376
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
377
+
378
+ def forward(
379
+ self,
380
+ hidden_states: torch.Tensor,
381
+ attention_mask: Optional[torch.Tensor] = None,
382
+ position_ids: Optional[torch.LongTensor] = None,
383
+ past_key_value: Optional[Cache] = None,
384
+ output_attentions: bool = False,
385
+ use_cache: bool = False,
386
+ cache_position: Optional[torch.LongTensor] = None,
387
+ **kwargs,
388
+ ):
389
+ if output_attentions:
390
+
391
+ return super().forward(
392
+ hidden_states=hidden_states,
393
+ attention_mask=attention_mask,
394
+ position_ids=position_ids,
395
+ past_key_value=past_key_value,
396
+ output_attentions=output_attentions,
397
+ use_cache=use_cache,
398
+ **kwargs,
399
+ )
400
+
401
+
402
+ bsz, q_len, _ = hidden_states.size()
403
+
404
+ query_states = self.q_proj(hidden_states)
405
+ key_states = self.k_proj(hidden_states)
406
+ value_states = self.v_proj(hidden_states)
407
+
408
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
409
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
410
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
411
+
412
+ kv_seq_len = key_states.shape[-2]
413
+ if past_key_value is not None:
414
+ if self.layer_idx is None:
415
+ raise ValueError(
416
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
417
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
418
+ "with a layer index."
419
+ )
420
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
421
+
422
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
423
+ rotary_seq_len = (
424
+ max(kv_seq_len, position_ids[:, -1].max().item() + 1) if position_ids is not None else kv_seq_len
425
+ )
426
+
427
+ prunded_sequence_length = kwargs["prunded_sequence_length"]
428
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length)
429
+
430
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
431
+
432
+ if past_key_value is not None:
433
+ # Activate cache slicing only if the config has a `sliding_window` attribute
434
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
435
+ if (
436
+ getattr(self.config, "sliding_window", None) is not None
437
+ and kv_seq_len > self.config.sliding_window
438
+ and cache_has_contents
439
+ ):
440
+ slicing_tokens = 1 - self.config.sliding_window
441
+
442
+ past_key = past_key_value[self.layer_idx][0]
443
+ past_value = past_key_value[self.layer_idx][1]
444
+
445
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
446
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
447
+
448
+ if past_key.shape[-2] != self.config.sliding_window - 1:
449
+ raise ValueError(
450
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
451
+ f" {past_key.shape}"
452
+ )
453
+
454
+ if attention_mask is not None:
455
+ attention_mask = attention_mask[:, slicing_tokens:]
456
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
457
+
458
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
459
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
460
+
461
+ # repeat k/v heads if n_kv_heads < n_heads
462
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
463
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
464
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
465
+
466
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
467
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
468
+ # cast them back to the correct dtype just to be sure everything works as expected.
469
+ input_dtype = query_states.dtype
470
+ if input_dtype == torch.float32:
471
+ if torch.is_autocast_enabled():
472
+ target_dtype = torch.get_autocast_gpu_dtype()
473
+ # Handle the case where the model is quantized
474
+ elif hasattr(self.config, "_pre_quantization_dtype"):
475
+ target_dtype = self.config._pre_quantization_dtype
476
+ else:
477
+ target_dtype = self.q_proj.weight.dtype
478
+
479
+ logger.warning_once(
480
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
481
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
482
+ f" {target_dtype}."
483
+ )
484
+
485
+ query_states = query_states.to(target_dtype)
486
+ key_states = key_states.to(target_dtype)
487
+ value_states = value_states.to(target_dtype)
488
+
489
+ # Reshape to the expected shape for Flash Attention
490
+ query_states = query_states.transpose(1, 2)
491
+ key_states = key_states.transpose(1, 2)
492
+ value_states = value_states.transpose(1, 2)
493
+
494
+ if (
495
+ self.config.use_sliding_window
496
+ and getattr(self.config, "sliding_window", None) is not None
497
+ and self.layer_idx >= self.config.max_window_layers
498
+ ):
499
+ sliding_window = self.config.sliding_window
500
+ else:
501
+ sliding_window = None
502
+
503
+ attn_output = _flash_attention_forward(
504
+ query_states,
505
+ key_states,
506
+ value_states,
507
+ attention_mask,
508
+ q_len,
509
+ position_ids=position_ids,
510
+ dropout=dropout_rate,
511
+ sliding_window=sliding_window,
512
+ is_causal=self.is_causal,
513
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
514
+ )
515
+
516
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
517
+ attn_output = self.o_proj(attn_output)
518
+
519
+
520
+ attn_weights = None
521
+
522
+ return attn_output, attn_weights, past_key_value
523
+
524
+
525
+ # Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Qwen2
526
+ class Qwen2SdpaAttention(Qwen2Attention):
527
+ """
528
+ Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
529
+ `Qwen2Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
530
+ SDPA API.
531
+ """
532
+
533
+ # Adapted from Qwen2Attention.forward
534
+ def forward(
535
+ self,
536
+ hidden_states: torch.Tensor,
537
+ attention_mask: Optional[torch.Tensor] = None,
538
+ position_ids: Optional[torch.LongTensor] = None,
539
+ past_key_value: Optional[Cache] = None,
540
+ output_attentions: bool = False,
541
+ use_cache: bool = False,
542
+ cache_position: Optional[torch.LongTensor] = None,
543
+ **kwargs,
544
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
545
+ if output_attentions:
546
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
547
+ logger.warning_once(
548
+ "Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
549
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
550
+ )
551
+ return super().forward(
552
+ hidden_states=hidden_states,
553
+ attention_mask=attention_mask,
554
+ position_ids=position_ids,
555
+ past_key_value=past_key_value,
556
+ output_attentions=output_attentions,
557
+ use_cache=use_cache,
558
+ **kwargs,
559
+ )
560
+
561
+ bsz, q_len, _ = hidden_states.size()
562
+
563
+ query_states = self.q_proj(hidden_states)
564
+ key_states = self.k_proj(hidden_states)
565
+ value_states = self.v_proj(hidden_states)
566
+
567
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
568
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
569
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
570
+
571
+ kv_seq_len = key_states.shape[-2]
572
+ if past_key_value is not None:
573
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
574
+
575
+ prunded_sequence_length = kwargs["prunded_sequence_length"]
576
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len + prunded_sequence_length)
577
+
578
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
579
+
580
+ if past_key_value is not None:
581
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
582
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
583
+
584
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
585
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
586
+
587
+ causal_mask = attention_mask
588
+ if attention_mask is not None: # no matter the length, we just slice it
589
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
590
+
591
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
592
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
593
+ if query_states.device.type == "cuda" and attention_mask is not None:
594
+ query_states = query_states.contiguous()
595
+ key_states = key_states.contiguous()
596
+ value_states = value_states.contiguous()
597
+
598
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
599
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
600
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
601
+ is_causal = True if causal_mask is None and q_len > 1 else False
602
+
603
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
604
+ query_states,
605
+ key_states,
606
+ value_states,
607
+ attn_mask=causal_mask,
608
+ dropout_p=self.attention_dropout if self.training else 0.0,
609
+ is_causal=is_causal,
610
+ )
611
+
612
+ attn_output = attn_output.transpose(1, 2).contiguous()
613
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
614
+
615
+ attn_output = self.o_proj(attn_output)
616
+
617
+ return attn_output, None, past_key_value
618
+
619
+
620
+ QWEN2_ATTENTION_CLASSES = {
621
+ "eager": Qwen2Attention,
622
+ "flash_attention_2": Qwen2FlashAttention2,
623
+ "sdpa": Qwen2SdpaAttention,
624
+ }
625
+
626
+
627
+ class Qwen2DecoderLayer(nn.Module):
628
+ def __init__(self, config: Qwen2Config, layer_idx: int):
629
+ super().__init__()
630
+ self.hidden_size = config.hidden_size
631
+
632
+ if config.sliding_window and config._attn_implementation != "flash_attention_2":
633
+ logger.warning_once(
634
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
635
+ "unexpected results may be encountered."
636
+ )
637
+ self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
638
+
639
+ self.mlp = Qwen2MLP(config)
640
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
641
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
642
+
643
+ def forward(
644
+ self,
645
+ hidden_states: torch.Tensor,
646
+ attention_mask: Optional[torch.Tensor] = None,
647
+ position_ids: Optional[torch.LongTensor] = None,
648
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
649
+ output_attentions: Optional[bool] = False,
650
+ use_cache: Optional[bool] = False,
651
+ cache_position: Optional[torch.LongTensor] = None,
652
+ **kwargs,
653
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
654
+ """
655
+ Args:
656
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
657
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
658
+ `(batch, sequence_length)` where padding elements are indicated by 0.
659
+ output_attentions (`bool`, *optional*):
660
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
661
+ returned tensors for more detail.
662
+ use_cache (`bool`, *optional*):
663
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
664
+ (see `past_key_values`).
665
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
666
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
667
+ Indices depicting the position of the input sequence tokens in the sequence.
668
+ kwargs (`dict`, *optional*):
669
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
670
+ into the model
671
+ """
672
+
673
+ residual = hidden_states
674
+
675
+ hidden_states = self.input_layernorm(hidden_states)
676
+
677
+ # Self Attention
678
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
679
+ hidden_states=hidden_states,
680
+ attention_mask=attention_mask,
681
+ position_ids=position_ids,
682
+ past_key_value=past_key_value,
683
+ output_attentions=output_attentions,
684
+ use_cache=use_cache,
685
+ cache_position=cache_position,
686
+ **kwargs,
687
+ )
688
+ hidden_states = residual + hidden_states
689
+
690
+ # Fully Connected
691
+ residual = hidden_states
692
+ hidden_states = self.post_attention_layernorm(hidden_states)
693
+ hidden_states = self.mlp(hidden_states)
694
+ hidden_states = residual + hidden_states
695
+
696
+ outputs = (hidden_states,)
697
+
698
+ if output_attentions:
699
+ outputs += (self_attn_weights,)
700
+
701
+ if use_cache:
702
+ outputs += (present_key_value,)
703
+
704
+ return outputs
705
+
706
+
707
+ QWEN2_START_DOCSTRING = r"""
708
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
709
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
710
+ etc.)
711
+
712
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
713
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
714
+ and behavior.
715
+
716
+ Parameters:
717
+ config ([`Qwen2Config`]):
718
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
719
+ load the weights associated with the model, only the configuration. Check out the
720
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
721
+ """
722
+
723
+
724
+ @add_start_docstrings(
725
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
726
+ QWEN2_START_DOCSTRING,
727
+ )
728
+ class Qwen2PreTrainedModel(PreTrainedModel):
729
+ config_class = Qwen2Config
730
+ base_model_prefix = "model"
731
+ supports_gradient_checkpointing = True
732
+ _no_split_modules = ["Qwen2DecoderLayer"]
733
+ _skip_keys_device_placement = "past_key_values"
734
+ _supports_flash_attn_2 = True
735
+ _supports_sdpa = True
736
+ _supports_cache_class = True
737
+
738
+ def _init_weights(self, module):
739
+ std = self.config.initializer_range
740
+ if isinstance(module, nn.Linear):
741
+ module.weight.data.normal_(mean=0.0, std=std)
742
+ if module.bias is not None:
743
+ module.bias.data.zero_()
744
+ elif isinstance(module, nn.Embedding):
745
+ module.weight.data.normal_(mean=0.0, std=std)
746
+ if module.padding_idx is not None:
747
+ module.weight.data[module.padding_idx].zero_()
748
+
749
+
750
+ QWEN2_INPUTS_DOCSTRING = r"""
751
+ Args:
752
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
753
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
754
+ it.
755
+
756
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
757
+ [`PreTrainedTokenizer.__call__`] for details.
758
+
759
+ [What are input IDs?](../glossary#input-ids)
760
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
761
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
762
+
763
+ - 1 for tokens that are **not masked**,
764
+ - 0 for tokens that are **masked**.
765
+
766
+ [What are attention masks?](../glossary#attention-mask)
767
+
768
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
769
+ [`PreTrainedTokenizer.__call__`] for details.
770
+
771
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
772
+ `past_key_values`).
773
+
774
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
775
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
776
+ information on the default strategy.
777
+
778
+ - 1 indicates the head is **not masked**,
779
+ - 0 indicates the head is **masked**.
780
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
781
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
782
+ config.n_positions - 1]`.
783
+
784
+ [What are position IDs?](../glossary#position-ids)
785
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
786
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
787
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
788
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
789
+
790
+ Two formats are allowed:
791
+ - a [`~cache_utils.Cache`] instance;
792
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
793
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
794
+ cache format.
795
+
796
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
797
+ legacy cache format will be returned.
798
+
799
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
800
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
801
+ of shape `(batch_size, sequence_length)`.
802
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
803
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
804
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
805
+ model's internal embedding lookup matrix.
806
+ use_cache (`bool`, *optional*):
807
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
808
+ `past_key_values`).
809
+ output_attentions (`bool`, *optional*):
810
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
811
+ tensors for more detail.
812
+ output_hidden_states (`bool`, *optional*):
813
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
814
+ more detail.
815
+ return_dict (`bool`, *optional*):
816
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
817
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
818
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
819
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
820
+ the complete sequence length.
821
+ """
822
+
823
+
824
+ @add_start_docstrings(
825
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
826
+ QWEN2_START_DOCSTRING,
827
+ )
828
+ class Qwen2Model(Qwen2PreTrainedModel):
829
+ """
830
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
831
+
832
+ Args:
833
+ config: Qwen2Config
834
+ """
835
+
836
+ def __init__(self, config: Qwen2Config):
837
+ super().__init__(config)
838
+ self.padding_idx = config.pad_token_id
839
+ self.vocab_size = config.vocab_size
840
+
841
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
842
+ self.layers = nn.ModuleList(
843
+ [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
844
+ )
845
+ self._attn_implementation = config._attn_implementation
846
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
847
+
848
+ self.gradient_checkpointing = False
849
+ # Initialize weights and apply final processing
850
+ self.post_init()
851
+
852
+ def get_input_embeddings(self):
853
+ return self.embed_tokens
854
+
855
+ def set_input_embeddings(self, value):
856
+ self.embed_tokens = value
857
+
858
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
859
+ def forward(
860
+ self,
861
+ input_ids: torch.LongTensor = None,
862
+ attention_mask: Optional[torch.Tensor] = None,
863
+ position_ids: Optional[torch.LongTensor] = None,
864
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
865
+ inputs_embeds: Optional[torch.FloatTensor] = None,
866
+ use_cache: Optional[bool] = None,
867
+ output_attentions: Optional[bool] = None,
868
+ output_hidden_states: Optional[bool] = None,
869
+ return_dict: Optional[bool] = None,
870
+ cache_position: Optional[torch.LongTensor] = None,
871
+ visual_token_index: Optional[torch.Tensor] = None,
872
+ large_model_prune_layer: Optional[float] = None,
873
+ large_model_prune_ratio: Optional[float] = None,
874
+ large_model_prune_selection: Optional[str] = None,
875
+ large_model_similarity_target_coverage: Optional[float] = None,
876
+ large_model_similarity_min_gain: Optional[float] = None,
877
+ large_model_similarity_min_keep: Optional[int] = None,
878
+ large_model_similarity_max_keep_ratio: Optional[float] = None,
879
+ visual_token_importance: Optional[torch.Tensor] = None,
880
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
881
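+ # Custom pruning arguments (interpretation inferred from their use below): `visual_token_index`
+ # is an inclusive [start, end] pair locating the visual tokens in the sequence,
+ # `large_model_prune_layer` is the fraction of decoder layers after which pruning happens,
+ # `large_model_prune_ratio` is the fraction of visual tokens kept, and the
+ # `large_model_similarity_*` knobs parameterize the similarity-based selection strategy.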
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
882
+ output_hidden_states = (
883
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
884
+ )
885
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
886
+
887
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
888
+
889
+
890
+ # retrieve input_ids and inputs_embeds
891
+ if input_ids is not None and inputs_embeds is not None:
892
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
893
+ elif input_ids is not None:
894
+ batch_size, seq_length = input_ids.shape[:2]
895
+ elif inputs_embeds is not None:
896
+ batch_size, seq_length = inputs_embeds.shape[:2]
897
+ else:
898
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
899
+
900
+
901
+
902
+ if (input_ids is None) ^ (inputs_embeds is not None):
903
+ raise ValueError(
904
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
905
+ )
906
+
907
+ if self.gradient_checkpointing and self.training:
908
+ if use_cache:
909
+ logger.warning_once(
910
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
911
+ )
912
+ use_cache = False
913
+
914
+ use_legacy_cache = False
915
+ if use_cache and not isinstance(past_key_values, Cache) and not self.training:
916
+ use_legacy_cache = True
917
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
918
+ logger.warning_once(
919
+ "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. "
920
+ "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
921
+ )
922
+
923
+ if inputs_embeds is None:
924
+ inputs_embeds = self.embed_tokens(input_ids)
925
+
926
+ if cache_position is None:
927
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
928
+ cache_position = torch.arange(
929
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
930
+ )
931
+ if position_ids is None:
932
+ position_ids = cache_position.unsqueeze(0)
933
+
934
+ causal_mask = self._update_causal_mask(
935
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
936
+ )
937
+
938
+ hidden_states = inputs_embeds
939
+
940
+ # decoder layers
941
+ all_hidden_states = () if output_hidden_states else None
942
+ all_self_attns = () if output_attentions else None
943
+ next_decoder_cache = None
944
+
945
+
946
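+ # Visual-token pruning setup (custom, not part of upstream Qwen2): prune once at layer
+ # K = int(num_layers * large_model_prune_layer), keeping roughly `keep_ratio` of the
+ # visual tokens chosen by `select_visual_token_indices`.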
+ if large_model_prune_layer is not None:
947
+ token_prune = True
948
+ K = int(len(self.layers) * large_model_prune_layer)
949
+ keep_ratio = large_model_prune_ratio
950
+ visual_token_length = int(visual_token_index[1] - visual_token_index[0] + 1)
951
+ else:
952
+ token_prune = False
953
+
954
+ prunded_sequence_length = 0
955
+
956
+
957
+
958
+ for idx, decoder_layer in enumerate(self.layers):
959
+ if output_hidden_states:
960
+ all_hidden_states += (hidden_states,)
961
+
962
+ if self.gradient_checkpointing and self.training:
963
+ layer_outputs = self._gradient_checkpointing_func(
964
+ decoder_layer.__call__,
965
+ hidden_states,
966
+ causal_mask,
967
+ position_ids,
968
+ past_key_values,
969
+ output_attentions,
970
+ use_cache,
971
+ cache_position,
972
+ )
973
+ else:
974
+
975
+
976
+ ##### Visual-token pruning at a single layer #########
977
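+ # `hidden_states.shape[1] != 1` distinguishes the prefill pass (full sequence, tokens are
+ # actually dropped) from single-token decoding steps (only the mask and the pruned-length
+ # bookkeeping are updated).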
+ if token_prune:
978
+ if hidden_states.shape[1] != 1:
979
+ if idx == K:
980
+ device = hidden_states.device
981
+ selected_visual_index = select_visual_token_indices(
982
+ hidden_states,
983
+ visual_token_importance,
984
+ visual_token_index,
985
+ keep_ratio,
986
+ large_model_prune_selection or "topk",
987
+ similarity_target_coverage=large_model_similarity_target_coverage or 0.9,
988
+ similarity_min_gain=large_model_similarity_min_gain or 0.0,
989
+ similarity_min_keep=large_model_similarity_min_keep or 1,
990
+ similarity_max_keep_ratio=large_model_similarity_max_keep_ratio or 1.0,
991
+ ) + int(visual_token_index[0])
992
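+ # Keep everything before the visual span, the selected visual tokens, and everything after
+ # it; sorting preserves the original token order, and `position_ids` reuse the original
+ # (pre-pruning) indices.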
+ keep_indexs = torch.cat((
993
+ torch.arange(int(visual_token_index[0]), device=device),
994
+ selected_visual_index.to(device),
995
+ torch.arange(int(visual_token_index[1] + 1), seq_length, device=device),
996
+ ))
997
+ keep_indexs = keep_indexs.sort().values
998
+ hidden_states = hidden_states[:, keep_indexs,:]
999
+ if causal_mask is not None:
1000
+ causal_mask = causal_mask[:,:,:hidden_states.shape[1], :hidden_states.shape[1]]
1001
+ position_ids = keep_indexs.unsqueeze(0)
1002
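+ # Number of dropped visual tokens; passed down so the attention modules can extend their
+ # rotary cache to keep the original position ids in range.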
+ prunded_sequence_length = visual_token_length - int(visual_token_length * keep_ratio)
1003
+
1004
+
1005
+ else:
1006
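+ # Decoding step: the KV cache built during prefill already holds only the kept tokens, so
+ # only the attention mask's key dimension is sliced down by the number of pruned tokens.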
+ if idx == K:
1007
+ visual_token_length = visual_token_index[1] - visual_token_index[0] + 1
1008
+ prunded_sequence_length = visual_token_length - int(visual_token_length * keep_ratio)
1009
+ if causal_mask is not None:
1010
+ causal_mask = causal_mask[:, :, :, prunded_sequence_length:]
1011
+
1012
+
1013
+
1014
+
1015
+ layer_outputs = decoder_layer(
1016
+ hidden_states,
1017
+ attention_mask=causal_mask,
1018
+ position_ids=position_ids,
1019
+ past_key_value=past_key_values,
1020
+ output_attentions=output_attentions,
1021
+ use_cache=use_cache,
1022
+ cache_position=cache_position,
1023
+ prunded_sequence_length=prunded_sequence_length
1024
+ )
1025
+
1026
+ hidden_states = layer_outputs[0]
1027
+
1028
+ if use_cache:
1029
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1030
+
1031
+ if output_attentions:
1032
+ all_self_attns += (layer_outputs[1],)
1033
+
1034
+ hidden_states = self.norm(hidden_states)
1035
+
1036
+ # add hidden states from the last decoder layer
1037
+ if output_hidden_states:
1038
+ all_hidden_states += (hidden_states,)
1039
+
1040
+ next_cache = None
1041
+ if use_cache:
1042
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1043
+
1044
+ if not return_dict:
1045
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1046
+ return BaseModelOutputWithPast(
1047
+ last_hidden_state=hidden_states,
1048
+ past_key_values=next_cache,
1049
+ hidden_states=all_hidden_states,
1050
+ attentions=all_self_attns,
1051
+ )
1052
+
1053
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
1054
+ def _update_causal_mask(
1055
+ self,
1056
+ attention_mask: torch.Tensor,
1057
+ input_tensor: torch.Tensor,
1058
+ cache_position: torch.Tensor,
1059
+ past_key_values: Cache,
1060
+ output_attentions: bool,
1061
+ ):
1062
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
1063
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
1064
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
1065
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
1066
+
1067
+ if self.config._attn_implementation == "flash_attention_2":
1068
+ if attention_mask is not None and 0.0 in attention_mask:
1069
+ return attention_mask
1070
+ return None
1071
+
1072
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1073
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1074
+ # to infer the attention mask.
1075
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1076
+ using_static_cache = isinstance(past_key_values, StaticCache)
1077
+
1078
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1079
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
1080
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1081
+ attention_mask,
1082
+ inputs_embeds=input_tensor,
1083
+ past_key_values_length=past_seen_tokens,
1084
+ is_training=self.training,
1085
+ ):
1086
+ return None
1087
+
1088
+ dtype, device = input_tensor.dtype, input_tensor.device
1089
+ min_dtype = torch.finfo(dtype).min
1090
+ sequence_length = input_tensor.shape[1]
1091
+ if using_static_cache:
1092
+ target_length = past_key_values.get_max_length()
1093
+ else:
1094
+ target_length = (
1095
+ attention_mask.shape[-1]
1096
+ if isinstance(attention_mask, torch.Tensor)
1097
+ else past_seen_tokens + sequence_length + 1
1098
+ )
1099
+
1100
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
1101
+ causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1102
+ attention_mask,
1103
+ sequence_length=sequence_length,
1104
+ target_length=target_length,
1105
+ dtype=dtype,
1106
+ device=device,
1107
+ min_dtype=min_dtype,
1108
+ cache_position=cache_position,
1109
+ batch_size=input_tensor.shape[0],
1110
+ )
1111
+
1112
+ if (
1113
+ self.config._attn_implementation == "sdpa"
1114
+ and attention_mask is not None
1115
+ and attention_mask.device.type == "cuda"
1116
+ and not output_attentions
1117
+ ):
1118
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1119
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1120
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1121
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1122
+
1123
+ return causal_mask
1124
+
1125
+
1126
+ class Qwen2ForCausalLM(Qwen2PreTrainedModel):
1127
+ _tied_weights_keys = ["lm_head.weight"]
1128
+
1129
+ def __init__(self, config):
1130
+ super().__init__(config)
1131
+ self.model = Qwen2Model(config)
1132
+ self.vocab_size = config.vocab_size
1133
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1134
+
1135
+ # Initialize weights and apply final processing
1136
+ self.post_init()
1137
+
1138
+ def get_input_embeddings(self):
1139
+ return self.model.embed_tokens
1140
+
1141
+ def set_input_embeddings(self, value):
1142
+ self.model.embed_tokens = value
1143
+
1144
+ def get_output_embeddings(self):
1145
+ return self.lm_head
1146
+
1147
+ def set_output_embeddings(self, new_embeddings):
1148
+ self.lm_head = new_embeddings
1149
+
1150
+ def set_decoder(self, decoder):
1151
+ self.model = decoder
1152
+
1153
+ def get_decoder(self):
1154
+ return self.model
1155
+
1156
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1157
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1158
+ def forward(
1159
+ self,
1160
+ input_ids: torch.LongTensor = None,
1161
+ attention_mask: Optional[torch.Tensor] = None,
1162
+ position_ids: Optional[torch.LongTensor] = None,
1163
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1164
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1165
+ labels: Optional[torch.LongTensor] = None,
1166
+ use_cache: Optional[bool] = None,
1167
+ output_attentions: Optional[bool] = None,
1168
+ output_hidden_states: Optional[bool] = None,
1169
+ return_dict: Optional[bool] = None,
1170
+ cache_position: Optional[torch.LongTensor] = None,
1171
+ visual_token_index: Optional[torch.Tensor] = None,
1172
+ large_model_prune_layer: Optional[float] = None,
1173
+ large_model_prune_ratio: Optional[float] = None,
1174
+ large_model_prune_selection: Optional[str] = None,
1175
+ large_model_similarity_target_coverage: Optional[float] = None,
1176
+ large_model_similarity_min_gain: Optional[float] = None,
1177
+ large_model_similarity_min_keep: Optional[int] = None,
1178
+ large_model_similarity_max_keep_ratio: Optional[float] = None,
1179
+ visual_token_importance: Optional[torch.Tensor] = None,
1180
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1181
+ r"""
1182
+ Args:
1183
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1184
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1185
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1186
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1187
+
1188
+ Returns:
1189
+
1190
+ Example:
1191
+
1192
+ ```python
1193
+ >>> from transformers import AutoTokenizer, Qwen2ForCausalLM
1194
+
1195
+ >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1196
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1197
+
1198
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1199
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1200
+
1201
+ >>> # Generate
1202
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1203
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1204
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1205
+ ```"""
1206
+
1207
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1208
+ output_hidden_states = (
1209
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1210
+ )
1211
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1212
+
1213
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1214
+ outputs = self.model(
1215
+ input_ids=input_ids,
1216
+ attention_mask=attention_mask,
1217
+ position_ids=position_ids,
1218
+ past_key_values=past_key_values,
1219
+ inputs_embeds=inputs_embeds,
1220
+ use_cache=use_cache,
1221
+ output_attentions=output_attentions,
1222
+ output_hidden_states=output_hidden_states,
1223
+ return_dict=return_dict,
1224
+ cache_position=cache_position,
1225
+ visual_token_index=visual_token_index,
1226
+ large_model_prune_layer=large_model_prune_layer,
1227
+ large_model_prune_ratio=large_model_prune_ratio,
1228
+ large_model_prune_selection=large_model_prune_selection,
1229
+ large_model_similarity_target_coverage=large_model_similarity_target_coverage,
1230
+ large_model_similarity_min_gain=large_model_similarity_min_gain,
1231
+ large_model_similarity_min_keep=large_model_similarity_min_keep,
1232
+ large_model_similarity_max_keep_ratio=large_model_similarity_max_keep_ratio,
1233
+ visual_token_importance=visual_token_importance
1234
+ )
1235
+
1236
+ hidden_states = outputs[0]
1237
+ logits = self.lm_head(hidden_states)
1238
+ logits = logits.float()
1239
+
1240
+ loss = None
1241
+ if labels is not None:
1242
+ # Shift so that tokens < n predict n
1243
+ shift_logits = logits[..., :-1, :].contiguous()
1244
+ shift_labels = labels[..., 1:].contiguous()
1245
+ # Flatten the tokens
1246
+ loss_fct = CrossEntropyLoss()
1247
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1248
+ shift_labels = shift_labels.view(-1)
1249
+ # Enable model parallelism
1250
+ shift_labels = shift_labels.to(shift_logits.device)
1251
+ loss = loss_fct(shift_logits, shift_labels)
1252
+
1253
+ if not return_dict:
1254
+ output = (logits,) + outputs[1:]
1255
+ return (loss,) + output if loss is not None else output
1256
+
1257
+ return CausalLMOutputWithPast(
1258
+ loss=loss,
1259
+ logits=logits,
1260
+ past_key_values=outputs.past_key_values,
1261
+ hidden_states=outputs.hidden_states,
1262
+ attentions=outputs.attentions,
1263
+ )
1264
+
1265
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation
1266
+ def prepare_inputs_for_generation(
1267
+ self,
1268
+ input_ids,
1269
+ past_key_values=None,
1270
+ attention_mask=None,
1271
+ inputs_embeds=None,
1272
+ cache_position=None,
1273
+ position_ids=None,
1274
+ use_cache=True,
1275
+ **kwargs,
1276
+ ):
1277
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1278
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1279
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1280
+ if past_key_values is not None:
1281
+ if inputs_embeds is not None: # Exception 1
1282
+ input_ids = input_ids[:, -cache_position.shape[0] :]
1283
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1284
+ input_ids = input_ids[:, cache_position]
1285
+
1286
+ if attention_mask is not None and position_ids is None:
1287
+ # create position_ids on the fly for batch generation
1288
+ position_ids = attention_mask.long().cumsum(-1) - 1
1289
+ position_ids.masked_fill_(attention_mask == 0, 1)
1290
+ if past_key_values:
1291
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1292
+
1293
+ # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s `mode="reduce-overhead"`, as otherwise the input `position_ids` would have varying strides during decoding. Simply using `.contiguous()` is not sufficient: in the batch size = 1 case, `position_ids` is already contiguous but with a varying stride, which retriggers a capture.
1294
+ position_ids = position_ids.clone(memory_format=torch.contiguous_format)
1295
+
1296
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1297
+ if inputs_embeds is not None and cache_position[0] == 0:
1298
+ model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
1299
+ else:
1300
+ # The clone here is for the same reason as for `position_ids`.
1301
+ model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}
1302
+
1303
+ if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
1304
+ if model_inputs["inputs_embeds"] is not None:
1305
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
1306
+ device = model_inputs["inputs_embeds"].device
1307
+ else:
1308
+ batch_size, sequence_length = model_inputs["input_ids"].shape
1309
+ device = model_inputs["input_ids"].device
1310
+
1311
+ dtype = self.lm_head.weight.dtype
1312
+ min_dtype = torch.finfo(dtype).min
1313
+
1314
+ attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(
1315
+ attention_mask,
1316
+ sequence_length=sequence_length,
1317
+ target_length=past_key_values.get_max_length(),
1318
+ dtype=dtype,
1319
+ device=device,
1320
+ min_dtype=min_dtype,
1321
+ cache_position=cache_position,
1322
+ batch_size=batch_size,
1323
+ )
1324
+
1325
+ model_inputs.update(
1326
+ {
1327
+ "position_ids": position_ids,
1328
+ "cache_position": cache_position,
1329
+ "past_key_values": past_key_values,
1330
+ "use_cache": use_cache,
1331
+ "attention_mask": attention_mask,
1332
+ 'visual_token_index': kwargs.get('visual_token_index'),
1333
+ 'large_model_prune_layer': kwargs.get('large_model_prune_layer'),
1334
+ 'large_model_prune_ratio': kwargs.get('large_model_prune_ratio'),
1335
+ 'large_model_prune_selection': kwargs.get('large_model_prune_selection'),
1336
+ 'large_model_similarity_target_coverage': kwargs.get('large_model_similarity_target_coverage'),
1337
+ 'large_model_similarity_min_gain': kwargs.get('large_model_similarity_min_gain'),
1338
+ 'large_model_similarity_min_keep': kwargs.get('large_model_similarity_min_keep'),
1339
+ 'large_model_similarity_max_keep_ratio': kwargs.get('large_model_similarity_max_keep_ratio'),
1340
+ 'visual_token_importance': kwargs.get('visual_token_importance')
1341
+ }
1342
+ )
1343
+ return model_inputs
1344
+
1345
+
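The extra keyword arguments threaded through `prepare_inputs_for_generation` above mean the visual-token pruning controls can be supplied directly to `generate`, which forwards unknown model kwargs into this method. A minimal sketch, assuming a model and inputs are already loaded; every parameter value and the selection string below are illustrative assumptions, not values taken from this repository:

```python
# Hypothetical usage sketch: the pruning knobs are forwarded from generate(**kwargs)
# into prepare_inputs_for_generation and then into Qwen2Model.forward.
generate_ids = model.generate(
    inputs.input_ids,
    max_new_tokens=32,
    visual_token_index=visual_token_index,   # LongTensor of image-token positions (assumed precomputed)
    large_model_prune_layer=8,               # assumed layer index at which pruning kicks in
    large_model_prune_ratio=0.5,             # assumed fraction of visual tokens to keep
    large_model_prune_selection="random",    # assumed selection strategy
)
```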
1346
+ @add_start_docstrings(
1347
+ """
1348
+ The Qwen2 Model transformer with a sequence classification head on top (linear layer).
1349
+
1350
+ [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1351
+ (e.g. GPT-2) do.
1352
+
1353
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1354
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1355
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1356
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1357
+ each row of the batch).
1358
+ """,
1359
+ QWEN2_START_DOCSTRING,
1360
+ )
1361
+ class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
1362
+ def __init__(self, config):
1363
+ super().__init__(config)
1364
+ self.num_labels = config.num_labels
1365
+ self.model = Qwen2Model(config)
1366
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1367
+
1368
+ # Initialize weights and apply final processing
1369
+ self.post_init()
1370
+
1371
+ def get_input_embeddings(self):
1372
+ return self.model.embed_tokens
1373
+
1374
+ def set_input_embeddings(self, value):
1375
+ self.model.embed_tokens = value
1376
+
1377
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1378
+ def forward(
1379
+ self,
1380
+ input_ids: torch.LongTensor = None,
1381
+ attention_mask: Optional[torch.Tensor] = None,
1382
+ position_ids: Optional[torch.LongTensor] = None,
1383
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1384
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1385
+ labels: Optional[torch.LongTensor] = None,
1386
+ use_cache: Optional[bool] = None,
1387
+ output_attentions: Optional[bool] = None,
1388
+ output_hidden_states: Optional[bool] = None,
1389
+ return_dict: Optional[bool] = None,
1390
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1391
+ r"""
1392
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1393
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1394
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1395
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1396
+ """
1397
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1398
+
1399
+ transformer_outputs = self.model(
1400
+ input_ids,
1401
+ attention_mask=attention_mask,
1402
+ position_ids=position_ids,
1403
+ past_key_values=past_key_values,
1404
+ inputs_embeds=inputs_embeds,
1405
+ use_cache=use_cache,
1406
+ output_attentions=output_attentions,
1407
+ output_hidden_states=output_hidden_states,
1408
+ return_dict=return_dict,
1409
+ )
1410
+ hidden_states = transformer_outputs[0]
1411
+ logits = self.score(hidden_states)
1412
+
1413
+ if input_ids is not None:
1414
+ batch_size = input_ids.shape[0]
1415
+ else:
1416
+ batch_size = inputs_embeds.shape[0]
1417
+
1418
+ if self.config.pad_token_id is None and batch_size != 1:
1419
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1420
+ if self.config.pad_token_id is None:
1421
+ sequence_lengths = -1
1422
+ else:
1423
+ if input_ids is not None:
1424
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1425
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1426
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1427
+ sequence_lengths = sequence_lengths.to(logits.device)
1428
+ else:
1429
+ sequence_lengths = -1
1430
+
1431
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1432
+
1433
+ loss = None
1434
+ if labels is not None:
1435
+ labels = labels.to(logits.device)
1436
+ if self.config.problem_type is None:
1437
+ if self.num_labels == 1:
1438
+ self.config.problem_type = "regression"
1439
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1440
+ self.config.problem_type = "single_label_classification"
1441
+ else:
1442
+ self.config.problem_type = "multi_label_classification"
1443
+
1444
+ if self.config.problem_type == "regression":
1445
+ loss_fct = MSELoss()
1446
+ if self.num_labels == 1:
1447
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1448
+ else:
1449
+ loss = loss_fct(pooled_logits, labels)
1450
+ elif self.config.problem_type == "single_label_classification":
1451
+ loss_fct = CrossEntropyLoss()
1452
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1453
+ elif self.config.problem_type == "multi_label_classification":
1454
+ loss_fct = BCEWithLogitsLoss()
1455
+ loss = loss_fct(pooled_logits, labels)
1456
+ if not return_dict:
1457
+ output = (pooled_logits,) + transformer_outputs[1:]
1458
+ return ((loss,) + output) if loss is not None else output
1459
+
1460
+ return SequenceClassifierOutputWithPast(
1461
+ loss=loss,
1462
+ logits=pooled_logits,
1463
+ past_key_values=transformer_outputs.past_key_values,
1464
+ hidden_states=transformer_outputs.hidden_states,
1465
+ attentions=transformer_outputs.attentions,
1466
+ )
1467
+
1468
+
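For intuition, the padding-aware pooling in the sequence-classification forward above reduces to a small piece of index arithmetic. The tensor below is a made-up toy value, assuming `pad_token_id == 0`:

```python
import torch

# Toy example of the ONNX-friendly "last non-padding token" lookup used above.
input_ids = torch.tensor([[5, 6, 7, 0, 0]])  # 0 assumed to be pad_token_id
sequence_lengths = (torch.eq(input_ids, 0).int().argmax(-1) - 1) % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2]) -> logits[:, 2] are the pooled logits
```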
1469
+ @add_start_docstrings(
1470
+ """
1471
+ The Qwen2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1472
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1473
+ """,
1474
+ QWEN2_START_DOCSTRING,
1475
+ )
1476
+ # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2
1477
+ class Qwen2ForTokenClassification(Qwen2PreTrainedModel):
1478
+ def __init__(self, config):
1479
+ super().__init__(config)
1480
+ self.num_labels = config.num_labels
1481
+ self.model = Qwen2Model(config)
1482
+ if getattr(config, "classifier_dropout", None) is not None:
1483
+ classifier_dropout = config.classifier_dropout
1484
+ elif getattr(config, "hidden_dropout", None) is not None:
1485
+ classifier_dropout = config.hidden_dropout
1486
+ else:
1487
+ classifier_dropout = 0.1
1488
+ self.dropout = nn.Dropout(classifier_dropout)
1489
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1490
+
1491
+ # Initialize weights and apply final processing
1492
+ self.post_init()
1493
+
1494
+ def get_input_embeddings(self):
1495
+ return self.model.embed_tokens
1496
+
1497
+ def set_input_embeddings(self, value):
1498
+ self.model.embed_tokens = value
1499
+
1500
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
1501
+ def forward(
1502
+ self,
1503
+ input_ids: Optional[torch.LongTensor] = None,
1504
+ attention_mask: Optional[torch.Tensor] = None,
1505
+ position_ids: Optional[torch.LongTensor] = None,
1506
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1507
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1508
+ labels: Optional[torch.LongTensor] = None,
1509
+ use_cache: Optional[bool] = None,
1510
+ output_attentions: Optional[bool] = None,
1511
+ output_hidden_states: Optional[bool] = None,
1512
+ return_dict: Optional[bool] = None,
1513
+ ) -> Union[Tuple, TokenClassifierOutput]:
1514
+ r"""
1515
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`.
1519
+ """
1520
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1521
+
1522
+ outputs = self.model(
1523
+ input_ids,
1524
+ attention_mask=attention_mask,
1525
+ position_ids=position_ids,
1526
+ past_key_values=past_key_values,
1527
+ inputs_embeds=inputs_embeds,
1528
+ use_cache=use_cache,
1529
+ output_attentions=output_attentions,
1530
+ output_hidden_states=output_hidden_states,
1531
+ return_dict=return_dict,
1532
+ )
1533
+ sequence_output = outputs[0]
1534
+ sequence_output = self.dropout(sequence_output)
1535
+ logits = self.score(sequence_output)
1536
+
1537
+ loss = None
1538
+ if labels is not None:
1539
+ loss_fct = CrossEntropyLoss()
1540
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1541
+
1542
+ if not return_dict:
1543
+ output = (logits,) + outputs[2:]
1544
+ return ((loss,) + output) if loss is not None else output
1545
+
1546
+ return TokenClassifierOutput(
1547
+ loss=loss,
1548
+ logits=logits,
1549
+ hidden_states=outputs.hidden_states,
1550
+ attentions=outputs.attentions,
1551
+ )
isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2.py ADDED
@@ -0,0 +1,339 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Qwen2."""
16
+
17
+ import json
18
+ import os
19
+ import unicodedata
20
+ from functools import lru_cache
21
+ from typing import Optional, Tuple
22
+
23
+ import regex as re
24
+
25
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {
32
+ "vocab_file": "vocab.json",
33
+ "merges_file": "merges.txt",
34
+ }
35
+
36
+
37
+ MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}
38
+
39
+ PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
40
+
41
+
42
+ @lru_cache()
43
+ # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
44
+ def bytes_to_unicode():
45
+ """
46
+ Returns a mapping of utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
+ characters that the bpe code barfs on.
48
+
49
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
50
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
51
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
52
+ tables between utf-8 bytes and unicode strings.
53
+ """
54
+ bs = (
55
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
56
+ )
57
+ cs = bs[:]
58
+ n = 0
59
+ for b in range(2**8):
60
+ if b not in bs:
61
+ bs.append(b)
62
+ cs.append(2**8 + n)
63
+ n += 1
64
+ cs = [chr(n) for n in cs]
65
+ return dict(zip(bs, cs))
66
+
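As a quick sanity check of the mapping above, the byte-level encoding is exactly reversible; the string below is just an illustrative example:

```python
byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}

# Map raw utf-8 bytes to printable stand-in characters and back again.
encoded = "".join(byte_encoder[b] for b in "héllo".encode("utf-8"))
assert bytes(byte_decoder[c] for c in encoded).decode("utf-8") == "héllo"
```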
67
+
68
+ # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
69
+ def get_pairs(word):
70
+ """
71
+ Return set of symbol pairs in a word.
72
+
73
+ Word is represented as tuple of symbols (symbols being variable-length strings).
74
+ """
75
+ pairs = set()
76
+ prev_char = word[0]
77
+ for char in word[1:]:
78
+ pairs.add((prev_char, char))
79
+ prev_char = char
80
+ return pairs
81
+
82
+
83
+ class Qwen2Tokenizer(PreTrainedTokenizer):
84
+ """
85
+ Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding.
86
+
87
+ Same with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens so a word will
88
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
89
+
90
+ ```python
91
+ >>> from transformers import Qwen2Tokenizer
92
+
93
+ >>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
94
+ >>> tokenizer("Hello world")["input_ids"]
95
+ [9707, 1879]
96
+
97
+ >>> tokenizer(" Hello world")["input_ids"]
98
+ [21927, 1879]
99
+ ```
100
+ This is expected.
101
+
102
+ Do not use GPT2Tokenizer in its place, because the pretokenization rules differ.
103
+
104
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
105
+ this superclass for more information regarding those methods.
106
+
107
+ Args:
108
+ vocab_file (`str`):
109
+ Path to the vocabulary file.
110
+ merges_file (`str`):
111
+ Path to the merges file.
112
+ errors (`str`, *optional*, defaults to `"replace"`):
113
+ Paradigm to follow when decoding bytes to UTF-8. See
114
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
115
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
116
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
117
+ token instead.
118
+ bos_token (`str`, *optional*):
119
+ The beginning of sequence token. Not applicable for this tokenizer.
120
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
121
+ The end of sequence token.
122
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
123
+ The token used for padding, for example when batching sequences of different lengths.
124
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
125
+ Whether or not the model should cleanup the spaces that were added when splitting the input text during the
126
+ tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
127
+ split_special_tokens (`bool`, *optional*, defaults to `False`):
128
+ Whether or not the special tokens should be split during the tokenization process. The default behavior is
129
+ to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then `tokenizer.tokenize("<|endoftext|>") =
+ ['<|endoftext|>']`. Otherwise, if `split_special_tokens=True`, then `tokenizer.tokenize("<|endoftext|>")` will give `['<',
+ '|', 'endo', 'ft', 'ext', '|', '>']`. This argument is only supported for `slow` tokenizers for the moment.
132
+ """
133
+
134
+ vocab_files_names = VOCAB_FILES_NAMES
135
+ model_input_names = ["input_ids", "attention_mask"]
136
+
137
+ def __init__(
138
+ self,
139
+ vocab_file,
140
+ merges_file,
141
+ errors="replace",
142
+ unk_token="<|endoftext|>",
143
+ bos_token=None,
144
+ eos_token="<|endoftext|>",
145
+ pad_token="<|endoftext|>",
146
+ clean_up_tokenization_spaces=False,
147
+ split_special_tokens=False,
148
+ **kwargs,
149
+ ):
150
+ # Qwen vocab does not contain control tokens; added tokens need to be special
151
+ bos_token = (
152
+ AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
153
+ if isinstance(bos_token, str)
154
+ else bos_token
155
+ )
156
+ eos_token = (
157
+ AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
158
+ if isinstance(eos_token, str)
159
+ else eos_token
160
+ )
161
+ unk_token = (
162
+ AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
163
+ if isinstance(unk_token, str)
164
+ else unk_token
165
+ )
166
+ pad_token = (
167
+ AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
168
+ if isinstance(pad_token, str)
169
+ else pad_token
170
+ )
171
+
172
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
173
+ self.encoder = json.load(vocab_handle)
174
+ self.decoder = {v: k for k, v in self.encoder.items()}
175
+ self.errors = errors # how to handle errors in decoding
176
+ self.byte_encoder = bytes_to_unicode()
177
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
178
+ bpe_merges = []
179
+ with open(merges_file, encoding="utf-8") as merges_handle:
180
+ for i, line in enumerate(merges_handle):
181
+ line = line.strip()
182
+ if (i == 0 and line.startswith("#version:")) or not line:
183
+ continue
184
+ bpe_merges.append(tuple(line.split()))
185
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
186
+ # NOTE: the cache can grow without bound and will get really large for long running processes
187
+ # (esp. for texts in languages that do not use spaces between words, e.g. Chinese); technically
188
+ # not a memory leak but appears as one.
189
+ # GPT2Tokenizer has the same problem, so let's be consistent.
190
+ self.cache = {}
191
+
192
+ self.pat = re.compile(PRETOKENIZE_REGEX)
193
+
194
+ if kwargs.get("add_prefix_space", False):
195
+ logger.warning_once(
196
+ f"{self.__class__.__name__} does not support `add_prefix_space`, setting it to True has no effect."
197
+ )
198
+
199
+ super().__init__(
200
+ errors=errors,
201
+ bos_token=bos_token,
202
+ eos_token=eos_token,
203
+ pad_token=pad_token,
204
+ unk_token=unk_token,
205
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
206
+ split_special_tokens=split_special_tokens,
207
+ **kwargs,
208
+ )
209
+
210
+ @property
211
+ def vocab_size(self) -> int:
212
+ return len(self.encoder)
213
+
214
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
215
+ def get_vocab(self):
216
+ return dict(self.encoder, **self.added_tokens_encoder)
217
+
218
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
219
+ def bpe(self, token):
220
+ if token in self.cache:
221
+ return self.cache[token]
222
+ word = tuple(token)
223
+ pairs = get_pairs(word)
224
+
225
+ if not pairs:
226
+ return token
227
+
228
+ while True:
229
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
230
+ if bigram not in self.bpe_ranks:
231
+ break
232
+ first, second = bigram
233
+ new_word = []
234
+ i = 0
235
+ while i < len(word):
236
+ try:
237
+ j = word.index(first, i)
238
+ except ValueError:
239
+ new_word.extend(word[i:])
240
+ break
241
+ else:
242
+ new_word.extend(word[i:j])
243
+ i = j
244
+
245
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
246
+ new_word.append(first + second)
247
+ i += 2
248
+ else:
249
+ new_word.append(word[i])
250
+ i += 1
251
+ new_word = tuple(new_word)
252
+ word = new_word
253
+ if len(word) == 1:
254
+ break
255
+ else:
256
+ pairs = get_pairs(word)
257
+ word = " ".join(word)
258
+ self.cache[token] = word
259
+ return word
260
+
261
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
262
+ def _tokenize(self, text):
263
+ """Tokenize a string."""
264
+ bpe_tokens = []
265
+ for token in re.findall(self.pat, text):
266
+ token = "".join(
267
+ self.byte_encoder[b] for b in token.encode("utf-8")
268
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
269
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
270
+ return bpe_tokens
271
+
272
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
273
+ def _convert_token_to_id(self, token):
274
+ """Converts a token (str) in an id using the vocab."""
275
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
276
+
277
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
278
+ def _convert_id_to_token(self, index):
279
+ """Converts an index (integer) in a token (str) using the vocab."""
280
+ return self.decoder.get(index)
281
+
282
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
283
+ def convert_tokens_to_string(self, tokens):
284
+ """Converts a sequence of tokens (string) in a single string."""
285
+ text = "".join(tokens)
286
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
287
+ return text
288
+
289
+ def decode(
290
+ self,
291
+ token_ids,
292
+ skip_special_tokens: bool = False,
293
+ clean_up_tokenization_spaces: Optional[bool] = False,
294
+ spaces_between_special_tokens: bool = False,
295
+ **kwargs,
296
+ ) -> str:
297
+ # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
298
+ # and cannot be configured elsewhere, but it should default to False for Qwen2Tokenizer
299
+ return super().decode(
300
+ token_ids,
301
+ skip_special_tokens=skip_special_tokens,
302
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
303
+ spaces_between_special_tokens=spaces_between_special_tokens,
304
+ **kwargs,
305
+ )
306
+
307
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
308
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
309
+ if not os.path.isdir(save_directory):
310
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
311
+ return
312
+ vocab_file = os.path.join(
313
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
314
+ )
315
+ merge_file = os.path.join(
316
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
317
+ )
318
+
319
+ with open(vocab_file, "w", encoding="utf-8") as f:
320
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
321
+
322
+ index = 0
323
+ with open(merge_file, "w", encoding="utf-8") as writer:
324
+ writer.write("#version: 0.2\n")
325
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
326
+ if index != token_index:
327
+ logger.warning(
328
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
329
+ " Please check that the tokenizer is not corrupted!"
330
+ )
331
+ index = token_index
332
+ writer.write(" ".join(bpe_tokens) + "\n")
333
+ index += 1
334
+
335
+ return vocab_file, merge_file
336
+
337
+ def prepare_for_tokenization(self, text, **kwargs):
338
+ text = unicodedata.normalize("NFC", text)
339
+ return (text, kwargs)
isolated/sim_greedy/upstream_sgl/internvl/model/qwen2/tokenization_qwen2_fast.py ADDED
@@ -0,0 +1,134 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Qwen2."""
16
+
17
+ from typing import Optional, Tuple
18
+
19
+ from transformers.tokenization_utils import AddedToken
20
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
21
+ from transformers.utils import logging
22
+ from .tokenization_qwen2 import Qwen2Tokenizer
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {
28
+ "vocab_file": "vocab.json",
29
+ "merges_file": "merges.txt",
30
+ "tokenizer_file": "tokenizer.json",
31
+ }
32
+
33
+
34
+ MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}
35
+
36
+
37
+ class Qwen2TokenizerFast(PreTrainedTokenizerFast):
38
+ """
39
+ Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
40
+ Byte-Pair-Encoding.
41
+
42
+ Same with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens so a word will
43
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
44
+
45
+ ```python
46
+ >>> from transformers import Qwen2TokenizerFast
47
+
48
+ >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
49
+ >>> tokenizer("Hello world")["input_ids"]
50
+ [9707, 1879]
51
+
52
+ >>> tokenizer(" Hello world")["input_ids"]
53
+ [21927, 1879]
54
+ ```
55
+ This is expected.
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
58
+ refer to this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`, *optional*):
62
+ Path to the vocabulary file.
63
+ merges_file (`str`, *optional*):
64
+ Path to the merges file.
65
+ tokenizer_file (`str`, *optional*):
66
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
67
+ contains everything needed to load the tokenizer.
68
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
69
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
70
+ token instead. Not applicable to this tokenizer.
71
+ bos_token (`str`, *optional*):
72
+ The beginning of sequence token. Not applicable for this tokenizer.
73
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
74
+ The end of sequence token.
75
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
76
+ The token used for padding, for example when batching sequences of different lengths.
77
+ """
78
+
79
+ vocab_files_names = VOCAB_FILES_NAMES
80
+ model_input_names = ["input_ids", "attention_mask"]
81
+ slow_tokenizer_class = Qwen2Tokenizer
82
+
83
+ def __init__(
84
+ self,
85
+ vocab_file=None,
86
+ merges_file=None,
87
+ tokenizer_file=None,
88
+ unk_token="<|endoftext|>",
89
+ bos_token=None,
90
+ eos_token="<|endoftext|>",
91
+ pad_token="<|endoftext|>",
92
+ **kwargs,
93
+ ):
94
+ # We need to at least pass vocab_file and merges_file to base class
95
+ # in case a slow tokenizer needs to be initialized; other can be
96
+ # configured through files.
97
+ # following GPT2TokenizerFast, also adding unk_token, bos_token, and eos_token
98
+
99
+ bos_token = (
100
+ AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
101
+ if isinstance(bos_token, str)
102
+ else bos_token
103
+ )
104
+ eos_token = (
105
+ AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
106
+ if isinstance(eos_token, str)
107
+ else eos_token
108
+ )
109
+ unk_token = (
110
+ AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
111
+ if isinstance(unk_token, str)
112
+ else unk_token
113
+ )
114
+ pad_token = (
115
+ AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
116
+ if isinstance(pad_token, str)
117
+ else pad_token
118
+ )
119
+
120
+ super().__init__(
121
+ vocab_file=vocab_file,
122
+ merges_file=merges_file,
123
+ tokenizer_file=tokenizer_file,
124
+ unk_token=unk_token,
125
+ bos_token=bos_token,
126
+ eos_token=eos_token,
127
+ pad_token=pad_token,
128
+ **kwargs,
129
+ )
130
+
131
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
132
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
133
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
134
+ return tuple(files)
isolated/sim_greedy/upstream_sgl/internvl/train/__init__.py ADDED
File without changes
isolated/sim_greedy/upstream_sgl/internvl/train/constants.py ADDED
@@ -0,0 +1,15 @@
1
+ IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
2
+ IMG_START_TOKEN = '<img>'
3
+ IMG_END_TOKEN = '</img>'
4
+ QUAD_START_TOKEN = '<quad>'
5
+ QUAD_END_TOKEN = '</quad>'
6
+ REF_START_TOKEN = '<ref>'
7
+ REF_END_TOKEN = '</ref>'
8
+ BOX_START_TOKEN = '<box>'
9
+ BOX_END_TOKEN = '</box>'
10
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
11
+ IMAGENET_STD = (0.229, 0.224, 0.225)
12
+ CLIP_MEAN = (0.4814546, 0.4578275, 0.40821073)
13
+ CLIP_STD = (0.2686295, 0.2613025, 0.2757711)
14
+ SIGLIP_MEAN = (0.5, 0.5, 0.5)
15
+ SIGLIP_STD = (0.5, 0.5, 0.5)
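These placeholder tokens are the ones spliced into prompts by the `preprocess*` functions in `dataset.py` below. A minimal sketch of that composition, assuming 256 context tokens per image (the count is an illustrative value, not one defined here):

```python
# Hypothetical composition of the image placeholder; mirrors the <image> replacement
# performed in preprocess() in dataset.py.
num_image_token = 256  # assumed per-image token budget
image_tokens = f"{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token}{IMG_END_TOKEN}"
prompt = "Describe the image.\n<image>".replace("<image>", image_tokens, 1)
```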
isolated/sim_greedy/upstream_sgl/internvl/train/dataset.py ADDED
@@ -0,0 +1,726 @@
1
+ import io
2
+
3
+ from transformers.trainer_pt_utils import LabelSmoother
4
+
5
+ IGNORE_TOKEN_ID = LabelSmoother.ignore_index
6
+ import os
7
+ import random
8
+ from typing import Dict
9
+
10
+ import cv2
11
+ import imageio
12
+ import numpy as np
13
+ import torch
14
+ import torchvision.transforms as T
15
+ import transformers
16
+ from decord import VideoReader
17
+ from internvl.conversation import get_conv_template
18
+ from PIL import Image
19
+ from torch.utils.data import ConcatDataset, WeightedRandomSampler
20
+ from torchvision.transforms.functional import InterpolationMode
21
+
22
+ from .constants import (CLIP_MEAN, CLIP_STD, IMAGENET_MEAN, IMAGENET_STD,
23
+ IMG_CONTEXT_TOKEN, IMG_END_TOKEN, IMG_START_TOKEN,
24
+ SIGLIP_MEAN, SIGLIP_STD)
25
+
26
+ try:
27
+ from petrel_client.client import Client
28
+ from petrel_client.common.config import Config
29
+ except ImportError as E:
30
+ print('petrel_client is not installed. If you read data locally instead of from ceph, ignore it.')
31
+ import sys
32
+
33
+
34
+ def get_frame_indices(num_frames, vlen, sample='rand', fix_start=None, input_fps=1, max_num_frames=-1):
35
+ if sample in ['rand', 'middle']: # uniform sampling
36
+ acc_samples = min(num_frames, vlen)
37
+ # split the video into `acc_samples` intervals, and sample from each interval.
38
+ intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
39
+ ranges = []
40
+ for idx, interv in enumerate(intervals[:-1]):
41
+ ranges.append((interv, intervals[idx + 1] - 1))
42
+ if sample == 'rand':
43
+ try:
44
+ frame_indices = [random.choice(range(x[0], x[1])) for x in ranges]
45
+ except:
46
+ frame_indices = np.random.permutation(vlen)[:acc_samples]
47
+ frame_indices.sort()
48
+ frame_indices = list(frame_indices)
49
+ elif fix_start is not None:
50
+ frame_indices = [x[0] + fix_start for x in ranges]
51
+ elif sample == 'middle':
52
+ frame_indices = [(x[0] + x[1]) // 2 for x in ranges]
53
+ else:
54
+ raise NotImplementedError
55
+
56
+ if len(frame_indices) < num_frames: # padded with last frame
57
+ padded_frame_indices = [frame_indices[-1]] * num_frames
58
+ padded_frame_indices[:len(frame_indices)] = frame_indices
59
+ frame_indices = padded_frame_indices
60
+ elif 'fps' in sample: # fps0.5, sequentially sample frames at 0.5 fps
61
+ output_fps = float(sample[3:])
62
+ duration = float(vlen) / input_fps
63
+ delta = 1 / output_fps # gap between frames, this is also the clip length each frame represents
64
+ frame_seconds = np.arange(0 + delta / 2, duration + delta / 2, delta)
65
+ frame_indices = np.around(frame_seconds * input_fps).astype(int)
66
+ frame_indices = [e for e in frame_indices if e < vlen]
67
+ if max_num_frames > 0 and len(frame_indices) > max_num_frames:
68
+ frame_indices = frame_indices[:max_num_frames]
69
+ # frame_indices = np.linspace(0 + delta / 2, duration + delta / 2, endpoint=False, num=max_num_frames)
70
+ else:
71
+ raise ValueError
72
+ return frame_indices
73
+
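A small self-contained illustration of the sampler above, with arbitrarily chosen values: with `sample='middle'` it returns the midpoint of each of `num_frames` equal intervals.

```python
# 8 deterministic frame indices from a 100-frame clip, one per interval midpoint.
indices = get_frame_indices(num_frames=8, vlen=100, sample='middle')
assert len(indices) == 8 and indices == sorted(indices)
```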
74
+
75
+ def read_frames_gif(
76
+ video_path, num_frames, sample='rand', fix_start=None,
77
+ client=None, min_num_frames=4
78
+ ):
79
+ if 's3://' in video_path:
80
+ video_bytes = client.get(video_path)
81
+ gif = imageio.get_reader(io.BytesIO(video_bytes))
82
+ else:
83
+ gif = imageio.get_reader(video_path)
84
+ vlen = len(gif)
85
+
86
+ t_num_frames = np.random.randint(min_num_frames, num_frames + 1)
87
+ frame_indices = get_frame_indices(
88
+ t_num_frames, vlen, sample=sample, fix_start=fix_start
89
+ )
90
+ frames = []
91
+ for index, frame in enumerate(gif):
92
+ if index in frame_indices:
93
+ frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB).astype(np.uint8)
94
+ frame = Image.fromarray(frame)
95
+ frames.append(frame)
96
+ return frames
97
+
98
+
99
+ def read_frames_decord(
100
+ video_path, num_frames, sample='rand', fix_start=None,
101
+ client=None, clip=None, min_num_frames=4
102
+ ):
103
+ if 's3://' in video_path:
104
+ video_bytes = client.get(video_path)
105
+ video_reader = VideoReader(io.BytesIO(video_bytes), num_threads=1)
106
+ else:
107
+ video_reader = VideoReader(video_path, num_threads=1)
108
+ vlen = len(video_reader)
109
+ fps = video_reader.get_avg_fps()
110
+ duration = vlen / float(fps)
111
+ if clip:
112
+ start, end = clip
113
+ duration = end - start
114
+ vlen = int(duration * fps)
115
+ start_index = int(start * fps)
116
+
117
+ # t_num_frames = min(max(int(duration * sample_fps), min_num_frames), num_frames)
118
+ t_num_frames = np.random.randint(min_num_frames, num_frames + 1)
119
+
120
+ frame_indices = get_frame_indices(
121
+ t_num_frames, vlen, sample=sample, fix_start=fix_start,
122
+ input_fps=fps
123
+ )
124
+ if clip:
125
+ frame_indices = [f + start_index for f in frame_indices]
126
+ frames = video_reader.get_batch(frame_indices).asnumpy() # (T, H, W, C), np.uint8
127
+ frames = [Image.fromarray(frames[i]) for i in range(frames.shape[0])]
128
+ return frames
129
+
130
+
131
+ def read_frames_folder(
132
+ video_path, num_frames, sample='rand', fix_start=None,
133
+ client=None, clip=None, min_num_frames=4
134
+ ):
135
+ if 's3://' in video_path:
136
+ image_list = client.list(video_path)
137
+ frames = []
138
+ for image in image_list:
139
+ fp = os.path.join(video_path, image)
140
+ frame = Image.open(io.BytesIO(client.get(fp)))
141
+ frames.append(frame)
142
+ else:
143
+ image_list = sorted(list(os.listdir(video_path)))
144
+ frames = []
145
+ for image in image_list:
146
+ fp = os.path.join(video_path, image)
147
+ frame = Image.open(fp).convert('RGB')
148
+ frames.append(frame)
149
+ vlen = len(frames)
150
+
151
+ t_num_frames = np.random.randint(min_num_frames, num_frames + 1)
152
+
153
+ if vlen > t_num_frames:
154
+ frame_indices = get_frame_indices(
155
+ t_num_frames, vlen, sample=sample, fix_start=fix_start
156
+ )
157
+ frames = [frames[i] for i in frame_indices]
158
+ return frames
159
+
160
+
161
+ class WeightedConcatDataset(ConcatDataset):
162
+ def __init__(self, datasets, weights):
163
+ super().__init__(datasets)
164
+ self.weights = torch.DoubleTensor(weights)
165
+ self.total_size = sum(len(d) for d in datasets)
166
+ self.sampler = WeightedRandomSampler(weights=self.weights, num_samples=self.total_size, replacement=True)
167
+
168
+ def __iter__(self):
169
+ return iter(self.sampler)
170
+
171
+ def __len__(self):
172
+ return self.total_size
173
+
174
+
175
+ def pil_loader(img_str):
176
+ buff = io.BytesIO(img_str)
177
+ img = Image.open(buff)
178
+ return img.convert('RGB')
179
+
180
+
181
+ class TCSLoader(object):
182
+
183
+ def __init__(self, conf_path, sc_config_key='sensecore'):
184
+ print(f'[TCSLoader] config_path: {conf_path}')
185
+ print('--> before Client(conf_path)')
186
+ self.client = Client(conf_path)
187
+ self.sc_config_key = sc_config_key
188
+ print('--> after Client(conf_path)')
189
+
190
+ def __call__(self, fn, image_type='image', max_num_frames=-1, min_num_frames=4, sample='rand', clip=None):
191
+ if image_type == 'image':
192
+ img_value_str = self.client.get(fn)
193
+ img = pil_loader(img_value_str)
194
+ return img
195
+
196
+ elif image_type == 'video':
197
+ if fn.endswith('/'):
198
+ frames = read_frames_folder(fn, num_frames=max_num_frames, min_num_frames=min_num_frames,
199
+ client=self.client, sample=sample)
200
+ elif fn.endswith('.gif'):
201
+ frames = read_frames_gif(fn, num_frames=max_num_frames, min_num_frames=min_num_frames,
202
+ client=self.client, sample=sample)
203
+ else:
204
+ frames = read_frames_decord(fn, num_frames=max_num_frames, min_num_frames=min_num_frames,
205
+ client=self.client, sample=sample, clip=clip)
206
+ return frames
207
+
208
+
209
+ def expand2square(pil_img, background_color):
210
+ width, height = pil_img.size
211
+ if width == height:
212
+ return pil_img
213
+ elif width > height:
214
+ result = Image.new(pil_img.mode, (width, width), background_color)
215
+ result.paste(pil_img, (0, (width - height) // 2))
216
+ return result
217
+ else:
218
+ result = Image.new(pil_img.mode, (height, height), background_color)
219
+ result.paste(pil_img, ((height - width) // 2, 0))
220
+ return result
221
+
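A quick illustration of the padding helper above with made-up sizes; the fill color follows the same mean-based convention used by the `pad2square` branch of `build_transform` further down:

```python
# Pad a 640x480 image to 640x640; the shorter side is centered on a mean-colored canvas.
img = Image.new('RGB', (640, 480), (120, 120, 120))
background = tuple(int(x * 255) for x in IMAGENET_MEAN)
assert expand2square(img, background).size == (640, 640)
```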
222
+
223
+ def simulate_jpeg_degradation(quality):
224
+ def jpeg_degrade(img):
225
+ with io.BytesIO() as output:
226
+ img.convert('RGB').save(output, format='JPEG', quality=quality)
227
+ output.seek(0) # Move the reading cursor to the start of the stream
228
+ img_jpeg = Image.open(output).copy() # Use .copy() to make sure the image is loaded in memory
229
+ return img_jpeg
230
+ return jpeg_degrade
231
+
232
+
233
+ # Define the JPEG compression quality range, pre-create all JPEG compression functions
234
+ qualities = list(range(75, 101))
235
+ jpeg_degrade_functions = {quality: simulate_jpeg_degradation(quality) for quality in qualities}
236
+
237
+
238
+ def build_transform(is_train, input_size, pad2square=False, normalize_type='imagenet'):
239
+ if normalize_type == 'imagenet':
240
+ MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
241
+ elif normalize_type == 'clip':
242
+ MEAN, STD = CLIP_MEAN, CLIP_STD
243
+ elif normalize_type == 'siglip':
244
+ MEAN, STD = SIGLIP_MEAN, SIGLIP_STD
245
+ else:
246
+ raise NotImplementedError
247
+ if is_train: # use data augmentation
248
+ transform = T.Compose([
249
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
250
+ T.RandomChoice([T.Lambda(jpeg_degrade_functions[quality]) for quality in qualities]),
251
+ T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
252
+ T.ToTensor(),
253
+ T.Normalize(mean=MEAN, std=STD)
254
+ ])
255
+ else:
256
+ if pad2square is False: # now we use this transform function by default
257
+ transform = T.Compose([
258
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
259
+ T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
260
+ T.ToTensor(),
261
+ T.Normalize(mean=MEAN, std=STD)
262
+ ])
263
+ else:
264
+ transform = T.Compose([
265
+ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
266
+ T.Lambda(lambda img: expand2square(img, tuple(int(x * 255) for x in MEAN))),
267
+ T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
268
+ T.ToTensor(),
269
+ T.Normalize(mean=MEAN, std=STD)
270
+ ])
271
+
272
+ return transform
273
+
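A minimal usage sketch of the transform factory above; the input size, normalization type, and file name are assumptions for illustration only:

```python
# Eval-time pipeline: RGB convert -> bicubic resize -> tensor -> ImageNet normalization.
transform = build_transform(is_train=False, input_size=448, normalize_type='imagenet')
pixel_values = transform(Image.open('example.jpg'))  # hypothetical file; yields a (3, 448, 448) float tensor
```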
274
+
275
+ def preprocess(
276
+ template_name,
277
+ sources,
278
+ tokenizer: transformers.PreTrainedTokenizer,
279
+ num_image_token_list: list,
280
+ text_only: bool = False,
281
+ group_by_length: bool = False,
282
+ use_packed_ds: bool = False,
283
+ ds_name: str = None,
284
+ num_image: int = 1
285
+ ) -> Dict:
286
+ conv = get_conv_template(template_name)
287
+ roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
288
+
289
+ # Apply prompt templates
290
+ conversations = []
291
+ for i, source in enumerate(sources):
292
+ if roles[source[0]['from']] != conv.roles[0]:
293
+ # Skip the first one if it is not from human
294
+ source = source[1:]
295
+
296
+ conv.messages = []
297
+ for j, sentence in enumerate(source):
298
+ role = roles[sentence['from']]
299
+ assert role == conv.roles[j % 2], f'{i}'
300
+ conv.append_message(role, sentence['value'])
301
+ conversations.append(conv.get_prompt())
302
+
303
+ if not text_only:
304
+ new_conversations = []
305
+ for conversation in conversations:
306
+ for i in range(num_image):
307
+ image_tokens = f'{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token_list[i]}{IMG_END_TOKEN}'
308
+ conversation = conversation.replace('<image>', image_tokens, 1)
309
+ new_conversations.append(conversation)
310
+ conversations = new_conversations
311
+
312
+ # Tokenize conversations
313
+ input_ids = tokenizer(
314
+ conversations,
315
+ return_tensors='pt',
316
+ padding=False if group_by_length or use_packed_ds else 'max_length',
317
+ max_length=tokenizer.model_max_length,
318
+ truncation=True,
319
+ ).input_ids
320
+ targets = input_ids.clone()
321
+
322
+ # assert conv.sep_style == SeparatorStyle.ADD_COLON_TWO
323
+
324
+ # Mask targets. Only compute loss on the assistant outputs.
325
+ sep = conv.sep + conv.roles[1] + ': '
326
+ for conversation, target in zip(conversations, targets):
327
+ total_len = int(target.ne(tokenizer.pad_token_id).sum())
328
+
329
+ turns = conversation.split(conv.sep2)
330
+ cur_len = 1
331
+ target[:cur_len] = IGNORE_TOKEN_ID
332
+ for i, turn in enumerate(turns):
333
+ if turn == '':
334
+ break
335
+ turn_len = len(tokenizer(turn).input_ids)
336
+
337
+ parts = turn.split(sep)
338
+ if len(parts) != 2:
339
+ break
340
+ parts[0] += sep
341
+ # "-2" is hardcoded for the Llama tokenizer to make the offset correct.
342
+ instruction_len = len(tokenizer(parts[0]).input_ids) - 2
343
+
344
+ if i != 0 and not tokenizer.legacy:
345
+ # The legacy and non-legacy modes handle special tokens differently
346
+ instruction_len -= 1
347
+
348
+ # Ignore the user instructions
349
+ target[cur_len: cur_len + instruction_len] = IGNORE_TOKEN_ID
350
+ cur_len += turn_len
351
+
352
+ if i != 0 and not tokenizer.legacy:
353
+ # The legacy and non-legacy modes handle special tokens differently
354
+ cur_len -= 1
355
+
356
+ target[cur_len:] = IGNORE_TOKEN_ID
357
+
358
+ if False: # Inspect and check the correctness of masking
359
+ z = target.clone()
360
+ z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z)
361
+ logger.info(tokenizer.decode(z))
362
+ exit()
363
+
364
+ if cur_len < tokenizer.model_max_length:
365
+ if cur_len != total_len:
366
+ target[:] = IGNORE_TOKEN_ID
367
+ print(
368
+ f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}.'
369
+ f' #turn = {len(turns) - 1}. (ignored). This dataset is {ds_name}.'
370
+ )
371
+ sys.stdout.flush()
372
+
373
+ return dict(
374
+ input_ids=input_ids,
375
+ labels=targets,
376
+ attention_mask=input_ids.ne(tokenizer.pad_token_id),
377
+ )
378
+
379
+
380
+ def preprocess_mpt(
381
+ template_name,
382
+ sources,
383
+ tokenizer: transformers.PreTrainedTokenizer,
384
+ num_image_token_list: list,
385
+ text_only: bool = False,
386
+ group_by_length: bool = False,
387
+ use_packed_ds: bool = False,
388
+ ds_name: str = None,
389
+ num_image: int = 1
390
+ ) -> Dict:
391
+ conv = get_conv_template(template_name)
392
+ roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
393
+
394
+ # Apply prompt templates
395
+ conversations = []
396
+ for i, source in enumerate(sources):
397
+ if roles[source[0]['from']] != conv.roles[0]:
398
+ # Skip the first one if it is not from human
399
+ source = source[1:]
400
+
401
+ conv.messages = []
402
+ for j, sentence in enumerate(source):
403
+ role = roles[sentence['from']]
404
+ assert role == conv.roles[j % 2], f'{i}'
405
+ conv.append_message(role, sentence['value'])
406
+ conversations.append(conv.get_prompt())
407
+
408
+ if not text_only:
409
+ new_conversations = []
410
+ for conversation in conversations:
411
+ for i in range(num_image):
412
+ image_tokens = f'{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token_list[i]}{IMG_END_TOKEN}'
413
+ conversation = conversation.replace('<image>', image_tokens, 1)
414
+ new_conversations.append(conversation)
415
+ conversations = new_conversations
416
+
417
+ # Tokenize conversations
418
+ input_ids = tokenizer(
419
+ conversations,
420
+ return_tensors='pt',
421
+ padding=False if group_by_length or use_packed_ds else 'max_length',
422
+ max_length=tokenizer.model_max_length,
423
+ truncation=True,
424
+ ).input_ids
425
+ targets = input_ids.clone()
426
+
427
+ # Mask targets. Only compute loss on the assistant outputs.
428
+ sep = conv.sep + conv.roles[1] # <|im_end|><|im_start|>assistant\n
429
+ for conversation, target in zip(conversations, targets):
430
+ total_len = int(target.ne(tokenizer.pad_token_id).sum())
431
+
432
+ turns = conversation.split(conv.sep)
433
+ re_turns = [conv.sep.join(turns[:3])] # system + user + gpt
434
+ for conv_idx in range(3, len(turns), 2):
435
+ re_turns.append(conv.sep.join(turns[conv_idx:conv_idx + 2])) # user + gpt
436
+ cur_len = 0
437
+ target[:cur_len] = IGNORE_TOKEN_ID
438
+ for i, turn in enumerate(re_turns):
439
+ if turn == '':
440
+ break
441
+ turn_len = len(tokenizer(turn).input_ids) + 1
442
+
443
+ parts = turn.split(sep)
444
+ if len(parts) != 2:
445
+ break
446
+ parts[0] += sep
447
+ instruction_len = len(tokenizer(parts[0]).input_ids)
448
+
449
+ # Ignore the user instructions
450
+ target[cur_len: cur_len + instruction_len] = IGNORE_TOKEN_ID
451
+ # print(f'[question {i}]', tokenizer.decode(input_ids[:, cur_len: cur_len + instruction_len][0]))
452
+ # print(f'[answer {i}]', tokenizer.decode(input_ids[:, cur_len + instruction_len: cur_len + turn_len][0]))
453
+ # print(f'[label {i}]', target[cur_len + instruction_len: cur_len + turn_len])
454
+ cur_len += turn_len
455
+
456
+ target[cur_len:] = IGNORE_TOKEN_ID
457
+
458
+ if cur_len < tokenizer.model_max_length:
459
+ if cur_len != total_len:
460
+ target[:] = IGNORE_TOKEN_ID
461
+ print(
462
+ f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}.'
463
+ f' #turn = {len(turns) - 1}. (ignored). This dataset is {ds_name}.'
464
+ )
465
+ sys.stdout.flush()
466
+
467
+ return dict(
468
+ input_ids=input_ids,
469
+ labels=targets,
470
+ attention_mask=input_ids.ne(tokenizer.pad_token_id),
471
+ )
472
+
473
+
474
+ def preprocess_phi3(
475
+ template_name,
476
+ sources,
477
+ tokenizer: transformers.PreTrainedTokenizer,
478
+ num_image_token_list: list,
479
+ text_only: bool = False,
480
+ group_by_length: bool = False,
481
+ use_packed_ds: bool = False,
482
+ ds_name: str = None,
483
+ num_image: int = 1
484
+ ) -> Dict:
485
+ conv = get_conv_template(template_name)
486
+ roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
487
+
488
+ # Apply prompt templates
489
+ conversations = []
490
+ for i, source in enumerate(sources):
491
+ if roles[source[0]['from']] != conv.roles[0]:
492
+ # Skip the first one if it is not from human
493
+ source = source[1:]
494
+
495
+ conv.messages = []
496
+ for j, sentence in enumerate(source):
497
+ role = roles[sentence['from']]
498
+ assert role == conv.roles[j % 2], f'{i}'
499
+ conv.append_message(role, sentence['value'])
500
+ conversations.append(conv.get_prompt())
501
+
502
+ if not text_only:
503
+ new_conversations = []
504
+ for conversation in conversations:
505
+ for i in range(num_image):
506
+ image_tokens = f'{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token_list[i]}{IMG_END_TOKEN}'
507
+ conversation = conversation.replace('<image>', image_tokens, 1)
508
+ new_conversations.append(conversation)
509
+ conversations = new_conversations
510
+
511
+ # Tokenize conversations
512
+ tokenizer.padding_side = 'right'
513
+ input_ids = tokenizer(
514
+ conversations,
515
+ return_tensors='pt',
516
+ padding=False if group_by_length or use_packed_ds else 'max_length',
517
+ max_length=tokenizer.model_max_length,
518
+ truncation=True,
519
+ ).input_ids
520
+ targets = input_ids.clone()
521
+
522
+ # Mask targets. Only compute loss on the assistant outputs.
523
+ sep = conv.sep + conv.roles[1] # <|end|>\n<|assistant|>
524
+ for conversation, target in zip(conversations, targets):
525
+ total_len = int(target.ne(int(tokenizer.pad_token_id)).sum())
526
+
527
+ turns = conversation.split(conv.sep)
528
+ re_turns = [conv.sep.join(turns[:3])] # system + user + gpt
529
+ for conv_idx in range(3, len(turns), 2):
530
+ re_turns.append(conv.sep.join(turns[conv_idx:conv_idx + 2])) # user + gpt
531
+ cur_len = 1
532
+ target[:cur_len] = IGNORE_TOKEN_ID
533
+ endoftext_id = tokenizer.convert_tokens_to_ids('<|endoftext|>')
534
+ target[target == endoftext_id] = IGNORE_TOKEN_ID
535
+
536
+ for i, turn in enumerate(re_turns):
537
+ if turn == '':
538
+ break
539
+ if i == 0:
540
+ turn_len = len(tokenizer(turn).input_ids)
541
+ else:
542
+ turn_len = len(tokenizer(turn).input_ids) - 1
543
+ parts = turn.split(sep)
544
+ if len(parts) != 2:
545
+ break
546
+ parts[0] += sep
547
+
548
+ if i == 0:
549
+ instruction_len = len(tokenizer(parts[0]).input_ids) - 1
550
+ else:
551
+ instruction_len = len(tokenizer(parts[0]).input_ids) - 2
552
+
553
+ # Ignore the user instructions
554
+ target[cur_len: cur_len + instruction_len] = IGNORE_TOKEN_ID
555
+ # print(f'[question {i}]', tokenizer.decode(input_ids[:, cur_len: cur_len + instruction_len][0]))
556
+ # print(f'[answer {i}]', tokenizer.decode(input_ids[:, cur_len + instruction_len: cur_len + turn_len][0]))
557
+ # print(f'[label {i}]', target[cur_len + instruction_len: cur_len + turn_len])
558
+ cur_len += turn_len
559
+
560
+ target[cur_len:] = IGNORE_TOKEN_ID
561
+
562
+ if False: # Inspect and check the correctness of masking
563
+ z = target.clone()
564
+ z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z)
565
+ print(repr(tokenizer.decode(z)))
566
+
567
+ if cur_len < tokenizer.model_max_length:
568
+ if cur_len != total_len:
569
+ target[:] = IGNORE_TOKEN_ID
570
+ print(
571
+ f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}.'
572
+ f' #turn = {len(turns) - 1}. (ignored). This dataset is {ds_name}.'
573
+ )
574
+ sys.stdout.flush()
575
+
576
+ return dict(
577
+ input_ids=input_ids,
578
+ labels=targets,
579
+ attention_mask=input_ids.ne(tokenizer.pad_token_id),
580
+ )
581
+
582
+
583
+ def preprocess_internlm(
584
+ template_name,
585
+ sources,
586
+ tokenizer: transformers.PreTrainedTokenizer,
587
+ num_image_token_list: list,
588
+ text_only: bool = False,
589
+ group_by_length: bool = False,
590
+ use_packed_ds: bool = False,
591
+ ds_name: str = None,
592
+ num_image: int = 1
593
+ ) -> Dict:
594
+ conv = get_conv_template(template_name)
595
+ roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
596
+
597
+ # Apply prompt templates
598
+ conversations = []
599
+ for i, source in enumerate(sources):
600
+ if roles[source[0]['from']] != conv.roles[0]:
601
+ # Skip the first one if it is not from human
602
+ source = source[1:]
603
+
604
+ conv.messages = []
605
+ for j, sentence in enumerate(source):
606
+ role = roles[sentence['from']]
607
+ assert role == conv.roles[j % 2], f'{i}'
608
+ sentence['value'] = sentence['value'].strip()
609
+ conv.append_message(role, sentence['value'])
610
+ conversations.append(conv.get_prompt())
611
+
612
+ if not text_only:
613
+ new_conversations = []
614
+ for conversation in conversations:
615
+ for i in range(num_image):
616
+ image_tokens = f'{IMG_START_TOKEN}{IMG_CONTEXT_TOKEN * num_image_token_list[i]}{IMG_END_TOKEN}'
617
+ conversation = conversation.replace('<image>', image_tokens, 1)
618
+ new_conversations.append(conversation)
619
+ conversations = new_conversations
620
+
621
+ # Tokenize conversations
622
+ input_ids = tokenizer(
623
+ conversations,
624
+ return_tensors='pt',
625
+ padding=False if group_by_length or use_packed_ds else 'max_length',
626
+ max_length=tokenizer.model_max_length,
627
+ truncation=True,
628
+ ).input_ids
629
+ targets = input_ids.clone()
630
+
631
+ for conversation, target in zip(conversations, targets):
632
+         total_len = int(target.ne(tokenizer.pad_token_id).sum())  # in InternLM, pad_token_id = eos_token_id
633
+ cur_len = 1
634
+ target[:cur_len] = IGNORE_TOKEN_ID # <s>
635
+ parts = conversation.split(conv.roles[1]) # [UNUSED_TOKEN_146]assistant\n
636
+ info = parts[0] + conv.roles[1]
637
+         temp_len = len(tokenizer(info).input_ids) - 1  # drop the <s> added by the tokenizer
638
+ target[cur_len: cur_len + temp_len] = IGNORE_TOKEN_ID
639
+ cur_len = cur_len + temp_len
640
+
641
+ for index in range(1, len(parts) - 1):
642
+ info = parts[index]
643
+ part1, part2 = info.split(conv.roles[0])
644
+ temp_len = len(tokenizer(part1).input_ids) - 1
645
+ cur_len = cur_len + temp_len
646
+ part = conv.roles[0] + part2 + conv.roles[1]
647
+ temp_len = len(tokenizer(part).input_ids) - 1
648
+ target[cur_len: cur_len + temp_len] = IGNORE_TOKEN_ID
649
+ cur_len = cur_len + temp_len
650
+ last_info = parts[-1]
651
+ temp_len = len(tokenizer(last_info).input_ids) - 1
652
+ cur_len = cur_len + temp_len
653
+
654
+ target[cur_len:] = IGNORE_TOKEN_ID
655
+ if False: # Inspect and check the correctness of masking
656
+ z = target.clone()
657
+ z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z)
658
+ print(repr(tokenizer.decode(z)))
659
+
660
+ if cur_len < tokenizer.model_max_length:
661
+ if cur_len != total_len:
662
+ target[:] = IGNORE_TOKEN_ID
663
+ print(f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}. This dataset is {ds_name}.')
664
+ sys.stdout.flush()
665
+
666
+ return dict(
667
+ input_ids=input_ids,
668
+ labels=targets,
669
+ attention_mask=input_ids.ne(tokenizer.pad_token_id),
670
+ )
671
+
672
+
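A toy sketch (editorial, not part of the uploaded file) of the span-masking idea implemented by preprocess_internlm above: every position that is not part of an assistant reply is set to IGNORE_TOKEN_ID (-100), so the loss is computed only on assistant outputs. The token ids and kept span below are made up for illustration.

IGNORE = -100  # same value as IGNORE_TOKEN_ID

def mask_non_assistant(token_ids, keep_spans):
    # token_ids: the tokenized conversation; keep_spans: (start, end) ranges of assistant replies
    labels = [IGNORE] * len(token_ids)
    for start, end in keep_spans:
        labels[start:end] = token_ids[start:end]
    return labels

print(mask_non_assistant(list(range(10)), [(4, 7)]))
# -> [-100, -100, -100, -100, 4, 5, 6, -100, -100, -100]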
673
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
674
+ best_ratio_diff = float('inf')
675
+ best_ratio = (1, 1)
676
+ area = width * height
677
+ for ratio in target_ratios:
678
+ target_aspect_ratio = ratio[0] / ratio[1]
679
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
680
+ if ratio_diff < best_ratio_diff:
681
+ best_ratio_diff = ratio_diff
682
+ best_ratio = ratio
683
+ elif ratio_diff == best_ratio_diff:
684
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
685
+ best_ratio = ratio
686
+ # print(f'width: {width}, height: {height}, best_ratio: {best_ratio}')
687
+ return best_ratio
688
+
689
+
690
+ def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False):
691
+ orig_width, orig_height = image.size
692
+ aspect_ratio = orig_width / orig_height
693
+
694
+ # calculate the existing image aspect ratio
695
+ target_ratios = set(
696
+ (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
697
+ i * j <= max_num and i * j >= min_num)
698
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
699
+
700
+ # find the closest aspect ratio to the target
701
+ target_aspect_ratio = find_closest_aspect_ratio(
702
+ aspect_ratio, target_ratios, orig_width, orig_height, image_size)
703
+
704
+ # calculate the target width and height
705
+ target_width = image_size * target_aspect_ratio[0]
706
+ target_height = image_size * target_aspect_ratio[1]
707
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
708
+
709
+ # resize the image
710
+ resized_img = image.resize((target_width, target_height))
711
+ processed_images = []
712
+ for i in range(blocks):
713
+ box = (
714
+ (i % (target_width // image_size)) * image_size,
715
+ (i // (target_width // image_size)) * image_size,
716
+ ((i % (target_width // image_size)) + 1) * image_size,
717
+ ((i // (target_width // image_size)) + 1) * image_size
718
+ )
719
+ # split the image
720
+ split_img = resized_img.crop(box)
721
+ processed_images.append(split_img)
722
+ assert len(processed_images) == blocks
723
+ if use_thumbnail and len(processed_images) != 1:
724
+ thumbnail_img = image.resize((image_size, image_size))
725
+ processed_images.append(thumbnail_img)
726
+ return processed_images
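A minimal usage sketch for dynamic_preprocess above, assuming PIL is installed and a local example.jpg exists; the file name, resolution, and tile counts are illustrative only.

from PIL import Image
# from internvl.train.dataset import dynamic_preprocess  # assumed import path for this module

img = Image.open('example.jpg').convert('RGB')   # e.g. a 1280x720 photo, aspect ratio ~1.78
tiles = dynamic_preprocess(img, min_num=1, max_num=6, image_size=448, use_thumbnail=True)
# For a 16:9 input the closest grid with at most 6 tiles is 2x1, so this returns
# two 448x448 crops plus one 448x448 thumbnail: 3 PIL images in total.
print(len(tiles), [t.size for t in tiles])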
isolated/sim_greedy/upstream_sgl/internvl/train/internvl_chat_finetune.py ADDED
@@ -0,0 +1,847 @@
1
+ import gc
2
+ import json
3
+ import logging
4
+ import math
5
+ import os
6
+ import random
7
+ import sys
8
+ import traceback
9
+ import warnings
10
+ from copy import deepcopy
11
+ from dataclasses import dataclass, field
12
+ from typing import Dict, Optional
13
+
14
+ import numpy as np
15
+ import torch
16
+ import torch.distributed as dist
17
+ import transformers
18
+ from internvl.dist_utils import init_dist
19
+ from internvl.model.internlm2.modeling_internlm2 import InternLM2ForCausalLM
20
+ from internvl.model.internvl_chat import (InternVisionConfig,
21
+ InternVisionModel,
22
+ InternVLChatConfig,
23
+ InternVLChatModel)
24
+ from internvl.patch import (concat_pad_data_collator,
25
+ replace_llama_rmsnorm_with_fused_rmsnorm,
26
+ replace_train_sampler)
27
+ from internvl.train.constants import (BOX_END_TOKEN, BOX_START_TOKEN,
28
+ IMG_CONTEXT_TOKEN, IMG_END_TOKEN,
29
+ IMG_START_TOKEN, QUAD_END_TOKEN,
30
+ QUAD_START_TOKEN, REF_END_TOKEN,
31
+ REF_START_TOKEN)
32
+ from internvl.train.dataset import (ConcatDataset, TCSLoader,
33
+ WeightedConcatDataset, build_transform,
34
+ dynamic_preprocess, preprocess,
35
+ preprocess_internlm, preprocess_mpt,
36
+ preprocess_phi3)
37
+ from internvl.train.trainer_monkey_patch import replace_create_optimizer
38
+ from PIL import Image, ImageFile, PngImagePlugin, UnidentifiedImageError
39
+ from torch.utils.data import Dataset
40
+ from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
41
+ HfArgumentParser, Trainer, TrainingArguments,
42
+ set_seed)
43
+ from transformers.trainer_utils import get_last_checkpoint
44
+ from transformers.utils.logging import (enable_default_handler,
45
+ enable_explicit_format, set_verbosity)
46
+
47
+ # Apply necessary patches for the transformers library
48
+ replace_llama_rmsnorm_with_fused_rmsnorm()
49
+ replace_train_sampler()
50
+
51
+ # Try to import petrel_client for image loading, fallback to PIL if unavailable
52
+ try:
53
+ from petrel_client.client import Client
54
+ from petrel_client.common.config import Config
55
+ has_tcs_loader = True
56
+ except ImportError as E:
57
+ print('petrel_client is not installed. Using PIL to load images.')
58
+ has_tcs_loader = False
59
+
60
+ # Set constants for image processing and logging
61
+ IGNORE_INDEX = -100
62
+ Image.MAX_IMAGE_PIXELS = None
63
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
64
+ MaximumDecompressedSize = 1024
65
+ MegaByte = 2 ** 20
66
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
67
+
68
+ warnings.filterwarnings('ignore')
69
+ logger = logging.getLogger(__name__)
70
+
71
+ os.environ['TOKENIZERS_PARALLELISM'] = 'true'
72
+
73
+
74
+ @dataclass
75
+ class ModelArguments:
76
+ """
77
+ Arguments for specifying model, tokenizer, and configurations.
78
+ """
79
+ model_name_or_path: Optional[str] = field(
80
+ default=None,
81
+ metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
82
+ )
83
+ vision_path: Optional[str] = field(
84
+ default=None,
85
+ metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
86
+ )
87
+ llm_path: Optional[str] = field(
88
+ default=None,
89
+ metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
90
+ )
91
+ mlp_path: Optional[str] = field(
92
+ default=None,
93
+ metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
94
+ )
95
+ freeze_llm: bool = field(
96
+ default=False,
97
+ metadata={'help': 'Set to True to freeze the LLM decoder.'},
98
+ )
99
+ freeze_backbone: bool = field(
100
+ default=False,
101
+ metadata={'help': 'Set to True to freeze the vision backbone of the model.'},
102
+ )
103
+ freeze_mlp: bool = field(
104
+ default=False,
105
+ metadata={'help': 'Set to True to freeze the MLP layers of the model.'},
106
+ )
107
+ unfreeze_vit_layers: int = field(
108
+ default=0,
109
+ metadata={'help': 'Specify the number of ViT layers to unfreeze. Default is 0.'},
110
+ )
111
+ vision_select_layer: int = field(
112
+ default=-1,
113
+ metadata={'help': 'Specify the layer of ViT feature map to use. Default is last layer.'},
114
+ )
115
+ use_backbone_lora: int = field(
116
+ default=0,
117
+ metadata={'help': 'Set the LoRA adapter rank for the backbone model. Default is 0.'}
118
+ )
119
+ use_llm_lora: int = field(
120
+ default=0,
121
+ metadata={'help': 'Set the LoRA adapter rank for the LLM. Default is 0.'}
122
+ )
123
+ unfreeze_lm_head: bool = field(
124
+ default=False,
125
+ metadata={'help': "Set to True to unfreeze the language model's head."},
126
+ )
127
+ use_custom_trainer: bool = field(
128
+ default=False,
129
+ metadata={'help': 'Set to True to enable the use of a custom trainer.'},
130
+ )
131
+ grad_checkpoint: Optional[bool] = field(
132
+ default=False,
133
+ metadata={'help': 'Set to True to use gradient checkpointing.'},
134
+ )
135
+ drop_path_rate: float = field(
136
+ default=0.0,
137
+ metadata={'help': 'Set the drop path rate for the ViT model. Default is 0.'},
138
+ )
139
+ ps_version: str = field(
140
+ default='v2',
141
+         metadata={'help': 'Specify the version of pixel shuffle implementation. Default is `v2`. '
142
+ 'Please use `v2` to fix the bug of transposed image.'}
143
+ )
144
+
145
+
146
+ @dataclass
147
+ class DataTrainingArguments:
148
+ """
149
+ Arguments for specifying data input for training and evaluation.
150
+ """
151
+ max_seq_length: Optional[int] = field(
152
+ default=2048,
153
+ metadata={
154
+ 'help': (
155
+ 'The maximum total input sequence length after tokenization. Sequences longer '
156
+ 'than this will be truncated, sequences shorter will be padded.'
157
+ )
158
+ },
159
+ )
160
+ force_image_size: Optional[int] = field(
161
+ default=448,
162
+         metadata={'help': 'Set the desired size for the image. Default is 448.'},
163
+ )
164
+ down_sample_ratio: Optional[float] = field(
165
+ default=0.5,
166
+         metadata={'help': 'Set the desired down-sampling ratio for the image. Default is 0.5.'},
167
+ )
168
+ pad2square: Optional[bool] = field(
169
+ default=False,
170
+ metadata={'help': 'Pad the image to a square shape if set to True.'},
171
+ )
172
+ conv_style: Optional[str] = field(
173
+ default='internlm2-chat', metadata={'help': 'Prompt style for a conversation.'}
174
+ )
175
+ meta_path: Optional[str] = field(
176
+ default=None,
177
+ metadata={'help': 'The path of the meta file of datasets.'},
178
+ )
179
+ use_data_resampling: Optional[bool] = field(
180
+ default=False,
181
+ metadata={'help': 'Set to True to use data resampling.'},
182
+ )
183
+ dynamic_image_size: Optional[bool] = field(
184
+ default=False,
185
+ metadata={'help': 'Set to True to use dynamic image size.'},
186
+ )
187
+ use_thumbnail: Optional[bool] = field(
188
+ default=False,
189
+ metadata={'help': 'Set to True to add a thumbnail image.'},
190
+ )
191
+ min_dynamic_patch: Optional[int] = field(
192
+ default=1,
193
+ metadata={'help': 'The minimum number of dynamic patches. Default is 1.'},
194
+ )
195
+ max_dynamic_patch: Optional[int] = field(
196
+ default=12,
197
+         metadata={'help': 'The maximum number of dynamic patches. Default is 12.'},
198
+ )
199
+ normalize_type: Optional[str] = field(
200
+ default='imagenet',
201
+ metadata={'help': 'The normalize type for the image. Default is imagenet.'},
202
+ )
203
+
204
+
205
+ class LazySupervisedDataset(Dataset):
206
+ """Dataset for supervised fine-tuning."""
207
+
208
+ def __init__(
209
+ self,
210
+ template_name,
211
+ meta,
212
+ tokenizer,
213
+ tcs_loader,
214
+ ds_name,
215
+ num_image_token,
216
+ image_size=224,
217
+ is_train=True,
218
+ pad2square=False,
219
+ group_by_length=False,
220
+ dynamic_image_size=False,
221
+ use_thumbnail=False,
222
+ min_dynamic_patch=1,
223
+ max_dynamic_patch=6,
224
+ min_num_frame=4, # for video data
225
+ max_num_frame=12, # for video data
226
+ sampling_method='rand', # for video data
227
+ repeat_time=1,
228
+ normalize_type='imagenet',
229
+ random_seed=0,
230
+ ):
231
+ super(LazySupervisedDataset, self).__init__()
232
+ self.ds_name = ds_name
233
+ self.tokenizer = tokenizer
234
+ self.template_name = template_name
235
+ self.num_image_token = num_image_token
236
+ logger.info(f'[Dataset] num_image_token: {num_image_token}')
237
+ logger.info(f'[Dataset] dynamic_image_size: {dynamic_image_size}')
238
+ logger.info(f'[Dataset] use_thumbnail: {use_thumbnail}')
239
+ logger.info(f'[Dataset] min_dynamic_patch: {min_dynamic_patch}, max_dynamic_patch: {max_dynamic_patch}')
240
+
241
+ self.image_size = image_size
242
+ self.is_train = is_train
243
+ self.pad2square = pad2square
244
+ self.max_num_frame = max_num_frame
245
+ self.min_num_frame = min_num_frame
246
+ self.sampling_method = sampling_method
247
+
248
+ logger.info('Formatting inputs...Skip in lazy mode')
249
+ assert meta['annotation'].endswith('jsonl'), f'annotation must be jsonl, but got {meta["annotation"]}'
250
+
251
+ with open(meta['annotation'], 'r') as f:
252
+ self.raw_data = f.readlines()
253
+ if repeat_time < 1:
254
+ # If repeat_time is less than 1, select a portion of the data
255
+ self.raw_data = self.raw_data[:int(len(self.raw_data) * repeat_time)]
256
+ if repeat_time > 1:
257
+ assert isinstance(repeat_time, int)
258
+ # Repeat the list if repeat_time is greater than 1
259
+ self.raw_data = self.raw_data * repeat_time
260
+
261
+ self.rng = np.random.default_rng(seed=random_seed)
262
+ self.rng.shuffle(self.raw_data)
263
+
264
+ gc.collect()
265
+ self.root = meta['root']
266
+ self.cached_data_dict = {}
267
+ self.tcs_loader = tcs_loader
268
+ self.group_by_length = group_by_length
269
+ self.dynamic_image_size = dynamic_image_size
270
+ self.use_thumbnail = use_thumbnail
271
+ self.min_dynamic_patch = min_dynamic_patch
272
+ self.max_dynamic_patch = max_dynamic_patch
273
+ self.normalize_type = normalize_type
274
+
275
+ # If the precomputed length does not exist, roughly estimate the length of
276
+ # each sample to improve the efficiency of group_by_length.
277
+ if self.group_by_length:
278
+ self.conv2length = {} # Using a dictionary to speed up token length calculation
279
+ self.length = []
280
+ for data_item in self.raw_data:
281
+ data_item = json.loads(data_item)
282
+ if 'length' in data_item:
283
+ token_length = data_item['length'] # Use precomputed length if available
284
+ else:
285
+ # Compute token length using the tokenizer
286
+ conversations = '\n'.join([temp['value'] for temp in data_item['conversations']])
287
+ str_length = len(conversations)
288
+ if str_length not in self.conv2length:
289
+ token_length = tokenizer(
290
+ conversations, return_tensors='pt', padding=False, truncation=False,
291
+ ).input_ids.size(1)
292
+ self.conv2length[str_length] = token_length + num_image_token * (
293
+ max_dynamic_patch + use_thumbnail)
294
+ else:
295
+ token_length = self.conv2length[str_length]
296
+ self.length.append(token_length)
297
+ gc.collect()
298
+
299
+ def __len__(self):
300
+ return len(self.raw_data)
301
+
302
+ def get_preprocess_function(self):
303
+ # Select the appropriate preprocessing function based on the template name
304
+ if self.template_name == 'Hermes-2':
305
+ preprocess_function = preprocess_mpt
306
+ elif self.template_name == 'internlm2-chat':
307
+ preprocess_function = preprocess_internlm
308
+ elif self.template_name == 'phi3-chat':
309
+ preprocess_function = preprocess_phi3
310
+ else:
311
+ preprocess_function = preprocess
312
+ return preprocess_function
313
+
314
+ def load_image(self, image_path):
315
+ # Load the image using tcs_loader if available, otherwise use PIL
316
+ if self.tcs_loader is not None and 's3://' in image_path:
317
+ return self.tcs_loader(image_path)
318
+ return Image.open(image_path).convert('RGB')
319
+
320
+ def get_image_path(self, image_path):
321
+ if image_path.startswith('s3://'): # for ceph
322
+ image_path = self.root + image_path
323
+ else: # for local image
324
+ image_path = os.path.join(self.root, image_path)
325
+ return image_path
326
+
327
+ def get_transform(self):
328
+ # Build transformation function
329
+ transform = build_transform(is_train=self.is_train, input_size=self.image_size,
330
+ pad2square=self.pad2square, normalize_type=self.normalize_type)
331
+ return transform
332
+
333
+ def multi_modal_get_item(self, data_item):
334
+ # Build transformation function
335
+ transform = self.get_transform()
336
+
337
+ # Ensure the first conversation contains an image placeholder
338
+ if '<image>' not in data_item['conversations'][0]['value']:
339
+ data_item['conversations'][0]['value'] = '<image>\n' + data_item['conversations'][0]['value']
340
+
341
+ # Merge the image path
342
+ image_path = self.get_image_path(data_item['image'])
343
+
344
+ # Load the image using tcs_loader if available, otherwise use PIL
345
+ image = self.load_image(image_path)
346
+
347
+ if self.dynamic_image_size: # If dynamic image size is enabled, preprocess the image dynamically
348
+ images = dynamic_preprocess(image, min_num=self.min_dynamic_patch, max_num=self.max_dynamic_patch,
349
+ image_size=self.image_size, use_thumbnail=self.use_thumbnail)
350
+ else: # Otherwise, use the original image as a single patch
351
+ images = [image]
352
+
353
+ # Apply the transformation to each image and stack the results into a tensor
354
+ pixel_values = [transform(image) for image in images]
355
+ pixel_values = torch.stack(pixel_values)
356
+
357
+ # Ensure that there is only one patch if dynamic image size is not enabled
358
+ num_patches = pixel_values.size(0)
359
+ if not self.dynamic_image_size:
360
+ assert num_patches == 1, f'The number of patches should be 1, but got {num_patches}.'
361
+
362
+ # Select the appropriate preprocessing function based on the template name
363
+ preprocess_function = self.get_preprocess_function()
364
+
365
+ # Preprocess the conversations and generate the return dictionary
366
+ ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
367
+ self.tokenizer, [self.num_image_token * num_patches],
368
+ group_by_length=self.group_by_length, ds_name=self.ds_name)
369
+
370
+ # Create the final return dictionary
371
+ ret = dict(
372
+ input_ids=ret['input_ids'][0],
373
+ labels=ret['labels'][0],
374
+ attention_mask=ret['attention_mask'][0],
375
+ pixel_values=pixel_values,
376
+ image_flags=torch.tensor([1] * num_patches, dtype=torch.long)
377
+ )
378
+ return ret
379
+
380
+ def multi_modal_multi_image_get_item(self, data_item):
381
+ # Build transformation function
382
+ transform = self.get_transform()
383
+
384
+ images, num_tiles = [], []
385
+ num_image = len(data_item['image'])
386
+ for image_path in data_item['image']:
387
+ # Merge the image path
388
+ image_path = self.get_image_path(image_path)
389
+ # Load the image using tcs_loader if available, otherwise use PIL
390
+ image = self.load_image(image_path)
391
+ if self.dynamic_image_size: # If dynamic image size is enabled, preprocess the image dynamically
392
+ image = dynamic_preprocess(image, min_num=self.min_dynamic_patch,
393
+ max_num=self.max_dynamic_patch // num_image,
394
+ image_size=self.image_size, use_thumbnail=self.use_thumbnail)
395
+ images += image
396
+ num_tiles.append(len(image))
397
+ else: # Otherwise, use the original image as a single patch
398
+ images.append(image)
399
+ num_tiles.append(1)
400
+ pixel_values = [transform(image) for image in images]
401
+ pixel_values = torch.stack(pixel_values)
402
+ num_patches = pixel_values.size(0)
403
+
404
+ # Select the appropriate preprocessing function based on the template name
405
+ preprocess_function = self.get_preprocess_function()
406
+
407
+ # Preprocess the conversations and generate the return dictionary
408
+ num_image_tokens = [self.num_image_token * num_tile for num_tile in num_tiles]
409
+ ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
410
+ self.tokenizer, num_image_tokens, group_by_length=self.group_by_length,
411
+ ds_name=self.ds_name, num_image=num_image)
412
+
413
+ # Create the final return dictionary
414
+ ret = dict(
415
+ input_ids=ret['input_ids'][0],
416
+ labels=ret['labels'][0],
417
+ attention_mask=ret['attention_mask'][0],
418
+ pixel_values=pixel_values,
419
+ image_flags=torch.tensor([1] * num_patches, dtype=torch.long)
420
+ )
421
+ return ret
422
+
423
+ def video_get_item(self, data_item):
424
+ # Build transformation function
425
+ transform = self.get_transform()
426
+
427
+ # Ensure the first conversation contains a video placeholder
428
+ if '<video>' not in data_item['conversations'][0]['value']:
429
+ data_item['conversations'][0]['value'] = '<video>\n' + data_item['conversations'][0]['value']
430
+
431
+ # Get the video file path
432
+ video_file = data_item['video']
433
+ video_path = os.path.join(self.root, video_file)
434
+
435
+ # Load the video frames using tcs_loader
436
+ # TODO: Load videos without using tcsloader.
437
+ image_list = self.tcs_loader(
438
+ video_path,
439
+ image_type='video',
440
+ max_num_frames=self.max_num_frame,
441
+ min_num_frames=self.min_num_frame,
442
+ sample=self.sampling_method,
443
+ clip=data_item.get('clip', None))
444
+
445
+ # Generate special tokens for each video frame
446
+ special_tokens = '\n'.join(['Frame{}: <image>'.format(i + 1) for i in range(len(image_list))])
447
+ data_item['conversations'][0]['value'] = data_item['conversations'][0]['value'].replace(
448
+ '<video>\n', special_tokens)
449
+
450
+ # Transform each frame image and stack them into a tensor
451
+ pixel_values = [transform(image) for image in image_list]
452
+ pixel_values = torch.stack(pixel_values)
453
+ num_patches = pixel_values.size(0)
454
+
455
+ # Select the appropriate preprocessing function based on the template name
456
+ preprocess_function = self.get_preprocess_function()
457
+
458
+ # Preprocess the conversations and generate the return dictionary
459
+ num_image_tokens = [self.num_image_token] * num_patches
460
+ ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
461
+ self.tokenizer, num_image_tokens, group_by_length=self.group_by_length,
462
+ ds_name=self.ds_name, num_image=num_patches)
463
+
464
+ # Create the final return dictionary
465
+ ret = dict(
466
+ input_ids=ret['input_ids'][0],
467
+ labels=ret['labels'][0],
468
+ attention_mask=ret['attention_mask'][0],
469
+ pixel_values=pixel_values,
470
+ image_flags=torch.tensor([1] * num_patches, dtype=torch.long)
471
+ )
472
+ return ret
473
+
474
+ def pure_text_get_item(self, data_item):
475
+ # Build transformation function
476
+ transform = self.get_transform()
477
+
478
+ # Create a blank white image
479
+ image = Image.new('RGB', (224, 224), (255, 255, 255))
480
+
481
+ # Dynamically preprocess the image to generate patches
482
+ images = dynamic_preprocess(image, min_num=self.min_dynamic_patch, max_num=1,
483
+ image_size=self.image_size, use_thumbnail=self.use_thumbnail)
484
+
485
+ # Apply the transformation to each image patch and stack them into a tensor
486
+ pixel_values = [transform(image) for image in images]
487
+ pixel_values = torch.stack(pixel_values)
488
+ num_patches = pixel_values.size(0)
489
+
490
+ # Ensure there is only one patch
491
+ assert num_patches == 1, f'The number of patches should be 1, but got {num_patches}.'
492
+
493
+ # Select the appropriate preprocessing function based on the template name
494
+ preprocess_function = self.get_preprocess_function()
495
+
496
+ # Preprocess the conversations and generate the return dictionary
497
+ ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
498
+ self.tokenizer, [self.num_image_token * num_patches], text_only=True,
499
+ group_by_length=self.group_by_length, ds_name=self.ds_name)
500
+
501
+ # Create the final return dictionary
502
+ ret = dict(
503
+ input_ids=ret['input_ids'][0],
504
+ labels=ret['labels'][0],
505
+ attention_mask=ret['attention_mask'][0],
506
+ pixel_values=pixel_values,
507
+ image_flags=torch.tensor([0] * num_patches, dtype=torch.long)
508
+ )
509
+ return ret
510
+
511
+ def __getitem__(self, i) -> Dict[str, torch.Tensor]:
512
+ i = i % len(self.raw_data)
513
+ while True:
514
+ try:
515
+ data_item = json.loads(self.raw_data[i])
516
+ if 'image' in data_item and len(data_item['image']) != 0:
517
+ if type(data_item['image']) == list:
518
+ ret = self.multi_modal_multi_image_get_item(data_item)
519
+ else:
520
+ ret = self.multi_modal_get_item(data_item)
521
+ elif 'video' in data_item and data_item['video'] is not None and data_item['video'] != '':
522
+ ret = self.video_get_item(data_item)
523
+ else:
524
+ ret = self.pure_text_get_item(data_item)
525
+ break
526
+ except Exception as e:
527
+ print(e, self.ds_name, flush=True)
528
+ if not isinstance(e, UnidentifiedImageError):
529
+ traceback.print_exc()
530
+ data_item = json.loads(self.raw_data[i])
531
+ if 'image' in data_item:
532
+ if type(data_item['image']) == list:
533
+ images = [self.root + item for item in data_item['image']]
534
+ print(f'Failed to load image: {images}, the dataset is: {self.ds_name}')
535
+ else:
536
+ if data_item['image'].startswith('s3://'):
537
+ data_path = self.root + data_item['image']
538
+ else:
539
+ data_path = os.path.join(self.root, data_item['image'])
540
+ print(f'Failed to load image: {data_path}, the dataset is: {self.ds_name}')
541
+ elif 'video' in data_item:
542
+ data_path = os.path.join(self.root, data_item['video'])
543
+ print(f'Failed to load video: {data_path}, the dataset is: {self.ds_name}')
544
+ i = random.randint(0, len(self.raw_data) - 1)
545
+ return ret
546
+
547
+
548
+ def build_datasets(
549
+ data_args,
550
+ tokenizer,
551
+ tcs_loader,
552
+ model,
553
+ group_by_length=False,
554
+ dynamic_image_size=False,
555
+ use_thumbnail=False,
556
+ min_dynamic_patch=1,
557
+ max_dynamic_patch=12,
558
+ normalize_type='imagenet',
559
+ ):
560
+ datasets = []
561
+ lengths = []
562
+ ds_collections = json.loads(open(data_args.meta_path).read())
563
+ for ds_idx, ds_name in enumerate(ds_collections.keys()):
564
+ repeat_time = ds_collections[ds_name]['repeat_time']
565
+ if 'max_dynamic_patch' in ds_collections[ds_name]:
566
+ max_num = ds_collections[ds_name]['max_dynamic_patch']
567
+ logger.info(f'max_dynamic_patch is set to {max_num} according to the meta file')
568
+ else:
569
+ max_num = max_dynamic_patch
570
+ dataset = LazySupervisedDataset(
571
+ data_args.conv_style, ds_collections[ds_name],
572
+ tokenizer,
573
+ tcs_loader,
574
+ ds_name=ds_name,
575
+ num_image_token=model.num_image_token,
576
+ image_size=data_args.force_image_size,
577
+ is_train=ds_collections[ds_name]['data_augment'],
578
+ pad2square=data_args.pad2square,
579
+ group_by_length=group_by_length,
580
+ dynamic_image_size=dynamic_image_size,
581
+ use_thumbnail=use_thumbnail,
582
+ min_dynamic_patch=min_dynamic_patch,
583
+ max_dynamic_patch=max_num,
584
+ repeat_time=repeat_time,
585
+ normalize_type=normalize_type,
586
+ random_seed=ds_idx,
587
+ )
588
+ logger.info(f'Add dataset: {ds_name} with length: {len(dataset)}')
589
+ datasets.append(dataset)
590
+ if data_args.use_data_resampling:
591
+ lengths.append(math.sqrt(len(dataset)))
592
+ else:
593
+ lengths.append(len(dataset))
594
+ if data_args.use_data_resampling:
595
+ total_length = sum(lengths)
596
+ weights = [l / total_length for l in lengths]
597
+ train_dataset = WeightedConcatDataset(datasets, weights)
598
+ else:
599
+ train_dataset = ConcatDataset(datasets)
600
+ return train_dataset
601
+
602
+
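An illustrative aside (not part of the uploaded script) on the square-root re-weighting that build_datasets above applies when use_data_resampling is enabled: normalizing sqrt(len(dataset)) instead of len(dataset) damps the dominance of very large datasets. The sizes below are made up.

import math

lengths = [100, 10_000]                              # two hypothetical dataset sizes
proportional = [l / sum(lengths) for l in lengths]   # ~[0.0099, 0.9901]
sqrts = [math.sqrt(l) for l in lengths]              # [10.0, 100.0]
resampled = [s / sum(sqrts) for s in sqrts]          # ~[0.0909, 0.9091]
print(proportional, resampled)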
603
+ def main():
604
+ # Parse input arguments
605
+ # See all possible arguments in src/transformers/training_args.py
606
+ # If use DeepSpeed zero3, init_dist must before HfArgumentParser
607
+ launcher = os.environ.get('LAUNCHER', 'slurm')
608
+ init_dist(launcher=launcher, backend='nccl')
609
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
610
+ if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
611
+ # If we pass only one argument to the script, and it's the path to a json file,
612
+ # let's parse it to get our arguments.
613
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
614
+ else:
615
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
616
+
617
+ # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
618
+ # information sent is the one passed as arguments along with your Python/PyTorch versions.
619
+ # send_example_telemetry('InternV-Chat', model_args, data_args)
620
+
621
+ # Setup logging
622
+ logging.basicConfig(
623
+ format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
624
+ datefmt='%m/%d/%Y %H:%M:%S',
625
+ handlers=[logging.StreamHandler(sys.stdout)],
626
+ )
627
+
628
+ if training_args.should_log:
629
+ # The default of training_args.log_level is passive, so we set log level at info here to have that default.
630
+ transformers.utils.logging.set_verbosity_info()
631
+
632
+ log_level = training_args.get_process_log_level()
633
+ logger.setLevel(log_level)
634
+ set_verbosity(log_level)
635
+ enable_default_handler()
636
+ enable_explicit_format()
637
+
638
+ # Log on each process the small summary:
639
+ logger.warning(
640
+ f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
641
+ + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
642
+ )
643
+ logger.info(f'Training/evaluation parameters {training_args}')
644
+
645
+ # Detecting last checkpoint and eventually continue from last checkpoint.
646
+ last_checkpoint = None
647
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
648
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
649
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
650
+ raise ValueError(
651
+ f'Output directory ({training_args.output_dir}) already exists and is not empty. '
652
+ 'Use --overwrite_output_dir to overcome.'
653
+ )
654
+ elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
655
+ logger.info(
656
+ f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
657
+ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
658
+ )
659
+ # Set seed before initializing model.
660
+ set_seed(training_args.seed)
661
+
662
+ # Load pretrained model, tokenizer, and image processor
663
+ tokenizer_path = model_args.model_name_or_path or model_args.llm_path
664
+ logger.info(f'Loading Tokenizer: {tokenizer_path}')
665
+ tokenizer = AutoTokenizer.from_pretrained(
666
+ tokenizer_path, add_eos_token=False, trust_remote_code=True, use_fast=False)
667
+ tokenizer.tokenizer_path = tokenizer_path
668
+ tokenizer.model_max_length = data_args.max_seq_length
669
+ token_list = [IMG_START_TOKEN, IMG_END_TOKEN, IMG_CONTEXT_TOKEN,
670
+ QUAD_START_TOKEN, QUAD_END_TOKEN, REF_START_TOKEN,
671
+ REF_END_TOKEN, BOX_START_TOKEN, BOX_END_TOKEN]
672
+ num_new_tokens = tokenizer.add_tokens(token_list, special_tokens=True)
673
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
674
+ tcs_loader = TCSLoader('~/petreloss.conf') if has_tcs_loader else None
675
+
676
+ if model_args.model_name_or_path is not None:
677
+ logger.info('Loading InternVLChatModel...')
678
+ config = InternVLChatConfig.from_pretrained(model_args.model_name_or_path)
679
+ config.vision_config.drop_path_rate = model_args.drop_path_rate
680
+ if config.llm_config.model_type == 'internlm2':
681
+ config.llm_config.attn_implementation = 'flash_attention_2' # for InternLM
682
+ logger.info('Using flash_attention_2 for InternLM')
683
+ else:
684
+ config.llm_config._attn_implementation = 'flash_attention_2' # for LLaMA
685
+ logger.info('Using flash_attention_2 for LLaMA')
686
+ config.template = data_args.conv_style
687
+ config.select_layer = model_args.vision_select_layer
688
+ config.dynamic_image_size = data_args.dynamic_image_size
689
+ config.use_thumbnail = data_args.use_thumbnail
690
+ config.ps_version = model_args.ps_version
691
+ config.min_dynamic_patch = data_args.min_dynamic_patch
692
+ config.max_dynamic_patch = data_args.max_dynamic_patch
693
+ model = InternVLChatModel.from_pretrained(
694
+ model_args.model_name_or_path, torch_dtype=torch.bfloat16, config=config)
695
+ else:
696
+ logger.info('Loading ViT-6B...')
697
+ vision_config = InternVisionConfig.from_pretrained(model_args.vision_path)
698
+ vision_config.drop_path_rate = model_args.drop_path_rate
699
+ vision_model = InternVisionModel.from_pretrained(
700
+ model_args.vision_path, torch_dtype=torch.bfloat16, config=vision_config)
701
+ logger.info('Loading LLaMA...')
702
+ llm_config = AutoConfig.from_pretrained(model_args.llm_path, trust_remote_code=True)
703
+ if llm_config.model_type == 'internlm2':
704
+ model_type = InternLM2ForCausalLM
705
+ llm_config.attn_implementation = 'flash_attention_2' # for InternLM
706
+ logger.info('Using flash_attention_2 for InternLM')
707
+ else:
708
+ model_type = AutoModelForCausalLM
709
+ llm_config._attn_implementation = 'flash_attention_2' # for LLaMA
710
+ logger.info('Using flash_attention_2 for LLaMA')
711
+ llm = model_type.from_pretrained(
712
+ model_args.llm_path, torch_dtype=torch.bfloat16,
713
+ config=llm_config, trust_remote_code=True)
714
+ logger.info('Building InternVLChatConfig...')
715
+ internvl_chat_config = InternVLChatConfig(
716
+ vision_config.to_dict(), llm_config.to_dict(), downsample_ratio=data_args.down_sample_ratio,
717
+ pad2square=data_args.pad2square, template=data_args.conv_style,
718
+ select_layer=model_args.vision_select_layer, dynamic_image_size=data_args.dynamic_image_size,
719
+ use_thumbnail=data_args.use_thumbnail, ps_version=model_args.ps_version,
720
+ min_dynamic_patch=data_args.min_dynamic_patch, max_dynamic_patch=data_args.max_dynamic_patch)
721
+ internvl_chat_config.force_image_size = data_args.force_image_size
722
+ logger.info('Building InternVLChatModel...')
723
+ model = InternVLChatModel(internvl_chat_config, vision_model, llm)
724
+ model.img_context_token_id = img_context_token_id
725
+
726
+ assert model.config.downsample_ratio == data_args.down_sample_ratio
727
+
728
+ if model_args.mlp_path is not None:
729
+ logger.info('Loading pretrained MLP projector...')
730
+ state_dict = torch.load(model_args.mlp_path, map_location='cpu')
731
+ message = model.mlp1.load_state_dict(state_dict)
732
+ logger.info(message)
733
+ logger.info('Finished')
734
+
735
+ patch_size = model.config.vision_config.patch_size
736
+ logger.info(f'model.config.force_image_size: {model.config.force_image_size}')
737
+ logger.info(f'data_args.force_image_size: {data_args.force_image_size}')
738
+ logger.info(f'model.config.vision_config.image_size: {model.config.vision_config.image_size}')
739
+ if model.config.vision_config.image_size != data_args.force_image_size:
740
+ logger.info(f'Resizing position embedding from '
741
+ f'{model.config.vision_config.image_size} '
742
+ f'to {data_args.force_image_size}...')
743
+ model.vision_model.resize_pos_embeddings(old_size=model.config.vision_config.image_size,
744
+ new_size=data_args.force_image_size,
745
+ patch_size=patch_size)
746
+ model.config.vision_config.image_size = data_args.force_image_size
747
+ model.config.force_image_size = data_args.force_image_size
748
+ model.num_image_token = int((data_args.force_image_size // patch_size) ** 2 * (data_args.down_sample_ratio ** 2))
749
+
750
+ if num_new_tokens > 0:
751
+ model.language_model.resize_token_embeddings(len(tokenizer))
752
+ output_embeddings = model.language_model.get_output_embeddings().weight.data
753
+ output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
754
+ output_embeddings[-num_new_tokens:] = output_embeddings_avg
755
+
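        # Editorial note: the rows added for the new special tokens (IMG_CONTEXT, box/quad/ref
        # markers, ...) are initialized to the mean of all pre-existing output-embedding rows,
        # a common warm start that keeps their initial logits near the vocabulary average.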
756
+ model.config.llm_config.vocab_size = len(tokenizer)
757
+ model.language_model.config.vocab_size = len(tokenizer)
758
+
759
+ model.language_model.config.use_cache = False
760
+ model.vision_model.gradient_checkpointing = True
761
+ model.vision_model.encoder.gradient_checkpointing = True
762
+ if model_args.grad_checkpoint:
763
+ model.language_model._set_gradient_checkpointing()
764
+
765
+ train_dataset = build_datasets(
766
+ data_args, tokenizer, tcs_loader, model, group_by_length=training_args.group_by_length,
767
+ dynamic_image_size=data_args.dynamic_image_size, use_thumbnail=data_args.use_thumbnail,
768
+ min_dynamic_patch=data_args.min_dynamic_patch, max_dynamic_patch=data_args.max_dynamic_patch,
769
+ normalize_type=data_args.normalize_type)
770
+
771
+ def _freeze_params(module):
772
+ for param in module.parameters():
773
+ param.requires_grad = False
774
+
775
+ if model_args.freeze_backbone:
776
+ # model.vision_model = model.vision_model.eval()
777
+ _freeze_params(model.vision_model)
778
+
779
+ if model_args.freeze_llm:
780
+ model.language_model = model.language_model.eval()
781
+ _freeze_params(model.language_model)
782
+
783
+ if model_args.unfreeze_lm_head:
784
+ model.language_model.lm_head.requires_grad = True
785
+
786
+ if model_args.use_backbone_lora:
787
+ model.wrap_backbone_lora(r=model_args.use_backbone_lora, lora_alpha=2 * model_args.use_backbone_lora)
788
+ model.config.use_backbone_lora = model_args.use_backbone_lora
789
+
790
+ if model_args.use_llm_lora:
791
+ model.wrap_llm_lora(r=model_args.use_llm_lora, lora_alpha=2 * model_args.use_llm_lora)
792
+ model.config.use_llm_lora = model_args.use_llm_lora
793
+
794
+ if model_args.freeze_mlp:
795
+ _freeze_params(model.mlp1)
796
+
797
+ if model_args.unfreeze_vit_layers != 0:
798
+ layers = model.vision_model.encoder.layers[model_args.unfreeze_vit_layers:]
799
+ for k, v in layers.named_parameters():
800
+ logger.info(f'Unfreezing ViT layer: {k}')
801
+ v.requires_grad = True
802
+
803
+ # print trainable parameters
804
+ if dist.get_rank() == 0:
805
+ for name, param in model.named_parameters():
806
+ if param.requires_grad:
807
+ logger.info(name)
808
+
809
+ # set seed for torch dataloaders
810
+ set_seed(training_args.seed)
811
+
812
+ # Initialize our Trainer
813
+ if model_args.use_custom_trainer:
814
+ replace_create_optimizer()
815
+
816
+ trainer = Trainer(
817
+ model=model,
818
+ args=training_args,
819
+ train_dataset=train_dataset if training_args.do_train else None,
820
+ eval_dataset=None,
821
+ tokenizer=tokenizer,
822
+ data_collator=concat_pad_data_collator
823
+ )
824
+
825
+ # Training
826
+ if training_args.do_train:
827
+ checkpoint = None
828
+ if training_args.resume_from_checkpoint is not None:
829
+ checkpoint = training_args.resume_from_checkpoint
830
+ elif last_checkpoint is not None:
831
+ checkpoint = last_checkpoint
832
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
833
+ trainer.save_model() # Saves the tokenizer too for easy upload
834
+
835
+ metrics = train_result.metrics
836
+ try:
837
+ metrics['train_samples'] = len(train_dataset)
838
+         except Exception:
839
+ metrics['train_samples'] = -1
840
+
841
+ trainer.log_metrics('train', metrics)
842
+ trainer.save_metrics('train', metrics)
843
+ trainer.save_state()
844
+
845
+
846
+ if __name__ == '__main__':
847
+ main()
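A small worked example (illustrative only) of the num_image_token formula used in main() above. patch_size is read from the vision config at runtime, so the value 14 here is an assumption matching the usual InternViT setting.

# num_image_token = (force_image_size // patch_size) ** 2 * down_sample_ratio ** 2
force_image_size, patch_size, down_sample_ratio = 448, 14, 0.5
num_image_token = int((force_image_size // patch_size) ** 2 * (down_sample_ratio ** 2))
print(num_image_token)  # (448 // 14) ** 2 = 1024 patches, * 0.25 -> 256 image tokens per tile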
isolated/sim_greedy/upstream_sgl/internvl/train/internvl_chat_pretrain.py ADDED
@@ -0,0 +1,885 @@
1
+ import gc
2
+ import json
3
+ import logging
4
+ import math
5
+ import os
6
+ import random
7
+ import sys
8
+ import traceback
9
+ import warnings
10
+ from copy import deepcopy
11
+ from dataclasses import dataclass, field
12
+ from typing import Dict, Optional
13
+
14
+ import numpy as np
15
+ import torch
16
+ import torch.distributed as dist
17
+ import transformers
18
+ from internvl.dist_utils import init_dist
19
+ from internvl.model.internlm2.modeling_internlm2 import InternLM2ForCausalLM
20
+ from internvl.model.internvl_chat import (InternVisionConfig,
21
+ InternVisionModel,
22
+ InternVLChatConfig,
23
+ InternVLChatModel)
24
+ from internvl.patch import (concat_pad_data_collator,
25
+ replace_llama_rmsnorm_with_fused_rmsnorm,
26
+ replace_train_sampler)
27
+ from internvl.train.constants import (BOX_END_TOKEN, BOX_START_TOKEN,
28
+ IMG_CONTEXT_TOKEN, IMG_END_TOKEN,
29
+ IMG_START_TOKEN, QUAD_END_TOKEN,
30
+ QUAD_START_TOKEN, REF_END_TOKEN,
31
+ REF_START_TOKEN)
32
+ from internvl.train.dataset import (ConcatDataset, TCSLoader,
33
+ WeightedConcatDataset, build_transform,
34
+ dynamic_preprocess, preprocess,
35
+ preprocess_internlm, preprocess_mpt,
36
+ preprocess_phi3)
37
+ from internvl.train.trainer_monkey_patch import replace_create_optimizer
38
+ from PIL import Image, ImageFile, PngImagePlugin, UnidentifiedImageError
39
+ from torch.utils.data import Dataset
40
+ from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
41
+ HfArgumentParser, Trainer, TrainingArguments,
42
+ set_seed)
43
+ from transformers.trainer_utils import get_last_checkpoint
44
+ from transformers.utils.logging import (enable_default_handler,
45
+ enable_explicit_format, set_verbosity)
46
+
47
+ # Apply necessary patches for the transformers library
48
+ replace_llama_rmsnorm_with_fused_rmsnorm()
49
+ replace_train_sampler()
50
+
51
+ # Try to import petrel_client for image loading, fallback to PIL if unavailable
52
+ try:
53
+ from petrel_client.client import Client
54
+ from petrel_client.common.config import Config
55
+ has_tcs_loader = True
56
+ except ImportError as E:
57
+ print('petrel_client is not installed. Using PIL to load images.')
58
+ has_tcs_loader = False
59
+
60
+ # Set constants for image processing and logging
61
+ IGNORE_INDEX = -100
62
+ Image.MAX_IMAGE_PIXELS = None
63
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
64
+ MaximumDecompressedSize = 1024
65
+ MegaByte = 2 ** 20
66
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
67
+
68
+ warnings.filterwarnings('ignore')
69
+ logger = logging.getLogger(__name__)
70
+
71
+ os.environ['TOKENIZERS_PARALLELISM'] = 'true'
72
+
73
+
74
+ @dataclass
75
+ class ModelArguments:
76
+ """
77
+ Arguments for specifying model, tokenizer, and configurations.
78
+ """
79
+ model_name_or_path: Optional[str] = field(
80
+ default=None,
81
+ metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
82
+ )
83
+ vision_path: Optional[str] = field(
84
+ default=None,
85
+ metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
86
+ )
87
+ llm_path: Optional[str] = field(
88
+ default=None,
89
+ metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
90
+ )
91
+ mlp_path: Optional[str] = field(
92
+ default=None,
93
+ metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
94
+ )
95
+ freeze_llm: bool = field(
96
+ default=False,
97
+ metadata={'help': 'Set to True to freeze the LLM decoder.'},
98
+ )
99
+ freeze_backbone: bool = field(
100
+ default=False,
101
+ metadata={'help': 'Set to True to freeze the vision backbone of the model.'},
102
+ )
103
+ freeze_mlp: bool = field(
104
+ default=False,
105
+ metadata={'help': 'Set to True to freeze the MLP layers of the model.'},
106
+ )
107
+ unfreeze_vit_layers: int = field(
108
+ default=0,
109
+ metadata={'help': 'Specify the number of ViT layers to unfreeze. Default is 0.'},
110
+ )
111
+ vision_select_layer: int = field(
112
+ default=-1,
113
+ metadata={'help': 'Specify the layer of ViT feature map to use. Default is last layer.'},
114
+ )
115
+ use_backbone_lora: int = field(
116
+ default=0,
117
+ metadata={'help': 'Set the LoRA adapter rank for the backbone model. Default is 0.'}
118
+ )
119
+ use_llm_lora: int = field(
120
+ default=0,
121
+ metadata={'help': 'Set the LoRA adapter rank for the LLM. Default is 0.'}
122
+ )
123
+ unfreeze_lm_head: bool = field(
124
+ default=False,
125
+ metadata={'help': "Set to True to unfreeze the language model's head."},
126
+ )
127
+ use_custom_trainer: bool = field(
128
+ default=False,
129
+ metadata={'help': 'Set to True to enable the use of a custom trainer.'},
130
+ )
131
+ grad_checkpoint: Optional[bool] = field(
132
+ default=False,
133
+ metadata={'help': 'Set to True to use gradient checkpointing.'},
134
+ )
135
+ drop_path_rate: float = field(
136
+ default=0.0,
137
+ metadata={'help': 'Set the drop path rate for the ViT model. Default is 0.'},
138
+ )
139
+ ps_version: str = field(
140
+ default='v2',
141
+         metadata={'help': 'Specify the version of pixel shuffle implementation. Default is `v2`. '
142
+ 'Please use `v2` to fix the bug of transposed image.'}
143
+ )
144
+
145
+
146
+ @dataclass
147
+ class DataTrainingArguments:
148
+ """
149
+ Arguments for specifying data input for training and evaluation.
150
+ """
151
+ max_seq_length: Optional[int] = field(
152
+ default=2048,
153
+ metadata={
154
+ 'help': (
155
+ 'The maximum total input sequence length after tokenization. Sequences longer '
156
+ 'than this will be truncated, sequences shorter will be padded.'
157
+ )
158
+ },
159
+ )
160
+ force_image_size: Optional[int] = field(
161
+ default=448,
162
+         metadata={'help': 'Set the desired size for the image. Default is 448.'},
163
+ )
164
+ down_sample_ratio: Optional[float] = field(
165
+ default=0.5,
166
+         metadata={'help': 'Set the desired down-sampling ratio for the image. Default is 0.5.'},
167
+ )
168
+ pad2square: Optional[bool] = field(
169
+ default=False,
170
+ metadata={'help': 'Pad the image to a square shape if set to True.'},
171
+ )
172
+ conv_style: Optional[str] = field(
173
+ default='internlm2-chat', metadata={'help': 'Prompt style for a conversation.'}
174
+ )
175
+ meta_path: Optional[str] = field(
176
+ default=None,
177
+ metadata={'help': 'The path of the meta file of datasets.'},
178
+ )
179
+ use_data_resampling: Optional[bool] = field(
180
+ default=False,
181
+ metadata={'help': 'Set to True to use data resampling.'},
182
+ )
183
+ dynamic_image_size: Optional[bool] = field(
184
+ default=False,
185
+ metadata={'help': 'Set to True to use dynamic image size.'},
186
+ )
187
+ use_thumbnail: Optional[bool] = field(
188
+ default=False,
189
+ metadata={'help': 'Set to True to add a thumbnail image.'},
190
+ )
191
+ min_dynamic_patch: Optional[int] = field(
192
+ default=1,
193
+ metadata={'help': 'The minimum number of dynamic patches. Default is 1.'},
194
+ )
195
+ max_dynamic_patch: Optional[int] = field(
196
+ default=12,
197
+         metadata={'help': 'The maximum number of dynamic patches. Default is 12.'},
198
+ )
199
+ normalize_type: Optional[str] = field(
200
+ default='imagenet',
201
+ metadata={'help': 'The normalize type for the image. Default is imagenet.'},
202
+ )
203
+
204
+
205
+ class LazySupervisedDataset(Dataset):
206
+ """Dataset for supervised fine-tuning."""
207
+
208
+ def __init__(
209
+ self,
210
+ template_name,
211
+ meta,
212
+ tokenizer,
213
+ tcs_loader,
214
+ ds_name,
215
+ num_image_token,
216
+ image_size=224,
217
+ is_train=True,
218
+ pad2square=False,
219
+ group_by_length=False,
220
+ dynamic_image_size=False,
221
+ use_thumbnail=False,
222
+ min_dynamic_patch=1,
223
+ max_dynamic_patch=6,
224
+ min_num_frame=4, # for video data
225
+ max_num_frame=12, # for video data
226
+ sampling_method='rand', # for video data
227
+ repeat_time=1,
228
+ normalize_type='imagenet',
229
+ random_seed=0,
230
+ ):
231
+ super(LazySupervisedDataset, self).__init__()
232
+ self.ds_name = ds_name
233
+ self.tokenizer = tokenizer
234
+ self.template_name = template_name
235
+ self.num_image_token = num_image_token
236
+ logger.info(f'[Dataset] num_image_token: {num_image_token}')
237
+ logger.info(f'[Dataset] dynamic_image_size: {dynamic_image_size}')
238
+ logger.info(f'[Dataset] use_thumbnail: {use_thumbnail}')
239
+ logger.info(f'[Dataset] min_dynamic_patch: {min_dynamic_patch}, max_dynamic_patch: {max_dynamic_patch}')
240
+
241
+ self.image_size = image_size
242
+ self.is_train = is_train
243
+ self.pad2square = pad2square
244
+ self.max_num_frame = max_num_frame
245
+ self.min_num_frame = min_num_frame
246
+ self.sampling_method = sampling_method
247
+
248
+ logger.info('Formatting inputs...Skip in lazy mode')
249
+ assert meta['annotation'].endswith('jsonl'), f'annotation must be jsonl, but got {meta["annotation"]}'
250
+
251
+ total_ranks = torch.distributed.get_world_size()
252
+ current_rank = torch.distributed.get_rank()
253
+
254
+ """
255
+ This section of the code is used to read hundreds of millions of data entries.
256
+ By using caching and splitting the data according to rank, it ensures fast reading
257
+ speed and prevents out-of-memory.
258
+ """
259
+ # Create a cache directory path
260
+ basename = os.path.basename(meta['annotation']).replace('.jsonl', '')
261
+ data_dir = os.path.join(os.path.dirname(meta['annotation']), f'{basename}_temp')
262
+ os.makedirs(data_dir, exist_ok=True) # Create the cache directory if it does not exist
263
+ # Create a temporary path for the current rank
264
+ temp_path = os.path.join(data_dir, f'{basename}_{current_rank}_of_{total_ranks}.jsonl')
265
+
266
+ # Check if the temporary file for the current rank already exists
267
+ if os.path.exists(temp_path):
268
+ # If it exists, read the raw data from the file
269
+ with open(temp_path, 'r') as f:
270
+ self.raw_data = f.readlines()
271
+ else:
272
+ # If it does not exist, read the raw data from the original annotation file
273
+ with open(meta['annotation'], 'r') as f:
274
+ self.raw_data = f.readlines()
275
+
276
+ # Adjust the raw data based on the repeat_time parameter
277
+ if repeat_time < 1:
278
+ self.raw_data = self.raw_data[:int(len(self.raw_data) * repeat_time)]
279
+ else:
280
+ self.raw_data = self.raw_data * int(repeat_time)
281
+
282
+ # Calculate the total number of lines and distribute lines to each rank
283
+ total_lines = len(self.raw_data)
284
+ logger.info(f'total_ranks: {total_ranks}, current_rank: {current_rank}, total_lines: {total_lines}')
285
+ lines_per_rank = total_lines // total_ranks # Number of lines each rank should process
286
+ lines_per_rank = max(1, lines_per_rank)
287
+
288
+ # Calculate the start and end line numbers for the current rank
289
+ start_line = lines_per_rank * current_rank # Starting line for the current rank
290
+ end_line = start_line + lines_per_rank # Ending line for the current rank
291
+
292
+ # Assign the appropriate lines to the current rank
293
+ self.raw_data = self.raw_data[start_line:end_line]
294
+
295
+ # Write the raw data for the current rank to the temporary file
296
+ with open(temp_path, 'w') as f:
297
+ f.writelines(self.raw_data)
298
+
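        # Editorial note (illustrative numbers, not from the file): with total_lines=10 and
        # total_ranks=4, lines_per_rank = max(1, 10 // 4) = 2, so rank r keeps lines
        # [2*r, 2*r + 2) and caches them in its own {basename}_{r}_of_4.jsonl temp file;
        # the trailing total_lines % total_ranks lines (here lines 8 and 9) fall to no rank.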
299
+ self.rng = np.random.default_rng(seed=random_seed)
300
+ self.rng.shuffle(self.raw_data)
301
+
302
+ gc.collect()
303
+ self.root = meta['root']
304
+ self.cached_data_dict = {}
305
+ self.tcs_loader = tcs_loader
306
+ self.group_by_length = group_by_length
307
+ self.dynamic_image_size = dynamic_image_size
308
+ self.use_thumbnail = use_thumbnail
309
+ self.min_dynamic_patch = min_dynamic_patch
310
+ self.max_dynamic_patch = max_dynamic_patch
311
+ self.normalize_type = normalize_type
312
+
313
+ # If the precomputed length does not exist, roughly estimate the length of
314
+ # each sample to improve the efficiency of group_by_length.
315
+ if self.group_by_length:
316
+ self.conv2length = {} # Using a dictionary to speed up token length calculation
317
+ self.length = []
318
+ for data_item in self.raw_data:
319
+ data_item = json.loads(data_item)
320
+ if 'length' in data_item:
321
+ token_length = data_item['length'] # Use precomputed length if available
322
+ else:
323
+ # Compute token length using the tokenizer
324
+ conversations = '\n'.join([temp['value'] for temp in data_item['conversations']])
325
+ str_length = len(conversations)
326
+ if str_length not in self.conv2length:
327
+ token_length = tokenizer(
328
+ conversations, return_tensors='pt', padding=False, truncation=False,
329
+ ).input_ids.size(1)
330
+ self.conv2length[str_length] = token_length + num_image_token * (
331
+ max_dynamic_patch + use_thumbnail)
332
+ else:
333
+ token_length = self.conv2length[str_length]
334
+ self.length.append(token_length)
335
+ gc.collect()
336
+
337
+ def __len__(self):
338
+ return len(self.raw_data) * torch.distributed.get_world_size()
339
+
340
+ def get_preprocess_function(self):
341
+ # Select the appropriate preprocessing function based on the template name
342
+ if self.template_name == 'Hermes-2':
343
+ preprocess_function = preprocess_mpt
344
+ elif self.template_name == 'internlm2-chat':
345
+ preprocess_function = preprocess_internlm
346
+ elif self.template_name == 'phi3-chat':
347
+ preprocess_function = preprocess_phi3
348
+ else:
349
+ preprocess_function = preprocess
350
+ return preprocess_function
351
+
352
+ def load_image(self, image_path):
353
+ # Load the image using tcs_loader if available, otherwise use PIL
354
+ if self.tcs_loader is not None and 's3://' in image_path:
355
+ return self.tcs_loader(image_path)
356
+ return Image.open(image_path).convert('RGB')
357
+
358
+ def get_image_path(self, image_path):
359
+ if image_path.startswith('s3://'): # for ceph
360
+ image_path = self.root + image_path
361
+ else: # for local image
362
+ image_path = os.path.join(self.root, image_path)
363
+ return image_path
364
+
365
+ def get_transform(self):
366
+ # Build transformation function
367
+ transform = build_transform(is_train=self.is_train, input_size=self.image_size,
368
+ pad2square=self.pad2square, normalize_type=self.normalize_type)
369
+ return transform
370
+
371
+ def multi_modal_get_item(self, data_item):
372
+ # Build transformation function
373
+ transform = self.get_transform()
374
+
375
+ # Ensure the first conversation contains an image placeholder
376
+ if '<image>' not in data_item['conversations'][0]['value']:
377
+ data_item['conversations'][0]['value'] = '<image>\n' + data_item['conversations'][0]['value']
378
+
379
+ # Merge the image path
380
+ image_path = self.get_image_path(data_item['image'])
381
+
382
+ # Load the image using tcs_loader if available, otherwise use PIL
383
+ image = self.load_image(image_path)
384
+
385
+ if self.dynamic_image_size: # If dynamic image size is enabled, preprocess the image dynamically
386
+ images = dynamic_preprocess(image, min_num=self.min_dynamic_patch, max_num=self.max_dynamic_patch,
387
+ image_size=self.image_size, use_thumbnail=self.use_thumbnail)
388
+ else: # Otherwise, use the original image as a single patch
389
+ images = [image]
390
+
391
+ # Apply the transformation to each image and stack the results into a tensor
392
+ pixel_values = [transform(image) for image in images]
393
+ pixel_values = torch.stack(pixel_values)
394
+
395
+ # Ensure that there is only one patch if dynamic image size is not enabled
396
+ num_patches = pixel_values.size(0)
397
+ if not self.dynamic_image_size:
398
+ assert num_patches == 1, f'The number of patches should be 1, but got {num_patches}.'
399
+
400
+ # Select the appropriate preprocessing function based on the template name
401
+ preprocess_function = self.get_preprocess_function()
402
+
403
+ # Preprocess the conversations and generate the return dictionary
404
+ ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
405
+ self.tokenizer, [self.num_image_token * num_patches],
406
+ group_by_length=self.group_by_length, ds_name=self.ds_name)
407
+
408
+ # Create the final return dictionary
409
+ ret = dict(
410
+ input_ids=ret['input_ids'][0],
411
+ labels=ret['labels'][0],
412
+ attention_mask=ret['attention_mask'][0],
413
+ pixel_values=pixel_values,
414
+ image_flags=torch.tensor([1] * num_patches, dtype=torch.long)
415
+ )
416
+ return ret
417
+
418
+ def multi_modal_multi_image_get_item(self, data_item):
419
+ # Build transformation function
420
+ transform = self.get_transform()
421
+
422
+ images, num_tiles = [], []
423
+ num_image = len(data_item['image'])
424
+ for image_path in data_item['image']:
425
+ # Merge the image path
426
+ image_path = self.get_image_path(image_path)
427
+ # Load the image using tcs_loader if available, otherwise use PIL
428
+ image = self.load_image(image_path)
429
+ if self.dynamic_image_size: # If dynamic image size is enabled, preprocess the image dynamically
430
+ image = dynamic_preprocess(image, min_num=self.min_dynamic_patch,
431
+ max_num=self.max_dynamic_patch // num_image,
432
+ image_size=self.image_size, use_thumbnail=self.use_thumbnail)
433
+ images += image
434
+ num_tiles.append(len(image))
435
+ else: # Otherwise, use the original image as a single patch
436
+ images.append(image)
437
+ num_tiles.append(1)
438
+ pixel_values = [transform(image) for image in images]
439
+ pixel_values = torch.stack(pixel_values)
440
+ num_patches = pixel_values.size(0)
441
+
442
+ # Select the appropriate preprocessing function based on the template name
443
+ preprocess_function = self.get_preprocess_function()
444
+
445
+ # Preprocess the conversations and generate the return dictionary
446
+ num_image_tokens = [self.num_image_token * num_tile for num_tile in num_tiles]
447
+ ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
448
+ self.tokenizer, num_image_tokens, group_by_length=self.group_by_length,
449
+ ds_name=self.ds_name, num_image=num_image)
450
+
451
+ # Create the final return dictionary
452
+ ret = dict(
453
+ input_ids=ret['input_ids'][0],
454
+ labels=ret['labels'][0],
455
+ attention_mask=ret['attention_mask'][0],
456
+ pixel_values=pixel_values,
457
+ image_flags=torch.tensor([1] * num_patches, dtype=torch.long)
458
+ )
459
+ return ret
460
+
461
+ def video_get_item(self, data_item):
462
+ # Build transformation function
463
+ transform = self.get_transform()
464
+
465
+ # Ensure the first conversation contains a video placeholder
466
+ if '<video>' not in data_item['conversations'][0]['value']:
467
+ data_item['conversations'][0]['value'] = '<video>\n' + data_item['conversations'][0]['value']
468
+
469
+ # Get the video file path
470
+ video_file = data_item['video']
471
+ video_path = os.path.join(self.root, video_file)
472
+
473
+ # Load the video frames using tcs_loader
474
+ # TODO: Load videos without using tcsloader.
475
+ image_list = self.tcs_loader(
476
+ video_path,
477
+ image_type='video',
478
+ max_num_frames=self.max_num_frame,
479
+ min_num_frames=self.min_num_frame,
480
+ sample=self.sampling_method,
481
+ clip=data_item.get('clip', None))
482
+
483
+ # Generate special tokens for each video frame
484
+ special_tokens = '\n'.join(['Frame{}: <image>'.format(i + 1) for i in range(len(image_list))])
485
+ data_item['conversations'][0]['value'] = data_item['conversations'][0]['value'].replace(
486
+ '<video>\n', special_tokens)
487
+
488
+ # Transform each frame image and stack them into a tensor
489
+ pixel_values = [transform(image) for image in image_list]
490
+ pixel_values = torch.stack(pixel_values)
491
+ num_patches = pixel_values.size(0)
492
+
493
+ # Select the appropriate preprocessing function based on the template name
494
+ preprocess_function = self.get_preprocess_function()
495
+
496
+ # Preprocess the conversations and generate the return dictionary
497
+ num_image_tokens = [self.num_image_token] * num_patches
498
+ ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
499
+ self.tokenizer, num_image_tokens, group_by_length=self.group_by_length,
500
+ ds_name=self.ds_name, num_image=num_patches)
501
+
502
+ # Create the final return dictionary
503
+ ret = dict(
504
+ input_ids=ret['input_ids'][0],
505
+ labels=ret['labels'][0],
506
+ attention_mask=ret['attention_mask'][0],
507
+ pixel_values=pixel_values,
508
+ image_flags=torch.tensor([1] * num_patches, dtype=torch.long)
509
+ )
510
+ return ret
511
+
512
+ def pure_text_get_item(self, data_item):
513
+ # Build transformation function
514
+ transform = self.get_transform()
515
+
516
+ # Create a blank white image
517
+ image = Image.new('RGB', (224, 224), (255, 255, 255))
518
+
519
+ # Dynamically preprocess the image to generate patches
520
+ images = dynamic_preprocess(image, min_num=self.min_dynamic_patch, max_num=1,
521
+ image_size=self.image_size, use_thumbnail=self.use_thumbnail)
522
+
523
+ # Apply the transformation to each image patch and stack them into a tensor
524
+ pixel_values = [transform(image) for image in images]
525
+ pixel_values = torch.stack(pixel_values)
526
+ num_patches = pixel_values.size(0)
527
+
528
+ # Ensure there is only one patch
529
+ assert num_patches == 1, f'The number of patches should be 1, but got {num_patches}.'
530
+
531
+ # Select the appropriate preprocessing function based on the template name
532
+ preprocess_function = self.get_preprocess_function()
533
+
534
+ # Preprocess the conversations and generate the return dictionary
535
+ ret = preprocess_function(self.template_name, [deepcopy(data_item['conversations'])],
536
+ self.tokenizer, [self.num_image_token * num_patches], text_only=True,
537
+ group_by_length=self.group_by_length, ds_name=self.ds_name)
538
+
539
+ # Create the final return dictionary
540
+ ret = dict(
541
+ input_ids=ret['input_ids'][0],
542
+ labels=ret['labels'][0],
543
+ attention_mask=ret['attention_mask'][0],
544
+ pixel_values=pixel_values,
545
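+ # image_flags is all zeros here: the blank image only keeps batch shapes uniform (contrast with the [1] flags used in the image and video branches above)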
+ image_flags=torch.tensor([0] * num_patches, dtype=torch.long)
546
+ )
547
+ return ret
548
+
549
+ def __getitem__(self, i) -> Dict[str, torch.Tensor]:
550
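+ # __len__ reports len(self.raw_data) * world_size, so wrap the sampler's global index back onto this rank's local shard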
+ i = i % len(self.raw_data)
551
+ while True:
552
+ try:
553
+ data_item = json.loads(self.raw_data[i])
554
+ if 'image' in data_item and len(data_item['image']) != 0:
555
+ if type(data_item['image']) == list:
556
+ ret = self.multi_modal_multi_image_get_item(data_item)
557
+ else:
558
+ ret = self.multi_modal_get_item(data_item)
559
+ elif 'video' in data_item and data_item['video'] is not None and data_item['video'] != '':
560
+ ret = self.video_get_item(data_item)
561
+ else:
562
+ ret = self.pure_text_get_item(data_item)
563
+ break
564
+ except Exception as e:
565
+ print(e, self.ds_name, flush=True)
566
+ if not isinstance(e, UnidentifiedImageError):
567
+ traceback.print_exc()
568
+ data_item = json.loads(self.raw_data[i])
569
+ if 'image' in data_item:
570
+ if type(data_item['image']) == list:
571
+ images = [self.root + item for item in data_item['image']]
572
+ print(f'Failed to load image: {images}, the dataset is: {self.ds_name}')
573
+ else:
574
+ if data_item['image'].startswith('s3://'):
575
+ data_path = self.root + data_item['image']
576
+ else:
577
+ data_path = os.path.join(self.root, data_item['image'])
578
+ print(f'Failed to load image: {data_path}, the dataset is: {self.ds_name}')
579
+ elif 'video' in data_item:
580
+ data_path = os.path.join(self.root, data_item['video'])
581
+ print(f'Failed to load video: {data_path}, the dataset is: {self.ds_name}')
582
+ i = random.randint(0, len(self.raw_data) - 1)
583
+ return ret
584
+
585
+
586
+ def build_datasets(
587
+ data_args,
588
+ tokenizer,
589
+ tcs_loader,
590
+ model,
591
+ group_by_length=False,
592
+ dynamic_image_size=False,
593
+ use_thumbnail=False,
594
+ min_dynamic_patch=1,
595
+ max_dynamic_patch=12,
596
+ normalize_type='imagenet',
597
+ ):
598
+ datasets = []
599
+ lengths = []
600
+ ds_collections = json.loads(open(data_args.meta_path).read())
601
+ for ds_idx, ds_name in enumerate(ds_collections.keys()):
602
+ repeat_time = ds_collections[ds_name]['repeat_time']
603
+ if 'max_dynamic_patch' in ds_collections[ds_name]:
604
+ max_num = ds_collections[ds_name]['max_dynamic_patch']
605
+ logger.info(f'max_dynamic_patch is set to {max_num} according to the meta file')
606
+ else:
607
+ max_num = max_dynamic_patch
608
+ dataset = LazySupervisedDataset(
609
+ data_args.conv_style, ds_collections[ds_name],
610
+ tokenizer,
611
+ tcs_loader,
612
+ ds_name=ds_name,
613
+ num_image_token=model.num_image_token,
614
+ image_size=data_args.force_image_size,
615
+ is_train=ds_collections[ds_name]['data_augment'],
616
+ pad2square=data_args.pad2square,
617
+ group_by_length=group_by_length,
618
+ dynamic_image_size=dynamic_image_size,
619
+ use_thumbnail=use_thumbnail,
620
+ min_dynamic_patch=min_dynamic_patch,
621
+ max_dynamic_patch=max_num,
622
+ repeat_time=repeat_time,
623
+ normalize_type=normalize_type,
624
+ random_seed=ds_idx,
625
+ )
626
+ logger.info(f'Add dataset: {ds_name} with length: {len(dataset)}')
627
+ datasets.append(dataset)
628
+ if data_args.use_data_resampling:
629
+ lengths.append(math.sqrt(len(dataset)))
630
+ else:
631
+ lengths.append(len(dataset))
632
+ if data_args.use_data_resampling:
633
+ total_length = sum(lengths)
634
+ weights = [l / total_length for l in lengths]
635
+ train_dataset = WeightedConcatDataset(datasets, weights)
636
+ else:
637
+ train_dataset = ConcatDataset(datasets)
638
+ return train_dataset
639
+
640
+
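A quick numeric illustration (dataset sizes are made up) of the square-root re-weighting in build_datasets when use_data_resampling is enabled: it narrows the gap between large and small datasets instead of sampling strictly in proportion to size.

    import math

    sizes = {'large_ds': 1_000_000, 'small_ds': 10_000}            # hypothetical datasets
    by_size = {k: v / sum(sizes.values()) for k, v in sizes.items()}
    sqrt_len = {k: math.sqrt(v) for k, v in sizes.items()}
    resampled = {k: v / sum(sqrt_len.values()) for k, v in sqrt_len.items()}
    print(by_size)    # {'large_ds': ~0.990, 'small_ds': ~0.010}
    print(resampled)  # {'large_ds': ~0.909, 'small_ds': ~0.091}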
641
+ def main():
642
+ # Parse input arguments
643
+ # See all possible arguments in src/transformers/training_args.py
644
+ # If using DeepSpeed ZeRO-3, init_dist must be called before HfArgumentParser
645
+ launcher = os.environ.get('LAUNCHER', 'slurm')
646
+ init_dist(launcher=launcher, backend='nccl')
647
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
648
+ if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
649
+ # If we pass only one argument to the script, and it's the path to a json file,
650
+ # let's parse it to get our arguments.
651
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
652
+ else:
653
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
654
+
655
+ # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
656
+ # information sent is the one passed as arguments along with your Python/PyTorch versions.
657
+ # send_example_telemetry('InternV-Chat', model_args, data_args)
658
+
659
+ # Setup logging
660
+ logging.basicConfig(
661
+ format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
662
+ datefmt='%m/%d/%Y %H:%M:%S',
663
+ handlers=[logging.StreamHandler(sys.stdout)],
664
+ )
665
+
666
+ if training_args.should_log:
667
+ # The default of training_args.log_level is passive, so we set log level at info here to have that default.
668
+ transformers.utils.logging.set_verbosity_info()
669
+
670
+ log_level = training_args.get_process_log_level()
671
+ logger.setLevel(log_level)
672
+ set_verbosity(log_level)
673
+ enable_default_handler()
674
+ enable_explicit_format()
675
+
676
+ # Log on each process the small summary:
677
+ logger.warning(
678
+ f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
679
+ + f', distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
680
+ )
681
+ logger.info(f'Training/evaluation parameters {training_args}')
682
+
683
+ # Detect the last checkpoint and, if one exists, resume training from it.
684
+ last_checkpoint = None
685
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
686
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
687
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
688
+ raise ValueError(
689
+ f'Output directory ({training_args.output_dir}) already exists and is not empty. '
690
+ 'Use --overwrite_output_dir to overcome.'
691
+ )
692
+ elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
693
+ logger.info(
694
+ f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
695
+ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
696
+ )
697
+ # Set seed before initializing model.
698
+ set_seed(training_args.seed)
699
+
700
+ # Load pretrained model, tokenizer, and image processor
701
+ tokenizer_path = model_args.model_name_or_path or model_args.llm_path
702
+ logger.info(f'Loading Tokenizer: {tokenizer_path}')
703
+ tokenizer = AutoTokenizer.from_pretrained(
704
+ tokenizer_path, add_eos_token=False, trust_remote_code=True, use_fast=False)
705
+ tokenizer.tokenizer_path = tokenizer_path
706
+ tokenizer.model_max_length = data_args.max_seq_length
707
+ token_list = [IMG_START_TOKEN, IMG_END_TOKEN, IMG_CONTEXT_TOKEN,
708
+ QUAD_START_TOKEN, QUAD_END_TOKEN, REF_START_TOKEN,
709
+ REF_END_TOKEN, BOX_START_TOKEN, BOX_END_TOKEN]
710
+ num_new_tokens = tokenizer.add_tokens(token_list, special_tokens=True)
711
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
712
+ tcs_loader = TCSLoader('~/petreloss.conf') if has_tcs_loader else None
713
+
714
+ if model_args.model_name_or_path is not None:
715
+ logger.info('Loading InternVLChatModel...')
716
+ config = InternVLChatConfig.from_pretrained(model_args.model_name_or_path)
717
+ config.vision_config.drop_path_rate = model_args.drop_path_rate
718
+ if config.llm_config.model_type == 'internlm2':
719
+ config.llm_config.attn_implementation = 'flash_attention_2' # for InternLM
720
+ logger.info('Using flash_attention_2 for InternLM')
721
+ else:
722
+ config.llm_config._attn_implementation = 'flash_attention_2' # for LLaMA
723
+ logger.info('Using flash_attention_2 for LLaMA')
724
+ config.template = data_args.conv_style
725
+ config.select_layer = model_args.vision_select_layer
726
+ config.dynamic_image_size = data_args.dynamic_image_size
727
+ config.use_thumbnail = data_args.use_thumbnail
728
+ config.ps_version = model_args.ps_version
729
+ config.min_dynamic_patch = data_args.min_dynamic_patch
730
+ config.max_dynamic_patch = data_args.max_dynamic_patch
731
+ model = InternVLChatModel.from_pretrained(
732
+ model_args.model_name_or_path, torch_dtype=torch.bfloat16, config=config)
733
+ else:
734
+ logger.info('Loading ViT-6B...')
735
+ vision_config = InternVisionConfig.from_pretrained(model_args.vision_path)
736
+ vision_config.drop_path_rate = model_args.drop_path_rate
737
+ vision_model = InternVisionModel.from_pretrained(
738
+ model_args.vision_path, torch_dtype=torch.bfloat16, config=vision_config)
739
+ logger.info('Loading LLaMA...')
740
+ llm_config = AutoConfig.from_pretrained(model_args.llm_path, trust_remote_code=True)
741
+ if llm_config.model_type == 'internlm2':
742
+ model_type = InternLM2ForCausalLM
743
+ llm_config.attn_implementation = 'flash_attention_2' # for InternLM
744
+ logger.info('Using flash_attention_2 for InternLM')
745
+ else:
746
+ model_type = AutoModelForCausalLM
747
+ llm_config._attn_implementation = 'flash_attention_2' # for LLaMA
748
+ logger.info('Using flash_attention_2 for LLaMA')
749
+ llm = model_type.from_pretrained(
750
+ model_args.llm_path, torch_dtype=torch.bfloat16,
751
+ config=llm_config, trust_remote_code=True)
752
+ logger.info('Building InternVLChatConfig...')
753
+ internvl_chat_config = InternVLChatConfig(
754
+ vision_config.to_dict(), llm_config.to_dict(), downsample_ratio=data_args.down_sample_ratio,
755
+ pad2square=data_args.pad2square, template=data_args.conv_style,
756
+ select_layer=model_args.vision_select_layer, dynamic_image_size=data_args.dynamic_image_size,
757
+ use_thumbnail=data_args.use_thumbnail, ps_version=model_args.ps_version,
758
+ min_dynamic_patch=data_args.min_dynamic_patch, max_dynamic_patch=data_args.max_dynamic_patch)
759
+ internvl_chat_config.force_image_size = data_args.force_image_size
760
+ logger.info('Building InternVLChatModel...')
761
+ model = InternVLChatModel(internvl_chat_config, vision_model, llm)
762
+ model.img_context_token_id = img_context_token_id
763
+
764
+ assert model.config.downsample_ratio == data_args.down_sample_ratio
765
+
766
+ if model_args.mlp_path is not None:
767
+ logger.info('Loading pretrained MLP projector...')
768
+ state_dict = torch.load(model_args.mlp_path, map_location='cpu')
769
+ message = model.mlp1.load_state_dict(state_dict)
770
+ logger.info(message)
771
+ logger.info('Finished')
772
+
773
+ patch_size = model.config.vision_config.patch_size
774
+ logger.info(f'model.config.force_image_size: {model.config.force_image_size}')
775
+ logger.info(f'data_args.force_image_size: {data_args.force_image_size}')
776
+ logger.info(f'model.config.vision_config.image_size: {model.config.vision_config.image_size}')
777
+ if model.config.vision_config.image_size != data_args.force_image_size:
778
+ logger.info(f'Resizing position embedding from '
779
+ f'{model.config.vision_config.image_size} '
780
+ f'to {data_args.force_image_size}...')
781
+ model.vision_model.resize_pos_embeddings(old_size=model.config.vision_config.image_size,
782
+ new_size=data_args.force_image_size,
783
+ patch_size=patch_size)
784
+ model.config.vision_config.image_size = data_args.force_image_size
785
+ model.config.force_image_size = data_args.force_image_size
786
+ model.num_image_token = int((data_args.force_image_size // patch_size) ** 2 * (data_args.down_sample_ratio ** 2))
787
+
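For reference, the num_image_token arithmetic above with typical InternVL2 values (assumed here, not read from this config):

    force_image_size, patch_size, down_sample_ratio = 448, 14, 0.5   # assumed values
    num_image_token = int((force_image_size // patch_size) ** 2 * (down_sample_ratio ** 2))
    print(num_image_token)  # (448 // 14) ** 2 * 0.25 = 32 * 32 * 0.25 = 256 tokens per tile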
788
+ if num_new_tokens > 0:
789
+ model.language_model.resize_token_embeddings(len(tokenizer))
790
+ output_embeddings = model.language_model.get_output_embeddings().weight.data
791
+ output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
792
+ output_embeddings[-num_new_tokens:] = output_embeddings_avg
793
+
794
+ model.config.llm_config.vocab_size = len(tokenizer)
795
+ model.language_model.config.vocab_size = len(tokenizer)
796
+
797
+ model.language_model.config.use_cache = False
798
+ model.vision_model.gradient_checkpointing = True
799
+ model.vision_model.encoder.gradient_checkpointing = True
800
+ if model_args.grad_checkpoint:
801
+ model.language_model._set_gradient_checkpointing()
802
+
803
+ train_dataset = build_datasets(
804
+ data_args, tokenizer, tcs_loader, model, group_by_length=training_args.group_by_length,
805
+ dynamic_image_size=data_args.dynamic_image_size, use_thumbnail=data_args.use_thumbnail,
806
+ min_dynamic_patch=data_args.min_dynamic_patch, max_dynamic_patch=data_args.max_dynamic_patch,
807
+ normalize_type=data_args.normalize_type)
808
+
809
+ def _freeze_params(module):
810
+ for param in module.parameters():
811
+ param.requires_grad = False
812
+
813
+ if model_args.freeze_backbone:
814
+ # model.vision_model = model.vision_model.eval()
815
+ _freeze_params(model.vision_model)
816
+
817
+ if model_args.freeze_llm:
818
+ model.language_model = model.language_model.eval()
819
+ _freeze_params(model.language_model)
820
+
821
+ if model_args.unfreeze_lm_head:
822
+ model.language_model.lm_head.requires_grad_(True)  # use requires_grad_() so the flag actually reaches the parameters
823
+
824
+ if model_args.use_backbone_lora:
825
+ model.wrap_backbone_lora(r=model_args.use_backbone_lora, lora_alpha=2 * model_args.use_backbone_lora)
826
+ model.config.use_backbone_lora = model_args.use_backbone_lora
827
+
828
+ if model_args.use_llm_lora:
829
+ model.wrap_llm_lora(r=model_args.use_llm_lora, lora_alpha=2 * model_args.use_llm_lora)
830
+ model.config.use_llm_lora = model_args.use_llm_lora
831
+
832
+ if model_args.freeze_mlp:
833
+ _freeze_params(model.mlp1)
834
+
835
+ if model_args.unfreeze_vit_layers != 0:
836
+ layers = model.vision_model.encoder.layers[model_args.unfreeze_vit_layers:]
837
+ for k, v in layers.named_parameters():
838
+ logger.info(f'Unfreezing ViT layer: {k}')
839
+ v.requires_grad = True
840
+
841
+ # print trainable parameters
842
+ if dist.get_rank() == 0:
843
+ for name, param in model.named_parameters():
844
+ if param.requires_grad:
845
+ logger.info(name)
846
+
847
+ # set seed for torch dataloaders
848
+ set_seed(training_args.seed)
849
+
850
+ # Initialize our Trainer
851
+ if model_args.use_custom_trainer:
852
+ replace_create_optimizer()
853
+
854
+ trainer = Trainer(
855
+ model=model,
856
+ args=training_args,
857
+ train_dataset=train_dataset if training_args.do_train else None,
858
+ eval_dataset=None,
859
+ tokenizer=tokenizer,
860
+ data_collator=concat_pad_data_collator
861
+ )
862
+
863
+ # Training
864
+ if training_args.do_train:
865
+ checkpoint = None
866
+ if training_args.resume_from_checkpoint is not None:
867
+ checkpoint = training_args.resume_from_checkpoint
868
+ elif last_checkpoint is not None:
869
+ checkpoint = last_checkpoint
870
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
871
+ trainer.save_model() # Saves the tokenizer too for easy upload
872
+
873
+ metrics = train_result.metrics
874
+ try:
875
+ metrics['train_samples'] = len(train_dataset)
876
+ except Exception:
877
+ metrics['train_samples'] = -1
878
+
879
+ trainer.log_metrics('train', metrics)
880
+ trainer.save_metrics('train', metrics)
881
+ trainer.save_state()
882
+
883
+
884
+ if __name__ == '__main__':
885
+ main()
isolated/sim_greedy/upstream_sgl/internvl/train/trainer_monkey_patch.py ADDED
@@ -0,0 +1,159 @@
1
+ import json
2
+ import os
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import transformers
7
+ from transformers import Trainer, logging
8
+ from transformers.trainer import is_sagemaker_mp_enabled
9
+
10
+ logger = logging.get_logger(__name__)
11
+
12
+
13
+ def get_num_layer_for_vit_and_qllama(var_name, vit_num_max_layer, llama_num_max_layer):
14
+ if var_name.startswith('internvl.'):
15
+ var_name = var_name[len('internvl.'):]
16
+ if var_name in ('query_tokens', 'logit_scale',):
17
+ return 0
18
+ if var_name.startswith('clip_projector.'):
19
+ return vit_num_max_layer
20
+ if var_name.startswith('clip_projector2.') or var_name.startswith('itm_head.') or \
21
+ var_name == 'text_projection':
22
+ return llama_num_max_layer
23
+ if var_name.startswith('vision_model.'):
24
+ if 'embeddings.' in var_name:
25
+ return 0
26
+ if 'layers.' in var_name:
27
+ var_name = var_name.split('layers.')[-1]
28
+ layer_id = int(var_name.split('.')[0])
29
+ return layer_id + 1
30
+ if var_name.startswith('qllama.'):
31
+ if 'embed_tokens' in var_name:
32
+ return 0
33
+ if 'layers.' in var_name:
34
+ var_name = var_name.split('layers.')[-1]
35
+ layer_id = int(var_name.split('.')[0])
36
+ return layer_id + 1
37
+ else:
38
+ return llama_num_max_layer
39
+ return 0
40
+
41
+
42
+ def param_classification(name):
43
+ if name.startswith('internvl.'):
44
+ name = name[len('internvl.'):]
45
+ if name in ['query_tokens', 'text_projection', 'logit_scale']:
46
+ return 'qllama'
47
+ elif name.startswith('vision_model.'):
48
+ return 'vit'
49
+ elif name.startswith('qllama.'):
50
+ return 'qllama'
51
+ elif name.startswith('clip_projector.'):
52
+ return 'vit'
53
+ elif name.startswith('clip_projector2.'):
54
+ return 'qllama'
55
+ elif name.startswith('itm_head.'):
56
+ return 'qllama'
57
+ else:
58
+ return 'other'
59
+
60
+
61
+ def create_optimizer(self):
62
+ """
63
+ Setup the optimizer.
64
+
65
+ We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
66
+ Trainer's init through `optimizers`, or subclass and override this method in a subclass.
67
+ """
68
+ opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
69
+
70
+ parameter_groups = {}
71
+ try: # for stage2 model
72
+ vit_num_layers = opt_model.config.vision_config.num_hidden_layers + 2
73
+ qllama_num_layers = opt_model.config.qllama_config.num_hidden_layers + 2
74
+ except Exception: # for stage3 model
75
+ vit_num_layers = opt_model.internvl.config.vision_config.num_hidden_layers + 2
76
+ qllama_num_layers = opt_model.internvl.config.qllama_config.num_hidden_layers + 2
77
+ print('vit_num_layers:', vit_num_layers)
78
+ print('qllama_num_layers:', qllama_num_layers)
79
+
80
+ vit_layer_decay_rate = float(os.getenv('VIT_LAYER_DECAY_RATE', 1.0))
81
+ qllama_layer_decay_rate = float(os.getenv('QLLAMA_LAYER_DECAY_RATE', 1.0))
82
+ qllama_lr_scale = float(os.getenv('QLLAMA_LR_SCALE', 1.0))
83
+ print('vit_layer_decay_rate:', vit_layer_decay_rate)
84
+ print('qllama_layer_decay_rate:', qllama_layer_decay_rate)
85
+ print('qllama_lr_scale:', qllama_lr_scale)
86
+
87
+ for name, param in opt_model.named_parameters():
88
+ if not param.requires_grad:
89
+ continue # frozen weights
90
+ if len(param.shape) == 1 or name.endswith('.bias'):
91
+ group_name = 'no_decay'
92
+ this_weight_decay = 0.
93
+ else:
94
+ group_name = 'decay'
95
+ this_weight_decay = self.args.weight_decay
96
+
97
+ cls = param_classification(name)
98
+ layer_id = get_num_layer_for_vit_and_qllama(name, vit_num_layers, qllama_num_layers)
99
+ group_name = '%s_layer_%d_%s' % (cls, layer_id, group_name)
100
+ if group_name not in parameter_groups:
101
+ if cls == 'vit':
102
+ scale = vit_layer_decay_rate ** (vit_num_layers - layer_id - 1)
103
+ elif cls == 'qllama':
104
+ scale = qllama_layer_decay_rate ** (qllama_num_layers - layer_id - 1)
105
+ scale = scale * qllama_lr_scale
106
+ else:
107
+ scale = 1.0
108
+ scale = min(1.0, scale)
109
+ parameter_groups[group_name] = {
110
+ 'weight_decay': this_weight_decay,
111
+ 'params': [],
112
+ 'param_names': [],
113
+ 'lr_scale': scale,
114
+ 'group_name': group_name,
115
+ 'lr': scale * self.args.learning_rate,
116
+ }
117
+ parameter_groups[group_name]['params'].append(param)
118
+ parameter_groups[group_name]['param_names'].append(name)
119
+
120
+ rank = torch.distributed.get_rank()
121
+ if rank == 0:
122
+ to_display = {}
123
+ for key in parameter_groups:
124
+ to_display[key] = {
125
+ 'param_names': parameter_groups[key]['param_names'],
126
+ 'lr_scale': parameter_groups[key]['lr_scale'],
127
+ 'lr': parameter_groups[key]['lr'],
128
+ 'weight_decay': parameter_groups[key]['weight_decay'],
129
+ }
130
+ print('Param groups = %s' % json.dumps(to_display, indent=2))
131
+
132
+ optimizer_grouped_parameters = list(parameter_groups.values())
133
+ optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)
134
+
135
+ self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
136
+ if optimizer_cls.__name__ == 'Adam8bit':
137
+ import bitsandbytes
138
+
139
+ manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
140
+
141
+ skipped = 0
142
+ for module in opt_model.modules():
143
+ if isinstance(module, nn.Embedding):
144
+ skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
145
+ logger.info(f'skipped {module}: {skipped / 2 ** 20}M params')
146
+ manager.register_module_override(module, 'weight', {'optim_bits': 32})
147
+ logger.debug(f'bitsandbytes: will optimize {module} in fp32')
148
+ logger.info(f'skipped: {skipped / 2 ** 20}M params')
149
+
150
+ if is_sagemaker_mp_enabled():
151
+ import smdistributed.modelparallel.torch as smp
152
+ self.optimizer = smp.DistributedOptimizer(self.optimizer)
153
+
154
+ return self.optimizer
155
+
156
+
157
+ def replace_create_optimizer():
158
+ print('Replace original create_optimizer with custom create_optimizer')
159
+ transformers.Trainer.create_optimizer = create_optimizer
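The custom optimizer applies layer-wise learning-rate decay: earlier layers get exponentially smaller scales. A small worked example with assumed values (the real rates come from the VIT_LAYER_DECAY_RATE / QLLAMA_LAYER_DECAY_RATE environment variables):

    # Hypothetical numbers; mirrors the scale formula in create_optimizer above.
    vit_layer_decay_rate = 0.9
    vit_num_layers = 26          # e.g. 24 encoder blocks + 2 buckets for embeddings / head
    for layer_id in (0, 13, 25):
        scale = min(1.0, vit_layer_decay_rate ** (vit_num_layers - layer_id - 1))
        print(layer_id, round(scale, 4))
    # 0  0.0718   (embedding bucket learns slowest)
    # 13 0.2824
    # 25 1.0      (top bucket keeps the full learning rate)

In the finetune script above, replace_create_optimizer() is called before the Trainer is constructed (when model_args.use_custom_trainer is set), so Trainer.create_optimizer picks up these parameter groups.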
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/run.log ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/textvqa_shared_vision_1bguide_8btext_keep09_random.filter_debug.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/textvqa_shared_vision_1bguide_8btext_keep09_random.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/textvqa_shared_vision_1bguide_8btext_keep09_random.summary.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "mode": "shared_vision_guided",
3
+ "guide_checkpoint": "/root/models/InternVL2-1B",
4
+ "large_checkpoint": "/root/models/InternVL2-8B",
5
+ "count": 5000,
6
+ "accuracy": 0.320899999999999,
7
+ "large_model_prune_layer": 0.0,
8
+ "large_model_prune_ratio": 0.09,
9
+ "large_model_prune_selection": "random",
10
+ "consistency_token_ratio": 0.05,
11
+ "guide_reasoning_mode": "none",
12
+ "guide_reasoning_max_new_tokens": 1024,
13
+ "guide_reasoning_filter_mode": "none",
14
+ "guide_attention_source": "answer",
15
+ "guide_reasoning_attention_weight": 1.0,
16
+ "guide_answer_attention_weight": 1.0,
17
+ "guide_question_attention_weight": 1.0,
18
+ "guide_text_mode": "none",
19
+ "guide_text_max_new_tokens": 12,
20
+ "avg_small_model_time": 0.27918549451828,
21
+ "avg_large_model_time": 0.15921860122680664,
22
+ "results_file": "/root/SGL_new/outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/textvqa_shared_vision_1bguide_8btext_keep09_random.json",
23
+ "filter_debug_file": "/root/SGL_new/outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random/textvqa_shared_vision_1bguide_8btext_keep09_random.filter_debug.json"
24
+ }
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/run.log ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/textvqa_shared_vision_1bguide_8btext_keep09_random_gpu1.filter_debug.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/textvqa_shared_vision_1bguide_8btext_keep09_random_gpu1.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/textvqa_shared_vision_1bguide_8btext_keep09_random_gpu1.summary.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "mode": "shared_vision_guided",
3
+ "guide_checkpoint": "/root/models/InternVL2-1B",
4
+ "large_checkpoint": "/root/models/InternVL2-8B",
5
+ "count": 5000,
6
+ "accuracy": 0.320899999999999,
7
+ "large_model_prune_layer": 0.0,
8
+ "large_model_prune_ratio": 0.09,
9
+ "large_model_prune_selection": "random",
10
+ "consistency_token_ratio": 0.05,
11
+ "guide_reasoning_mode": "none",
12
+ "guide_reasoning_max_new_tokens": 1024,
13
+ "guide_reasoning_filter_mode": "none",
14
+ "guide_attention_source": "answer",
15
+ "guide_reasoning_attention_weight": 1.0,
16
+ "guide_answer_attention_weight": 1.0,
17
+ "guide_question_attention_weight": 1.0,
18
+ "guide_text_mode": "none",
19
+ "guide_text_max_new_tokens": 12,
20
+ "avg_small_model_time": 0.276790934753418,
21
+ "avg_large_model_time": 0.1592107618331909,
22
+ "results_file": "/root/SGL_new/outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/textvqa_shared_vision_1bguide_8btext_keep09_random_gpu1.json",
23
+ "filter_debug_file": "/root/SGL_new/outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep09_random_gpu1/textvqa_shared_vision_1bguide_8btext_keep09_random_gpu1.filter_debug.json"
24
+ }
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/run.log ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/textvqa_shared_vision_1bguide_8btext_keep40_random.filter_debug.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/textvqa_shared_vision_1bguide_8btext_keep40_random.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/textvqa_shared_vision_1bguide_8btext_keep40_random.summary.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "mode": "shared_vision_guided",
3
+ "guide_checkpoint": "/root/models/InternVL2-1B",
4
+ "large_checkpoint": "/root/models/InternVL2-8B",
5
+ "count": 5000,
6
+ "accuracy": 0.6294800000000021,
7
+ "large_model_prune_layer": 0.0,
8
+ "large_model_prune_ratio": 0.4,
9
+ "large_model_prune_selection": "random",
10
+ "consistency_token_ratio": 0.05,
11
+ "guide_reasoning_mode": "none",
12
+ "guide_reasoning_max_new_tokens": 1024,
13
+ "guide_reasoning_filter_mode": "none",
14
+ "guide_attention_source": "answer",
15
+ "guide_reasoning_attention_weight": 1.0,
16
+ "guide_answer_attention_weight": 1.0,
17
+ "guide_question_attention_weight": 1.0,
18
+ "guide_text_mode": "none",
19
+ "guide_text_max_new_tokens": 12,
20
+ "avg_small_model_time": 0.2767399290084839,
21
+ "avg_large_model_time": 0.21510390257835388,
22
+ "results_file": "/root/SGL_new/outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/textvqa_shared_vision_1bguide_8btext_keep40_random.json",
23
+ "filter_debug_file": "/root/SGL_new/outputs/full_shared_vision_1bguide_8btext_random_20260511_0932/keep40_random/textvqa_shared_vision_1bguide_8btext_keep40_random.filter_debug.json"
24
+ }
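The *.summary.json files above share the same keys, so comparing runs (for example keep 0.09 vs 0.40) takes only a short script. A hypothetical helper, not part of the repo, assuming the avg_*_time fields are per-sample seconds:

    import glob
    import json

    rows = []
    for path in glob.glob('outputs/**/*.summary.json', recursive=True):
        with open(path) as f:
            s = json.load(f)
        rows.append((s['large_model_prune_ratio'], s['accuracy'],
                     s['avg_small_model_time'], s['avg_large_model_time']))

    for ratio, acc, t_guide, t_large in sorted(rows):
        print(f'ratio={ratio:.2f}  acc={acc:.4f}  guide={t_guide:.3f}s  large={t_large:.3f}s')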
outputs/full_shared_vision_1bguide_8btext_rawalign_prune0p09_restart/full_shared_vision_1bguide_8btext_rawalign_prune0p09_restart.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/full_shared_vision_1bguide_8btext_rawalign_prune0p09_restart/run.log ADDED
The diff for this file is too large to render. See raw diff