yujingfeng committed
Commit a0370d5 · verified · 1 parent: f2cf2db

Upload 2 files

Files changed (2)
  1. configuration_qwen2_5_vl.py +68 -0
  2. modeling_qwen2_5_vl.py +934 -0
configuration_qwen2_5_vl.py ADDED
@@ -0,0 +1,68 @@
+ from transformers import PretrainedConfig
+
+
+ class Qwen2_5_VLConfig(PretrainedConfig):
+     model_type = "qwen2_5_vl"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         emb_dropout_prob=0.0,
+         attn_dropout_prob=0.0,
+         layer_norm_epsilon=1e-6,
+         initializer_range=0.02,
+         max_position_embeddings=8192,
+         seq_length=8192,  # added: the modeling code reads config.seq_length (log-n attention, dynamic NTK); value assumed equal to max_position_embeddings
+         scale_attn_weights=True,
+         use_cache=True,
+         bf16=False,
+         fp16=False,
+         fp32=False,
+         kv_channels=128,
+         rotary_pct=1.0,
+         rotary_emb_base=1000000,
+         use_dynamic_ntk=True,
+         use_logn_attn=True,
+         use_flash_attn="auto",
+         intermediate_size=11008,
+         no_bias=True,
+         tie_word_embeddings=False,
+         visual=None,  # None instead of a mutable dict default; filled in below
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.emb_dropout_prob = emb_dropout_prob
+         self.attn_dropout_prob = attn_dropout_prob
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.scale_attn_weights = scale_attn_weights
+         self.use_cache = use_cache
+         self.max_position_embeddings = max_position_embeddings
+         self.seq_length = seq_length
+         self.bf16 = bf16
+         self.fp16 = fp16
+         self.fp32 = fp32
+         self.kv_channels = kv_channels
+         self.rotary_pct = rotary_pct
+         self.rotary_emb_base = rotary_emb_base
+         self.use_dynamic_ntk = use_dynamic_ntk
+         self.use_logn_attn = use_logn_attn
+         self.use_flash_attn = use_flash_attn
+         self.no_bias = no_bias
+         self.visual = visual if visual is not None else dict(
+             image_start_id=151857,
+             image_end_id=151858,
+             image_size=448,
+             patch_size=14,
+             hidden_size=4096,
+             num_hidden_layers=32,
+             num_attention_heads=32,
+         )
+         super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
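
A minimal usage sketch for the configuration class above (the reduced layer count and the save path are illustrative, not values from this upload):

    from configuration_qwen2_5_vl import Qwen2_5_VLConfig

    # Build a small config and round-trip it through the save/load
    # machinery that PretrainedConfig provides.
    config = Qwen2_5_VLConfig(num_hidden_layers=2, num_attention_heads=8, hidden_size=1024)
    print(config.model_type)  # -> "qwen2_5_vl"
    config.save_pretrained("./qwen2_5_vl_demo")
    reloaded = Qwen2_5_VLConfig.from_pretrained("./qwen2_5_vl_demo")
    assert reloaded.visual["image_start_id"] == 151857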
modeling_qwen2_5_vl.py ADDED
@@ -0,0 +1,934 @@
+ import importlib
+ import math
+ from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
+
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.cuda.amp import autocast
+ from torch.nn import CrossEntropyLoss
+ from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
+ from transformers.generation.logits_process import LogitsProcessorList
+ from transformers.generation.utils import GenerateOutput
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ from .configuration_qwen2_5_vl import Qwen2_5_VLConfig  # <-- new class name
+ from .qwen_generation_utils import (
+     HistoryType, make_context, decode_tokens,
+     get_stop_words_ids, StopWordsLogitsProcessor
+ )
+ from .visual import VisionTransformer
+
+ if TYPE_CHECKING:
+     from transformers.generation.streamers import BaseStreamer
+
+ try:
+     from einops import rearrange
+ except ImportError:
+     rearrange = None
+
+ logger = logging.get_logger(__name__)
+
+ # added: constants referenced later in this file but missing from the upload
+ SUPPORT_CUDA = torch.cuda.is_available()
+ SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
+ SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability()[0] >= 7
+
+ _SENTINEL = object()
+ _ERROR_STREAM_IN_CHAT = "Passing `stream` to chat() is deprecated; use chat_stream() instead."
+ _ERROR_BAD_CHAT_FORMAT = "Only chat_format 'chatml' is supported by chat()/chat_stream()."
+
+ # ---------- add the following code at the top of the file ----------
+ class RMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.eps = eps
+
+     def forward(self, hidden_states):
+         # Compute the norm in float32 for numerical stability, then cast back.
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
+         return self.weight * hidden_states.to(input_dtype)
+
+ def rotate_half(x):
+     x1, x2 = x.chunk(2, dim=-1)
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(t, freqs):
+     # t: [batch, seq, heads, head_dim]; freqs: [cos, sin], each [1, seq, 1, rotary_dim].
+     # Signature reconciled with the call sites in Qwen2_5_VLAttention.forward,
+     # which pass the [cos, sin] pair as a single argument.
+     cos, sin = freqs
+     rot_dim = cos.shape[-1]
+     t_rot, t_pass = t[..., :rot_dim], t[..., rot_dim:]
+     t_rot = (t_rot * cos) + (rotate_half(t_rot) * sin)
+     return torch.cat((t_rot, t_pass), dim=-1)
+
+
+ class RotaryEmbedding(nn.Module):
+     def __init__(self, dim, base=1000000):
+         super().__init__()
+         self.dim = dim
+         self.base = base
+         self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
+         self._seq_len_cached = 0
+         self._ntk_alpha_cached = 1.0
+         self._cos_cached = None
+         self._sin_cached = None
+
+     def _update_cos_sin_cache(self, seq_len, ntk_alpha=1.0):
+         if seq_len <= self._seq_len_cached and ntk_alpha == self._ntk_alpha_cached:
+             return
+         # Dynamic-NTK scaling: enlarge the rotary base to stretch the position range.
+         base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
+         self.inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float() / self.dim))
+         t = torch.arange(seq_len).type_as(self.inv_freq)
+         freqs = torch.outer(t, self.inv_freq)
+         emb = torch.cat((freqs, freqs), dim=-1)
+         # Shape [1, seq, 1, dim] so the embeddings broadcast over batch and heads.
+         self._cos_cached = emb.cos()[None, :, None, :]
+         self._sin_cached = emb.sin()[None, :, None, :]
+         self._seq_len_cached = seq_len
+         self._ntk_alpha_cached = ntk_alpha
+
+     def forward(self, seq_len, ntk_alpha=1.0):
+         # ntk_alpha support added to match the call in Qwen2_5_VLModel.forward,
+         # which also reads back self._ntk_alpha_cached.
+         self._update_cos_sin_cache(seq_len, ntk_alpha)
+         return [self._cos_cached[:, :seq_len], self._sin_cached[:, :seq_len]]
+
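+ # Worked example of the dynamic-NTK schedule consumed by this embedding (the
+ # concrete numbers are illustrative; ntk_alpha is chosen in
+ # Qwen2_5_VLModel.forward below): with seq_length = 8192 and a prompt of
+ # kv_seq_len = 20000 tokens, context_value = log2(20000 / 8192) + 1 is about
+ # 2.29, so ntk_alpha = 2**3 - 1 = 7 and the effective rotary base grows to
+ # base * 7**(dim / (dim - 2)).
+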
+ class Qwen2_5_VLAttention(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+
+         self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
+         self.seq_length = config.seq_length
+
+         self.hidden_size = config.hidden_size
+         self.split_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+
+         self.scale_attn_weights = True
+
+         self.projection_size = config.kv_channels * config.num_attention_heads
+
+         assert self.projection_size % config.num_attention_heads == 0
+         self.hidden_size_per_attention_head = (
+             self.projection_size // config.num_attention_heads
+         )
+
+         self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)
+
+         self.c_proj = nn.Linear(
+             config.hidden_size, self.projection_size, bias=not config.no_bias
+         )
+
+         self.is_fp32 = not (config.bf16 or config.fp16)
+         self.bf16 = config.bf16
+
+         self.use_dynamic_ntk = config.use_dynamic_ntk
+         self.use_logn_attn = config.use_logn_attn
+
+         # Log-n scaling: positions beyond the training length get their queries
+         # scaled by log_seq_length(i) to stabilize attention entropy.
+         logn_list = [
+             math.log(i, self.seq_length) if i > self.seq_length else 1
+             for i in range(1, 32768)
+         ]
+         self.logn_tensor = torch.tensor(logn_list)[None, :, None, None]
+
+         self.attn_dropout = nn.Dropout(config.attn_dropout_prob)
+
+     def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):
+         attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+         if self.scale_attn_weights:
+             attn_weights = attn_weights / torch.full(
+                 [],
+                 value.size(-1) ** 0.5,
+                 dtype=attn_weights.dtype,
+                 device=attn_weights.device,
+             )
+
+         # The causal mask is already folded into attention_mask by
+         # _prepare_decoder_attention_mask; guard against the None case
+         # (single-token decoding steps with no padding mask).
+         if attention_mask is not None:
+             attn_weights = attn_weights + attention_mask
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+         attn_weights = attn_weights.type(value.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         if head_mask is not None:
+             attn_weights = attn_weights * head_mask
+
+         attn_output = torch.matmul(attn_weights, value)
+         attn_output = attn_output.transpose(1, 2)
+
+         return attn_output, attn_weights
+
+     def _upcast_and_reordered_attn(
+         self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None
+     ):
+         bsz, num_heads, q_seq_len, dk = query.size()
+         _, _, k_seq_len, _ = key.size()
+
+         attn_weights = torch.empty(
+             bsz * num_heads,
+             q_seq_len,
+             k_seq_len,
+             dtype=torch.float32,
+             device=query.device,
+         )
+
+         scale_factor = 1.0
+         if self.scale_attn_weights:
+             scale_factor /= float(value.size(-1)) ** 0.5
+
+         with autocast(enabled=False):
+             q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(
+                 -1, dk, k_seq_len
+             )
+             attn_weights = torch.baddbmm(
+                 attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor
+             )
+             attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
+
+         query_length, key_length = query.size(-2), key.size(-2)
+         causal_mask = registered_causal_mask[
+             :, :, key_length - query_length : key_length, :key_length
+         ]
+         mask_value = torch.finfo(attn_weights.dtype).min
+         mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
+         attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+         if attention_mask is not None:
+             attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+         if attn_weights.dtype != torch.float32:
+             raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
+
+         attn_weights = attn_weights.type(value.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         if head_mask is not None:
+             attn_weights = attn_weights * head_mask
+
+         attn_output = torch.matmul(attn_weights, value)
+
+         return attn_output, attn_weights
+
+     def _split_heads(self, tensor, num_heads, attn_head_size):
+         new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
+         return tensor.view(new_shape)
+
+     def _merge_heads(self, tensor, num_heads, attn_head_size):
+         tensor = tensor.contiguous()
+         new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
+         return tensor.view(new_shape)
+
+     def forward(
+         self,
+         hidden_states: Optional[Tuple[torch.FloatTensor]],
+         rotary_pos_emb: Optional[List[torch.Tensor]] = None,
+         registered_causal_mask: Optional[torch.Tensor] = None,
+         layer_past: Optional[Tuple[torch.Tensor]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         encoder_attention_mask: Optional[torch.FloatTensor] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+     ):
+         mixed_x_layer = self.c_attn(hidden_states)
+         query, key, value = mixed_x_layer.split(self.split_size, dim=2)
+         query = self._split_heads(query, self.num_heads, self.head_dim)
+         key = self._split_heads(key, self.num_heads, self.head_dim)
+         value = self._split_heads(value, self.num_heads, self.head_dim)
+
+         if rotary_pos_emb is not None:
+             # Slice the [cos, sin] pair to the current window; queries and keys
+             # share the same positional embedding.
+             cur_len = query.shape[1]
+             rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
+             q_pos_emb = k_pos_emb = rotary_pos_emb
+             query = apply_rotary_pos_emb(query, q_pos_emb)
+             key = apply_rotary_pos_emb(key, k_pos_emb)
+
+         if layer_past is not None:
+             past_key, past_value = layer_past
+             key = torch.cat((past_key, key), dim=1)
+             value = torch.cat((past_value, value), dim=1)
+
+         present = (key, value) if use_cache else None
+
+         if self.use_logn_attn and not self.training:
+             if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype:
+                 self.logn_tensor = self.logn_tensor.to(query.device).type_as(query)
+             seq_start = key.size(1) - query.size(1)
+             seq_end = key.size(1)
+             logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :]
+             query = query * logn_tensor.expand_as(query)
+
+         # [batch, seq, heads, head_dim] -> [batch, heads, seq, head_dim]
+         query = query.permute(0, 2, 1, 3)
+         key = key.permute(0, 2, 1, 3)
+         value = value.permute(0, 2, 1, 3)
+
+         attn_output, attn_weight = self._attn(
+             query, key, value, registered_causal_mask, attention_mask, head_mask
+         )
+         context_layer = self._merge_heads(attn_output, self.num_heads, self.head_dim)
+         attn_output = self.c_proj(context_layer)
+
+         outputs = (attn_output, present)
+         if output_attentions:
+             outputs += (attn_weight,)
+         return outputs
+
+
+ class Qwen2_5_VLMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.w1 = nn.Linear(
+             config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
+         )
+         self.w2 = nn.Linear(
+             config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
+         )
+         ff_dim_in = config.intermediate_size // 2
+         self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)
+
+     def forward(self, hidden_states):
+         # SwiGLU: gate one projection with SiLU of the other.
+         a1 = self.w1(hidden_states)
+         a2 = self.w2(hidden_states)
+         intermediate_parallel = a1 * F.silu(a2)
+         output = self.c_proj(intermediate_parallel)
+         return output
+
+
+ class Qwen2_5_VLBlock(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         hidden_size = config.hidden_size
+         self.bf16 = config.bf16
+
+         self.ln_1 = RMSNorm(
+             hidden_size,
+             eps=config.layer_norm_epsilon,
+         )
+         self.attn = Qwen2_5_VLAttention(config)
+         self.ln_2 = RMSNorm(
+             hidden_size,
+             eps=config.layer_norm_epsilon,
+         )
+
+         self.mlp = Qwen2_5_VLMLP(config)
+
+     def forward(
+         self,
+         hidden_states: Optional[Tuple[torch.FloatTensor]],
+         rotary_pos_emb: Optional[List[torch.Tensor]] = None,
+         registered_causal_mask: Optional[torch.Tensor] = None,
+         layer_past: Optional[Tuple[torch.Tensor]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         encoder_attention_mask: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = False,
+         output_attentions: Optional[bool] = False,
+     ):
+         # Pre-norm residual layout: x + Attn(LN(x)), then + MLP(LN(...)).
+         layernorm_output = self.ln_1(hidden_states)
+
+         attn_outputs = self.attn(
+             layernorm_output,
+             rotary_pos_emb,
+             registered_causal_mask=registered_causal_mask,
+             layer_past=layer_past,
+             attention_mask=attention_mask,
+             head_mask=head_mask,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+         )
+         attn_output = attn_outputs[0]
+         outputs = attn_outputs[1:]
+
+         residual = hidden_states
+         layernorm_input = attn_output + residual
+
+         layernorm_output = self.ln_2(layernorm_input)
+
+         residual = layernorm_input
+         mlp_output = self.mlp(layernorm_output)
+         hidden_states = residual + mlp_output
+
+         if use_cache:
+             outputs = (hidden_states,) + outputs
+         else:
+             outputs = (hidden_states,) + outputs[1:]
+
+         return outputs
+
+
+ class Qwen2_5_VLPreTrainedModel(PreTrainedModel):
+     config_class = Qwen2_5_VLConfig
+     base_model_prefix = "transformer"
+     is_parallelizable = False
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["Qwen2_5_VLBlock"]
+
+     def __init__(self, *inputs, **kwargs):
+         super().__init__(*inputs, **kwargs)
+
+     def _init_weights(self, module):
+         """Initialize the weights."""
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+         elif isinstance(module, RMSNorm):
+             module.weight.data.fill_(1.0)
+
+         # GPT-2-style init: scale residual projections by 1/sqrt(2 * num_layers).
+         for name, p in module.named_parameters():
+             if name == "c_proj.weight":
+                 p.data.normal_(
+                     mean=0.0,
+                     std=(
+                         self.config.initializer_range
+                         / math.sqrt(2 * self.config.num_hidden_layers)
+                     ),
+                 )
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, Qwen2_5_VLModel):
+             module.gradient_checkpointing = value
+
+
+ # The two helpers below mirror the standard LLaMA-style mask utilities from the
+ # transformers library; they were referenced by _prepare_decoder_attention_mask
+ # but missing from the original upload.
+ def _make_causal_mask(input_ids_shape, dtype, device, past_key_values_length=0):
+     # Lower-triangular additive causal mask, dtype-min above the diagonal.
+     bsz, tgt_len = input_ids_shape
+     mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
+     mask_cond = torch.arange(mask.size(-1), device=device)
+     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+     mask = mask.to(dtype)
+     if past_key_values_length > 0:
+         mask = torch.cat(
+             [torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1
+         )
+     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
+
+
+ def _expand_mask(mask, dtype, tgt_len=None):
+     # Expand a [bsz, src_len] padding mask to an additive [bsz, 1, tgt_len, src_len] mask.
+     bsz, src_len = mask.size()
+     tgt_len = tgt_len if tgt_len is not None else src_len
+     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+     inverted_mask = 1.0 - expanded_mask
+     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+
+ class Qwen2_5_VLModel(Qwen2_5_VLPreTrainedModel):
+     _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.vocab_size = config.vocab_size
+         self.num_hidden_layers = config.num_hidden_layers
+         self.embed_dim = config.hidden_size
+
+         self.gradient_checkpointing = False
+         self.use_dynamic_ntk = config.use_dynamic_ntk
+         self.seq_length = config.seq_length
+
+         self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
+         self.drop = nn.Dropout(config.emb_dropout_prob)
+
+         if config.rotary_pct == 1.0:
+             self.rotary_ndims = None
+         else:
+             assert config.rotary_pct < 1
+             self.rotary_ndims = int(config.kv_channels * config.rotary_pct)
+
+         dim = self.rotary_ndims if self.rotary_ndims is not None else config.kv_channels
+         self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
+
+         self.use_flash_attn = config.use_flash_attn
+         self.is_fp32 = not (config.bf16 or config.fp16)
+         self.registered_causal_mask = None  # controls whether the causal mask is registered as a buffer
+
+         self.h = nn.ModuleList(
+             [Qwen2_5_VLBlock(config) for _ in range(config.num_hidden_layers)]
+         )
+         self.ln_f = RMSNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+
+         self.visual = VisionTransformer(**config.visual)
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings):
+         self.wte = new_embeddings
+
+     def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+         # Combine the causal mask with an optional padding mask into a single
+         # additive float mask of shape [bsz, 1, tgt_len, src_len].
+         combined_attention_mask = None
+         if input_shape[-1] > 1:
+             combined_attention_mask = _make_causal_mask(
+                 input_shape,
+                 inputs_embeds.dtype,
+                 device=inputs_embeds.device,
+                 past_key_values_length=past_key_values_length,
+             )
+
+         if attention_mask is not None:
+             expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+                 inputs_embeds.device
+             )
+             combined_attention_mask = (
+                 expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+             )
+
+         return combined_attention_mask
+
+     def forward(
+         self,
+         input_ids=None,
+         past_key_values=None,
+         attention_mask=None,
+         token_type_ids=None,
+         position_ids=None,
+         head_mask=None,
+         inputs_embeds=None,
+         encoder_hidden_states=None,
+         encoder_attention_mask=None,
+         use_cache=None,
+         output_attentions=None,
+         output_hidden_states=None,
+         return_dict=None,
+     ):
+         # On the first (non-cached) pass, recover image references embedded in
+         # the token stream: the ids between the image start/end markers decode
+         # as a UTF-8 path or URL.
+         if past_key_values is None and torch.any(input_ids == self.config.visual['image_start_id']):
+             bos_pos = torch.where(input_ids == self.config.visual['image_start_id'])
+             eos_pos = torch.where(input_ids == self.config.visual['image_start_id'] + 1)
+             assert (bos_pos[0] == eos_pos[0]).all()
+             img_pos = torch.stack((bos_pos[0], bos_pos[1], eos_pos[1]), dim=1)
+             images = []
+             for i, a, b in img_pos:
+                 image = input_ids[i][a + 1 : b - 1].tolist()
+                 image = image[:image.index(self.config.visual['image_start_id'] + 2)]
+                 images.append(bytes(image).decode('utf-8'))
+             images = self.visual.encode(images)
+             fake_images = None
+         elif self.training:
+             # Keep the visual tower in the training graph even for text-only batches.
+             fake_images = torch.zeros(1, 3, 224, 224).to(
+                 dtype=self.visual.conv1.weight.dtype,
+                 device=self.visual.conv1.weight.device
+             )
+             images = self.visual(fake_images)
+         else:
+             fake_images = None
+             images = None
+
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         elif input_ids is not None:
+             input_shape = input_ids.size()
+             input_ids = input_ids.view(-1, input_shape[-1])
+             batch_size = input_ids.shape[0]
+         elif inputs_embeds is not None:
+             input_shape = inputs_embeds.size()[:-1]
+             batch_size = inputs_embeds.shape[0]
+         else:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+         if token_type_ids is not None:
+             token_type_ids = token_type_ids.view(-1, input_shape[-1])
+         if position_ids is not None:
+             position_ids = position_ids.view(-1, input_shape[-1])
+
+         past_key_values = tuple([None] * len(self.h)) if past_key_values is None else past_key_values
+         past_length = 0 if past_key_values[0] is None else past_key_values[0][0].size(-2)
+
+         if position_ids is None:
+             position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
+             position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
+
+         head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+         if inputs_embeds is None:
+             inputs_embeds = self.wte(input_ids)
+
+         attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds, past_length)
+
+         hidden_states = self.drop(inputs_embeds).clone()
+         if fake_images is not None:
+             # Zero-weight term keeps the visual parameters in the autograd graph.
+             hidden_states = hidden_states + images.mean() * 0
+         elif images is not None:
+             # Splice the encoded image features over the image-placeholder tokens.
+             for idx, (i, a, b) in enumerate(img_pos):
+                 hidden_states[i][a + 1 : b] = images[idx]
+
+         output_shape = input_shape + (hidden_states.size(-1),)
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 logger.warning_once(
+                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                 )
+                 use_cache = False
+
+         presents = () if use_cache else None
+         all_self_attentions = () if output_attentions else None
+         all_hidden_states = () if output_hidden_states else None
+
+         kv_seq_len = hidden_states.size()[1]
+         if past_key_values[0] is not None:
+             kv_seq_len += past_key_values[0][0].shape[1]
+
+         # Dynamic NTK: pick the rotary scaling factor from the prompt length.
+         if self.use_dynamic_ntk and kv_seq_len == hidden_states.size()[1] and not self.training:
+             context_value = math.log(kv_seq_len / self.seq_length, 2) + 1
+             ntk_alpha = 2 ** math.ceil(context_value) - 1
+             ntk_alpha = max(ntk_alpha, 1)
+         else:
+             ntk_alpha = self.rotary_emb._ntk_alpha_cached
+
+         rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha)
+         rotary_pos_emb = [r.to(hidden_states.device) for r in rotary_pos_emb]
+
+         for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+                 def create_custom_forward(module):
+                     def custom_forward(*inputs):
+                         return module(*inputs, use_cache, output_attentions)
+                     return custom_forward
+
+                 outputs = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(block),
+                     hidden_states,
+                     rotary_pos_emb,
+                     self.registered_causal_mask,
+                     None,
+                     attention_mask,
+                     head_mask[i],
+                     encoder_hidden_states,
+                     encoder_attention_mask,
+                 )
+             else:
+                 outputs = block(
+                     hidden_states,
+                     rotary_pos_emb=rotary_pos_emb,
+                     registered_causal_mask=self.registered_causal_mask,
+                     layer_past=layer_past,
+                     attention_mask=attention_mask,
+                     head_mask=head_mask[i],
+                     encoder_hidden_states=encoder_hidden_states,
+                     encoder_attention_mask=encoder_attention_mask,
+                     use_cache=use_cache,
+                     output_attentions=output_attentions,
+                 )
+
+             hidden_states = outputs[0]
+             if use_cache:
+                 presents = presents + (outputs[1],)
+             if output_attentions:
+                 all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+         hidden_states = self.ln_f(hidden_states)
+         hidden_states = hidden_states.view(output_shape)
+
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(v for v in [hidden_states, presents, all_hidden_states] if v is not None)
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=presents,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attentions,
+         )
+
+
+ class Qwen2_5_VLForCausalLM(Qwen2_5_VLPreTrainedModel):
+     _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
+     _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]
+
+     def __init__(self, config: Qwen2_5_VLConfig):
+         super().__init__(config)
+
+         assert (
+             config.bf16 + config.fp16 + config.fp32 <= 1
+         ), 'Only one of "bf16", "fp16", "fp32" can be true'
+
+         autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0
+
+         if autoset_precision:
+             if SUPPORT_BF16:
+                 logger.warning("Auto-enabling bf16 for faster inference.")
+                 config.bf16 = True
+             elif SUPPORT_FP16:
+                 logger.warning("Auto-enabling fp16 for faster inference.")
+                 config.fp16 = True
+             else:
+                 config.fp32 = True
+
+         if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
+             logger.warning("bf16 not supported on current device.")
+         if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
+             logger.warning("fp16 not supported on current device.")
+         if config.fp32:
+             if SUPPORT_BF16:
+                 logger.warning("Try bf16=True for faster inference.")
+             elif SUPPORT_FP16:
+                 logger.warning("Try fp16=True for faster inference.")
+
+         self.transformer = Qwen2_5_VLModel(config)
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         if config.bf16:
+             self.transformer.bfloat16()
+             self.lm_head.bfloat16()
+         if config.fp16:
+             self.transformer.half()
+             self.lm_head.half()
+
+         self.post_init()
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def prepare_inputs_for_generation(
+         self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
+     ):
+         token_type_ids = kwargs.get("token_type_ids", None)
+         if past_key_values:
+             # With a cache, only the last token needs to run through the model.
+             input_ids = input_ids[:, -1].unsqueeze(-1)
+             if token_type_ids is not None:
+                 token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
+
+         attention_mask = kwargs.get("attention_mask", None)
+         position_ids = kwargs.get("position_ids", None)
+
+         if attention_mask is not None and position_ids is None:
+             # Build position ids on the fly from the padding mask.
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(attention_mask == 0, 1)
+             if past_key_values:
+                 position_ids = position_ids[:, -1].unsqueeze(-1)
+         else:
+             position_ids = None
+
+         if inputs_embeds is not None and past_key_values is None:
+             model_inputs = {"inputs_embeds": inputs_embeds}
+         else:
+             model_inputs = {"input_ids": input_ids}
+
+         model_inputs.update({
+             "past_key_values": past_key_values,
+             "use_cache": kwargs.get("use_cache"),
+             "position_ids": position_ids,
+             "attention_mask": attention_mask,
+             "token_type_ids": token_type_ids,
+         })
+
+         return model_inputs
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         token_type_ids: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         encoder_attention_mask: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.transformer(
+             input_ids,
+             past_key_values=past_key_values,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             encoder_hidden_states=encoder_hidden_states,
+             encoder_attention_mask=encoder_attention_mask,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = transformer_outputs[0]
+         lm_logits = self.lm_head(hidden_states)
+
+         loss = None
+         if labels is not None:
+             # Shift so that the logit at position i is scored against token i + 1.
+             labels = labels.to(lm_logits.device)
+             shift_logits = lm_logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             loss_fct = CrossEntropyLoss()
+             loss = loss_fct(
+                 shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
+             )
+
+         if not return_dict:
+             output = (lm_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=lm_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
+     @staticmethod
+     def _reorder_cache(
+         past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
+     ) -> Tuple[Tuple[torch.Tensor]]:
+         # Reorder cached keys/values to follow beam-search reordering.
+         return tuple(
+             tuple(
+                 past_state.index_select(0, beam_idx.to(past_state.device))
+                 for past_state in layer_past
+             )
+             for layer_past in past_key_values
+         )
+
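+     # Worked example of the label shift in forward() above (values are
+     # illustrative): for labels [t0, t1, t2, t3], shift_logits keeps logit
+     # positions 0..2 and shift_labels keeps [t1, t2, t3], so each sample
+     # contributes seq_len - 1 cross-entropy terms.
+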
+     def chat(
+         self,
+         tokenizer: PreTrainedTokenizer,
+         query: str,
+         history: Optional[HistoryType],
+         system: str = "You are a helpful assistant.",
+         append_history: bool = True,
+         stream: Optional[bool] = _SENTINEL,
+         stop_words_ids: Optional[List[List[int]]] = None,
+         generation_config: Optional[GenerationConfig] = None,
+         **kwargs,
+     ) -> Tuple[str, HistoryType]:
+         generation_config = generation_config if generation_config is not None else self.generation_config
+         assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
+         assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
+         if history is None:
+             history = []
+         if stop_words_ids is None:
+             stop_words_ids = []
+
+         max_window_size = kwargs.get('max_window_size', generation_config.max_window_size)
+         raw_text, context_tokens = make_context(
+             tokenizer,
+             query,
+             history=history,
+             system=system,
+             max_window_size=max_window_size,
+             chat_format=generation_config.chat_format,
+         )
+
+         stop_words_ids.extend(get_stop_words_ids(
+             generation_config.chat_format, tokenizer
+         ))
+         input_ids = torch.tensor([context_tokens]).to(self.device)
+
+         outputs = self.generate(
+             input_ids,
+             stop_words_ids=stop_words_ids,
+             return_dict_in_generate=False,
+             generation_config=generation_config,
+             **kwargs,
+         )
+
+         response = decode_tokens(
+             outputs[0],
+             tokenizer,
+             raw_text_len=len(raw_text),
+             context_length=len(context_tokens),
+             chat_format=generation_config.chat_format,
+             verbose=False,
+             errors='replace'
+         )
+
+         if append_history:
+             history.append((query, response))
+
+         return response, history
+
+     def chat_stream(
+         self,
+         tokenizer: PreTrainedTokenizer,
+         query: str,
+         history: Optional[HistoryType],
+         system: str = "You are a helpful assistant.",
+         stop_words_ids: Optional[List[List[int]]] = None,
+         logits_processor: Optional[LogitsProcessorList] = None,
+         generation_config: Optional[GenerationConfig] = None,
+         **kwargs,
+     ) -> Generator[str, Any, None]:
+         generation_config = generation_config if generation_config is not None else self.generation_config
+         assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
+         if history is None:
+             history = []
+         if stop_words_ids is None:
+             stop_words_ids = []
+
+         max_window_size = kwargs.get('max_window_size', generation_config.max_window_size)
+         raw_text, context_tokens = make_context(
+             tokenizer,
+             query,
+             history=history,
+             system=system,
+             max_window_size=max_window_size,
+             chat_format=generation_config.chat_format,
+         )
+
+         stop_words_ids.extend(get_stop_words_ids(
+             generation_config.chat_format, tokenizer
+         ))
+         if stop_words_ids is not None:
+             stop_words_logits_processor = StopWordsLogitsProcessor(
+                 stop_words_ids=stop_words_ids,
+                 eos_token_id=generation_config.eos_token_id,
+             )
+             if logits_processor is None:
+                 logits_processor = LogitsProcessorList([stop_words_logits_processor])
+             else:
+                 logits_processor.append(stop_words_logits_processor)
+
+         input_ids = torch.tensor([context_tokens]).to(self.device)
+
+         # Streaming relies on the optional transformers_stream_generator package.
+         from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
+         self.__class__.generate_stream = NewGenerationMixin.generate
+         self.__class__.sample_stream = NewGenerationMixin.sample_stream
+         stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)
+
+         def stream_generator():
+             outputs = []
+             for token in self.generate_stream(
+                 input_ids,
+                 return_dict_in_generate=False,
+                 generation_config=stream_config,
+                 logits_processor=logits_processor,
+                 seed=-1,
+                 **kwargs
+             ):
+                 outputs.append(token.item())
+                 # Decode the accumulated tokens so each yield is a full prefix.
+                 yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore', keep_image_special=True)
+
+         return stream_generator()
+
+     def generate(
+         self,
+         inputs: Optional[torch.Tensor] = None,
+         generation_config: Optional[GenerationConfig] = None,
+         logits_processor: Optional[LogitsProcessorList] = None,
+         stopping_criteria: Optional[StoppingCriteriaList] = None,
+         prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
+         synced_gpus: Optional[bool] = None,
+         assistant_model: Optional["PreTrainedModel"] = None,
+         streamer: Optional["BaseStreamer"] = None,
+         **kwargs,
+     ) -> Union[GenerateOutput, torch.LongTensor]:
+         generation_config = generation_config if generation_config is not None else self.generation_config
+
+         # Pop stop_words_ids (not a standard generate() argument) and turn it
+         # into a StopWordsLogitsProcessor before delegating to the base class.
+         stop_words_ids = kwargs.pop("stop_words_ids", None)
+         if stop_words_ids is None:
+             stop_words_ids = getattr(generation_config, "stop_words_ids", None)
+
+         if stop_words_ids is not None:
+             stop_words_logits_processor = StopWordsLogitsProcessor(
+                 stop_words_ids=stop_words_ids,
+                 eos_token_id=generation_config.eos_token_id,
+             )
+             if logits_processor is None:
+                 logits_processor = LogitsProcessorList([stop_words_logits_processor])
+             else:
+                 logits_processor.append(stop_words_logits_processor)
+
+         return super().generate(
+             inputs,
+             generation_config=generation_config,
+             logits_processor=logits_processor,
+             stopping_criteria=stopping_criteria,
+             prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
+             synced_gpus=synced_gpus,
+             assistant_model=assistant_model,
+             streamer=streamer,
+             **kwargs,
+         )
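
A short usage sketch for the classes above (hedged: the checkpoint path and prompt are placeholders, and trust_remote_code=True is assumed because the tokenizer and generation utilities ship with the repo rather than with transformers):

    import torch
    from transformers import AutoTokenizer
    from modeling_qwen2_5_vl import Qwen2_5_VLForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint", trust_remote_code=True)
    model = Qwen2_5_VLForCausalLM.from_pretrained(
        "path/to/checkpoint", device_map="auto", torch_dtype=torch.bfloat16
    ).eval()

    # chat() wraps generate(): it builds a ChatML context via make_context,
    # registers the chat-format stop words, and decodes only the new tokens.
    response, history = model.chat(tokenizer, "Describe this image.", history=None)
    print(response)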