drizzlezyk committed on
Commit 781dcf6 · verified · 1 Parent(s): e33b7e4

Upload modeling_openpangu_dense.py with huggingface_hub

Files changed (1)
  1. modeling_openpangu_dense.py +860 -0
modeling_openpangu_dense.py ADDED
@@ -0,0 +1,860 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from modular_openpangu_dense.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_openpangu_dense.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
9
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
10
+ #
11
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
12
+ # and OPT implementations in this library. It has been modified from its
13
+ # original forms to accommodate minor architectural differences compared
14
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
15
+ #
16
+ # Licensed under the Apache License, Version 2.0 (the "License");
17
+ # you may not use this file except in compliance with the License.
18
+ # You may obtain a copy of the License at
19
+ #
20
+ # http://www.apache.org/licenses/LICENSE-2.0
21
+ #
22
+ # Unless required by applicable law or agreed to in writing, software
23
+ # distributed under the License is distributed on an "AS IS" BASIS,
24
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
25
+ # See the License for the specific language governing permissions and
26
+ # limitations under the License.
27
+
28
+ from typing import Callable, Optional, Union
29
+
30
+ import torch
31
+ import torch.nn.functional as F
32
+ import torch_npu
33
+ from torch_npu.contrib import transfer_to_npu
34
+
35
+ if "910" in torch.npu.get_device_name():
36
+ NPU_ATTN_INFR = True
37
+ print("[INFO] torch_npu detected. Using NPU fused infer attention.")
38
+ else:
39
+ NPU_ATTN_INFR = False
40
+ from einops import rearrange
41
+ from torch import nn
42
+
43
+ from transformers.activations import ACT2FN
44
+ from transformers.cache_utils import Cache, DynamicCache
45
+ from transformers.generation import GenerationMixin
46
+ from transformers.integrations import use_kernel_forward_from_hub
47
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
48
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
49
+ from transformers.modeling_layers import GradientCheckpointingLayer
50
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
51
+ from transformers.modeling_rope_utils import dynamic_rope_update
52
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
53
+ from transformers.processing_utils import Unpack
54
+ from transformers.utils import LossKwargs, auto_docstring, can_return_tuple, logging
55
+
56
+ from .configuration_openpangu_dense import PanguEmbeddedConfig
57
+
58
+
59
+ logger = logging.get_logger(__name__)
60
+
61
+
62
+ def aggregate_hidden_through_time(
63
+ input_hidden, merge_conv, sliding_window=2, decay_coeff=0.5, restore_sliding_window=False, history_cache=None
64
+ ):
65
+ """
66
+ input_hidden.shape = (B, S, H)
67
+ return.shape = (B, S, H)
68
+ """
69
+ B, S, H = input_hidden.shape
70
+
71
+ # concatenate zeros to the left of the first token
72
+ if history_cache is None:
73
+ history_cache = torch.zeros((B, H, sliding_window - 1), device=input_hidden.device, dtype=input_hidden.dtype)
74
+ else:
75
+ history_cache = history_cache.permute(0, 2, 1)
76
+
77
+ conv_input = torch.cat(
78
+ [history_cache, input_hidden.permute(0, 2, 1)], # input_hidden (B, S, H) -> (B, H, S)
79
+ dim=-1,
80
+ )
81
+
82
+ conv_output = merge_conv(conv_input)
83
+ # (B, H, S) -> (B, S, H)
84
+ return conv_output.permute(0, 2, 1)
85
+
86
+
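+ # Minimal shape sketch (sizes here are illustrative assumptions, and the helper name is
+ # hypothetical): the depthwise Conv1d is fed `sliding_window - 1` zero (or cached) frames on
+ # the left, so every position is mixed with its predecessors while (B, S, H) is preserved.
+ def _sketch_aggregate_hidden_through_time():
+     conv = nn.Conv1d(8, 8, kernel_size=2, groups=8, bias=False)  # hidden_size=8, window=2
+     x = torch.randn(2, 5, 8)  # (B, S, H)
+     y = aggregate_hidden_through_time(x, conv, sliding_window=2)
+     assert y.shape == x.shape
+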
87
+ class WindowBuffer:
88
+ def __init__(self, win_size, decay_coeff, use_cache, aggregate_fn):
89
+ self.win_size = win_size
90
+ self.decay_coeff = decay_coeff
91
+ self.use_cache = use_cache
92
+ self.aggregate_fn = aggregate_fn
93
+ self.buffer = None
94
+
95
+ def get_aggregated_hidden(self, hidden_states):
96
+ if not self.use_cache:
97
+ self.buffer = None
98
+ return aggregate_hidden_through_time(hidden_states, self.aggregate_fn, sliding_window=self.win_size)
99
+
100
+ B, S, H = hidden_states.shape
101
+ if S > 1:
102
+ # prefill, generate first token
103
+ win_input = aggregate_hidden_through_time(hidden_states, self.aggregate_fn, sliding_window=self.win_size)
104
+ self.buffer = hidden_states[:, -(self.win_size - 1) :]
105
+ else:
106
+ # decode stage
107
+ win_input = aggregate_hidden_through_time(
108
+ hidden_states, self.aggregate_fn, sliding_window=self.win_size, history_cache=self.buffer
109
+ )
110
+ if self.win_size > 2:
111
+ self.buffer = torch.cat([self.buffer[:, -(self.win_size - 2) :], hidden_states], dim=1)
112
+ else:
113
+ self.buffer = hidden_states
114
+
115
+ return win_input
116
+
117
+
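+ # Minimal usage sketch (all sizes are assumptions, and the helper name is hypothetical): during
+ # prefill the buffer keeps the last `win_size - 1` hidden states; each single-token decode step
+ # then reuses them as the convolution history before appending the new token.
+ def _sketch_window_buffer():
+     conv = nn.Conv1d(8, 8, kernel_size=2, groups=8, bias=False)
+     window_buffer = WindowBuffer(win_size=2, decay_coeff=0.5, use_cache=True, aggregate_fn=conv)
+     prefill = window_buffer.get_aggregated_hidden(torch.randn(1, 4, 8))  # (B, S, H) prompt
+     decode = window_buffer.get_aggregated_hidden(torch.randn(1, 1, 8))   # one generated token
+     assert prefill.shape == (1, 4, 8) and decode.shape == (1, 1, 8)
+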
118
+ @use_kernel_forward_from_hub("RMSNorm")
119
+ class PanguEmbeddedRMSNorm(nn.Module):
120
+ def __init__(self, hidden_size, eps=1e-6):
121
+ """
122
+ PanguEmbeddedRMSNorm is equivalent to T5LayerNorm
123
+ """
124
+ super().__init__()
125
+ self.weight = nn.Parameter(torch.ones(hidden_size))
126
+ self.variance_epsilon = eps
127
+
128
+ def forward(self, hidden_states):
129
+ input_dtype = hidden_states.dtype
130
+ hidden_states = hidden_states.to(torch.float32)
131
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
132
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
133
+ return self.weight * hidden_states.to(input_dtype)
134
+
135
+ def extra_repr(self):
136
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
137
+
138
+
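+ # Worked example (values are assumptions): RMSNorm rescales by the root mean square of the last
+ # dimension, y = weight * x / sqrt(mean(x^2) + eps), without re-centering the mean.
+ def _sketch_rmsnorm():
+     norm = PanguEmbeddedRMSNorm(hidden_size=4, eps=1e-6)
+     x = torch.tensor([[3.0, -3.0, 3.0, -3.0]])
+     y = norm(x)  # rms(x) = 3, so y is approximately x / 3
+     assert torch.allclose(y, x / 3.0, atol=1e-3)
+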
139
+ class PanguEmbeddedRotaryEmbedding(nn.Module):
140
+ def __init__(self, config: PanguEmbeddedConfig, device=None):
141
+ super().__init__()
142
+
143
+ base_dim = config.head_dim
144
+
145
+ rotary_percent = config.rotary_percent
146
+
147
+ dim = base_dim
148
+ if rotary_percent < 1.0:
149
+ dim = int(dim * rotary_percent)
150
+ if dim % 2 != 0:
151
+ dim += 1
152
+
153
+ rotary_base = config.rope_theta
154
+ inv_freq = 1.0 / (rotary_base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
155
+
156
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
157
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
158
+ else:
159
+ self.rope_type = "default"
160
+
161
+ self.max_seq_len_cached = config.max_position_embeddings
162
+ self.original_max_seq_len = config.max_position_embeddings
163
+
164
+ self.config = config
165
+
166
+ self.attention_scaling = 1.0
167
+
168
+ if device is not None:
169
+ inv_freq = inv_freq.to(device)
170
+
171
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
172
+ self.original_inv_freq = self.inv_freq
173
+
174
+ self.dim = dim
175
+
176
+ @torch.no_grad()
177
+ @dynamic_rope_update
178
+ def forward(self, x, position_ids):
179
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
180
+ position_ids_expanded = position_ids[:, None, :].float()
181
+
182
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
183
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
184
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
185
+ emb = torch.cat((freqs, freqs), dim=-1)
186
+ cos = emb.cos() * self.attention_scaling
187
+ sin = emb.sin() * self.attention_scaling
188
+
189
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
190
+
191
+
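+ # Shape sketch (head_dim=8, seq_len=5, and theta=10000.0 are assumptions): with
+ # rotary_percent == 1.0 the module emits cos/sin of shape (B, S, head_dim); concatenating
+ # (freqs, freqs) makes the last dimension the full rotary width expected by
+ # apply_rotary_pos_emb below.
+ def _sketch_rotary_tables():
+     dim, theta = 8, 10000.0
+     inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
+     positions = torch.arange(5, dtype=torch.float32)[None, :]  # (B=1, S=5)
+     freqs = positions[..., None] * inv_freq                    # (1, 5, dim // 2)
+     emb = torch.cat((freqs, freqs), dim=-1)                    # (1, 5, dim)
+     assert emb.cos().shape == (1, 5, dim) and emb.sin().shape == (1, 5, dim)
+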
192
+ class PanguEmbeddedMLP(nn.Module):
193
+ def __init__(self, config):
194
+ super().__init__()
195
+ self.config = config
196
+ self.hidden_size = config.hidden_size
197
+ self.intermediate_size = config.intermediate_size
198
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
199
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
200
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
201
+ self.act_fn = ACT2FN[config.hidden_act]
202
+
203
+ def forward(self, x):
204
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
205
+ return down_proj
206
+
207
+
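+ # Minimal sketch of the gated (SwiGLU-style) MLP with assumed sizes and the "silu" activation:
+ # down_proj(silu(gate_proj(x)) * up_proj(x)), hidden_size -> intermediate_size -> hidden_size.
+ def _sketch_mlp():
+     from types import SimpleNamespace
+     cfg = SimpleNamespace(hidden_size=8, intermediate_size=16, hidden_act="silu")
+     mlp = PanguEmbeddedMLP(cfg)
+     assert mlp(torch.randn(2, 3, 8)).shape == (2, 3, 8)
+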
208
+ def rotate_half(x):
209
+ """Rotates half the hidden dims of the input."""
210
+ x1 = x[..., : x.shape[-1] // 2]
211
+ x2 = x[..., x.shape[-1] // 2 :]
212
+ return torch.cat((-x2, x1), dim=-1)
213
+
214
+
215
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
216
+ """
217
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
218
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
219
+ """
220
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
221
+ if n_rep == 1:
222
+ return hidden_states
223
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
224
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
225
+
226
+
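+ # GQA shape sketch (sizes are assumptions): 2 key/value heads are repeated 4x so that they can
+ # be consumed by 8 query heads.
+ def _sketch_repeat_kv():
+     kv = torch.randn(1, 2, 5, 16)  # (batch, num_key_value_heads, seq_len, head_dim)
+     assert repeat_kv(kv, n_rep=4).shape == (1, 8, 5, 16)
+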
227
+ def eager_attention_forward(
228
+ module: nn.Module,
229
+ query: torch.Tensor,
230
+ key: torch.Tensor,
231
+ value: torch.Tensor,
232
+ attention_mask: Optional[torch.Tensor],
233
+ scaling: float,
234
+ dropout: float = 0.0,
235
+ **kwargs,
236
+ ):
237
+ key_states = repeat_kv(key, module.num_key_value_groups)
238
+ value_states = repeat_kv(value, module.num_key_value_groups)
239
+
240
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
241
+ if attention_mask is not None:
242
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
243
+ attn_weights = attn_weights + causal_mask
244
+
245
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
246
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
247
+ attn_output = torch.matmul(attn_weights, value_states)
248
+ attn_output = attn_output.transpose(1, 2).contiguous()
249
+
250
+ return attn_output, attn_weights
251
+
252
+
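+ # Shape sketch for the eager path (sizes and the stand-in module are assumptions): 8 query heads
+ # attend over 2 repeated key/value heads, and the output comes back as (batch, seq, heads, head_dim).
+ def _sketch_eager_attention():
+     from types import SimpleNamespace
+     module = SimpleNamespace(num_key_value_groups=4, training=False)
+     q = torch.randn(1, 8, 5, 16)  # (batch, num_heads, seq_len, head_dim)
+     k = torch.randn(1, 2, 5, 16)
+     v = torch.randn(1, 2, 5, 16)
+     out, weights = eager_attention_forward(module, q, k, v, None, scaling=16 ** -0.5)
+     assert out.shape == (1, 5, 8, 16) and weights.shape == (1, 8, 5, 5)
+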
253
+ def apply_rotary_pos_emb(
254
+ q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, unsqueeze_dim: int = 1
255
+ ):
256
+ """
257
+ Applies Rotary Position Embedding to the query and key tensors,
258
+ handling cases where rotary_percent < 1.0 by only rotating a subset of the dimensions.
259
+
260
+ ATTENTION: This version assumes cos/sin tensors are already the full rotation dimension (D_rot),
261
+ consistent with some Megatron/Fusion implementations, rather than the standard HF (D_rot/2) format.
262
+
263
+ Args:
264
+ q (`torch.Tensor`): The query tensor [Batch, Heads, Seq, Head_Dim].
265
+ k (`torch.Tensor`): The key tensor [Batch, Heads, Seq, Head_Dim].
266
+ cos (`torch.Tensor`): The cosine part of the rotary embedding [Batch, Seq, Head_Dim_Rotary]. <--- FULL D_ROT
267
+ sin (`torch.Tensor`): The sine part of the rotary embedding [Batch, Seq, Head_Dim_Rotary]. <--- FULL D_ROT
268
+ unsqueeze_dim (`int`, *optional*, defaults to 1): The dimension to unsqueeze cos/sin for broadcasting (usually the Heads dimension).
269
+
270
+ Returns:
271
+ `tuple(torch.Tensor)` comprising of the rotated query and key tensors.
272
+ """
273
+ rot_dim = cos.shape[-1]
274
+
275
+ q_rope, q_pass = q[..., :rot_dim], q[..., rot_dim:]
276
+ k_rope, k_pass = k[..., :rot_dim], k[..., rot_dim:]
277
+
278
+ cos_broad = cos.unsqueeze(unsqueeze_dim) # [B, 1, S, Dim]
279
+ sin_broad = sin.unsqueeze(unsqueeze_dim) # [B, 1, S, Dim]
280
+
281
+ q_embed_rope = (q_rope * cos_broad) + (rotate_half(q_rope) * sin_broad)
282
+ k_embed_rope = (k_rope * cos_broad) + (rotate_half(k_rope) * sin_broad)
283
+
284
+ q_embed = torch.cat((q_embed_rope, q_pass), dim=-1)
285
+ k_embed = torch.cat((k_embed_rope, k_pass), dim=-1)
286
+
287
+ return q_embed, k_embed
288
+
289
+
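+ # Partial-rotary sketch (sizes are assumptions): cos/sin of width 8 rotate only the first 8 of 16
+ # head dimensions; with cos=1 and sin=0 the rotation is the identity, so the inputs are returned
+ # unchanged and the trailing "pass-through" dimensions are never touched.
+ def _sketch_partial_rope():
+     q = torch.randn(1, 8, 5, 16)  # (batch, heads, seq_len, head_dim)
+     k = torch.randn(1, 2, 5, 16)
+     cos = torch.ones(1, 5, 8)     # identity rotation
+     sin = torch.zeros(1, 5, 8)
+     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
+     assert torch.equal(q_rot, q) and torch.equal(k_rot, k)
+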
290
+ class PanguEmbeddedAttention(nn.Module):
291
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
292
+
293
+ def __init__(self, config: PanguEmbeddedConfig, layer_idx: int):
294
+ super().__init__()
295
+ self.config = config
296
+ self.layer_idx = layer_idx
297
+ self.head_dim = config.head_dim
298
+ self.num_key_value_groups = config.num_key_value_groups
299
+ self.scaling = self.head_dim**-0.5
300
+ self.attention_dropout = config.attention_dropout
301
+ self.is_causal = True
302
+
303
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.bias)
304
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.bias)
305
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.bias)
306
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.bias)
307
+ if layer_idx is None:
308
+ logger.warning_once(
309
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
310
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
311
+ "when creating this class."
312
+ )
313
+
314
+ self.hidden_size = config.hidden_size
315
+ self.num_heads = config.num_attention_heads
316
+ self.qk_nope_dim = config.qk_nope_dim
317
+ self.qk_rope_dim = config.qk_rope_dim
318
+ self.v_channels = config.v_channels
319
+ self.num_key_value_heads = config.num_key_value_heads
320
+
321
+ self.max_position_embeddings = config.max_position_embeddings
322
+ self.rope_theta = config.rope_theta
323
+ self.attn_groupnorm = config.attn_groupnorm
324
+ self.attn_elementwise_gate = config.attn_elementwise_gate
325
+
326
+ self.param_sink_number = config.param_sink_number
327
+ self.param_sink_with_value = config.param_sink_with_value
328
+ self.num_attention_heads = config.num_attention_heads
329
+
330
+ self.rotary_emb = PanguEmbeddedRotaryEmbedding(config=config)
331
+ if self.param_sink_number > 0:
332
+ self.param_sink_query = torch.zeros(
333
+ (self.param_sink_number, self.num_heads, self.head_dim), dtype=config.torch_dtype
334
+ )
335
+
336
+ self.param_sink_num_heads_per_partition = self.num_key_value_heads
337
+ self.param_sink_key = torch.nn.Parameter(
338
+ torch.empty(
339
+ (self.param_sink_number, self.param_sink_num_heads_per_partition, self.head_dim),
340
+ dtype=config.torch_dtype,
341
+ )
342
+ )
343
+ if self.param_sink_with_value:
344
+ self.param_sink_value = torch.nn.Parameter(
345
+ torch.empty(
346
+ (self.param_sink_number, self.param_sink_num_heads_per_partition, self.v_channels),
347
+ dtype=config.torch_dtype,
348
+ )
349
+ )
350
+ else:
351
+ self.param_sink_value = torch.zeros(
352
+ (self.param_sink_number, self.param_sink_num_heads_per_partition, self.v_channels),
353
+ dtype=config.torch_dtype,
354
+ )
355
+
356
+ if self.attn_groupnorm:
357
+ self.groupnorm = PanguEmbeddedRMSNorm(hidden_size=self.head_dim, eps=config.rms_norm_eps)
358
+
359
+ if self.attn_elementwise_gate:
360
+ self.attention_gate = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
361
+
362
+ def forward(
363
+ self,
364
+ hidden_states: torch.Tensor,
365
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
366
+ attention_mask: Optional[torch.Tensor] = None,
367
+ position_ids: Optional[torch.LongTensor] = None,
368
+ past_key_value: Optional[Cache] = None,
369
+ output_attentions: bool = False,
370
+ use_cache: bool = False,
371
+ cache_position: Optional[torch.LongTensor] = None,
372
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
373
+ attention_interface: Callable = eager_attention_forward
374
+ if self.config._attn_implementation != "eager":
375
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
376
+ input_shape = hidden_states.shape[:-1]
377
+ hidden_shape = (*input_shape, -1, self.head_dim)
378
+
379
+ bsz, q_len, _ = hidden_states.size()
380
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
381
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
382
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
383
+
384
+ if self.attn_elementwise_gate:
385
+ gate_score = self.attention_gate(hidden_states)
386
+ else:
387
+ gate_score = None
388
+
389
+ kv_seq_len = q_len
390
+ is_prefill = past_key_value is None or past_key_value.get_usable_length(kv_seq_len, self.layer_idx) == 0
391
+ if past_key_value is not None:
392
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
393
+
394
+ cos, sin = position_embeddings
395
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
396
+
397
+ if past_key_value is not None:
398
+ if self.layer_idx is None:
399
+ raise ValueError(
400
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
401
+ "for auto-regressive decoding with key_states/v caching, please make sure to initialize the attention class "
402
+ "with a layer index."
403
+ )
404
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
405
+
406
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
407
+
408
+ kv_seq_len = key_states.shape[-2]
409
+
410
+ if self.param_sink_number > 0:
411
+ batch_size = query_states.shape[0]
412
+ if is_prefill:
413
+ param_sink_query = (
414
+ self.param_sink_query.permute(1, 0, 2)
415
+ .unsqueeze(0)
416
+ .expand(batch_size, -1, -1, -1)
417
+ .to(query_states.device)
418
+ )
419
+ query_states = torch.cat([param_sink_query, query_states], dim=2)
420
+ q_len += self.param_sink_number
421
+
422
+ param_sink_key = (
423
+ self.param_sink_key.permute(1, 0, 2).unsqueeze(0).expand(batch_size, -1, -1, -1).to(key_states.device)
424
+ )
425
+ param_sink_value = (
426
+ self.param_sink_value.permute(1, 0, 2)
427
+ .unsqueeze(0)
428
+ .expand(batch_size, -1, -1, -1)
429
+ .to(value_states.device)
430
+ )
431
+
432
+ key_states = torch.cat([param_sink_key, key_states], dim=2)
433
+ value_states = torch.cat([param_sink_value, value_states], dim=2)
434
+
435
+ kv_seq_len += self.param_sink_number
436
+
437
+ if not self.training and NPU_ATTN_INFR:
438
+ q_len_current = query_states.shape[2]
439
+ kv_len_current = key_states.shape[2]
440
+ param_sink_number = self.config.param_sink_number
441
+
442
+ # Causal Mask
443
+ if is_prefill:
444
+ causal_mask_npu = (
445
+ torch.triu(torch.ones([q_len_current, kv_len_current]), diagonal=1)
446
+ .bool()
447
+ .unsqueeze(0)
448
+ .unsqueeze(0)
449
+ .to(query_states.device)
450
+ )
451
+ original_mask = ~attention_mask.bool()
452
+ expanded_mask = F.pad(
453
+ original_mask.float(), (param_sink_number, 0, param_sink_number, 0), mode="constant", value=1.0
454
+ ).bool()
455
+ attention_mask_npu = (expanded_mask) & (~causal_mask_npu)
456
+ else:
457
+ original_mask = ~attention_mask.bool()
458
+ attention_mask_npu = F.pad(
459
+ original_mask.float(), (param_sink_number, 0, 0, 0), mode="constant", value=1.0
460
+ ).bool()
461
+
462
+ attention_mask_npu = ~attention_mask_npu.bool()
463
+
464
+ attn_output, _ = torch_npu.npu_fused_infer_attention_score(
465
+ query_states,
466
+ key_states,
467
+ value_states,
468
+ num_heads=self.num_heads,
469
+ num_key_value_heads=self.num_key_value_heads,
470
+ input_layout="BNSD",
471
+ atten_mask=attention_mask_npu,
472
+ scale=self.scaling,
473
+ )
474
+ attn_output = attn_output.transpose(1, 2)  # (bsz, q_len, num_heads, head_dim)
475
+ attn_weights = None
476
+ else:
477
+ attn_output, attn_weights = attention_interface(
478
+ self,
479
+ query_states,
480
+ key_states,
481
+ value_states,
482
+ attention_mask,
483
+ dropout=0.0 if not self.training else self.attention_dropout,
484
+ scaling=self.scaling,
485
+ sliding_window=getattr(self, "sliding_window", None),  # not set in __init__; None means full attention
486
+ position_ids=position_ids,
487
+ )
488
+
489
+ if self.param_sink_number > 0 and is_prefill:
490
+ # (bsz, q_len_original, hidden_dim)
491
+ attn_output = attn_output[:, self.param_sink_number :, :]
492
+
493
+ if self.attn_groupnorm:
494
+ attn_output = self.groupnorm(attn_output)
495
+ if self.attn_elementwise_gate:
496
+ core_attn_out_reshaped = rearrange(attn_output, "b s h d -> b s (h d)", h=self.num_attention_heads)
497
+ core_attn_out_reshaped = core_attn_out_reshaped * F.sigmoid(gate_score)
498
+ attn_output = rearrange(core_attn_out_reshaped, "b s (h d) -> b s h d", h=self.num_attention_heads)
499
+
500
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
501
+ attn_output = self.o_proj(attn_output)
502
+
503
+ if not output_attentions:
504
+ attn_weights = None
505
+
506
+ return attn_output, attn_weights, past_key_value
507
+
508
+
509
+ class PanguEmbeddedDecoderLayer(GradientCheckpointingLayer):
510
+ def __init__(self, config: PanguEmbeddedConfig, layer_idx: int):
511
+ super().__init__()
512
+ self.hidden_size = config.hidden_size
513
+ self.self_attn = PanguEmbeddedAttention(config=config, layer_idx=layer_idx)
514
+ self.mlp = PanguEmbeddedMLP(config)
515
+ self.input_layernorm = PanguEmbeddedRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
516
+ self.post_attention_layernorm = PanguEmbeddedRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
517
+ self.attention_type = config.layer_types[layer_idx]
518
+ if layer_idx == 0 or layer_idx == config.num_hidden_layers - 1:
519
+ self.start_end = True
520
+ else:
521
+ self.start_end = False
522
+ if self.start_end:
523
+ self.router_sliding_window = config.router_sliding_window
524
+ self.router_win_decay = config.router_win_decay
525
+ self.merge_conv = torch.nn.Conv1d(
526
+ config.hidden_size,
527
+ config.hidden_size,
528
+ self.router_sliding_window,
529
+ groups=config.hidden_size,
530
+ bias=False,
531
+ )
532
+ self.window_buffer = WindowBuffer(
533
+ self.router_sliding_window, self.router_win_decay, True, self.merge_conv.forward
534
+ )
535
+
536
+ def forward(
537
+ self,
538
+ hidden_states: torch.Tensor,
539
+ attention_mask: Optional[torch.Tensor] = None,
540
+ position_ids: Optional[torch.LongTensor] = None,
541
+ past_key_value: Optional[Cache] = None,
542
+ output_attentions: Optional[bool] = False,
543
+ use_cache: Optional[bool] = False,
544
+ cache_position: Optional[torch.LongTensor] = None,
545
+ **kwargs: Unpack[FlashAttentionKwargs],
546
+ ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
547
+ residual = hidden_states
548
+ hidden_states = self.input_layernorm(hidden_states)
549
+
550
+ # Self Attention
551
+ hidden_states, self_attn_weights, _ = self.self_attn(
552
+ hidden_states=hidden_states,
553
+ attention_mask=attention_mask,
554
+ position_ids=position_ids,
555
+ past_key_value=past_key_value,
556
+ output_attentions=output_attentions,
557
+ use_cache=use_cache,
558
+ cache_position=cache_position,
559
+ **kwargs,
560
+ )
561
+ hidden_states = residual + hidden_states
562
+
563
+ # Fully Connected
564
+ residual = hidden_states
565
+
566
+ if self.start_end and self.router_sliding_window:
567
+ win_input = self.window_buffer.get_aggregated_hidden(hidden_states)
568
+ else:
569
+ win_input = hidden_states
570
+
571
+ hidden_states = self.post_attention_layernorm(win_input)
572
+
573
+ hidden_states = self.mlp(hidden_states)
574
+ hidden_states = residual + hidden_states
575
+
576
+ outputs = (hidden_states,)
577
+ if output_attentions:
578
+ outputs += (self_attn_weights,)
579
+
580
+ return outputs
581
+
582
+
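+ # Data-flow sketch of one decoder layer (pre-norm, residual around both sub-blocks; the window
+ # merge only applies to the first and last layers):
+ #
+ #     h = x + Attention(RMSNorm(x))
+ #     m = WindowBuffer(h) if first/last layer else h
+ #     y = h + MLP(RMSNorm(m))
+ #
+ # Note that the second residual adds `h` (the un-merged stream), not `m`.
+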
583
+ @auto_docstring
584
+ class PanguEmbeddedPreTrainedModel(PreTrainedModel):
585
+ config_class = PanguEmbeddedConfig
586
+ base_model_prefix = "model"
587
+ supports_gradient_checkpointing = True
588
+ _no_split_modules = ["PanguEmbeddedDecoderLayer"]
589
+ _skip_keys_device_placement = ["past_key_values"]
590
+ _supports_flash_attn_3 = True
591
+ _supports_flash_attn_2 = True
592
+ _supports_sdpa = True
593
+ _supports_flex_attn = True
594
+ _supports_cache_class = True
595
+ _supports_quantized_cache = True
596
+ _supports_static_cache = True
597
+ _supports_attention_backend = True
598
+ _keys_to_ignore_on_load_unexpected = [r"model\.layers\.27.*"]
599
+
600
+ def _init_weights(self, module):
601
+ std = self.config.initializer_range
602
+ if isinstance(module, nn.Linear):
603
+ module.weight.data.normal_(mean=0.0, std=std)
604
+ if module.bias is not None:
605
+ module.bias.data.zero_()
606
+ elif isinstance(module, nn.Embedding):
607
+ module.weight.data.normal_(mean=0.0, std=std)
608
+ if module.padding_idx is not None:
609
+ module.weight.data[module.padding_idx].zero_()
610
+ elif isinstance(module, PanguEmbeddedRMSNorm):
611
+ module.weight.data.fill_(1.0)
612
+
613
+
614
+ @auto_docstring
615
+ class PanguEmbeddedModel(PanguEmbeddedPreTrainedModel):
616
+ def __init__(self, config: PanguEmbeddedConfig):
617
+ super().__init__(config)
618
+ self.padding_idx = config.pad_token_id
619
+ self.vocab_size = config.vocab_size
620
+
621
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
622
+ self.layers = nn.ModuleList(
623
+ [PanguEmbeddedDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
624
+ )
625
+ self.rotary_emb = PanguEmbeddedRotaryEmbedding(config=config)
626
+ self.gradient_checkpointing = False
627
+ self.norms = nn.ModuleList(
628
+ [
629
+ PanguEmbeddedRMSNorm(config.hidden_size, eps=config.rms_norm_eps),
630
+ PanguEmbeddedRMSNorm(config.hidden_size, eps=config.rms_norm_eps),
631
+ ]
632
+ )
633
+
634
+ # Initialize weights and apply final processing
635
+ self.post_init()
636
+
637
+ def get_input_embeddings(self):
638
+ return self.embed_tokens
639
+
640
+ def set_input_embeddings(self, value):
641
+ self.embed_tokens = value
642
+
643
+ @can_return_tuple
644
+ @auto_docstring
645
+ def forward(
646
+ self,
647
+ input_ids: Optional[torch.LongTensor] = None,
648
+ attention_mask: Optional[torch.Tensor] = None,
649
+ position_ids: Optional[torch.LongTensor] = None,
650
+ past_key_values: Optional[Cache] = None,
651
+ inputs_embeds: Optional[torch.FloatTensor] = None,
652
+ use_cache: Optional[bool] = None,
653
+ output_attentions: Optional[bool] = None,
654
+ output_hidden_states: Optional[bool] = None,
655
+ cache_position: Optional[torch.LongTensor] = None,
656
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
657
+ ) -> BaseModelOutputWithPast:
658
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
659
+ output_hidden_states = (
660
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
661
+ )
662
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
663
+
664
+ if (input_ids is None) ^ (inputs_embeds is not None):
665
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
666
+
667
+ if self.gradient_checkpointing and self.training and use_cache:
668
+ logger.warning_once(
669
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
670
+ )
671
+ use_cache = False
672
+
673
+ if not isinstance(past_key_values, (type(None), Cache)):
674
+ raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
675
+ if inputs_embeds is None:
676
+ inputs_embeds = self.embed_tokens(input_ids)
677
+
678
+ if use_cache and past_key_values is None:
679
+ past_key_values = DynamicCache()
680
+
681
+ if cache_position is None:
682
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
683
+ cache_position = torch.arange(
684
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
685
+ )
686
+
687
+ if position_ids is None:
688
+ position_ids = cache_position.unsqueeze(0)
689
+
690
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
691
+ mask_kwargs = {
692
+ "config": self.config,
693
+ "input_embeds": inputs_embeds,
694
+ "attention_mask": attention_mask,
695
+ "cache_position": cache_position,
696
+ "past_key_values": past_key_values,
697
+ "position_ids": position_ids,
698
+ }
699
+ causal_mask_mapping = {
700
+ "full_attention": create_causal_mask(**mask_kwargs),
701
+ "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
702
+ }
703
+
704
+ hidden_states = inputs_embeds
705
+
706
+ # create position embeddings to be shared across the decoder layers
707
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
708
+
709
+ # decoder layers
710
+ all_hidden_states = () if output_hidden_states else None
711
+ all_self_attns = () if output_attentions else None
712
+
713
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
714
+ if output_hidden_states:
715
+ all_hidden_states += (hidden_states,)
716
+
717
+ layer_outputs = decoder_layer(
718
+ hidden_states,
719
+ attention_mask=causal_mask_mapping[decoder_layer.attention_type],
720
+ position_ids=position_ids,
721
+ past_key_value=past_key_values,
722
+ output_attentions=output_attentions,
723
+ use_cache=use_cache,
724
+ cache_position=cache_position,
725
+ position_embeddings=position_embeddings,
726
+ **flash_attn_kwargs,
727
+ )
728
+
729
+ hidden_states = layer_outputs[0]
730
+
731
+ if output_attentions:
732
+ all_self_attns += (layer_outputs[1],)
733
+
734
+ hidden_states = self.norms[0](hidden_states)
735
+
736
+ # add hidden states from the last decoder layer
737
+ if output_hidden_states:
738
+ all_hidden_states += (hidden_states,)
739
+
740
+ return BaseModelOutputWithPast(
741
+ last_hidden_state=hidden_states,
742
+ past_key_values=past_key_values if use_cache else None,
743
+ hidden_states=all_hidden_states,
744
+ attentions=all_self_attns,
745
+ )
746
+
747
+
748
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
749
+
750
+
751
+ @auto_docstring
752
+ class PanguEmbeddedForCausalLM(PanguEmbeddedPreTrainedModel, GenerationMixin):
753
+ _tied_weights_keys = ["lm_head.weight"]
754
+ _tp_plan = {"lm_head": "colwise_rep"}
755
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
756
+
757
+ def __init__(self, config):
758
+ super().__init__(config)
759
+ self.model = PanguEmbeddedModel(config)
760
+ self.vocab_size = config.vocab_size
761
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
762
+
763
+ # Initialize weights and apply final processing
764
+ self.post_init()
765
+
766
+ def get_input_embeddings(self):
767
+ return self.model.embed_tokens
768
+
769
+ def set_input_embeddings(self, value):
770
+ self.model.embed_tokens = value
771
+
772
+ def get_output_embeddings(self):
773
+ return self.lm_head
774
+
775
+ def set_output_embeddings(self, new_embeddings):
776
+ self.lm_head = new_embeddings
777
+
778
+ def set_decoder(self, decoder):
779
+ self.model = decoder
780
+
781
+ def get_decoder(self):
782
+ return self.model
783
+
784
+ @can_return_tuple
785
+ @auto_docstring
786
+ def forward(
787
+ self,
788
+ input_ids: Optional[torch.LongTensor] = None,
789
+ attention_mask: Optional[torch.Tensor] = None,
790
+ position_ids: Optional[torch.LongTensor] = None,
791
+ past_key_values: Optional[Cache] = None,
792
+ inputs_embeds: Optional[torch.FloatTensor] = None,
793
+ labels: Optional[torch.LongTensor] = None,
794
+ use_cache: Optional[bool] = None,
795
+ output_attentions: Optional[bool] = None,
796
+ output_hidden_states: Optional[bool] = None,
797
+ cache_position: Optional[torch.LongTensor] = None,
798
+ logits_to_keep: Union[int, torch.Tensor] = 0,
799
+ **kwargs: Unpack[KwargsForCausalLM],
800
+ ) -> CausalLMOutputWithPast:
801
+ r"""
802
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
803
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
804
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
805
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
806
+
807
+ Example:
808
+
809
+ ```python
810
+ >>> from transformers import AutoTokenizer, PanguEmbeddedForCausalLM
811
+
812
+ >>> model = PanguEmbeddedForCausalLM.from_pretrained("meta-PanguEmbedded/PanguEmbedded-2-7b-hf")
813
+ >>> tokenizer = AutoTokenizer.from_pretrained("meta-PanguEmbedded/PanguEmbedded-2-7b-hf")
814
+
815
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
816
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
817
+
818
+ >>> # Generate
819
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
820
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
821
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
822
+ ```"""
823
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
824
+ output_hidden_states = (
825
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
826
+ )
827
+
828
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
829
+ outputs: BaseModelOutputWithPast = self.model(
830
+ input_ids=input_ids,
831
+ attention_mask=attention_mask,
832
+ position_ids=position_ids,
833
+ past_key_values=past_key_values,
834
+ inputs_embeds=inputs_embeds,
835
+ use_cache=use_cache,
836
+ output_attentions=output_attentions,
837
+ output_hidden_states=output_hidden_states,
838
+ cache_position=cache_position,
839
+ **kwargs,
840
+ )
841
+
842
+ hidden_states = outputs.last_hidden_state
843
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
844
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
845
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
846
+
847
+ loss = None
848
+ if labels is not None:
849
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
850
+
851
+ return CausalLMOutputWithPast(
852
+ loss=loss,
853
+ logits=logits,
854
+ past_key_values=outputs.past_key_values,
855
+ hidden_states=outputs.hidden_states,
856
+ attentions=outputs.attentions,
857
+ )
858
+
859
+
860
+ __all__ = ["PanguEmbeddedForCausalLM", "PanguEmbeddedModel", "PanguEmbeddedPreTrainedModel"]
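
A minimal loading sketch for this remote-code checkpoint (the repository id below is a placeholder, not a real repo; `trust_remote_code=True` is required because the `PanguEmbedded*` classes live in this file rather than in `transformers`):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    repo_id = "<org>/<openPangu-Embedded-repo>"  # placeholder id, replace with the actual repository
    tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

    inputs = tokenizer("Hey, are you conscious?", return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=16)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))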