ivanfioravanti committed
Commit bd458af · verified · 1 Parent(s): 7cff04a

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33   *.zip filter=lfs diff=lfs merge=lfs -text
34   *.zst filter=lfs diff=lfs merge=lfs -text
35   *tfevents* filter=lfs diff=lfs merge=lfs -text
36 + tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,38 @@
+ ---
+ license: mit
+ language:
+ - en
+ pipeline_tag: text-generation
+ tags:
+ - mlx
+ base_model: inclusionAI/Ling-2.6-flash
+ library_name: mlx
+ ---
+
+ # mlx-community/Ling-2.6-flash-mlx-4bit
+
+ This model [mlx-community/Ling-2.6-flash-mlx-4bit](https://huggingface.co/mlx-community/Ling-2.6-flash-mlx-4bit) was
+ converted to MLX format from [inclusionAI/Ling-2.6-flash](https://huggingface.co/inclusionAI/Ling-2.6-flash)
+ using mlx-lm version **0.31.3**.
+
+ ## Use with mlx
+
+ ```bash
+ pip install mlx-lm
+ ```
+
+ ```python
+ from mlx_lm import load, generate
+
+ model, tokenizer = load("mlx-community/Ling-2.6-flash-mlx-4bit")
+
+ prompt = "hello"
+
+ if tokenizer.chat_template is not None:
+     messages = [{"role": "user", "content": prompt}]
+     prompt = tokenizer.apply_chat_template(
+         messages, add_generation_prompt=True, return_dict=False,
+     )
+
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
+ ```
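The same calls extend to multi-turn chat. The following is a minimal sketch reusing only `load`, `apply_chat_template`, and `generate` from the README example; the `max_tokens` argument is an assumption about `mlx_lm.generate`'s optional parameters.

```python
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/Ling-2.6-flash-mlx-4bit")

# Keep the running conversation in `messages` and re-apply the chat template each turn.
messages = [{"role": "user", "content": "Explain what a Mixture-of-Experts layer is in two sentences."}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_dict=False)
reply = generate(model, tokenizer, prompt=prompt, max_tokens=256)  # max_tokens: assumed optional kwarg

messages += [
    {"role": "assistant", "content": reply},
    {"role": "user", "content": "Now relate that to this model's 256 routed experts."},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_dict=False)
print(generate(model, tokenizer, prompt=prompt, max_tokens=256))
```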
chat_template.jinja ADDED
@@ -0,0 +1,97 @@
1
+ {% set thinking_option = 'off' %}
2
+ {{- '<role>SYSTEM</role>' }}
3
+ {%- if tools %}
4
+ {%- if messages[0].role == 'system' %}
5
+ {{- messages[0].content + '\n' }}
6
+ {%- endif %}
7
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
8
+ {%- for tool in tools %}
9
+ {{- "\n" }}
10
+ {{- tool | tojson }}
11
+ {%- endfor %}
12
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>\n" }}
13
+ {{- 'detailed thinking ' + thinking_option + '<|role_end|>' }}
14
+ {%- else %}
15
+ {%- if messages[0].role == 'system' %}
16
+ {%- if 'detailed thinking on' in messages[0].content or 'detailed thinking off' in messages[0].content %}
17
+ {{- messages[0].content + '<|role_end|>' }}
18
+ {%- else %}
19
+ {{- messages[0].content + '\n' }}
20
+ {{- 'detailed thinking ' + thinking_option + '<|role_end|>' }}
21
+ {%- endif %}
22
+ {% else %}
23
+ {{- 'detailed thinking ' + thinking_option + '<|role_end|>' }}
24
+ {%- endif %}
25
+ {%- endif %}
26
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
27
+ {%- for message in messages[::-1] %}
28
+ {%- set index = (messages|length - 1) - loop.index0 %}
29
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
30
+ {%- set ns.multi_step_tool = false %}
31
+ {%- set ns.last_query_index = index %}
32
+ {%- endif %}
33
+ {%- endfor %}
34
+ {%- for message in messages %}
35
+ {%- if message.content is string %}
36
+ {%- set content = message.content %}
37
+ {%- else %}
38
+ {%- set content = '' %}
39
+ {%- endif %}
40
+ {%- if message.role == "user" %}
41
+ {{- '<role>HUMAN</role>' + message.content + '<|role_end|>' }}
42
+ {%- elif message.role == "system" and not loop.first %}
43
+ {{- '<role>SYSTEM</role>' + message.content + '<|role_end|>' }}
44
+ {%- elif message.role == "assistant" %}
45
+ {%- set reasoning_content = '' %}
46
+ {%- if message.reasoning_content is string %}
47
+ {%- set reasoning_content = message.reasoning_content %}
48
+ {%- else %}
49
+ {%- if '</think>' in content %}
50
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
51
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
52
+ {%- endif %}
53
+ {%- endif %}
54
+ {%- if loop.index0 > ns.last_query_index %}
55
+ {%- if reasoning_content %}
56
+ {{- '<role>ASSISTANT</role>' + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
57
+ {%- else %}
58
+ {{- '<role>ASSISTANT</role>' + content }}
59
+ {%- endif %}
60
+ {%- else %}
61
+ {{- '<role>ASSISTANT</role>' + content }}
62
+ {%- endif %}
63
+ {%- if message.tool_calls %}
64
+ {%- for tool_call in message.tool_calls %}
65
+ {%- if (loop.first and content) or (not loop.first) %}
66
+ {{- '\n' }}
67
+ {%- endif %}
68
+ {%- if tool_call.function %}
69
+ {%- set tool_call = tool_call.function %}
70
+ {%- endif %}
71
+ {{- '<tool_call>\n{"name": "' }}
72
+ {{- tool_call.name }}
73
+ {{- '", "arguments": ' }}
74
+ {%- if tool_call.arguments is string %}
75
+ {{- tool_call.arguments }}
76
+ {%- else %}
77
+ {{- tool_call.arguments | tojson }}
78
+ {%- endif %}
79
+ {{- '}\n</tool_call>' }}
80
+ {%- endfor %}
81
+ {%- endif %}
82
+ {{- '<|role_end|>' }}
83
+ {%- elif message.role == "tool" %}
84
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
85
+ {{- '<role>OBSERVATION</role>' }}
86
+ {%- endif %}
87
+ {{- '\n<tool_response>\n' }}
88
+ {{- content }}
89
+ {{- '\n</tool_response>' }}
90
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
91
+ {{- '<|role_end|>' }}
92
+ {%- endif %}
93
+ {%- endif %}
94
+ {%- endfor %}
95
+ {%- if add_generation_prompt %}
96
+ {{- '<role>ASSISTANT</role>' }}
97
+ {%- endif %}
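The template above wraps each turn in `<role>…</role>` markers (`SYSTEM`, `HUMAN`, `ASSISTANT`, `OBSERVATION`), terminates turns with `<|role_end|>`, injects a `detailed thinking off` system line, strips `<think>…</think>` blocks from earlier assistant turns, and serializes tool calls as `<tool_call>` JSON. A quick way to see the resulting prompt string is to render the file directly with Jinja2; this is an inspection-only sketch (in practice `tokenizer.apply_chat_template` performs the rendering inside a sandboxed Jinja environment):

```python
# Render chat_template.jinja directly to inspect the prompt format (sketch).
from jinja2 import Environment

template = Environment().from_string(open("chat_template.jinja").read())

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "hello"},
]
# Expect roughly:
# <role>SYSTEM</role>...detailed thinking off<|role_end|><role>HUMAN</role>hello<|role_end|><role>ASSISTANT</role>
print(template.render(messages=messages, tools=None, add_generation_prompt=True))
```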
config.json ADDED
@@ -0,0 +1,329 @@
1
+ {
2
+ "architectures": [
3
+ "BailingMoeV2_5ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_bailing_moe_v2_5.BailingMoeV2_5Config",
8
+ "AutoModel": "modeling_bailing_moe_v2_5.BailingMoeV2_5Model",
9
+ "AutoModelForCausalLM": "modeling_bailing_moe_v2_5.BailingMoeV2_5ForCausalLM"
10
+ },
11
+ "embedding_dropout": 0.0,
12
+ "eos_token_id": [
13
+ 156892,
14
+ 156895
15
+ ],
16
+ "first_k_dense_replace": 1,
17
+ "group_norm_size": 4,
18
+ "head_dim": 128,
19
+ "hidden_act": "silu",
20
+ "hidden_size": 4096,
21
+ "initializer_range": 0.02,
22
+ "intermediate_size": 9216,
23
+ "kv_lora_rank": 512,
24
+ "layer_group_size": 8,
25
+ "linear_silu": false,
26
+ "max_position_embeddings": 131072,
27
+ "max_window_layers": 20,
28
+ "model_type": "bailing_hybrid",
29
+ "moe_intermediate_size": 1024,
30
+ "moe_router_enable_expert_bias": true,
31
+ "moe_shared_expert_intermediate_size": 1024,
32
+ "mtp_loss_scaling_factor": 0,
33
+ "n_group": 8,
34
+ "num_attention_heads": 32,
35
+ "num_experts": 256,
36
+ "num_experts_per_tok": 8,
37
+ "num_hidden_layers": 32,
38
+ "num_key_value_heads": 32,
39
+ "num_kv_heads_for_linear_attn": 32,
40
+ "num_nextn_predict_layers": 1,
41
+ "num_shared_experts": 1,
42
+ "output_dropout": 0.0,
43
+ "output_router_logits": false,
44
+ "pad_token_id": 156892,
45
+ "partial_rotary_factor": 0.5,
46
+ "q_lora_rank": 1536,
47
+ "qk_head_dim": 192,
48
+ "qk_nope_head_dim": 128,
49
+ "qk_rope_head_dim": 64,
50
+ "quantization": {
51
+ "group_size": 64,
52
+ "bits": 4,
53
+ "mode": "affine",
54
+ "model.layers.1.mlp.gate.gate_proj": {
55
+ "group_size": 64,
56
+ "bits": 8
57
+ },
58
+ "model.layers.2.mlp.gate.gate_proj": {
59
+ "group_size": 64,
60
+ "bits": 8
61
+ },
62
+ "model.layers.3.mlp.gate.gate_proj": {
63
+ "group_size": 64,
64
+ "bits": 8
65
+ },
66
+ "model.layers.4.mlp.gate.gate_proj": {
67
+ "group_size": 64,
68
+ "bits": 8
69
+ },
70
+ "model.layers.5.mlp.gate.gate_proj": {
71
+ "group_size": 64,
72
+ "bits": 8
73
+ },
74
+ "model.layers.6.mlp.gate.gate_proj": {
75
+ "group_size": 64,
76
+ "bits": 8
77
+ },
78
+ "model.layers.7.mlp.gate.gate_proj": {
79
+ "group_size": 64,
80
+ "bits": 8
81
+ },
82
+ "model.layers.8.mlp.gate.gate_proj": {
83
+ "group_size": 64,
84
+ "bits": 8
85
+ },
86
+ "model.layers.9.mlp.gate.gate_proj": {
87
+ "group_size": 64,
88
+ "bits": 8
89
+ },
90
+ "model.layers.10.mlp.gate.gate_proj": {
91
+ "group_size": 64,
92
+ "bits": 8
93
+ },
94
+ "model.layers.11.mlp.gate.gate_proj": {
95
+ "group_size": 64,
96
+ "bits": 8
97
+ },
98
+ "model.layers.12.mlp.gate.gate_proj": {
99
+ "group_size": 64,
100
+ "bits": 8
101
+ },
102
+ "model.layers.13.mlp.gate.gate_proj": {
103
+ "group_size": 64,
104
+ "bits": 8
105
+ },
106
+ "model.layers.14.mlp.gate.gate_proj": {
107
+ "group_size": 64,
108
+ "bits": 8
109
+ },
110
+ "model.layers.15.mlp.gate.gate_proj": {
111
+ "group_size": 64,
112
+ "bits": 8
113
+ },
114
+ "model.layers.16.mlp.gate.gate_proj": {
115
+ "group_size": 64,
116
+ "bits": 8
117
+ },
118
+ "model.layers.17.mlp.gate.gate_proj": {
119
+ "group_size": 64,
120
+ "bits": 8
121
+ },
122
+ "model.layers.18.mlp.gate.gate_proj": {
123
+ "group_size": 64,
124
+ "bits": 8
125
+ },
126
+ "model.layers.19.mlp.gate.gate_proj": {
127
+ "group_size": 64,
128
+ "bits": 8
129
+ },
130
+ "model.layers.20.mlp.gate.gate_proj": {
131
+ "group_size": 64,
132
+ "bits": 8
133
+ },
134
+ "model.layers.21.mlp.gate.gate_proj": {
135
+ "group_size": 64,
136
+ "bits": 8
137
+ },
138
+ "model.layers.22.mlp.gate.gate_proj": {
139
+ "group_size": 64,
140
+ "bits": 8
141
+ },
142
+ "model.layers.23.mlp.gate.gate_proj": {
143
+ "group_size": 64,
144
+ "bits": 8
145
+ },
146
+ "model.layers.24.mlp.gate.gate_proj": {
147
+ "group_size": 64,
148
+ "bits": 8
149
+ },
150
+ "model.layers.25.mlp.gate.gate_proj": {
151
+ "group_size": 64,
152
+ "bits": 8
153
+ },
154
+ "model.layers.26.mlp.gate.gate_proj": {
155
+ "group_size": 64,
156
+ "bits": 8
157
+ },
158
+ "model.layers.27.mlp.gate.gate_proj": {
159
+ "group_size": 64,
160
+ "bits": 8
161
+ },
162
+ "model.layers.28.mlp.gate.gate_proj": {
163
+ "group_size": 64,
164
+ "bits": 8
165
+ },
166
+ "model.layers.29.mlp.gate.gate_proj": {
167
+ "group_size": 64,
168
+ "bits": 8
169
+ },
170
+ "model.layers.30.mlp.gate.gate_proj": {
171
+ "group_size": 64,
172
+ "bits": 8
173
+ },
174
+ "model.layers.31.mlp.gate.gate_proj": {
175
+ "group_size": 64,
176
+ "bits": 8
177
+ }
178
+ },
179
+ "quantization_config": {
180
+ "group_size": 64,
181
+ "bits": 4,
182
+ "mode": "affine",
183
+ "model.layers.1.mlp.gate.gate_proj": {
184
+ "group_size": 64,
185
+ "bits": 8
186
+ },
187
+ "model.layers.2.mlp.gate.gate_proj": {
188
+ "group_size": 64,
189
+ "bits": 8
190
+ },
191
+ "model.layers.3.mlp.gate.gate_proj": {
192
+ "group_size": 64,
193
+ "bits": 8
194
+ },
195
+ "model.layers.4.mlp.gate.gate_proj": {
196
+ "group_size": 64,
197
+ "bits": 8
198
+ },
199
+ "model.layers.5.mlp.gate.gate_proj": {
200
+ "group_size": 64,
201
+ "bits": 8
202
+ },
203
+ "model.layers.6.mlp.gate.gate_proj": {
204
+ "group_size": 64,
205
+ "bits": 8
206
+ },
207
+ "model.layers.7.mlp.gate.gate_proj": {
208
+ "group_size": 64,
209
+ "bits": 8
210
+ },
211
+ "model.layers.8.mlp.gate.gate_proj": {
212
+ "group_size": 64,
213
+ "bits": 8
214
+ },
215
+ "model.layers.9.mlp.gate.gate_proj": {
216
+ "group_size": 64,
217
+ "bits": 8
218
+ },
219
+ "model.layers.10.mlp.gate.gate_proj": {
220
+ "group_size": 64,
221
+ "bits": 8
222
+ },
223
+ "model.layers.11.mlp.gate.gate_proj": {
224
+ "group_size": 64,
225
+ "bits": 8
226
+ },
227
+ "model.layers.12.mlp.gate.gate_proj": {
228
+ "group_size": 64,
229
+ "bits": 8
230
+ },
231
+ "model.layers.13.mlp.gate.gate_proj": {
232
+ "group_size": 64,
233
+ "bits": 8
234
+ },
235
+ "model.layers.14.mlp.gate.gate_proj": {
236
+ "group_size": 64,
237
+ "bits": 8
238
+ },
239
+ "model.layers.15.mlp.gate.gate_proj": {
240
+ "group_size": 64,
241
+ "bits": 8
242
+ },
243
+ "model.layers.16.mlp.gate.gate_proj": {
244
+ "group_size": 64,
245
+ "bits": 8
246
+ },
247
+ "model.layers.17.mlp.gate.gate_proj": {
248
+ "group_size": 64,
249
+ "bits": 8
250
+ },
251
+ "model.layers.18.mlp.gate.gate_proj": {
252
+ "group_size": 64,
253
+ "bits": 8
254
+ },
255
+ "model.layers.19.mlp.gate.gate_proj": {
256
+ "group_size": 64,
257
+ "bits": 8
258
+ },
259
+ "model.layers.20.mlp.gate.gate_proj": {
260
+ "group_size": 64,
261
+ "bits": 8
262
+ },
263
+ "model.layers.21.mlp.gate.gate_proj": {
264
+ "group_size": 64,
265
+ "bits": 8
266
+ },
267
+ "model.layers.22.mlp.gate.gate_proj": {
268
+ "group_size": 64,
269
+ "bits": 8
270
+ },
271
+ "model.layers.23.mlp.gate.gate_proj": {
272
+ "group_size": 64,
273
+ "bits": 8
274
+ },
275
+ "model.layers.24.mlp.gate.gate_proj": {
276
+ "group_size": 64,
277
+ "bits": 8
278
+ },
279
+ "model.layers.25.mlp.gate.gate_proj": {
280
+ "group_size": 64,
281
+ "bits": 8
282
+ },
283
+ "model.layers.26.mlp.gate.gate_proj": {
284
+ "group_size": 64,
285
+ "bits": 8
286
+ },
287
+ "model.layers.27.mlp.gate.gate_proj": {
288
+ "group_size": 64,
289
+ "bits": 8
290
+ },
291
+ "model.layers.28.mlp.gate.gate_proj": {
292
+ "group_size": 64,
293
+ "bits": 8
294
+ },
295
+ "model.layers.29.mlp.gate.gate_proj": {
296
+ "group_size": 64,
297
+ "bits": 8
298
+ },
299
+ "model.layers.30.mlp.gate.gate_proj": {
300
+ "group_size": 64,
301
+ "bits": 8
302
+ },
303
+ "model.layers.31.mlp.gate.gate_proj": {
304
+ "group_size": 64,
305
+ "bits": 8
306
+ }
307
+ },
308
+ "rms_norm_eps": 1e-06,
309
+ "rope_interleave": true,
310
+ "rope_scaling": null,
311
+ "rope_theta": 6000000,
312
+ "rotary_dim": 64,
313
+ "routed_scaling_factor": 2.5,
314
+ "router_dtype": "fp32",
315
+ "score_function": "sigmoid",
316
+ "scoring_func": "sigmoid",
317
+ "seq_aux": true,
318
+ "tie_word_embeddings": false,
319
+ "topk_group": 4,
320
+ "topk_method": "noaux_tc",
321
+ "torch_dtype": "bfloat16",
322
+ "transformers_version": "4.56.2",
323
+ "use_bias": false,
324
+ "use_cache": true,
325
+ "use_qk_norm": true,
326
+ "use_qkv_bias": false,
327
+ "v_head_dim": 128,
328
+ "vocab_size": 157184
329
+ }
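The `quantization` / `quantization_config` blocks record a 4-bit, group-size-64 default with per-module overrides that keep every `model.layers.N.mlp.gate.gate_proj` router projection at 8 bits. A small sketch for listing those overrides from the file shown above (assumes `config.json` is in the working directory):

```python
import json

# List which modules deviate from the default quantization settings in config.json.
with open("config.json") as f:
    cfg = json.load(f)

quant = cfg["quantization"]
print(f"default: {quant['bits']}-bit, group size {quant['group_size']}")

for name, spec in quant.items():
    if isinstance(spec, dict):  # per-module overrides, e.g. the MoE router gate projections
        print(f"{name}: {spec['bits']}-bit, group size {spec['group_size']}")
```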
configuration_bailing_moe_v2_5.py ADDED
@@ -0,0 +1,120 @@
1
+ """Bailing MoE V2 model configuration"""
2
+
3
+ from transformers.configuration_utils import PretrainedConfig
4
+
5
+
6
+ class BailingMoeV2_5Config(PretrainedConfig):
7
+
8
+ def __init__(
9
+ self,
10
+ vocab_size=157184,
11
+ hidden_size=2048,
12
+ intermediate_size=5120,
13
+ num_hidden_layers=20,
14
+ num_attention_heads=16,
15
+ num_key_value_heads=4,
16
+ hidden_act="silu",
17
+ use_qkv_bias=False, # bailing only
18
+ use_bias=False, # bailing only
19
+ rms_norm_eps=1e-06,
20
+ tie_word_embeddings=False, # PretrainedConfig key, here change default value.
21
+ embedding_dropout=0.0,
22
+ attention_dropout=0.0,
23
+ output_dropout=0.0,
24
+ initializer_range=0.02,
25
+ max_position_embeddings=32768,
26
+ rope_theta=600000.0,
27
+ use_cache=True,
28
+ max_window_layers=20,
29
+ rope_scaling=None,
30
+ pad_token_id=156892,
31
+ eos_token_id=156892,
32
+ num_experts=256,
33
+ num_shared_experts=1,
34
+ num_experts_per_tok=8,
35
+ n_group=8,
36
+ topk_group=4,
37
+ moe_intermediate_size=512,
38
+ first_k_dense_replace=1,
39
+ head_dim=128,
40
+ output_router_logits=False,
41
+ use_qk_norm=True,
42
+ num_nextn_predict_layers=0,
43
+ mtp_loss_scaling_factor=0,
44
+ moe_router_enable_expert_bias=True,
45
+ routed_scaling_factor=1.0,
46
+ layer_group_size=5,
47
+ group_norm_size=4,
48
+ linear_silu=False,
49
+ kv_lora_rank=512,
50
+ q_lora_rank=None,
51
+ qk_rope_head_dim=64,
52
+ v_head_dim=128,
53
+ qk_nope_head_dim=128,
54
+ rope_interleave=True,
55
+ partial_rotary_factor=0.5,
56
+ score_function="sigmoid",
57
+ scoring_func="sigmoid",
58
+ seq_aux=True,
59
+ topk_method="noaux_tc",
60
+ router_dtype="fp32",
61
+ **kwargs,
62
+ ):
63
+ self.num_hidden_layers = num_hidden_layers
64
+ self.vocab_size = vocab_size
65
+ self.hidden_size = hidden_size
66
+ self.intermediate_size = intermediate_size
67
+ self.num_attention_heads = num_attention_heads
68
+ self.num_key_value_heads = num_key_value_heads
69
+ self.hidden_act = hidden_act
70
+ self.use_qkv_bias = use_qkv_bias
71
+ self.use_bias = use_bias
72
+ self.rms_norm_eps = rms_norm_eps
73
+ self.embedding_dropout = embedding_dropout
74
+ self.attention_dropout = attention_dropout
75
+ self.output_dropout = output_dropout
76
+ self.num_nextn_predict_layers = num_nextn_predict_layers
77
+ self.mtp_loss_scaling_factor = mtp_loss_scaling_factor
78
+ self.initializer_range = initializer_range
79
+ self.max_position_embeddings = max_position_embeddings
80
+ self.rope_theta = rope_theta
81
+ self.use_cache = use_cache
82
+ self.max_window_layers = max_window_layers
83
+ self.head_dim = head_dim or self.hidden_size // self.num_attention_heads
84
+ self.rope_scaling = rope_scaling
85
+ self.use_qk_norm = use_qk_norm
86
+ self.moe_router_enable_expert_bias = moe_router_enable_expert_bias
87
+ self.routed_scaling_factor = routed_scaling_factor
88
+
89
+ # MoE configs
90
+ self.num_experts = num_experts
91
+ self.num_shared_experts = num_shared_experts
92
+ self.num_experts_per_tok = num_experts_per_tok
93
+ self.n_group = n_group
94
+ self.topk_group = topk_group
95
+ self.moe_intermediate_size = moe_intermediate_size
96
+ self.first_k_dense_replace = first_k_dense_replace
97
+ self.output_router_logits = output_router_logits
98
+
99
+ # Linear configs
100
+ self.layer_group_size = layer_group_size
101
+ self.group_norm_size = group_norm_size
102
+ self.linear_silu = linear_silu
103
+ # mla
104
+ self.kv_lora_rank = kv_lora_rank
105
+ self.q_lora_rank = q_lora_rank
106
+ self.qk_rope_head_dim = qk_rope_head_dim
107
+
108
+ self.score_function = score_function
109
+ self.scoring_func = scoring_func
110
+ self.seq_aux = seq_aux
111
+ self.topk_method = topk_method
112
+ self.v_head_dim = v_head_dim
113
+ self.qk_nope_head_dim = qk_nope_head_dim
114
+ self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
115
+ self.rope_interleave = rope_interleave
116
+ self.router_dtype = router_dtype
117
+ self.partial_rotary_factor = partial_rotary_factor
118
+ super().__init__(
119
+ pad_token_id=pad_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
120
+ )
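Because `auto_map` in config.json points at this class, the checkpoint's values override the defaults listed above when the config is loaded. As a minimal sketch (assuming `transformers` is installed and this file sits in the working directory), the class can also be instantiated directly with a few of the repository's values:

```python
# Minimal sketch: instantiate the shipped config class with a few values from config.json.
from configuration_bailing_moe_v2_5 import BailingMoeV2_5Config

config = BailingMoeV2_5Config(
    hidden_size=4096,
    num_hidden_layers=32,
    num_experts=256,
    num_experts_per_tok=8,
    q_lora_rank=1536,
    kv_lora_rank=512,
)
print(config.qk_head_dim)  # qk_nope_head_dim + qk_rope_head_dim = 192
```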
generation_config.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "bos_token_id": 156891,
3
+ "eos_token_id": [
4
+ 156892,
5
+ 156895
6
+ ],
7
+ "pad_token_id": 156892
8
+ }
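Generation stops on either of the two `eos_token_id` values. To see which special tokens these ids correspond to, one option is to decode them with the repository's tokenizer (a sketch assuming the standard Hugging Face tokenizer API):

```python
from transformers import AutoTokenizer

# Map the generation_config ids back to their special-token strings (sketch).
tok = AutoTokenizer.from_pretrained("mlx-community/Ling-2.6-flash-mlx-4bit")
for token_id in (156891, 156892, 156895):
    print(token_id, tok.convert_ids_to_tokens(token_id))
```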
model-00001-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6800064883fcb31f92cf18d0a2c36facc99ef437f867239b293db4ec92fc3b55
3
+ size 4858938458
model-00002-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd866ab6497d0ac1cc6ca59d0330d0840cd4afa251e8c00f1fdbd4407ddc41ed
3
+ size 4998062930
model-00003-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5fd996e5d6fc6bf9b0f1cd126bece2fd8ea242e1ff8e16a4178640eb27a3f68
3
+ size 4917437144
model-00004-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73b41a924b79538f78e1710bf41285c30f0e08c18d96d3817bdaf869792f82b8
3
+ size 4998063073
model-00005-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53f3397da25902ac2b73b82dba9c3c247b75f6a4b635b8b209135ed2d444bbd3
3
+ size 4998063007
model-00006-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8504fa0d559bc4d8fad533b9ed20efe48310bf206f994f820b940c401a9f1c98
3
+ size 4917437182
model-00007-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e2e105439ffe558b798d44737471d4bbb5166f3402d400734ccc85d90f4777f
3
+ size 4998063013
model-00008-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6922235b46cb4d59bc5812e9ec9951fcff7a5758a26098709e3e8f8ee9625548
3
+ size 4998063111
model-00009-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ce774cbe79f410cc081dcb159192f1a0e42c4d7ab93d11be33346d5a6e66d7f
3
+ size 4917437186
model-00010-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a57b75d239b45a79e8afc365c7a2e928c80eda8c3d2dcf2e1e387f32a9003400
3
+ size 4998063129
model-00011-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b9e04a076dcd63cba005c0e6b793983d969524a9a5478869be59e56d1adc5aa
3
+ size 4998063019
model-00012-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1643455ed6d1d5feb1e466ddb438d9dbcd540671c1823aeddb63162cebc51669
3
+ size 4024441316
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_bailing_moe_v2_5.py ADDED
@@ -0,0 +1,1602 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 Antgroup and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """PyTorch BailingMoE model."""
21
+
22
+ import math
23
+ import warnings
24
+ from typing import List, Optional, Tuple, Union, Callable
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ from torch import nn
29
+
30
+ from transformers.activations import ACT2FN
31
+ from transformers.cache_utils import Cache, DynamicCache
32
+ from transformers.modeling_attn_mask_utils import (
33
+ AttentionMaskConverter,
34
+ _prepare_4d_attention_mask,
35
+ _prepare_4d_causal_attention_mask,
36
+ _prepare_4d_causal_attention_mask_for_sdpa,
37
+ )
38
+ from transformers.modeling_outputs import MoeModelOutputWithPast
39
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
40
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
41
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
42
+ from transformers.utils import (
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from transformers.utils.import_utils import is_torch_fx_available
49
+ from .configuration_bailing_moe_v2_5 import BailingMoeV2_5Config
50
+ from transformers.generation.utils import GenerationMixin
51
+ from dataclasses import dataclass
52
+ from transformers.utils import ModelOutput
53
+ from transformers import DynamicLayer
54
+ from transformers.processing_utils import Unpack
55
+ from transformers.utils import TransformersKwargs
56
+ from transformers.utils.deprecation import deprecate_kwarg
57
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
58
+
59
+ from fla.ops.simple_gla.fused_recurrent import fused_recurrent_simple_gla
60
+ from fla.ops.simple_gla.chunk import chunk_simple_gla
61
+
62
+
63
+ # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
64
+ # It means that the function will not be traced through and simply appear as a node in the graph.
65
+ if is_torch_fx_available():
66
+ if not is_torch_greater_or_equal_than_1_13:
67
+ import torch.fx
68
+
69
+ _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
70
+
71
+
72
+ logger = logging.get_logger(__name__)
73
+
74
+ _CONFIG_FOR_DOC = "BailingMoeV2_5Config"
75
+
76
+
77
+ def roll_tensor(tensor, shifts=-1, dims=-1, fill_value=0):
78
+ """Roll the tensor input along the given dimension(s).
79
+ Inserted elements are set to be 0.0.
80
+ """
81
+ rolled_tensor = torch.roll(tensor, shifts=shifts, dims=dims)
82
+ rolled_tensor.select(dims, shifts).fill_(fill_value)
83
+ return rolled_tensor, rolled_tensor.sum()
84
+
85
+
86
+ @dataclass
87
+ class MoEV2_5CausalLMOutputWithPast(ModelOutput):
88
+ """
89
+ Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden
90
+ states terms, to train a MoE model.
91
+ Args:
92
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
93
+ Language modeling loss (for next-token prediction).
94
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
95
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
96
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
97
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
98
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
99
+ `past_key_values` input) to speed up sequential decoding.
100
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
101
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
102
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
103
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
104
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
105
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
106
+ sequence_length)`.
107
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
108
+ heads.
109
+ z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
110
+ z_loss for the sparse modules.
111
+ aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
112
+ aux_loss for the sparse modules.
113
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
114
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
115
+ Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse
116
+ modules.
117
+ """
118
+
119
+ loss: Optional[torch.FloatTensor] = None
120
+ logits: Optional[torch.FloatTensor] = None
121
+ past_key_values: Optional[Cache] = None
122
+ hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
123
+ attentions: Optional[tuple[torch.FloatTensor, ...]] = None
124
+ z_loss: Optional[torch.FloatTensor] = None
125
+ aux_loss: Optional[torch.FloatTensor] = None
126
+ router_logits: Optional[tuple[torch.FloatTensor]] = None
127
+ mtp_loss: Optional[torch.FloatTensor] = None
128
+ mtp_logits: Optional[tuple[torch.FloatTensor, ...]] = None
129
+
130
+
131
+ class MoeV2_5ModelOutputWithPast(MoeModelOutputWithPast):
132
+
133
+ def __init__(self, mtp_hidden_states=None, **kwargs):
134
+ super().__init__(**kwargs)
135
+ self.mtp_hidden_states = mtp_hidden_states
136
+
137
+
138
+ def _get_unpad_data(attention_mask):
139
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
140
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
141
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
142
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
143
+ return (
144
+ indices,
145
+ cu_seqlens,
146
+ max_seqlen_in_batch,
147
+ )
148
+
149
+
150
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
151
+ warnings.warn(
152
+ "Calling `transformers.models.BailingMoeV2_5.modeling_BailingMoeV2_5._prepare_4d_attention_mask` is deprecated and will be removed in v4.37. Use `transformers.modeling_attn_mask_utils._prepare_4d_attention_mask"
153
+ )
154
+ return _prepare_4d_attention_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
155
+
156
+
157
+ def _make_causal_mask(
158
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
159
+ ):
160
+ warnings.warn(
161
+ "Calling `transformers.models.BailingMoeV2_5.modeling_BailingMoeV2_5._make_causal_mask` is deprecated and will be removed in v4.37. Use `transformers.models.BailingMoeV2_5.modeling_BailingMoeV2_5.AttentionMaskConverter._make_causal_mask"
162
+ )
163
+ return AttentionMaskConverter._make_causal_mask(
164
+ input_ids_shape=input_ids_shape, dtype=dtype, device=device, past_key_values_length=past_key_values_length
165
+ )
166
+
167
+
168
+ class BailingMoeV2_5RMSNorm(nn.Module):
169
+ def __init__(self, hidden_size, eps=1e-6):
170
+ """
171
+ BailingMoeV2_5RMSNorm is equivalent to T5LayerNorm
172
+ """
173
+ super().__init__()
174
+ self.weight = nn.Parameter(torch.ones(hidden_size))
175
+ self.variance_epsilon = eps
176
+
177
+ def forward(self, hidden_states):
178
+ input_dtype = hidden_states.dtype
179
+ hidden_states = hidden_states.to(torch.float32)
180
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
181
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
182
+ return self.weight * hidden_states.to(input_dtype)
183
+
184
+
185
+ class BailingMoeV2_5GroupRMSNorm(nn.Module):
186
+ def __init__(self, hidden_size, group_norm_size, eps=1e-6):
187
+ """
188
+ BailingMoeV2_5GroupRMSNorm applies RMS normalization independently within groups of the hidden dimension
189
+ """
190
+ super().__init__()
191
+ self.weight = nn.Parameter(torch.ones(hidden_size))
192
+ self.group_norm_size = group_norm_size
193
+ assert hidden_size % group_norm_size == 0, "hidden_size must be divisible by group_norm_size"
194
+ self.variance_epsilon = eps
195
+
196
+ def forward(self, hidden_states):
197
+ input_dtype = hidden_states.dtype
198
+ input_shape = hidden_states.size()
199
+ group_input_shape = input_shape[:-1] + (self.group_norm_size, input_shape[-1] // self.group_norm_size)
200
+ hidden_states = hidden_states.view(group_input_shape)
201
+ hidden_states = hidden_states.to(torch.float32)
202
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
203
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
204
+ return self.weight * hidden_states.to(input_dtype).view(input_shape)
205
+
206
+
207
+ ALL_LAYERNORM_LAYERS.append(BailingMoeV2_5RMSNorm)
208
+
209
+
210
+ class BailingMoeV2_5RotaryEmbedding(nn.Module):
211
+ def __init__(self, config: BailingMoeV2_5Config, device=None):
212
+ super().__init__()
213
+ # BC: "rope_type" was originally "type"
214
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
215
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
216
+ else:
217
+ self.rope_type = "default"
218
+ self.max_seq_len_cached = config.max_position_embeddings
219
+ self.original_max_seq_len = config.max_position_embeddings
220
+
221
+ self.config = config
222
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
223
+
224
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
225
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
226
+ self.original_inv_freq = self.inv_freq
227
+
228
+ @torch.no_grad()
229
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
230
+ def forward(self, x, position_ids):
231
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
232
+ position_ids_expanded = position_ids[:, None, :].float()
233
+
234
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
235
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
236
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
237
+ emb = torch.cat((freqs, freqs), dim=-1)
238
+ cos = emb.cos() * self.attention_scaling
239
+ sin = emb.sin() * self.attention_scaling
240
+
241
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
242
+
243
+
244
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
245
+ def rotate_half(x):
246
+ """Rotates half the hidden dims of the input."""
247
+ x1 = x[..., : x.shape[-1] // 2]
248
+ x2 = x[..., x.shape[-1] // 2 :]
249
+ return torch.cat((-x2, x1), dim=-1)
250
+
251
+
252
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
253
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
254
+ """Applies Rotary Position Embedding to the query and key tensors.
255
+ Args:
256
+ q (`torch.Tensor`): The query tensor.
257
+ k (`torch.Tensor`): The key tensor.
258
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
259
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
260
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
261
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
262
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
263
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
264
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
265
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
266
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
267
+ Returns:
268
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
269
+ """
270
+ cos = cos.unsqueeze(unsqueeze_dim)
271
+ sin = sin.unsqueeze(unsqueeze_dim)
272
+
273
+ # Keep half or full tensor for later concatenation
274
+ rotary_dim = cos.shape[-1]
275
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
276
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
277
+
278
+ # Apply rotary embeddings on the first half or full tensor
279
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
280
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
281
+
282
+ # Concatenate back to full shape
283
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
284
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
285
+ return q_embed, k_embed
286
+
287
+
288
+ class BailingMoeV2_5MLP(nn.Module):
289
+ def __init__(self, config: BailingMoeV2_5Config, intermediate_size: int):
290
+ super().__init__()
291
+ self.config = config
292
+ self.hidden_size = config.hidden_size
293
+ self.intermediate_size = intermediate_size
294
+
295
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
296
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
297
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
298
+ self.act_fn = ACT2FN[config.hidden_act]
299
+
300
+ def forward(self, x):
301
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
302
+
303
+
304
+ class BailingMoeV2_5Gate(nn.Module):
305
+ def __init__(self, config):
306
+ super().__init__()
307
+ self.config = config
308
+ self.top_k = config.num_experts_per_tok
309
+ self.num_experts = config.num_experts
310
+
311
+ self.n_group = config.n_group
312
+ self.topk_group = config.topk_group
313
+
314
+ # topk selection algorithm
315
+ self.gating_dim = config.hidden_size
316
+ self.weight = nn.Parameter(torch.empty((self.num_experts, self.gating_dim)))
317
+ self.routed_scaling_factor = config.routed_scaling_factor
318
+
319
+ self.register_buffer("expert_bias", torch.zeros((self.num_experts)))
320
+ self.reset_parameters()
321
+
322
+ def reset_parameters(self) -> None:
323
+ import torch.nn.init as init
324
+
325
+ init.kaiming_uniform_(self.weight, a=math.sqrt(5))
326
+
327
+ def group_limited_topk(
328
+ self,
329
+ scores: torch.Tensor,
330
+ ):
331
+ num_tokens, _ = scores.size()
332
+ # Organize the experts into groups
333
+ group_scores = scores.view(num_tokens, self.n_group, -1).topk(2, dim=-1)[0].sum(dim=-1)
334
+ group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
335
+ group_mask = torch.zeros_like(group_scores)
336
+ group_mask.scatter_(1, group_idx, 1)
337
+
338
+ # Mask the experts based on selection groups
339
+ score_mask = (
340
+ group_mask.unsqueeze(-1)
341
+ .expand(num_tokens, self.n_group, self.num_experts // self.n_group)
342
+ .reshape(num_tokens, -1)
343
+ )
344
+
345
+ masked_scores = scores.masked_fill(~score_mask.bool(), float('-inf'))
346
+ probs, top_indices = torch.topk(masked_scores, k=self.top_k, dim=-1)
347
+
348
+ return probs, top_indices
349
+
350
+ def forward(self, hidden_states):
351
+ # compute gating score
352
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
353
+ logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
354
+
355
+ scores = torch.sigmoid(logits.float()).type_as(logits)
356
+
357
+ scores_for_routing = scores + self.expert_bias
358
+ _, topk_idx = self.group_limited_topk(scores_for_routing)
359
+
360
+ scores = torch.gather(scores, dim=1, index=topk_idx).type_as(logits)
361
+
362
+ topk_weight = scores / (scores.sum(dim=-1, keepdim=True) + 1e-20) if self.top_k > 1 else scores
363
+ topk_weight = topk_weight * self.routed_scaling_factor
364
+
365
+ return topk_idx, topk_weight, logits
366
+
367
+
368
+ class BailingMoeV2_5SparseMoeBlock(nn.Module):
369
+ """
370
+ A mixed expert module containing shared experts.
371
+ """
372
+
373
+ def __init__(self, config: BailingMoeV2_5Config):
374
+ super().__init__()
375
+ self.config = config
376
+ self.num_experts_per_tok = config.num_experts_per_tok
377
+ self._setup_experts()
378
+ self.gate = BailingMoeV2_5Gate(config)
379
+ if config.num_shared_experts is not None:
380
+ self.shared_experts = BailingMoeV2_5MLP(
381
+ config=config, intermediate_size=config.moe_intermediate_size * config.num_shared_experts
382
+ )
383
+
384
+ def _setup_experts(self):
385
+ self.experts = nn.ModuleList(
386
+ [
387
+ BailingMoeV2_5MLP(config=self.config, intermediate_size=self.config.moe_intermediate_size)
388
+ for _ in range(self.config.num_experts)
389
+ ]
390
+ )
391
+
392
+ def forward(self, hidden_states):
393
+ identity = hidden_states
394
+ bsz, seq_len, h = hidden_states.shape
395
+ topk_idx, topk_weight, router_logits = self.gate(hidden_states)
396
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
397
+ flat_topk_idx = topk_idx.view(-1)
398
+ if self.training:
399
+ hidden_states = hidden_states.repeat_interleave(self.num_experts_per_tok, dim=0)
400
+ y = torch.empty_like(hidden_states)
401
+ for i, expert in enumerate(self.experts):
402
+ y[flat_topk_idx == i] = expert(hidden_states[flat_topk_idx == i])
403
+ y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
404
+ y = y.to(hidden_states.dtype).view(bsz, seq_len, h)
405
+ else:
406
+ y = self.moe_infer(hidden_states, topk_idx, topk_weight).view(bsz, seq_len, h)
407
+ if self.config.num_shared_experts is not None:
408
+ y = y + self.shared_experts(identity)
409
+ return y, (router_logits.view(bsz, seq_len, -1), topk_idx.view(bsz, seq_len, -1))
410
+
411
+ @torch.no_grad()
412
+ def moe_infer(self, x, topk_ids, topk_weight):
413
+ cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts)))
414
+ cnts.scatter_(1, topk_ids, 1)
415
+ tokens_per_expert = cnts.sum(dim=0)
416
+ idxs = topk_ids.view(-1).argsort()
417
+ sorted_tokens = x[idxs // topk_ids.shape[1]]
418
+ tokens_per_expert = tokens_per_expert.cpu().numpy()
419
+ outputs = []
420
+ start_idx = 0
421
+ for i, num_tokens in enumerate(tokens_per_expert):
422
+ end_idx = start_idx + num_tokens
423
+ if num_tokens == 0:
424
+ continue
425
+ expert = self.experts[i]
426
+ tokens_for_this_expert = sorted_tokens[start_idx:end_idx]
427
+ expert_out = expert(tokens_for_this_expert)
428
+ outputs.append(expert_out.to(x.device))
429
+ start_idx = end_idx
430
+
431
+ outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
432
+ new_x = torch.empty_like(outs)
433
+ new_x[idxs] = outs
434
+ final_out = (
435
+ new_x.view(*topk_ids.shape, -1)
436
+ .type(topk_weight.dtype)
437
+ .mul_(topk_weight.unsqueeze(dim=-1))
438
+ .sum(dim=1)
439
+ .type(new_x.dtype)
440
+ )
441
+ return final_out
442
+
443
+
444
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
445
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int, head_first: bool = True) -> torch.Tensor:
446
+ """
447
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). If head_first is True, the hidden states go from (batch,
448
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
449
+ """
450
+ if n_rep == 1:
451
+ return hidden_states
452
+ if head_first:
453
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
454
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
455
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
456
+ else:
457
+ batch, slen, num_key_value_heads, head_dim = hidden_states.shape
458
+ hidden_states = hidden_states[:, :, :, None, :].expand(batch, slen, num_key_value_heads, n_rep, head_dim)
459
+ return hidden_states.reshape(batch, slen, num_key_value_heads * n_rep, head_dim)
460
+
461
+
462
+ def repeat_kv2(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
463
+ """
464
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
465
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
466
+ """
467
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
468
+ if n_rep == 1:
469
+ return hidden_states
470
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
471
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
472
+
473
+
474
+ def eager_attention_forward(
475
+ module: nn.Module,
476
+ query: torch.Tensor,
477
+ key: torch.Tensor,
478
+ value: torch.Tensor,
479
+ attention_mask: Optional[torch.Tensor],
480
+ scaling: float,
481
+ dropout: float = 0.0,
482
+ **kwargs: Unpack[TransformersKwargs],
483
+ ):
484
+ key_states = repeat_kv2(key, module.num_key_value_groups)
485
+ value_states = repeat_kv2(value, module.num_key_value_groups)
486
+
487
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
488
+ if attention_mask is not None:
489
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
490
+ attn_weights = attn_weights + causal_mask
491
+
492
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
493
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
494
+ attn_output = torch.matmul(attn_weights, value_states)
495
+ attn_output = attn_output.transpose(1, 2).contiguous()
496
+
497
+ return attn_output, attn_weights
498
+
499
+
500
+ def apply_rotary_pos_emb_interleave(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
501
+ r"""
502
+ TODO let's just use the original freqcis computation to not have the view
503
+ transpose + reshape! This is not optimized!
504
+ Applies Rotary Position Embedding to the query and key tensors.
505
+
506
+ Args:
507
+ q (`torch.Tensor`): The query tensor.
508
+ k (`torch.Tensor`): The key tensor.
509
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
510
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
511
+ position_ids (`torch.Tensor`):
512
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
513
+ used to pass offsetted position ids when working with a KV-cache.
514
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
515
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
516
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
517
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
518
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
519
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
520
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
521
+ Returns:
522
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
523
+ """
524
+ cos = cos.unsqueeze(unsqueeze_dim)
525
+ sin = sin.unsqueeze(unsqueeze_dim)
526
+
527
+ b, h, s, d = q.shape
528
+ q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
529
+
530
+ b, h, s, d = k.shape
531
+ k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
532
+
533
+ q_embed = (q * cos) + (rotate_half(q) * sin)
534
+ k_embed = (k * cos) + (rotate_half(k) * sin)
535
+ return q_embed, k_embed
536
+
537
+
538
+ class BailingMoeV2_5MLARotaryEmbedding(nn.Module):
539
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
540
+
541
+ def __init__(self, config: BailingMoeV2_5Config, device=None):
542
+ super().__init__()
543
+ # BC: "rope_type" was originally "type"
544
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
545
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
546
+ else:
547
+ self.rope_type = "default"
548
+ self.max_seq_len_cached = config.max_position_embeddings
549
+ self.original_max_seq_len = config.max_position_embeddings
550
+
551
+ self.config = config
552
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
553
+
554
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
555
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
556
+ self.original_inv_freq = self.inv_freq
557
+
558
+ @torch.no_grad()
559
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
560
+ def forward(self, x, position_ids):
561
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
562
+ position_ids_expanded = position_ids[:, None, :].float()
563
+
564
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
565
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
566
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
567
+ emb = torch.cat((freqs, freqs), dim=-1)
568
+ cos = emb.cos() * self.attention_scaling
569
+ sin = emb.sin() * self.attention_scaling
570
+
571
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
572
+
573
+
574
+ def yarn_get_mscale(scale=1, mscale=1):
575
+ if scale <= 1:
576
+ return 1.0
577
+ return 0.1 * mscale * math.log(scale) + 1.0
578
+
579
+
580
+ class BailingMoeV2_5MultiLatentAttention(nn.Module):
581
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
582
+
583
+ def __init__(self, config: BailingMoeV2_5Config, layer_idx: int):
584
+ super().__init__()
585
+ self.config = config
586
+ self.layer_idx = layer_idx
587
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
588
+ self.attention_dropout = config.attention_dropout
589
+ self.num_heads = config.num_attention_heads
590
+ self.rope_theta = config.rope_theta
591
+ self.q_lora_rank = config.q_lora_rank
592
+ self.qk_rope_head_dim = config.qk_rope_head_dim
593
+ self.kv_lora_rank = config.kv_lora_rank
594
+ self.v_head_dim = config.v_head_dim
595
+ self.qk_nope_head_dim = config.qk_nope_head_dim
596
+ self.qk_head_dim = config.qk_head_dim
597
+
598
+ self.is_causal = True
599
+ if self.q_lora_rank is None:
600
+ self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.qk_head_dim, bias=False)
601
+ else:
602
+ self.q_a_proj = nn.Linear(config.hidden_size, config.q_lora_rank, bias=config.use_qkv_bias)
603
+ self.q_a_layernorm = BailingMoeV2_5RMSNorm(config.q_lora_rank)
604
+ self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False)
605
+
606
+ self.kv_a_proj_with_mqa = nn.Linear(
607
+ config.hidden_size,
608
+ self.kv_lora_rank + self.qk_rope_head_dim,
609
+ bias=config.use_qkv_bias,
610
+ )
611
+ self.kv_a_layernorm = BailingMoeV2_5RMSNorm(self.kv_lora_rank)
612
+ self.kv_b_proj = nn.Linear(
613
+ self.kv_lora_rank,
614
+ self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
615
+ bias=False,
616
+ )
617
+
618
+ self.dense = nn.Linear(
619
+ self.num_heads * self.v_head_dim,
620
+ config.hidden_size,
621
+ bias=config.use_qkv_bias,
622
+ )
623
+
624
+ self.scaling = self.qk_head_dim ** (-0.5)
625
+ if self.config.rope_scaling is not None:
626
+ mscale_all_dim = self.config.rope_scaling.get("mscale_all_dim", 0)
627
+ scaling_factor = self.config.rope_scaling["factor"]
628
+ if mscale_all_dim:
629
+ mscale = yarn_get_mscale(scaling_factor, mscale_all_dim)
630
+ self.scaling = self.scaling * mscale * mscale
631
+
632
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
633
+ def forward(
634
+ self,
635
+ hidden_states: torch.Tensor,
636
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
637
+ attention_mask: Optional[torch.Tensor],
638
+ past_key_values: Optional[Cache] = None,
639
+ cache_position: Optional[torch.LongTensor] = None,
640
+ **kwargs: Unpack[FlashAttentionKwargs],
641
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
642
+
643
+ batch_size, seq_length = hidden_states.shape[:-1]
644
+ query_shape = (batch_size, seq_length, -1, self.qk_head_dim)
645
+ key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim)
646
+
647
+ if self.q_lora_rank is None:
648
+ q_states = self.q_proj(hidden_states)
649
+ else:
650
+ q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
651
+ q_states = q_states.view(query_shape).transpose(1, 2)
652
+ q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
653
+
654
+ compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
655
+ k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
656
+
657
+ k_pass = self.kv_b_proj(self.kv_a_layernorm(k_pass)).view(key_shape).transpose(1, 2)
658
+ k_pass, value_states = torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
659
+
660
+ k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim)
661
+
662
+ cos, sin = position_embeddings # tptest
663
+ if self.config.rope_interleave: # support using interleaved weights for efficiency
664
+ q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin)
665
+ else:
666
+ raise NotImplementedError("non-interleaved RoPE is not expected for this model")  # guard kept; was `x = 1 / 0`
667
+ q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin)
668
+ k_rot = k_rot.expand(*k_pass.shape[:-1], -1)
669
+
670
+ query_states = torch.cat((q_pass, q_rot), dim=-1)
671
+ key_states = torch.cat((k_pass, k_rot), dim=-1)
672
+
673
+ if past_key_values is not None:
674
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
675
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
676
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
677
+
678
+ if self.config._attn_implementation == "flash_attention_2" and self.qk_head_dim != self.v_head_dim:
679
+ value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
680
+
681
+ attention_interface: Callable = eager_attention_forward
682
+
683
+ if self.config._attn_implementation != "eager":
684
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
685
+
686
+ attn_output, attn_weights = attention_interface(
687
+ self,
688
+ query_states,
689
+ key_states,
690
+ value_states,
691
+ attention_mask,
692
+ dropout=0.0 if not self.training else self.attention_dropout,
693
+ scaling=self.scaling,
694
+ **kwargs,
695
+ )
696
+
697
+ if self.config._attn_implementation == "flash_attention_2" and self.qk_head_dim != self.v_head_dim:
698
+ attn_output = attn_output[:, :, :, : self.v_head_dim]
699
+
700
+ attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
701
+ attn_output = self.dense(attn_output)
702
+ return attn_output, attn_weights, past_key_values
703
+
704
+
705
+ class BailingMoeV2_5LinearAttention(nn.Module):
706
+ """
707
+ BailingMoeV2_5LinearAttention implements a linear attention mechanism based on Lightning Attention-2
708
+ (https://arxiv.org/abs/2401.04658) with efficient computation using flash-linear-attention operators.
709
+
710
+ The implementation leverages optimized kernels from the flash-linear-attention library
711
+ (https://github.com/fla-org/flash-linear-attention) for maximum performance.
712
+ """
713
+
714
+ def __init__(self, config: BailingMoeV2_5Config, layer_idx: Optional[int] = None):
715
+ super().__init__()
716
+ self.config = config
717
+ self.layer_idx = layer_idx
718
+ if layer_idx is None:
719
+ logger.warning_once(
720
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead "
721
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
722
+ "when creating this class."
723
+ )
724
+ self.hidden_size = config.hidden_size
725
+ self.num_heads = config.num_attention_heads
726
+ self.head_dim = config.head_dim or self.hidden_size // self.num_heads
727
+ self.num_key_value_heads = config.num_attention_heads
728
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
729
+ partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0
730
+ self.rope_dim = int(self.head_dim * partial_rotary_factor)
731
+
732
+ self.use_qk_norm = getattr(config, "use_qk_norm", False)
733
+ self.rms_norm_eps = getattr(config, "rms_norm_eps", 1e-5)
734
+ self.mode = 'chunk'
735
+
736
+ self.query_key_value = nn.Linear(
737
+ self.hidden_size,
738
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
739
+ bias=config.use_qkv_bias,
740
+ )
741
+
742
+ if self.config.use_qk_norm:
743
+ self.query_layernorm = BailingMoeV2_5RMSNorm(self.head_dim, eps=config.rms_norm_eps)
744
+ self.key_layernorm = BailingMoeV2_5RMSNorm(self.head_dim, eps=config.rms_norm_eps)
745
+
746
+ self.rotary_emb = BailingMoeV2_5RotaryEmbedding(config=config)
747
+
748
+ self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.use_bias)
749
+
750
+ self.g_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
751
+ self.g_norm = BailingMoeV2_5GroupRMSNorm(
752
+ self.num_heads * self.head_dim, group_norm_size=config.group_norm_size, eps=self.rms_norm_eps
753
+ )
754
+ slope = -BailingMoeV2_5LinearAttention.build_slope_tensor(self.num_heads) * (
755
+ 1 - (self.layer_idx - 1) / (self.config.num_hidden_layers - 1) + 1e-5
756
+ )
757
+ self.register_buffer('slope', slope, persistent=False)
758
+
759
+ self.lightning_attn_ops = {'chunk': chunk_simple_gla, 'fused_recurrent': fused_recurrent_simple_gla}
760
+
761
+ @staticmethod
762
+ def build_slope_tensor(n_attention_heads: int):
763
+ """
764
+ Build a tensor of slopes for Lightning Attention-2 as described in the paper:
765
+ "Lightning Attention-2: A Free Lunch for Handling Unlimited Sequence Lengths in Large Language Models"
766
+ (https://arxiv.org/abs/2401.04658)
767
+
768
+ This function computes the slope values that control the decay rate of attention scores
769
+ based on the number of attention heads. The slopes are designed to have specific
770
+ mathematical properties that work optimally when the number of heads is a power of 2.
771
+
772
+ For non-power-of-2 head counts, a workaround is implemented to maintain similar properties.
773
+
774
+ Args:
775
+ n_attention_heads (int): Number of attention heads in the model
776
+
777
+ Returns:
778
+ torch.Tensor: A tensor of shape [n_attention_heads] containing the computed slopes
779
+
780
+ Note:
781
+ Code copied from: https://github.com/OpenNLPLab/lightning-attention/blob/d15c38529bbd5c2c82b44ddda3cac885825aa873/lightning_attn/utils/utils.py#L6
782
+ """
783
+
784
+ def get_slopes(n):
785
+ def get_slopes_power_of_2(n):
786
+ start = 2 ** (-(2 ** -(math.log2(n) - 3)))
787
+ ratio = start
788
+ return [start * ratio**i for i in range(n)]
789
+
790
+ if math.log2(n).is_integer():
791
+ return get_slopes_power_of_2(
792
+ n
793
+ ) # In the paper, we only train models that have 2^a heads for some a. This function has
794
+ else: # some good properties that only occur when the input is a power of 2. To maintain that even
795
+ closest_power_of_2 = 2 ** math.floor(
796
+ math.log2(n)
797
+ ) # when the number of heads is not a power of 2, we use this workaround.
798
+ return (
799
+ get_slopes_power_of_2(closest_power_of_2)
800
+ + get_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]
801
+ )
802
+
803
+ slopes = torch.tensor(get_slopes(n_attention_heads), dtype=torch.float)
804
+ return slopes
805
+
806
+ def forward(
807
+ self,
808
+ hidden_states: torch.Tensor,
809
+ attention_mask: Optional[torch.Tensor] = None,
810
+ position_ids: Optional[torch.LongTensor] = None,
811
+ past_key_value: Optional[Cache] = None,
812
+ output_attentions: bool = False,
813
+ use_cache: bool = False,
814
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
815
+ **kwargs,
816
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
817
+ if attention_mask is not None:
818
+ assert len(attention_mask.shape) == 2, (
819
+ "Expected attention_mask as a 0-1 matrix with shape [batch_size, seq_len] "
820
+ "for padding purposes (0 indicating padding). "
821
+ "Arbitrary attention masks of shape [batch_size, seq_len, seq_len] are not allowed."
822
+ )
823
+
824
+ # launching the triton kernel for just one token will actually be slower
825
+ mode = 'fused_recurrent' if hidden_states.shape[1] <= 64 else self.mode
826
+
827
+ # Currently output_attentions can only be False, returning attention weights is not supported
828
+ assert (
829
+ not output_attentions
830
+ ), "output_attentions can only be False, returning attention weights is not supported"
831
+
832
+ bsz, q_len, _ = hidden_states.size()
833
+ device = hidden_states.device
834
+
835
+ qkv = self.query_key_value(hidden_states)
836
+ qkv = qkv.view(bsz, q_len, self.num_heads + 2 * self.num_key_value_heads, self.head_dim)
837
+ query_states, key_states, value_states = qkv.split(
838
+ [self.num_heads, self.num_key_value_heads, self.num_key_value_heads], dim=-2
839
+ )
840
+ if self.config.use_qk_norm:
841
+ query_states = self.query_layernorm(query_states)
842
+ key_states = self.key_layernorm(key_states)
843
+
844
+ cos, sin = position_embeddings
845
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, unsqueeze_dim=2)
846
+
847
+ if self.num_key_value_groups > 1:
848
+ # [bsz, q_len, n_kv_heads, head_dim] -> [bsz, q_len, n_heads, head_dim]
849
+ key_states = repeat_kv(key_states, self.num_key_value_groups, head_first=False)
850
+ value_states = repeat_kv(value_states, self.num_key_value_groups, head_first=False)
851
+
852
+ recurrent_state = None
853
+ if past_key_value is not None and isinstance(past_key_value, Cache):
854
+ # ensure the cache list is long enough
855
+ while len(past_key_value.layers) <= self.layer_idx:
856
+ past_key_value.layers.append(DynamicLayer())
857
+
858
+ if past_key_value.layers[self.layer_idx].keys is not None:
859
+ recurrent_state = past_key_value.layers[self.layer_idx].keys
860
+ # ensure recurrent_state is on the same device as hidden_states
861
+ if recurrent_state.device != hidden_states.device:
862
+ recurrent_state = recurrent_state.to(device).contiguous()
863
+
864
+ if recurrent_state is None:
865
+ # dealing with left-padding
866
+ if attention_mask is not None and use_cache:
867
+ value_states = value_states.mul_(attention_mask[:, -q_len:, None, None])
868
+
869
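+ # Run the chunked or fused-recurrent gated linear attention kernel; g carries the per-head decay slopes broadcast over batch and sequence.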
+ o, recurrent_state = self.lightning_attn_ops[mode](
870
+ q=query_states,
871
+ k=key_states,
872
+ v=value_states,
873
+ g=self.slope[None, None, :].expand(bsz, q_len, self.num_heads),
874
+ initial_state=recurrent_state,
875
+ output_final_state=use_cache,
876
+ )
877
+
878
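+ # Group-RMS-normalize the attention output, then apply a sigmoid output gate (g_proj) before the final dense projection.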
+ o = o.reshape(bsz, q_len, -1)
879
+ o = self.g_norm(o)
880
+ g_proj = self.g_proj(hidden_states)
881
+ o = o * torch.sigmoid_(g_proj)
882
+ o = self.dense(o)
883
+
884
+ if use_cache and past_key_value is not None and isinstance(past_key_value, Cache):
885
+ target_device = None
886
+ for cache in past_key_value.layers:
887
+ if cache.keys is not None:
888
+ target_device = cache.keys.device
889
+ break
890
+ if target_device is None:
891
+ target_device = recurrent_state.device
892
+
893
+ # move to target device
894
+ if recurrent_state.device != target_device:
895
+ recurrent_state = recurrent_state.to(target_device)
896
+
897
+ past_key_value.layers[self.layer_idx].keys = recurrent_state
898
+
899
+ return o, None, past_key_value
900
+
901
+
902
+ class BailingMoeV2_5MTPLayer(nn.Module):
903
+ def __init__(self, config: BailingMoeV2_5Config, layer_idx: int):
904
+ super().__init__()
905
+ self.layer_idx = layer_idx
906
+ self.input_layernorm = BailingMoeV2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
907
+ self.enorm = BailingMoeV2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
908
+
909
+ self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
910
+ self.post_attention_layernorm = BailingMoeV2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
911
+ self.attention = BailingMoeV2_5MultiLatentAttention(config=config, layer_idx=layer_idx)
912
+ self.mlp = BailingMoeV2_5SparseMoeBlock(config)
913
+
914
+ self.hnorm = BailingMoeV2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
915
+ self.final_layernorm = BailingMoeV2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
916
+
917
+ def forward(
918
+ self,
919
+ input_embeds,
920
+ hidden_states: torch.Tensor,
921
+ attention_mask: Optional[torch.Tensor] = None,
922
+ position_ids: Optional[torch.LongTensor] = None,
923
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
924
+ output_attentions: Optional[bool] = False,
925
+ output_router_logits: Optional[bool] = False,
926
+ use_cache: Optional[bool] = False,
927
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
928
+ **kwargs,
929
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
930
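+ # Multi-token prediction: normalize the shifted token embeddings and the backbone hidden states, fuse them with eh_proj, then run a regular attention + MoE block.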
+ input_embeds = self.enorm(input_embeds)
931
+ hidden_states = self.hnorm(hidden_states)
932
+ hidden_states = self.eh_proj(torch.cat([input_embeds, hidden_states], dim=-1))
933
+ residual = hidden_states
934
+
935
+ hidden_states = self.input_layernorm(hidden_states)
936
+
937
+ # Self Attention
938
+ hidden_states, self_attn_weights, present_key_value = self.attention(
939
+ hidden_states=hidden_states,
940
+ attention_mask=attention_mask,
941
+ position_ids=position_ids,
942
+ past_key_value=past_key_value,
943
+ output_attentions=output_attentions,
944
+ position_embeddings=position_embeddings,
945
+ use_cache=use_cache,
946
+ )
947
+ hidden_states = residual + hidden_states
948
+
949
+ # Fully Connected
950
+ residual = hidden_states
951
+ hidden_states = self.post_attention_layernorm(hidden_states)
952
+ hidden_states = self.mlp(hidden_states)
953
+ if isinstance(hidden_states, tuple):
954
+ hidden_states, router_logits = hidden_states
955
+ else:
956
+ router_logits = None
957
+ hidden_states = residual + hidden_states.to(residual.device)
958
+ hidden_states = self.final_layernorm(hidden_states)
959
+
960
+ outputs = (hidden_states,)
961
+
962
+ if output_attentions:
963
+ outputs += (self_attn_weights,)
964
+
965
+ if use_cache:
966
+ outputs += (present_key_value,)
967
+
968
+ if output_router_logits:
969
+ outputs += (router_logits,)
970
+
971
+ return outputs
972
+
973
+
974
+ class BailingMoeV2_5DecoderLayer(nn.Module):
975
+ def __init__(self, config: BailingMoeV2_5Config, layer_idx: int):
976
+ super().__init__()
977
+ self.hidden_size = config.hidden_size
978
+ self.layer_idx = layer_idx
979
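+ # Hybrid layout: every layer_group_size-th layer (and any trailing layers past the last full group) uses softmax MLA attention; the rest use linear attention.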
+ self.attention_layer_type = (
980
+ "attention"
981
+ if (layer_idx + 1) % config.layer_group_size == 0
982
+ or layer_idx >= config.num_hidden_layers // config.layer_group_size * config.layer_group_size
983
+ else "linear_attention"
984
+ )
985
+
986
+ if self.attention_layer_type == "attention":
987
+ self.attention = BailingMoeV2_5MultiLatentAttention(config=config, layer_idx=layer_idx)
988
+ else:
989
+ self.attention = BailingMoeV2_5LinearAttention(config=config, layer_idx=layer_idx)
990
+
991
+ self.mlp = (
992
+ BailingMoeV2_5SparseMoeBlock(config)
993
+ if (config.num_experts is not None and layer_idx >= config.first_k_dense_replace)
994
+ else BailingMoeV2_5MLP(config=config, intermediate_size=config.intermediate_size)
995
+ )
996
+ self.input_layernorm = BailingMoeV2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
997
+ self.post_attention_layernorm = BailingMoeV2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
998
+
999
+ def forward(
1000
+ self,
1001
+ hidden_states: torch.Tensor,
1002
+ attention_mask: Optional[torch.Tensor] = None,
1003
+ position_ids: Optional[torch.LongTensor] = None,
1004
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
1005
+ cache_position: Optional[torch.LongTensor] = None,
1006
+ output_attentions: Optional[bool] = False,
1007
+ output_router_logits: Optional[bool] = False,
1008
+ use_cache: Optional[bool] = False,
1009
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
1010
+ position_embeddings_mla: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
1011
+ **kwargs,
1012
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1013
+ """
1014
+ Args:
1015
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1016
+ attention_mask (`torch.FloatTensor`, *optional*):
1017
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
1018
+ query_sequence_length, key_sequence_length)` if default attention is used.
1019
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1020
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1021
+ config.n_positions - 1]`.
1022
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
1023
+ cached past key and value projection states
1024
+ output_attentions (`bool`, *optional*):
1025
+ Whether to return the attentions tensors of all attention layers. See `attentions` under
1026
+ returned tensors for more detail.
1027
+ output_router_logits (`bool`, *optional*):
1028
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
1029
+ and should not be returned during inference.
1030
+ use_cache (`bool`, *optional*):
1031
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1032
+ (see `past_key_values`).
1033
+ """
1034
+ residual = hidden_states
1035
+
1036
+ hidden_states = self.input_layernorm(hidden_states)
1037
+
1038
+ # Self Attention
1039
+ if self.attention_layer_type == "attention":
1040
+ hidden_states, self_attn_weights, present_key_value = self.attention(
1041
+ hidden_states=hidden_states,
1042
+ attention_mask=attention_mask,
1043
+ position_ids=position_ids,
1044
+ past_key_values=past_key_value,
1045
+ use_cache=use_cache,
1046
+ cache_position=cache_position,
1047
+ position_embeddings=position_embeddings_mla,
1048
+ **kwargs,
1049
+ )
1050
+ else:
1051
+ batch_size, seq_len = hidden_states.shape[0], hidden_states.shape[1]
1052
+ device = hidden_states.device
1053
+
1054
+ if attention_mask is None:
1055
+ # if attention_mask is None, create a full mask
1056
+ attention_mask = torch.ones((batch_size, seq_len), dtype=torch.int32, device=device)
1057
+ elif attention_mask.dim() == 4 and attention_mask.shape[1] == 1:
1058
+ attention_mask = attention_mask[:, 0, -1, :].to(torch.int32)
1059
+ attention_mask = (attention_mask > -1e4).to(torch.int32)
1060
+ elif attention_mask.dim() == 2:
1061
+ attention_mask = attention_mask.to(torch.int32)
1062
+ else:
1063
+ raise ValueError(f"Unsupported mask dimension: {attention_mask.shape}")
1064
+
1065
+ hidden_states, self_attn_weights, present_key_value = self.attention(
1066
+ hidden_states=hidden_states,
1067
+ attention_mask=attention_mask,
1068
+ past_key_value=past_key_value,
1069
+ position_ids=position_ids,
1070
+ use_cache=use_cache,
1071
+ output_attentions=output_attentions,
1072
+ position_embeddings=position_embeddings,
1073
+ )
1074
+
1075
+ hidden_states = residual + hidden_states
1076
+
1077
+ # Fully Connected
1078
+ residual = hidden_states
1079
+ hidden_states = self.post_attention_layernorm(hidden_states)
1080
+ hidden_states = self.mlp(hidden_states)
1081
+ if isinstance(hidden_states, tuple):
1082
+ hidden_states, router_logits = hidden_states
1083
+ else:
1084
+ router_logits = None
1085
+ hidden_states = residual + hidden_states.to(residual.device)
1086
+
1087
+ outputs = (hidden_states,)
1088
+
1089
+ if output_attentions:
1090
+ outputs += (self_attn_weights,)
1091
+
1092
+ if use_cache:
1093
+ outputs += (present_key_value,)
1094
+
1095
+ if output_router_logits:
1096
+ outputs += (router_logits,)
1097
+
1098
+ return outputs
1099
+
1100
+
1101
+ BAILINGMOEV2_5_START_DOCSTRING = r"""
1102
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1103
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1104
+ etc.)
1105
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1106
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
1107
+ and behavior.
1108
+ Parameters:
1109
+ config ([`BailingMoeV2_5Config`]):
1110
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1111
+ load the weights associated with the model, only the configuration. Check out the
1112
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1113
+ """
1114
+
1115
+
1116
+ @add_start_docstrings(
1117
+ "The bare BailingMoeV2_5 Model outputting raw hidden-states without any specific head on top.",
1118
+ BAILINGMOEV2_5_START_DOCSTRING,
1119
+ )
1120
+ class BailingMoeV2_5PreTrainedModel(PreTrainedModel):
1121
+ config_class = BailingMoeV2_5Config
1122
+ base_model_prefix = "model"
1123
+ supports_gradient_checkpointing = True
1124
+ _no_split_modules = ["BailingMoeV2_5DecoderLayer"]
1125
+ _skip_keys_device_placement = "past_key_values"
1126
+ _supports_flash_attn_2 = True
1127
+ _supports_sdpa = True
1128
+ _supports_cache_class = True
1129
+
1130
+ def _init_weights(self, module):
1131
+ std = self.config.initializer_range
1132
+ if isinstance(module, nn.Linear):
1133
+ module.weight.data.normal_(mean=0.0, std=std)
1134
+ if module.bias is not None:
1135
+ module.bias.data.zero_()
1136
+ elif isinstance(module, nn.Embedding):
1137
+ module.weight.data.normal_(mean=0.0, std=std)
1138
+ if module.padding_idx is not None:
1139
+ module.weight.data[module.padding_idx].zero_()
1140
+
1141
+
1142
+ BAILINGMOEV2_5_INPUTS_DOCSTRING = r"""
1143
+ Args:
1144
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1145
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1146
+ it.
1147
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1148
+ [`PreTrainedTokenizer.__call__`] for details.
1149
+ [What are input IDs?](../glossary#input-ids)
1150
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1151
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1152
+ - 1 for tokens that are **not masked**,
1153
+ - 0 for tokens that are **masked**.
1154
+ [What are attention masks?](../glossary#attention-mask)
1155
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1156
+ [`PreTrainedTokenizer.__call__`] for details.
1157
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1158
+ `past_key_values`).
1159
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1160
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1161
+ information on the default strategy.
1162
+ - 1 indicates the head is **not masked**,
1163
+ - 0 indicates the head is **masked**.
1164
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1165
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1166
+ config.n_positions - 1]`.
1167
+ [What are position IDs?](../glossary#position-ids)
1168
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1169
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1170
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
1171
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1172
+ Two formats are allowed:
1173
+ - a [`~cache_utils.Cache`] instance;
1174
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1175
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1176
+ cache format.
1177
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1178
+ legacy cache format will be returned.
1179
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1180
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1181
+ of shape `(batch_size, sequence_length)`.
1182
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1183
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1184
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1185
+ model's internal embedding lookup matrix.
1186
+ use_cache (`bool`, *optional*):
1187
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1188
+ `past_key_values`).
1189
+ output_attentions (`bool`, *optional*):
1190
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1191
+ tensors for more detail.
1192
+ output_hidden_states (`bool`, *optional*):
1193
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1194
+ more detail.
1195
+ return_dict (`bool`, *optional*):
1196
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1197
+ """
1198
+
1199
+
1200
+ @add_start_docstrings(
1201
+ "The bare BailingMoeV2_5 Model outputting raw hidden-states without any specific head on top.",
1202
+ BAILINGMOEV2_5_START_DOCSTRING,
1203
+ )
1204
+ class BailingMoeV2_5Model(BailingMoeV2_5PreTrainedModel):
1205
+ """
1206
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`BailingMoeV2_5DecoderLayer`]
1207
+ Args:
1208
+ config: BailingMoeV2_5Config
1209
+ """
1210
+
1211
+ def __init__(self, config: BailingMoeV2_5Config):
1212
+ super().__init__(config)
1213
+ self.padding_idx = config.pad_token_id
1214
+ self.vocab_size = config.vocab_size
1215
+ self.num_nextn_predict_layers = config.num_nextn_predict_layers
1216
+
1217
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1218
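+ # Backbone decoder layers come first; the final num_nextn_predict_layers entries are MTP layers for multi-token prediction.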
+ self.layers = []
1219
+ for layer_idx in range(config.num_hidden_layers + config.num_nextn_predict_layers):
1220
+ layer_cls = BailingMoeV2_5DecoderLayer if layer_idx < config.num_hidden_layers else BailingMoeV2_5MTPLayer
1221
+ self.layers.append(layer_cls(config, layer_idx))
1222
+
1223
+ self.layers = nn.ModuleList(self.layers)
1224
+
1225
+ self._use_sdpa = config._attn_implementation == "sdpa"
1226
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
1227
+ self.norm = BailingMoeV2_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1228
+ self.rotary_emb = BailingMoeV2_5RotaryEmbedding(config=config)
1229
+ self.rotary_emb_mla = BailingMoeV2_5MLARotaryEmbedding(config=config)
1230
+ self.gradient_checkpointing = False
1231
+ # Initialize weights and apply final processing
1232
+ self.post_init()
1233
+
1234
+ def get_input_embeddings(self):
1235
+ return self.word_embeddings
1236
+
1237
+ def set_input_embeddings(self, value):
1238
+ self.word_embeddings = value
1239
+
1240
+ @add_start_docstrings_to_model_forward(BAILINGMOEV2_5_INPUTS_DOCSTRING)
1241
+ def forward(
1242
+ self,
1243
+ input_ids: torch.LongTensor = None,
1244
+ attention_mask: Optional[torch.Tensor] = None,
1245
+ position_ids: Optional[torch.LongTensor] = None,
1246
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1247
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1248
+ cache_position: Optional[torch.LongTensor] = None,
1249
+ use_cache: Optional[bool] = None,
1250
+ output_attentions: Optional[bool] = None,
1251
+ output_hidden_states: Optional[bool] = None,
1252
+ output_router_logits: Optional[bool] = None,
1253
+ return_dict: Optional[bool] = None,
1254
+ **kwargs,
1255
+ ) -> Union[Tuple, MoeV2_5ModelOutputWithPast]:
1256
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1257
+ output_hidden_states = (
1258
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1259
+ )
1260
+ output_router_logits = (
1261
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1262
+ )
1263
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1264
+
1265
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1266
+
1267
+ # retrieve input_ids and inputs_embeds
1268
+ if input_ids is not None and inputs_embeds is not None:
1269
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1270
+ elif input_ids is not None:
1271
+ batch_size, seq_length = input_ids.shape[:2]
1272
+ elif inputs_embeds is not None:
1273
+ batch_size, seq_length = inputs_embeds.shape[:2]
1274
+ else:
1275
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1276
+
1277
+ if self.gradient_checkpointing and self.training:
1278
+ if use_cache:
1279
+ logger.warning_once(
1280
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1281
+ )
1282
+ use_cache = False
1283
+
1284
+ if use_cache and past_key_values is None:
1285
+ past_key_values = DynamicCache()
1286
+
1287
+ if inputs_embeds is None:
1288
+ inputs_embeds = self.word_embeddings(input_ids)
1289
+
1290
+ # For hybrid attention (MLA + Linear Attention), use the softmax attention layer's cache length
1291
+ # to ensure consistent position tracking across different attention types
1292
+ softmax_attention_layer_id = self.config.layer_group_size - 1
1293
+ if past_key_values is not None:
1294
+ past_seen_tokens = past_key_values.get_seq_length(layer_idx=softmax_attention_layer_id)
1295
+ else:
1296
+ past_seen_tokens = 0
1297
+
1298
+ if cache_position is None:
1299
+ cache_position = torch.arange(
1300
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
1301
+ )
1302
+
1303
+ if position_ids is None:
1304
+ position_ids = cache_position.unsqueeze(0)
1305
+
1306
+ if self._use_flash_attention_2:
1307
+ # 2d mask is passed through the layers
1308
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1309
+ elif self._use_sdpa and not output_attentions:
1310
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
1311
+ # the manual implementation that requires a 4D causal mask in all cases.
1312
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1313
+ attention_mask,
1314
+ (batch_size, seq_length),
1315
+ inputs_embeds,
1316
+ past_seen_tokens,
1317
+ )
1318
+ else:
1319
+ # 4d mask is passed through the layers
1320
+ attention_mask = _prepare_4d_causal_attention_mask(
1321
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_seen_tokens
1322
+ )
1323
+
1324
+ # embed positions
1325
+ hidden_states = inputs_embeds
1326
+
1327
+ # create position embeddings to be shared across the decoder layers
1328
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
1329
+ position_embeddings_mla = self.rotary_emb_mla(hidden_states, position_ids)
1330
+
1331
+ # decoder layers
1332
+ all_hidden_states = () if output_hidden_states else None
1333
+ all_self_attns = () if output_attentions else None
1334
+ all_router_logits = () if output_router_logits else None
1335
+ next_decoder_cache = None
1336
+ layers = self.layers[: -self.num_nextn_predict_layers] if self.num_nextn_predict_layers > 0 else self.layers
1337
+ mtp_layers = self.layers[-self.num_nextn_predict_layers :] if self.num_nextn_predict_layers > 0 else None
1338
+
1340
+
1341
+ for decoder_layer in layers:
1342
+ if output_hidden_states:
1343
+ all_hidden_states += (hidden_states,)
1344
+
1345
+ if self.gradient_checkpointing and self.training:
1346
+ layer_outputs = self._gradient_checkpointing_func(
1347
+ decoder_layer.__call__,
1348
+ hidden_states,
1349
+ attention_mask,
1350
+ position_ids,
1351
+ past_key_values,
1352
+ cache_position,
1353
+ output_attentions,
1354
+ output_router_logits,
1355
+ use_cache,
1356
+ position_embeddings,
1357
+ position_embeddings_mla,
1358
+ )
1359
+ else:
1360
+ layer_outputs = decoder_layer(
1361
+ hidden_states,
1362
+ attention_mask=attention_mask,
1363
+ position_ids=position_ids,
1364
+ past_key_value=past_key_values,
1365
+ cache_position=cache_position,
1366
+ output_attentions=output_attentions,
1367
+ output_router_logits=output_router_logits,
1368
+ use_cache=use_cache,
1369
+ position_embeddings=position_embeddings,
1370
+ position_embeddings_mla=position_embeddings_mla,
1371
+ )
1372
+ hidden_states = layer_outputs[0]
1373
+
1374
+ if use_cache:
1375
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1376
+
1377
+ if output_attentions:
1378
+ all_self_attns += (layer_outputs[1],)
1379
+
1380
+ if output_router_logits and layer_outputs[-1] is not None:
1381
+ all_router_logits += (layer_outputs[-1],)
1382
+
1383
+ hidden_states = self.norm(hidden_states)
1384
+ main_hidden_states = hidden_states
1385
+
1386
+ # add hidden states from the last decoder layer
1387
+ if output_hidden_states:
1388
+ all_hidden_states += (main_hidden_states,)
1389
+
1390
+ mtp_hidden_states = None
1391
+
1392
+ if mtp_layers:
1393
+ for decoder_layer in mtp_layers:
1394
+ input_ids, _ = roll_tensor(input_ids, shifts=-1, dims=-1)
1395
+ inputs_embeds = self.word_embeddings(input_ids)
1396
+
1397
+ if self.gradient_checkpointing and self.training:
1398
+ layer_outputs = self._gradient_checkpointing_func(
1399
+ decoder_layer.__call__,
1400
+ inputs_embeds,
1401
+ hidden_states,
1402
+ attention_mask,
1403
+ position_ids,
1404
+ past_key_values,
1405
+ output_attentions,
1406
+ output_router_logits,
1407
+ use_cache,
1408
+ position_embeddings,
1409
+ )
1410
+ else:
1411
+ layer_outputs = decoder_layer(
1412
+ inputs_embeds,
1413
+ hidden_states,
1414
+ attention_mask=attention_mask,
1415
+ position_ids=position_ids,
1416
+ past_key_value=past_key_values,
1417
+ output_attentions=output_attentions,
1418
+ output_router_logits=output_router_logits,
1419
+ use_cache=use_cache,
1420
+ position_embeddings=position_embeddings,
1421
+ )
1422
+ if mtp_hidden_states is None:
1423
+ mtp_hidden_states = []
1424
+ hidden_states = layer_outputs[0]
1425
+ mtp_hidden_states.append(hidden_states)
1426
+
1427
+ if output_hidden_states:
1428
+ all_hidden_states += (hidden_states,)
1429
+
1430
+ if use_cache:
1431
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1432
+
1433
+ if output_attentions:
1434
+ all_self_attns += (layer_outputs[1],)
1435
+
1436
+ if output_router_logits and layer_outputs[-1] is not None:
1437
+ all_router_logits += (layer_outputs[-1],)
1438
+
1439
+ next_cache = None
1440
+ if use_cache:
1441
+ next_cache = next_decoder_cache
1442
+ if not return_dict:
1443
+ return tuple(
1444
+ v
1445
+ for v in [main_hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
1446
+ if v is not None
1447
+ )
1448
+ return MoeV2_5ModelOutputWithPast(
1449
+ last_hidden_state=main_hidden_states,
1450
+ past_key_values=next_cache,
1451
+ hidden_states=all_hidden_states,
1452
+ mtp_hidden_states=mtp_hidden_states,
1453
+ attentions=all_self_attns,
1454
+ router_logits=all_router_logits,
1455
+ )
1456
+
1457
+
1458
+ class BailingMoeV2_5ForCausalLM(BailingMoeV2_5PreTrainedModel, GenerationMixin):
1459
+ _tied_weights_keys = ["lm_head.weight"]
1460
+
1461
+ def __init__(self, config: BailingMoeV2_5Config):
1462
+ super().__init__(config)
1463
+ self.model = BailingMoeV2_5Model(config)
1464
+ self.vocab_size = config.vocab_size
1465
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1466
+ self.num_nextn_predict_layers = config.num_nextn_predict_layers
1467
+ self.mtp_loss_scaling_factor = config.mtp_loss_scaling_factor
1468
+
1469
+ # Initialize weights and apply final processing
1470
+ self.post_init()
1471
+
1472
+ def get_input_embeddings(self):
1473
+ return self.model.word_embeddings
1474
+
1475
+ def set_input_embeddings(self, value):
1476
+ self.model.word_embeddings = value
1477
+
1478
+ def get_output_embeddings(self):
1479
+ return self.lm_head
1480
+
1481
+ def set_output_embeddings(self, new_embeddings):
1482
+ self.lm_head = new_embeddings
1483
+
1484
+ def set_decoder(self, decoder):
1485
+ self.model = decoder
1486
+
1487
+ def get_decoder(self):
1488
+ return self.model
1489
+
1490
+ @add_start_docstrings_to_model_forward(BAILINGMOEV2_5_INPUTS_DOCSTRING)
1491
+ @replace_return_docstrings(output_type=MoEV2_5CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1492
+ def forward(
1493
+ self,
1494
+ input_ids: torch.LongTensor = None,
1495
+ attention_mask: Optional[torch.Tensor] = None,
1496
+ position_ids: Optional[torch.LongTensor] = None,
1497
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1498
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1499
+ labels: Optional[torch.LongTensor] = None,
1500
+ use_cache: Optional[bool] = None,
1501
+ output_attentions: Optional[bool] = None,
1502
+ output_hidden_states: Optional[bool] = None,
1503
+ output_router_logits: Optional[bool] = None,
1504
+ return_dict: Optional[bool] = None,
1505
+ **kwargs,
1506
+ ) -> Union[Tuple, MoEV2_5CausalLMOutputWithPast]:
1507
+ r"""
1508
+ Args:
1509
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1510
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1511
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1512
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1513
+ Returns:
1514
+ Example:
1515
+ ```python
1516
+ >>> from transformers import AutoTokenizer
1517
+ >>> model = BailingMoeV2_5ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1518
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1519
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1520
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1521
+ >>> # Generate
1522
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1523
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1524
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1525
+ ```"""
1526
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1527
+ output_hidden_states = (
1528
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1529
+ )
1530
+ output_router_logits = (
1531
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1532
+ )
1533
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1534
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1535
+ outputs = self.model(
1536
+ input_ids=input_ids,
1537
+ attention_mask=attention_mask,
1538
+ position_ids=position_ids,
1539
+ past_key_values=past_key_values,
1540
+ inputs_embeds=inputs_embeds,
1541
+ use_cache=use_cache,
1542
+ output_attentions=output_attentions,
1543
+ output_hidden_states=output_hidden_states,
1544
+ output_router_logits=output_router_logits,
1545
+ return_dict=return_dict,
1546
+ **kwargs,
1547
+ )
1548
+
1549
+ loss = None
1550
+ all_mtp_loss = None
1551
+ aux_loss = None
1552
+ hidden_states = outputs[0]
1553
+ logits = self.lm_head(hidden_states)
1554
+ logits = logits.float()
1555
+
1556
+ if labels is not None:
1557
+ loss = self.loss_function(logits, labels, self.config.vocab_size, **kwargs)
1558
+
1559
+ all_mtp_logits = None
1560
+ if self.num_nextn_predict_layers > 0:
1561
+ mtp_hidden_states = outputs.mtp_hidden_states
1562
+ shift_labels_mtp = None
1563
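+ # Each MTP depth predicts one token further ahead, so the labels are rolled left once more per iteration.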
+ for i in range(self.num_nextn_predict_layers):
1564
+ mtp_hidden_state = mtp_hidden_states[i]
1565
+ mtp_logits = self.lm_head(mtp_hidden_state).float()
1566
+ if all_mtp_logits is None:
1567
+ all_mtp_logits = []
1568
+ all_mtp_logits.append(mtp_logits)
1569
+ if labels is not None:
1570
+ if shift_labels_mtp is None:
1571
+ shift_labels_mtp = labels.clone()
1572
+ shift_labels_mtp, _ = roll_tensor(shift_labels_mtp, shifts=-1, dims=-1, fill_value=-100)
1573
+ mtp_logits_ = mtp_logits.view(-1, self.config.vocab_size)
1574
+ mtp_loss = self.loss_function(
1575
+ mtp_logits_, shift_labels_mtp.to(mtp_logits_.device).view(-1), self.config.vocab_size, **kwargs
1576
+ )
1577
+ if loss is not None:
1578
+ loss += self.mtp_loss_scaling_factor * mtp_loss
1579
+ else:
1580
+ loss = self.mtp_loss_scaling_factor * mtp_loss
1581
+
1582
+ if all_mtp_loss is None:
1583
+ all_mtp_loss = []
1584
+ all_mtp_loss.append(mtp_loss)
1585
+
1586
+ if not return_dict:
1587
+ output = (logits,) + outputs[1:]
1588
+ if output_router_logits:
1589
+ output = (aux_loss,) + output
1590
+ return (loss,) + output if loss is not None else output
1591
+
1592
+ return MoEV2_5CausalLMOutputWithPast(
1593
+ loss=loss,
1594
+ mtp_loss=all_mtp_loss,
1595
+ aux_loss=aux_loss,
1596
+ logits=logits,
1597
+ mtp_logits=all_mtp_logits,
1598
+ past_key_values=outputs.past_key_values,
1599
+ hidden_states=outputs.hidden_states,
1600
+ attentions=outputs.attentions,
1601
+ router_logits=outputs.router_logits,
1602
+ )
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ce9d2d10f1d6da7b2439bc9655e51a00a8c5970f7dd015ae8407ca3962199f4
3
+ size 12205770
tokenizer_config.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "backend": "tokenizers",
3
+ "bos_token": "<|startoftext|>",
4
+ "clean_up_tokenization_spaces": false,
5
+ "cls_token": "[CLS]",
6
+ "eos_token": "<|role_end|>",
7
+ "fast_tokenizer": true,
8
+ "gmask_token": "[gMASK]",
9
+ "is_local": true,
10
+ "merges_file": null,
11
+ "model_max_length": 1000000000000000019884624838656,
12
+ "model_specific_special_tokens": {
13
+ "gmask_token": "[gMASK]"
14
+ },
15
+ "pad_token": "<|endoftext|>",
16
+ "tokenizer_class": "TokenizersBackend",
17
+ "tool_parser_type": "json_tools"
18
+ }