YongganFu committed
Commit 1840416 · 0 Parent(s)

Initial release of Efficient-DLM-8B

.gitattributes ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,71 @@
+ ---
+ library_name: transformers
+ license: other
+ license_name: cc-by-nc-4.0
+ pipeline_tag: text-generation
+ ---
+
+ # Efficient-DLM-8B
+
+ <p align="center">
+ 📄 <a href="https://arxiv.org/pdf/2512.14067">Tech Report</a> &nbsp;&nbsp;|&nbsp;&nbsp; 🤗 <a href="https://huggingface.co/nvidia/Efficient-DLM-4B">Efficient-DLM-4B</a> &nbsp;&nbsp;|&nbsp;&nbsp; 🤗 <a href="https://huggingface.co/nvidia/Efficient-DLM-8B">Efficient-DLM-8B</a>
+ </p>
+
+
+ ## Model Overview
+
+ Efficient-DLM-8B is a base diffusion language model (DLM) designed for parallel generation. It is obtained by converting a pretrained autoregressive (AR) language model into a diffusion LM through efficient continuous pretraining, enabling faster decoding while preserving the task accuracy of strong AR models. Efficient-DLM features block-wise attention with clean-context conditioning for KV-cache-friendly decoding, as well as position-dependent token masking to reduce the training–test mismatch in diffusion generation. See our [paper](https://arxiv.org/abs/2512.14067) for more technical details.
+
+ <div align="center">
+ <img src="https://huggingface.co/nvidia/Efficient-DLM-8B/resolve/main/images/result.png" alt="Accuracy vs. throughput Pareto curve" width="500">
+ </div>
+
+
+ ## Environment
+
+ ```bash
+ pip install "transformers>=4.52.2"
+ ```
+
+
+ ## Chat with Efficient-DLM-8B
+
+ ```python
+ from transformers import AutoModel, AutoTokenizer
+ import torch
+
+ repo_name = "nvidia/Efficient-DLM-8B"
+
+ tokenizer = AutoTokenizer.from_pretrained(repo_name, trust_remote_code=True)
+ model = AutoModel.from_pretrained(repo_name, trust_remote_code=True)
+ model = model.cuda().to(torch.bfloat16)
+
+ user_input = input("User: ").strip()
+
+ prompt_ids = tokenizer(user_input, return_tensors="pt").input_ids.to(device="cuda")
+ out_ids, nfe = model.generate(
+     prompt_ids,
+     max_new_tokens=128,
+     steps=128,
+     block_length=32,
+     shift_logits=False,
+     temperature=0.7,
+     threshold=0.9,
+ )
+
+ response = tokenizer.batch_decode(out_ids[:, prompt_ids.shape[1]:], skip_special_tokens=True)[0]
+ print(f"Model: {response}")
+ print(f"[Num Function Eval (NFE)={nfe}]")
+ ```
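The repository also ships a `chat_template.jinja` (shown further below), so instruction-style prompts can be formatted with `tokenizer.apply_chat_template` before decoding. The snippet below is a minimal sketch rather than an official recipe; it reuses the same `generate` arguments as the example above.

```python
# Hedged sketch: format a chat turn with the bundled template, then decode.
messages = [{"role": "user", "content": "Explain diffusion language models in one sentence."}]
prompt_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,   # appends "<|im_start|>assistant\n"
    return_tensors="pt",
).to("cuda")

out_ids, nfe = model.generate(
    prompt_ids,
    max_new_tokens=128,
    steps=128,
    block_length=32,
    shift_logits=False,
    temperature=0.0,   # 0 disables Gumbel noise (greedy unmasking)
    threshold=0.9,
)
print(tokenizer.batch_decode(out_ids[:, prompt_ids.shape[1]:], skip_special_tokens=True)[0])
```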
+
+
+ ## Citation
+
+ ```bibtex
+ @article{fu2025efficient,
+   title={Efficient-DLM: From autoregressive to diffusion language models, and beyond in speed},
+   author={Fu, Yonggan and Whalen, Lexington and Ye, Zhifan and Dong, Xin and Diao, Shizhe and Liu, Jingyu and Wu, Chengyue and Zhang, Hao and Xie, Enze and Han, Song and others},
+   journal={arXiv preprint arXiv:2512.14067},
+   year={2025}
+ }
+ ```
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
chat_template.jinja ADDED
@@ -0,0 +1,89 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
18
+ {%- for message in messages[::-1] %}
19
+ {%- set index = (messages|length - 1) - loop.index0 %}
20
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
21
+ {%- set ns.multi_step_tool = false %}
22
+ {%- set ns.last_query_index = index %}
23
+ {%- endif %}
24
+ {%- endfor %}
25
+ {%- for message in messages %}
26
+ {%- if message.content is string %}
27
+ {%- set content = message.content %}
28
+ {%- else %}
29
+ {%- set content = '' %}
30
+ {%- endif %}
31
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
32
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
33
+ {%- elif message.role == "assistant" %}
34
+ {%- set reasoning_content = '' %}
35
+ {%- if message.reasoning_content is string %}
36
+ {%- set reasoning_content = message.reasoning_content %}
37
+ {%- else %}
38
+ {%- if '</think>' in content %}
39
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
40
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
41
+ {%- endif %}
42
+ {%- endif %}
43
+ {%- if loop.index0 > ns.last_query_index %}
44
+ {%- if loop.last or (not loop.last and reasoning_content) %}
45
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
46
+ {%- else %}
47
+ {{- '<|im_start|>' + message.role + '\n' + content }}
48
+ {%- endif %}
49
+ {%- else %}
50
+ {{- '<|im_start|>' + message.role + '\n' + content }}
51
+ {%- endif %}
52
+ {%- if message.tool_calls %}
53
+ {%- for tool_call in message.tool_calls %}
54
+ {%- if (loop.first and content) or (not loop.first) %}
55
+ {{- '\n' }}
56
+ {%- endif %}
57
+ {%- if tool_call.function %}
58
+ {%- set tool_call = tool_call.function %}
59
+ {%- endif %}
60
+ {{- '<tool_call>\n{"name": "' }}
61
+ {{- tool_call.name }}
62
+ {{- '", "arguments": ' }}
63
+ {%- if tool_call.arguments is string %}
64
+ {{- tool_call.arguments }}
65
+ {%- else %}
66
+ {{- tool_call.arguments | tojson }}
67
+ {%- endif %}
68
+ {{- '}\n</tool_call>' }}
69
+ {%- endfor %}
70
+ {%- endif %}
71
+ {{- '<|im_end|>\n' }}
72
+ {%- elif message.role == "tool" %}
73
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
74
+ {{- '<|im_start|>user' }}
75
+ {%- endif %}
76
+ {{- '\n<tool_response>\n' }}
77
+ {{- content }}
78
+ {{- '\n</tool_response>' }}
79
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
80
+ {{- '<|im_end|>\n' }}
81
+ {%- endif %}
82
+ {%- endif %}
83
+ {%- endfor %}
84
+ {%- if add_generation_prompt %}
85
+ {{- '<|im_start|>assistant\n' }}
86
+ {%- if enable_thinking is defined and enable_thinking is false %}
87
+ {{- '<think>\n\n</think>\n\n' }}
88
+ {%- endif %}
89
+ {%- endif %}
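The template above follows the ChatML-style format used by the Qwen family, with optional `<think>`/tool-call handling. As a sanity check, here is a minimal sketch of what it renders for a single user turn (assuming the tokenizer in this repo is wired to this template):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("nvidia/Efficient-DLM-8B", trust_remote_code=True)
text = tok.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(repr(text))
# Expected: '<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n'
```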
chat_utils.py ADDED
@@ -0,0 +1,225 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn.functional as F
4
+
5
+
6
+ def add_gumbel_noise(logits, temperature):
7
+ '''
8
+ The Gumbel-max trick is a method for sampling from categorical distributions.
9
+ According to arXiv:2409.02908, for MDM, low-precision Gumbel Max improves perplexity score but reduces generation quality.
10
+ Thus, we use float64.
11
+ '''
12
+ if temperature == 0:
13
+ return logits
14
+ logits = logits.to(torch.float64)
15
+ noise = torch.rand_like(logits, dtype=torch.float64)
16
+ gumbel_noise = (- torch.log(noise)) ** temperature
17
+ return logits.exp() / gumbel_noise
18
+
19
+
20
+ def get_transfer_index(logits, temperature, remasking, mask_index, x, num_transfer_tokens, threshold=None, neg_entropy=False):
21
+ logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
22
+ x0 = torch.argmax(logits_with_noise, dim=-1)
23
+
24
+ if remasking == 'low_confidence':
25
+ # p = F.softmax(logits.to(torch.float64), dim=-1)
26
+ p = F.softmax(logits, dim=-1)
27
+ x0_p = torch.squeeze(
28
+ torch.gather(p, dim=-1, index=torch.unsqueeze(x0, -1)), -1) # b, l
29
+ elif remasking == 'top_p_margin':
30
+ # Compute probabilities
31
+ p = F.softmax(logits, dim=-1) # (B, L, V)
32
+ # Top-2 per position
33
+ top2 = torch.topk(p, k=2, dim=-1).values # (B, L, 2)
34
+ margin = top2[..., 0] - top2[..., 1] # (B, L)
35
+
36
+ # Normalize margin to [0,1] over MASKED positions per row
37
+ plus_inf = torch.full_like(margin, float('inf'))
38
+ minus_inf = torch.full_like(margin, float('-inf'))
39
+ masked_for_min = torch.where(mask_index, margin, plus_inf)
40
+ masked_for_max = torch.where(mask_index, margin, minus_inf)
41
+ row_min = masked_for_min.amin(dim=1, keepdim=True) # (B, 1)
42
+ row_max = masked_for_max.amax(dim=1, keepdim=True) # (B, 1)
43
+ denom = (row_max - row_min)
44
+
45
+ # If denom==0 (all equal), set normalized=1 on masked; 0 elsewhere by default
46
+ normalized = torch.zeros_like(margin)
47
+ nonzero = denom > 0
48
+ normalized = torch.where(
49
+ mask_index & nonzero,
50
+ (margin - row_min) / (denom + 1e-12),
51
+ normalized
52
+ )
53
+ normalized = torch.where(
54
+ mask_index & (~nonzero),
55
+ torch.ones_like(normalized),
56
+ normalized
57
+ )
58
+ x0_p = normalized # ∈ [0,1] on masked positions
59
+ elif remasking == 'random':
60
+ x0_p = torch.rand((x0.shape[0], x0.shape[1]), device=x0.device)
61
+ else:
62
+ raise NotImplementedError(remasking)
63
+
64
+ # Calculate negative entropy if requested
65
+ if neg_entropy:
66
+ # p = F.softmax(logits.to(torch.float64), dim=-1)
67
+ p = F.softmax(logits, dim=-1)
68
+ epsilon = 1e-10
69
+ log_probs = torch.log(p + epsilon)
70
+ confidence_scores = torch.sum(p * log_probs, dim=-1) # negative entropy per position
71
+ else:
72
+ confidence_scores = x0_p
73
+
74
+ x0 = torch.where(mask_index, x0, x)
75
+ confidence = torch.where(mask_index, confidence_scores, -np.inf)
76
+
77
+ transfer_index = torch.zeros_like(x0, dtype=torch.bool, device=x0.device)
78
+ if threshold is not None:
79
+ num_transfer_tokens = mask_index.sum(dim=1, keepdim=True)
80
+ # print(f'confidence: {confidence}')
81
+ for j in range(confidence.shape[0]):
82
+ _, select_index = torch.topk(confidence[j], k=num_transfer_tokens[j])
83
+ transfer_index[j, select_index] = True
84
+ if threshold is not None:
85
+ for k in range(1, num_transfer_tokens[j]):
86
+ if confidence[j, select_index[k]] < threshold:
87
+ transfer_index[j, select_index[k]] = False
88
+ return x0, transfer_index
89
+
90
+
91
+ def get_num_transfer_tokens(mask_index, steps: int):
92
+ mask_num = mask_index.sum(dim=1, keepdim=True)
93
+ base = mask_num // steps
94
+ remainder = mask_num % steps
95
+ num_transfer_tokens = torch.zeros(mask_num.size(0), steps, device=mask_index.device, dtype=torch.int64) + base
96
+ for i in range(mask_num.size(0)):
97
+ num_transfer_tokens[i, : int(remainder[i])] += 1
98
+ return num_transfer_tokens
99
+
100
+
101
+ @torch.no_grad()
102
+ def generate_with_prefix_cache_block_diff(
103
+ model,
104
+ prompt,
105
+ steps=128,
106
+ gen_length=128,
107
+ block_length=128,
108
+ temperature=0.,
109
+ remasking='low_confidence',
110
+ mask_id=126336,
111
+ threshold=None,
112
+ factor=None,
113
+ shift_logits=False,
114
+ neg_entropy=False,
115
+ ):
116
+ dream_style=shift_logits
117
+ # Initialize the accumulator
118
+ x_accum = prompt.clone()
119
+
120
+ assert gen_length % block_length == 0
121
+ num_blocks = gen_length // block_length
122
+
123
+ assert steps % num_blocks == 0
124
+ steps_per_block = steps // num_blocks
125
+
126
+ nfe = 0
127
+
128
+ # Compute KV cache for the prompt initially
129
+ output = model(prompt, use_cache=True)
130
+ past_key_values = output.past_key_values
131
+
132
+ # For dream_style: store the "next token logit" of the context
133
+ next_logits_context = None
134
+ if dream_style:
135
+ next_logits_context = output.logits[:, -1:, :] # (B, 1, V)
136
+
137
+ for num_block in range(num_blocks):
138
+ # Create a new block with mask tokens (no seeding)
139
+ mask_block = torch.ones(
140
+ (prompt.shape[0], block_length),
141
+ dtype=prompt.dtype,
142
+ device=prompt.device
143
+ ) * mask_id
144
+
145
+ # Append the block of masks
146
+ x_accum = torch.cat([x_accum, mask_block], dim=1)
147
+ current_block_start = prompt.size(1) + num_block * block_length
148
+ block_slice = slice(current_block_start, current_block_start + block_length)
149
+
150
+ # Build the initial mask for this block
151
+ mask_block_idx0 = (x_accum[:, block_slice] == mask_id) # (B, Lb)
152
+
153
+ # Precompute the transfer schedule for this block
154
+ if dream_style:
155
+ # still denoise *all* positions (0..Lb-1), since none are seeded
156
+ schedule_mask = mask_block_idx0
157
+ else:
158
+ schedule_mask = mask_block_idx0
159
+
160
+ num_transfer_tokens = get_num_transfer_tokens(schedule_mask, steps_per_block) # (B, steps)
161
+
162
+ # Denoise the current block
163
+ for i in range(steps_per_block):
164
+ mask_block_idx = (x_accum[:, block_slice] == mask_id) # (B, Lb)
165
+ if mask_block_idx.sum() == 0:
166
+ break
167
+
168
+ nfe += 1
169
+
170
+ # Forward only the current noisy block using cached context
171
+ logits_block = model(
172
+ x_accum[:, block_slice],
173
+ past_key_values=past_key_values,
174
+ use_cache=False
175
+ ).logits
176
+
177
+ if dream_style:
178
+ # Align logits so that each masked position has a predictor:
179
+ # prepend context-next logit, then use logits_block[:-1]
180
+ if block_length == 1:
181
+ logits_use = next_logits_context # (B, 1, V)
182
+ else:
183
+ logits_use = torch.cat(
184
+ [next_logits_context, logits_block[:, :-1, :]],
185
+ dim=1
186
+ ) # (B, Lb, V)
187
+
188
+ mask_use = mask_block_idx # (B, Lb)
189
+ x_use = x_accum[:, block_slice] # (B, Lb)
190
+
191
+ x0, transfer_idx = get_transfer_index(
192
+ logits_use, temperature, remasking, mask_use, x_use,
193
+ num_transfer_tokens=num_transfer_tokens[:, i],
194
+ threshold=threshold, neg_entropy=neg_entropy
195
+ )
196
+ cur = x_accum[:, block_slice].clone()
197
+ cur[transfer_idx] = x0[transfer_idx]
198
+ x_accum[:, block_slice] = cur
199
+
200
+ else:
201
+ # non-AR (same-position) case
202
+ x0, transfer_idx = get_transfer_index(
203
+ logits_block, temperature, remasking, mask_block_idx,
204
+ x_accum[:, block_slice],
205
+ num_transfer_tokens=num_transfer_tokens[:, i],
206
+ threshold=threshold, neg_entropy=neg_entropy
207
+ )
208
+ cur = x_accum[:, block_slice].clone()
209
+ cur[transfer_idx] = x0[transfer_idx]
210
+ x_accum[:, block_slice] = cur
211
+
212
+ # after block is fully denoised, update KV cache
213
+ output = model(
214
+ x_accum[:, block_slice],
215
+ past_key_values=past_key_values,
216
+ use_cache=True
217
+ )
218
+ past_key_values = output.past_key_values
219
+ nfe += 1
220
+
221
+ if dream_style and num_block < num_blocks - 1:
222
+ # refresh context-next logit for the next block
223
+ next_logits_context = output.logits[:, -1:, :] # (B, 1, V)
224
+
225
+ return x_accum, nfe
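Two details of the decoder above are easy to verify in isolation. `get_num_transfer_tokens` spreads the masked positions of a block evenly over the denoising steps (remainder tokens go to the earliest steps), and when `threshold` is set, `get_transfer_index` instead commits every token whose confidence clears the threshold (always keeping at least the top-1), which is where the NFE savings come from. A small sketch, assuming `chat_utils.py` from this repo is importable from the working directory:

```python
import torch
from chat_utils import get_num_transfer_tokens  # local import of the file above

mask_index = torch.zeros(1, 32, dtype=torch.bool)
mask_index[:, :10] = True                             # 10 masked positions in the block
print(get_num_transfer_tokens(mask_index, steps=4))   # tensor([[3, 3, 2, 2]])
```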
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "adaptive_mask_rate": false,
+   "architectures": [
+     "EfficientDLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "attn_implementation": "sdpa",
+   "auto_map": {
+     "AutoConfig": "configuration_edlm.EfficientDLMConfig",
+     "AutoModel": "modeling_edlm.EfficientDLM"
+   },
+   "block_size": 32,
+   "diff_loss_weight": 1,
+   "disable_qk_norm": false,
+   "dlm_arch": "encoder",
+   "dlm_paradigm": "bidirectional",
+   "dlm_type": "llada",
+   "enforce_mask": false,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 12288,
+   "intl_mask": false,
+   "mask_token_id": 151662,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen3",
+   "multi_sampling": null,
+   "num_ar_layers": 0,
+   "num_attention_heads": 32,
+   "num_diffusion_layers": 0,
+   "num_hidden_layers": 36,
+   "num_key_value_heads": 8,
+   "prefix_ratio": 0.8,
+   "random_length_prob": 0,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "tok_mask_half_life_ratio": null,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.2",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
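Two fields above drive the block-diffusion decoding path: `block_size` (32, matching the `block_length=32` used in the README example) and `mask_token_id` (151662, which reuses the `<|fim_pad|>` slot listed in `added_tokens.json`). A small sketch of reading them back through the standard API (assuming `trust_remote_code` as in the README):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("nvidia/Efficient-DLM-8B", trust_remote_code=True)
print(cfg.block_size)     # 32
print(cfg.mask_token_id)  # 151662
```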
configuration_edlm.py ADDED
@@ -0,0 +1,248 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Qwen3 model configuration"""
16
+
17
+ from transformers.configuration_utils import PretrainedConfig
18
+ from transformers.modeling_rope_utils import rope_config_validation
19
+ from transformers.utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class EfficientDLMConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`Qwen3Model`]. It is used to instantiate a
28
+ Qwen3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
29
+ with the defaults will yield a similar configuration to that of
30
+ Qwen3-8B [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B).
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+
36
+ Args:
37
+ vocab_size (`int`, *optional*, defaults to 151936):
38
+ Vocabulary size of the Qwen3 model. Defines the number of different tokens that can be represented by the
39
+ `input_ids` passed when calling [`Qwen3Model`]
40
+ hidden_size (`int`, *optional*, defaults to 4096):
41
+ Dimension of the hidden representations.
42
+ intermediate_size (`int`, *optional*, defaults to 22016):
43
+ Dimension of the MLP representations.
44
+ num_hidden_layers (`int`, *optional*, defaults to 32):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 32):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ num_key_value_heads (`int`, *optional*, defaults to 32):
49
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
50
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
51
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
52
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
53
+ by meanpooling all the original heads within that group. For more details checkout [this
54
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
55
+ head_dim (`int`, *optional*, defaults to 128):
56
+ The attention head dimension.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
58
+ The non-linear activation function (function or string) in the decoder.
59
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
60
+ The maximum sequence length that this model might ever be used with.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
69
+ Whether the model's input and output word embeddings should be tied.
70
+ rope_theta (`float`, *optional*, defaults to 10000.0):
71
+ The base period of the RoPE embeddings.
72
+ rope_scaling (`Dict`, *optional*):
73
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
74
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
75
+ accordingly.
76
+ Expected contents:
77
+ `rope_type` (`str`):
78
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
79
+ 'llama3'], with 'default' being the original RoPE implementation.
80
+ `factor` (`float`, *optional*):
81
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
82
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
83
+ original maximum pre-trained length.
84
+ `original_max_position_embeddings` (`int`, *optional*):
85
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
86
+ pretraining.
87
+ `attention_factor` (`float`, *optional*):
88
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
89
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
90
+ `factor` field to infer the suggested value.
91
+ `beta_fast` (`float`, *optional*):
92
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
93
+ ramp function. If unspecified, it defaults to 32.
94
+ `beta_slow` (`float`, *optional*):
95
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
96
+ ramp function. If unspecified, it defaults to 1.
97
+ `short_factor` (`List[float]`, *optional*):
98
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
99
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
100
+ size divided by the number of attention heads divided by 2
101
+ `long_factor` (`List[float]`, *optional*):
102
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (<
103
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
104
+ size divided by the number of attention heads divided by 2
105
+ `low_freq_factor` (`float`, *optional*):
106
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
107
+ `high_freq_factor` (`float`, *optional*):
108
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
109
+ attention_bias (`bool`, *optional*, defaults to `False`):
110
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
111
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
112
+ Whether to use sliding window attention.
113
+ sliding_window (`int`, *optional*, defaults to 4096):
114
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
115
+ max_window_layers (`int`, *optional*, defaults to 28):
116
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
117
+ attention_dropout (`float`, *optional*, defaults to 0.0):
118
+ The dropout ratio for the attention probabilities.
119
+
120
+ ```python
121
+ >>> from transformers import Qwen3Model, Qwen3Config
122
+
123
+ >>> # Initializing a Qwen3 style configuration
124
+ >>> configuration = Qwen3Config()
125
+
126
+ >>> # Initializing a model from the Qwen3-8B style configuration
127
+ >>> model = Qwen3Model(configuration)
128
+
129
+ >>> # Accessing the model configuration
130
+ >>> configuration = model.config
131
+ ```"""
132
+
133
+ model_type = "qwen3"
134
+ keys_to_ignore_at_inference = ["past_key_values"]
135
+
136
+ # Default tensor parallel plan for base model `Qwen3`
137
+ base_model_tp_plan = {
138
+ "layers.*.self_attn.q_proj": "colwise",
139
+ "layers.*.self_attn.k_proj": "colwise",
140
+ "layers.*.self_attn.v_proj": "colwise",
141
+ "layers.*.self_attn.o_proj": "rowwise",
142
+ "layers.*.mlp.gate_proj": "colwise",
143
+ "layers.*.mlp.up_proj": "colwise",
144
+ "layers.*.mlp.down_proj": "rowwise",
145
+ }
146
+ base_model_pp_plan = {
147
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
148
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
149
+ "norm": (["hidden_states"], ["hidden_states"]),
150
+ }
151
+
152
+ def __init__(
153
+ self,
154
+ vocab_size=151936,
155
+ hidden_size=4096,
156
+ intermediate_size=22016,
157
+ num_hidden_layers=32,
158
+ num_attention_heads=32,
159
+ num_key_value_heads=32,
160
+ head_dim=128,
161
+ hidden_act="silu",
162
+ max_position_embeddings=32768,
163
+ initializer_range=0.02,
164
+ rms_norm_eps=1e-6,
165
+ use_cache=True,
166
+ tie_word_embeddings=False,
167
+ rope_theta=10000.0,
168
+ rope_scaling=None,
169
+ attention_bias=False,
170
+ use_sliding_window=False,
171
+ sliding_window=4096,
172
+ max_window_layers=28,
173
+ attention_dropout=0.0,
174
+ attn_implementation="sdpa",
175
+ mask_token_id=-1,
176
+ dlm_type='llada',
177
+ random_length_prob=None,
178
+ num_ar_layers=4,
179
+ num_diffusion_layers=4,
180
+ diff_loss_weight=1,
181
+ enforce_mask=False,
182
+ prefix_ratio=0.8,
183
+ dlm_paradigm='bidirectional',
184
+ dlm_arch='encoder',
185
+ block_size=32,
186
+ disable_qk_norm=False,
187
+ intl_mask=False,
188
+ tok_mask_half_life_ratio=None,
189
+ adaptive_mask_rate=False,
190
+ multi_sampling=None,
191
+ **kwargs,
192
+ ):
193
+ self.vocab_size = vocab_size
194
+ self.max_position_embeddings = max_position_embeddings
195
+ self.hidden_size = hidden_size
196
+ self.intermediate_size = intermediate_size
197
+ self.num_hidden_layers = num_hidden_layers
198
+ self.num_attention_heads = num_attention_heads
199
+ self.use_sliding_window = use_sliding_window
200
+ self.sliding_window = sliding_window # we check `use_sliding_window` in the modeling code
201
+ self.max_window_layers = max_window_layers
202
+
203
+ # for backward compatibility
204
+ if num_key_value_heads is None:
205
+ num_key_value_heads = num_attention_heads
206
+
207
+ self.num_key_value_heads = num_key_value_heads
208
+ self.head_dim = head_dim
209
+ self.hidden_act = hidden_act
210
+ self.initializer_range = initializer_range
211
+ self.rms_norm_eps = rms_norm_eps
212
+ self.use_cache = use_cache
213
+ self.rope_theta = rope_theta
214
+ self.rope_scaling = rope_scaling
215
+ self.attention_bias = attention_bias
216
+ self.attention_dropout = attention_dropout
217
+ # Validate the correctness of rotary position embeddings parameters
218
+ # BC: if there is a 'type' field, move it to 'rope_type'.
219
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
220
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
221
+ rope_config_validation(self)
222
+
223
+ self.attn_implementation = attn_implementation
224
+
225
+ self.mask_token_id = mask_token_id
226
+ self.dlm_type = dlm_type
227
+ self.random_length_prob = random_length_prob
228
+ self.num_ar_layers = num_ar_layers
229
+ self.num_diffusion_layers = num_diffusion_layers
230
+ self.diff_loss_weight = diff_loss_weight
231
+ self.enforce_mask = enforce_mask
232
+ self.prefix_ratio = prefix_ratio
233
+ self.dlm_paradigm = dlm_paradigm
234
+ self.dlm_arch = dlm_arch
235
+ self.block_size = block_size
236
+ self.disable_qk_norm = disable_qk_norm
237
+ self.intl_mask = intl_mask
238
+ self.tok_mask_half_life_ratio = tok_mask_half_life_ratio
239
+ self.adaptive_mask_rate = adaptive_mask_rate
240
+ self.multi_sampling = multi_sampling
241
+
242
+ super().__init__(
243
+ tie_word_embeddings=tie_word_embeddings,
244
+ **kwargs,
245
+ )
246
+
247
+
248
+ __all__ = ["EfficientDLMConfig"]
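As the docstring above notes, `num_key_value_heads` controls grouped-query attention. With the values shipped in `config.json` (32 attention heads, 8 KV heads, `head_dim` 128), each KV head is shared by 4 query heads; a quick worked sketch of that grouping:

```python
num_attention_heads, num_key_value_heads, head_dim = 32, 8, 128  # values from config.json
n_rep = num_attention_heads // num_key_value_heads               # 4 query heads per KV head
q_dim = num_attention_heads * head_dim                           # 4096-dim query projection
kv_dim = num_key_value_heads * head_dim                          # 1024-dim key/value projection
print(n_rep, q_dim, kv_dim)                                      # 4 4096 1024
# At attention time, K/V are tiled n_rep times (see repeat_kv in modeling_qwen3.py)
# so that all 32 query heads can attend against the 8 shared KV heads.
```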
generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.52.2",
+   "use_cache": false
+ }
images/result.png ADDED
Git LFS Details
  • SHA256: 9b81fe6641cd8816c4041697b0ac2cb1c4fcdfc2166504e2bde174c67ddc7eae
  • Pointer size: 131 Bytes
  • Size of remote file: 221 kB
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e19019749a8856227f834c11df1bba9020011ea57ff1015361059799f2ae56a
+ size 16381518008
modeling_edlm.py ADDED
@@ -0,0 +1,490 @@
1
+ import copy
2
+ from typing import Callable, Optional, Tuple, Union
3
+ import random
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from torch import nn
8
+ from transformers.modeling_outputs import CausalLMOutputWithPast
9
+
10
+ from torch.nn.attention.flex_attention import flex_attention, create_block_mask
11
+
12
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
13
+
14
+ from transformers.processing_utils import Unpack
15
+
16
+ from transformers.cache_utils import Cache, DynamicCache
17
+
18
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
19
+
20
+ from transformers.generation import GenerationMixin
21
+
22
+ import math
23
+
24
+ from .modeling_qwen3 import Qwen3Model, Qwen3PreTrainedModel, Qwen3Attention, apply_rotary_pos_emb, repeat_kv
25
+ from .configuration_edlm import EfficientDLMConfig
26
+ from .chat_utils import generate_with_prefix_cache_block_diff
27
+
28
+ # @torch.compile(dynamic=True, mode="reduce-overhead")
29
+ # @torch.compile(mode="default")
30
+ # @torch.compile(fullgraph=True, mode="reduce-overhead", dynamic=False)
31
+ @torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs", dynamic=False)
32
+ def fused_flex_attention(q, k, v, block_mask=None):
33
+ return flex_attention(q, k, v, block_mask=block_mask)
34
+
35
+ # with reference to https://github.com/pytorch-labs/attention-gym/blob/main/examples/flex_attn.ipynb
36
+ class Qwen3FlexAttention(Qwen3Attention):
37
+ def __init__(self, *args, **kwargs):
38
+ super().__init__(*args, **kwargs)
39
+
40
+ self.block_size = self.block_size_orig = self.config.block_size
41
+
42
+ if self.config.dlm_paradigm == 'bidirectional':
43
+ self.bidirectional_mask = self.compute_block_mask(mode='bidirectional')
44
+ elif self.config.dlm_paradigm == 'block_diff':
45
+ self.block_diff_mask = None
46
+ else:
47
+ raise ValueError(f"Unknown attention mode: {self.config.dlm_paradigm}")
48
+
49
+ self.mode = 'bidirectional'
50
+
51
+ import torch._dynamo.config as dcfg
52
+ dcfg.cache_size_limit = 512
53
+
54
+
55
+ def set_attention_mode(self, mode, block_size=None):
56
+ self.mode = mode
57
+ self.block_size = block_size
58
+
59
+
60
+ def compute_block_mask(self, mode, q_len, block_size=None):
61
+
62
+ def bidirectional_mask(b, h, q, kv):
63
+ return (q >= kv) | (q < kv)  # always True: allow full bidirectional attention
64
+
65
+ def block_diff_mask(block_size, b, h, q_idx, kv_idx, n):
66
+ """
67
+ Constructs the specialized block diffusion attention mask for training
68
+ composed of three masks:
69
+ - **Block Diagonal Mask (M_BD)**: Self-attention within noised blocks
70
+ - **Offset Block Causal Mask (M_OBC)**: Cross-attention for conditional context
71
+ - **Block Causal Mask (M_BC)**: Attention to update x0
72
+ Args:
73
+ b, h: Batch and head indices (ignored for mask logic).
74
+ q_idx, kv_idx: Query and Key indices.
75
+ seq_len: Total sequence length.
76
+ block_size: Defines the block structure.
77
+ Returns:
78
+ A boolean attention mask.
79
+ """
80
+
81
+ # Indicate whether token belongs to xt or x0
82
+ x0_flag_q = (q_idx >= n)
83
+ x0_flag_kv = (kv_idx >= n)
84
+
85
+ # Compute block indices
86
+ block_q = torch.where(x0_flag_q == 1,
87
+ (q_idx - n) // block_size,
88
+ q_idx // block_size)
89
+ block_kv = torch.where(x0_flag_kv == 1,
90
+ (kv_idx - n) // block_size,
91
+ kv_idx // block_size)
92
+
93
+ # **1. Block Diagonal Mask (M_BD) **
94
+ block_diagonal = (block_q == block_kv) & (x0_flag_q == x0_flag_kv)
95
+
96
+ # **2. Offset Block-Causal Mask (M_OBC) **
97
+ offset_block_causal = (
98
+ (block_q > block_kv)
99
+ & (x0_flag_kv == 1)
100
+ & (x0_flag_q == 0)
101
+ )
102
+
103
+ # **3. Block-Causal Mask (M_BC) **
104
+ block_causal = (block_q >= block_kv) & (x0_flag_kv == 1) & (x0_flag_q == 1)
105
+
106
+ # **4. Combine Masks **
107
+ return block_diagonal | offset_block_causal | block_causal
108
+
109
+ if mode == 'bidirectional':
110
+ attn_mask = bidirectional_mask
111
+ elif mode == 'block_diff':
112
+ assert block_size is not None
113
+ attn_mask = lambda b, h, q, kv: block_diff_mask(block_size, b, h, q, kv, q_len//2)
114
+ else:
115
+ raise ValueError(f"Unknown attention mode: {mode}")
116
+
117
+ block_mask = create_block_mask(
118
+ attn_mask, B=None, H=None, Q_LEN=q_len, KV_LEN=q_len
119
+ )
120
+
121
+ return block_mask
122
+
123
+
124
+ def forward(
125
+ self,
126
+ hidden_states: torch.Tensor,
127
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
128
+ attention_mask: Optional[torch.Tensor],
129
+ past_key_value: Optional[Cache] = None,
130
+ cache_position: Optional[torch.LongTensor] = None,
131
+ is_training: bool = True,
132
+ **kwargs: Unpack[FlashAttentionKwargs],
133
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
134
+ bsz, q_len, _ = hidden_states.size()
135
+ input_shape = hidden_states.shape[:-1]
136
+ hidden_shape = (*input_shape, -1, self.head_dim)
137
+
138
+ query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
139
+ key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
140
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
141
+
142
+ cos, sin = position_embeddings
143
+
144
+ if self.mode == 'block_diff' and is_training:
145
+ # Split query and key states in half along sequence length dimension
146
+ q1, q2 = query_states.chunk(2, dim=2)
147
+ k1, k2 = key_states.chunk(2, dim=2)
148
+
149
+ # Apply RoPE independently to each half
150
+ q1, k1 = apply_rotary_pos_emb(q1, k1, cos, sin)
151
+ q2, k2 = apply_rotary_pos_emb(q2, k2, cos, sin)
152
+
153
+ # Recombine the halves
154
+ query_states = torch.cat([q1, q2], dim=2)
155
+ key_states = torch.cat([k1, k2], dim=2)
156
+ else:
157
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
158
+
159
+ if past_key_value is not None:
160
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
161
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
162
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
163
+
164
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
165
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
166
+
167
+ if self.mode == 'bidirectional':
168
+ if self.bidirectional_mask is None or q_len != self.bidirectional_mask.shape[-2]:
169
+ block_mask = self.compute_block_mask(mode='bidirectional', q_len=q_len)
170
+ else:
171
+ block_mask = self.bidirectional_mask
172
+ elif self.mode == 'block_diff':
173
+ if self.block_diff_mask is None or self.block_size != self.block_size_orig or q_len != self.block_diff_mask.shape[-2]:
174
+ block_mask = self.compute_block_mask(mode='block_diff', block_size=self.block_size, q_len=q_len)
175
+ else:
176
+ block_mask = self.block_diff_mask
177
+ else:
178
+ raise ValueError(f"Unknown attention mode: {self.mode}")
179
+
180
+ attn_output = fused_flex_attention(query_states, key_states, value_states, block_mask=block_mask)
181
+ attn_output = attn_output.transpose(1, 2).reshape(*input_shape, -1).contiguous()
182
+
183
+ attn_output = self.o_proj(attn_output)
184
+
185
+ return attn_output, None
186
+
187
+
188
+ def gumbel_topk(log_w: torch.Tensor, k: int) -> torch.Tensor:
189
+ """Return a Bool mask of length len(log_w) with exactly k True."""
190
+ g = -torch.log(-torch.log(torch.rand_like(log_w) + 1e-9) + 1e-9)
191
+ topk = torch.topk(log_w + g, k).indices
192
+ mask = torch.zeros_like(log_w, dtype=torch.bool)
193
+ mask[topk] = True
194
+ return mask
195
+
196
+
197
+ class EfficientDLM(Qwen3PreTrainedModel, GenerationMixin):
198
+ """
199
+ A single model with:
200
+ - a bidirectional encoder + diffusion‐LM head over A
201
+ - a causal decoder + LM head over B, conditioned on F_A
202
+ """
203
+
204
+ def __init__(self, config: EfficientDLMConfig):
205
+ super().__init__(config)
206
+
207
+ self.mask_token_id = config.mask_token_id
208
+
209
+ diffusion_config = copy.deepcopy(config)
210
+ diffusion_config.diffusion_lm = True
211
+
212
+ if config.dlm_paradigm in ['block_diff']:
213
+ diffusion_config.attn_class = Qwen3FlexAttention
214
+ elif config.dlm_paradigm in ['bidirectional', 'autoregressive']:
215
+ diffusion_config.attn_class = Qwen3Attention
216
+
217
+ if config.dlm_paradigm == 'autoregressive':
218
+ diffusion_config.diffusion_lm = False
219
+ else:
220
+ raise ValueError(f"Unsupported DLM paradigm: {config.dlm_paradigm}")
221
+
222
+ self.encoder = Qwen3Model(diffusion_config)
223
+ self.diffusion_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
224
+ self.vocab_size = config.vocab_size
225
+
226
+ self.post_init()
227
+
228
+
229
+ def forward_process(self, input_ids, eps=1e-3, block_size=None, loss_mask=None):
230
+ b, l = input_ids.shape
231
+ device = input_ids.device
232
+
233
+ t = torch.rand(b, device=device)
234
+
235
+ p_mask = (1 - eps) * t + eps # shape: (b,)
236
+ p_mask = p_mask[:, None].expand(-1, l) # shape: (b, l)
237
+
238
+ masked_indices = torch.rand((b, l), device=device) < p_mask
239
+
240
+ if loss_mask is not None:
241
+ masked_indices[loss_mask == 0] = 0
242
+
243
+ noisy_batch = torch.where(masked_indices, self.mask_token_id, input_ids)
244
+
245
+ return noisy_batch, masked_indices, p_mask
246
+
247
+
248
+ def forward_process_exp(
249
+ self,
250
+ input_ids: torch.Tensor,
251
+ eps: float = 1e-3,
252
+ block_size: int | None = None,
253
+ half_life_ratio: float = 0.25, # λ = ln 2 / (half_life_ratio·L)
254
+ loss_mask: Optional[torch.Tensor] = None,
255
+ ):
256
+ """
257
+ Two-stage corruption with optional per-block sampling.
258
+ • Stage 1: m ~ U(eps, 1) → k = round(m · len) (exact budget).
259
+ • Stage 2: sample exactly k positions with weights
260
+ w_i(m) = exp[ λ · (1−m) · i ] (late-heavy when m→0,
261
+ uniform when m→1).
262
+ If `block_size` is given, the procedure is run *independently*
263
+ inside each contiguous block of that length (last block may be shorter).
264
+ When block_size is provided, m is sampled per-block and p_mask is per-block.
265
+ Args
266
+ ----
267
+ input_ids : (B, L) LongTensor
268
+ eps : minimum corruption ratio
269
+ block_size: if not None, operate block-wise with per-block m sampling
270
+ half_life_ratio : controls steepness when m→0
271
+ """
272
+ B, L = input_ids.shape
273
+ device = input_ids.device
274
+ dtype = torch.float32
275
+
276
+ masked_indices = torch.zeros((B, L), dtype=torch.bool, device=device)
277
+ p_mask = torch.zeros((B, L), dtype=dtype, device=device)
278
+
279
+ # ---------- Stage 1 & 2: whole-sentence or block-wise -------------------
280
+ for b in range(B):
281
+ if block_size is None:
282
+ # ---------- Per-batch sampling (original behavior) ----------
283
+ m = eps + (1.0 - eps) * torch.rand(1, device=device).item() # scalar
284
+ k_tot = int(round(m * L))
285
+ k_tot = max(1, min(k_tot, L)) # clamp to [1, L]
286
+
287
+ # Fill p_mask for this batch
288
+ p_mask[b, :] = m
289
+
290
+ slope = 1.0 - m # ∈ [0,1]; 0 ⇒ uniform, 1 ⇒ late-heavy
291
+
292
+ # ------- single pool over the whole sentence -------------
293
+ lam_base = math.log(2.0) / (half_life_ratio * L) # base decay rate (λ when slope=1)
294
+
295
+ pos = torch.arange(L, device=device, dtype=dtype)
296
+ log_w = (lam_base * slope * pos).clone()
297
+
298
+ masked_indices[b] = gumbel_topk(log_w, k_tot)
299
+
300
+ else:
301
+ # ---------- Per-block sampling ----------
302
+ num_blocks = math.ceil(L / block_size)
303
+ lam_base = math.log(2.0) / (half_life_ratio * block_size) # base decay rate (λ when slope=1)
304
+
305
+ for blk in range(num_blocks):
306
+ start = blk * block_size
307
+ end = min((blk + 1) * block_size, L)
308
+ blk_len = end - start
309
+
310
+ # Sample m per block
311
+ m_blk = eps + (1.0 - eps) * torch.rand(1, device=device).item()
312
+
313
+ # Fill p_mask for this block
314
+ p_mask[b, start:end] = m_blk
315
+
316
+ # per-block budget
317
+ k_blk = int(round(m_blk * blk_len))
318
+ k_blk = max(0, min(k_blk, blk_len))
319
+ if k_blk == 0:
320
+ continue
321
+
322
+ slope = 1.0 - m_blk # ∈ [0,1]; 0 ⇒ uniform, 1 ⇒ late-heavy
323
+
324
+ pos = torch.arange(blk_len, device=device, dtype=dtype)
325
+ log_w = lam_base * slope * pos
326
+
327
+ blk_mask = gumbel_topk(log_w, k_blk)
328
+ masked_indices[b, start:end] = blk_mask
329
+
330
+ if loss_mask is not None:
331
+ masked_indices[loss_mask == 0] = 0
332
+
333
+ noisy_batch = torch.where(masked_indices, self.mask_token_id, input_ids)
334
+ return noisy_batch, masked_indices, p_mask
335
+
336
+
337
+ def forward(
338
+ self,
339
+ input_ids: torch.LongTensor,
340
+ attention_mask: Optional[torch.Tensor] = None,
341
+ position_ids: Optional[torch.LongTensor] = None,
342
+ labels: Optional[torch.LongTensor] = None,
343
+ split_len: Optional[int] = None,
344
+ past_key_values: Optional[Cache] = None,
345
+ block_size: Optional[int] = None,
346
+ block_diff_ppl: bool = False,
347
+ eps: float = 1e-3,
348
+ is_teacher: bool = False,
349
+ masked_indices: Optional[torch.Tensor] = None,
350
+ p_mask: Optional[torch.Tensor] = None,
351
+ loss_mask: Optional[torch.Tensor] = None,
352
+ skip_loss: bool = False,
353
+ **kwargs,
354
+ ) -> CausalLMOutputWithPast:
355
+
356
+ batch_size, seq_len = input_ids.shape
357
+
358
+ if self.config.dlm_paradigm == 'bidirectional':
359
+ if labels is not None and torch.rand(1) < self.config.random_length_prob:
360
+ random_length = torch.randint(2, input_ids.shape[1] + 1, (1,))
361
+ input_ids = input_ids[:, :random_length]
362
+ labels = labels[:, :random_length]
363
+
364
+ if attention_mask is not None:
365
+ attention_mask = attention_mask[:, :random_length]
366
+ if position_ids is not None:
367
+ position_ids = position_ids[:, :random_length]
368
+
369
+ elif self.config.dlm_paradigm == 'block_diff':
370
+ if labels is not None and block_size is None:
371
+ if torch.rand(1) < self.config.random_length_prob:
372
+ block_size = torch.randint(1, 8, (1,)).item() * 4 ## multiples of 4 in [4, 28]
373
+ else:
374
+ block_size = self.config.block_size
375
+
376
+ if labels is not None and self.config.dlm_paradigm != 'autoregressive':
377
+ if masked_indices is not None:
378
+ #assert p_mask is not None
379
+
380
+ if loss_mask is not None:
381
+ masked_indices[loss_mask == 0] = 0
382
+
383
+ noisy_inputs = torch.where(masked_indices, self.mask_token_id, input_ids)
384
+
385
+ else:
386
+ if self.config.tok_mask_half_life_ratio is not None:
387
+ noisy_inputs, masked_indices, p_mask = self.forward_process_exp(input_ids, eps=eps, block_size=block_size, half_life_ratio=self.config.tok_mask_half_life_ratio, loss_mask=loss_mask)
388
+ else:
389
+ noisy_inputs, masked_indices, p_mask = self.forward_process(input_ids, eps=eps, block_size=block_size, loss_mask=loss_mask)
390
+
391
+ else:
392
+ noisy_inputs = input_ids
393
+ masked_indices = None
394
+ p_mask = None
395
+
396
+ if self.config.dlm_paradigm in ['block_diff']:
397
+ for layer in self.encoder.layers:
398
+ if hasattr(layer.self_attn, 'set_attention_mode'):
399
+ layer.self_attn.set_attention_mode(self.config.dlm_paradigm, block_size=block_size)
400
+
401
+ input_ids_len = noisy_inputs.shape[1]
402
+ if labels is not None and self.config.dlm_paradigm == 'block_diff':
403
+ if position_ids is None:
404
+ position_ids = torch.arange(input_ids_len, device=noisy_inputs.device).unsqueeze(0)
405
+ noisy_inputs = torch.cat([noisy_inputs, input_ids], dim=1)
406
+
407
+ if block_diff_ppl:
408
+ if position_ids is None:
409
+ position_ids = torch.arange(input_ids_len // 2, device=noisy_inputs.device).unsqueeze(0)
410
+
411
+ enc_out = self.encoder(
412
+ past_key_values=past_key_values,
413
+ input_ids=noisy_inputs,
414
+ attention_mask=attention_mask,
415
+ position_ids=position_ids,
416
+ is_training=(labels is not None) or (block_diff_ppl),
417
+ **kwargs,
418
+ )
419
+
420
+ logits = self.diffusion_head(enc_out.last_hidden_state) # (batch, len_B, vocab)
421
+
422
+ if labels is not None and self.config.dlm_paradigm == 'block_diff':
423
+ logits = logits[:, :input_ids_len]
424
+
425
+ loss = None
426
+ if labels is not None and not skip_loss:
427
+ if self.config.dlm_paradigm == 'autoregressive':
428
+ shift_logits = logits[..., :-1, :].contiguous()
429
+ shift_labels = labels[..., 1:].contiguous()
430
+
431
+ if loss_mask is None:
432
+ loss_fct = CrossEntropyLoss()
433
+ shift_logits = shift_logits.view(-1, shift_logits.size(-1))
434
+ shift_labels = shift_labels.view(-1)
435
+ loss = loss_fct(shift_logits, shift_labels)
436
+
437
+ else:
438
+ loss_mask = loss_mask[..., 1:].contiguous()
439
+
440
+ loss_fct = CrossEntropyLoss(reduction='none')
441
+ shift_logits = shift_logits.view(-1, shift_logits.size(-1))
442
+ shift_labels = shift_labels.view(-1)
443
+ shift_labels = shift_labels.to(shift_logits.device)
444
+
445
+ token_losses = loss_fct(shift_logits, shift_labels)
446
+
447
+ loss = token_losses[loss_mask].sum() / loss_mask.sum()
448
+
449
+ else:
450
+ # Handle DREAM vs LLADA style losses
451
+ if hasattr(self.config, 'dlm_type') and self.config.dlm_type == 'dream':
452
+ logits = logits[..., :-1, :].contiguous()
453
+ labels = labels[..., 1:].contiguous()
454
+ masked_indices = masked_indices[:, 1:]
455
+ p_mask = p_mask[:, 1:]
456
+
457
+ # Calculate token-wise cross entropy loss for masked positions in B
458
+ token_loss = torch.nn.functional.cross_entropy(
459
+ logits[masked_indices],
460
+ labels[masked_indices],
461
+ reduction='none'
462
+ ) / p_mask[masked_indices]
463
+
464
+ loss = token_loss.sum() / masked_indices.sum()
465
+
466
+ return CausalLMOutputWithPast(
467
+ loss=loss if not is_teacher else logits,
468
+ logits=logits,
469
+ past_key_values=enc_out.past_key_values,
470
+ hidden_states=None,
471
+ attentions=None,
472
+ )
473
+
474
+
475
+ def generate(self, prompt_ids, max_new_tokens, steps, block_length, shift_logits, threshold, temperature=0):
476
+ out_ids, nfe = generate_with_prefix_cache_block_diff(
477
+ model=self,
478
+ prompt=prompt_ids,
479
+ gen_length=max_new_tokens,
480
+ steps=steps,
481
+ block_length=block_length,
482
+ remasking="low_confidence",
483
+ mask_id=self.mask_token_id,
484
+ threshold=threshold,
485
+ shift_logits=shift_logits,
486
+ temperature=temperature,
487
+ neg_entropy=False,
488
+ )
489
+
490
+ return out_ids, nfe
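For reference, `forward_process` above implements the standard masked-diffusion corruption: each sequence draws one corruption level t ~ U(0, 1), and every token is then masked independently with probability p = (1 − eps)·t + eps. A self-contained sketch of the same logic (the `mask_token_id` value 151662 comes from `config.json`; this is an illustration, not the training code):

```python
import torch

def toy_forward_process(input_ids, mask_token_id=151662, eps=1e-3):
    b, l = input_ids.shape
    t = torch.rand(b, device=input_ids.device)              # one noise level per sequence
    p_mask = ((1 - eps) * t + eps)[:, None].expand(-1, l)   # per-token mask probability
    masked = torch.rand(b, l, device=input_ids.device) < p_mask
    noisy = torch.where(masked, mask_token_id, input_ids)   # replace masked tokens
    return noisy, masked, p_mask

noisy, masked, p_mask = toy_forward_process(torch.randint(0, 1000, (2, 8)))
```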
modeling_qwen3.py ADDED
@@ -0,0 +1,1241 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Callable, Optional, Tuple, Union
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+ from transformers.activations import ACT2FN
22
+ from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
23
+ from transformers.generation import GenerationMixin
24
+ from transformers.integrations import use_kernel_forward_from_hub
25
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
26
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
27
+ from transformers.modeling_layers import GradientCheckpointingLayer
28
+ from transformers.modeling_outputs import (
29
+ BaseModelOutputWithPast,
30
+ BaseModelOutput,
31
+ CausalLMOutputWithPast,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutputWithPast,
34
+ TokenClassifierOutput,
35
+ )
36
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
37
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
38
+ from transformers.processing_utils import Unpack
39
+ from transformers.utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
40
+
41
+ try:
42
+ from transformers.utils import LossKwargs
43
+ except ImportError:
44
+ from transformers.utils import TransformersKwargs as LossKwargs
45
+
46
+ from .configuration_edlm import EfficientDLMConfig
47
+
48
+
49
+ if is_torch_flex_attn_available():
50
+ from torch.nn.attention.flex_attention import BlockMask
51
+
52
+ from transformers.integrations.flex_attention import make_flex_block_causal_mask
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+
58
+ @use_kernel_forward_from_hub("RMSNorm")
59
+ class Qwen3RMSNorm(nn.Module):
60
+ def __init__(self, hidden_size, eps=1e-6):
61
+ """
62
+ Qwen3RMSNorm is equivalent to T5LayerNorm
63
+ """
64
+ super().__init__()
65
+ self.weight = nn.Parameter(torch.ones(hidden_size))
66
+ self.variance_epsilon = eps
67
+
68
+ def forward(self, hidden_states):
69
+ input_dtype = hidden_states.dtype
70
+ hidden_states = hidden_states.to(torch.float32)
71
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
72
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
73
+ return self.weight * hidden_states.to(input_dtype)
74
+
75
+ def extra_repr(self):
76
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
77
+
78
+
79
+ class Qwen3MLP(nn.Module):
80
+ def __init__(self, config):
81
+ super().__init__()
82
+ self.config = config
83
+ self.hidden_size = config.hidden_size
84
+ self.intermediate_size = config.intermediate_size
85
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
86
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
87
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
88
+ self.act_fn = ACT2FN[config.hidden_act]
89
+
90
+ def forward(self, x):
91
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
92
+ return down_proj
93
+
94
+
95
+ def rotate_half(x):
96
+ """Rotates half the hidden dims of the input."""
97
+ x1 = x[..., : x.shape[-1] // 2]
98
+ x2 = x[..., x.shape[-1] // 2 :]
99
+ return torch.cat((-x2, x1), dim=-1)
100
+
101
+
102
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
103
+ """Applies Rotary Position Embedding to the query and key tensors.
104
+
105
+ Args:
106
+ q (`torch.Tensor`): The query tensor.
107
+ k (`torch.Tensor`): The key tensor.
108
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
109
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
110
+ position_ids (`torch.Tensor`, *optional*):
111
+ Deprecated and unused.
112
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
113
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
114
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
115
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
116
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
117
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
118
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
119
+ Returns:
120
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
121
+ """
122
+ cos = cos.unsqueeze(unsqueeze_dim)
123
+ sin = sin.unsqueeze(unsqueeze_dim)
124
+ q_embed = (q * cos) + (rotate_half(q) * sin)
125
+ k_embed = (k * cos) + (rotate_half(k) * sin)
126
+ return q_embed, k_embed
127
+
128
+
129
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
130
+ """
131
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
132
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
133
+ """
134
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
135
+ if n_rep == 1:
136
+ return hidden_states
137
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
138
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
139
+
140
+
141
+ def eager_attention_forward(
142
+ module: nn.Module,
143
+ query: torch.Tensor,
144
+ key: torch.Tensor,
145
+ value: torch.Tensor,
146
+ attention_mask: Optional[torch.Tensor],
147
+ scaling: float,
148
+ dropout: float = 0.0,
149
+ **kwargs,
150
+ ):
151
+ key_states = repeat_kv(key, module.num_key_value_groups)
152
+ value_states = repeat_kv(value, module.num_key_value_groups)
153
+
154
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
155
+ if attention_mask is not None:
156
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
157
+ attn_weights = attn_weights + causal_mask
158
+
159
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
160
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
161
+ attn_output = torch.matmul(attn_weights, value_states)
162
+ attn_output = attn_output.transpose(1, 2).contiguous()
163
+
164
+ return attn_output, attn_weights
165
+
166
+
167
+ class Qwen3Attention(nn.Module):
168
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
169
+
170
+ def __init__(self, config: EfficientDLMConfig, layer_idx: int):
171
+ super().__init__()
172
+ self.config = config
173
+
174
+ self.layer_idx = layer_idx
175
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
176
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
177
+ self.scaling = self.head_dim**-0.5
178
+ self.attention_dropout = config.attention_dropout
179
+
180
+ self.diffusion_lm = config.diffusion_lm
181
+
182
+ self.is_causal = None if not self.diffusion_lm else False  # diffusion LM attends bidirectionally (never causal); otherwise let the attention backend decide
183
+
184
+ self.q_proj = nn.Linear(
185
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
186
+ )
187
+ self.k_proj = nn.Linear(
188
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
189
+ )
190
+ self.v_proj = nn.Linear(
191
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
192
+ )
193
+ self.o_proj = nn.Linear(
194
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=False  # the output projection never uses a bias, unlike q/k/v which follow config.attention_bias
195
+ )
196
+
197
+ if not config.disable_qk_norm:
198
+ self.q_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim!
199
+ self.k_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # thus post q_norm does not need reshape
200
+ else:
201
+ self.q_norm = nn.Identity()
202
+ self.k_norm = nn.Identity()
203
+
204
+ self.sliding_window = config.sliding_window
205
+ if not (
206
+ self.config.use_sliding_window
207
+ and getattr(self.config, "sliding_window", None) is not None
208
+ and self.layer_idx >= self.config.max_window_layers
209
+ ):
210
+ self.sliding_window = None
211
+
212
+ def forward(
213
+ self,
214
+ hidden_states: torch.Tensor,
215
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
216
+ attention_mask: Optional[torch.Tensor],
217
+ past_key_value: Optional[Cache] = None,
218
+ cache_position: Optional[torch.LongTensor] = None,
219
+ replace_position: Optional[torch.Tensor] = None,
220
+ is_training: bool = True,
221
+ use_cache: bool = False,
222
+ **kwargs: Unpack[FlashAttentionKwargs],
223
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
224
+ input_shape = hidden_states.shape[:-1]
225
+ hidden_shape = (*input_shape, -1, self.head_dim)
226
+
227
+ query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
228
+ key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
229
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
230
+
231
+ cos, sin = position_embeddings
232
+
233
+ if replace_position is not None:
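+ # replace_position marks which absolute positions the current block occupies; RoPE is applied with
+ # the provided cos/sin (assumed to already correspond to those positions), and only the cache
+ # update below differs from the standard path.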
234
+ # Get the indices that need to be replaced
235
+ replace_indices = replace_position.nonzero(as_tuple=True)[1] # [selected_length]
236
+ block_end_index = replace_indices.max() + 1 if len(replace_indices) > 0 else query_states.shape[-2]
237
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids=None, unsqueeze_dim=1)
238
+ else:
239
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
240
+
241
+ if past_key_value is not None:
242
+ if replace_position is None:
243
+ # Normal cache behavior - append new keys/values
244
+ if use_cache:
245
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
246
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
247
+ else:  # if use_cache is False, concatenate with the cached keys/values without writing back to the cache
248
+ old_k, old_v = past_key_value[self.layer_idx]
249
+ key_states = torch.cat([old_k, key_states], dim=-2)
250
+ value_states = torch.cat([old_v, value_states], dim=-2)
251
+ else:
252
+ # Replace specific positions in the cache
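+ # During block-wise diffusion decoding, the KV entries of the block currently being denoised are
+ # re-computed at every step and written back over the same cache slots, so the earlier (clean)
+ # context stays cached while the in-flight block is refreshed in place.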
253
+ # Extract past keys and values from cache
254
+ if hasattr(past_key_value, 'key_cache') and hasattr(past_key_value, 'value_cache'):
255
+ # Get past keys and values for this layer
256
+ past_key = past_key_value.key_cache[self.layer_idx] # Shape: [B, n_kv_h, L, hs]
257
+ past_value = past_key_value.value_cache[self.layer_idx] # Shape: [B, n_kv_h, L, hs]
258
+
259
+ # Get the indices that need to be replaced in the full sequence
260
+ replace_indices = replace_position.nonzero(as_tuple=True)[1] # [selected_length]
261
+
262
+ # key_states and value_states are only for the current block (selected_length)
263
+ # We need to replace the positions indicated by replace_indices with these new values
264
+ if len(replace_indices) == key_states.shape[-2]:
265
+ # Replace selected positions in past_key with new key_states
266
+ past_key = past_key.clone() # Make a copy to avoid in-place modification
267
+ past_value = past_value.clone()
268
+ past_key[:, :, replace_indices] = key_states
269
+ past_value[:, :, replace_indices] = value_states
270
+
271
+ # Update the cache with modified keys/values
272
+ past_key_value.key_cache[self.layer_idx] = past_key
273
+ past_key_value.value_cache[self.layer_idx] = past_value
274
+
275
+ key_states = past_key
276
+ value_states = past_value
277
+ else:
278
+ print("length mismatch")
279
+ # Fallback - length mismatch, use normal cache update
280
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
281
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
282
+ else:
283
+ # Fallback to normal behavior if cache structure is unexpected
284
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
285
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
286
+
287
+ attention_interface: Callable = eager_attention_forward
288
+ if self.config._attn_implementation != "eager":
289
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
290
+ logger.warning_once(
291
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
292
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
293
+ )
294
+ else:
295
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
296
+
297
+ attn_output, attn_weights = attention_interface(
298
+ self,
299
+ query_states,
300
+ key_states,
301
+ value_states,
302
+ attention_mask if not self.diffusion_lm else None,
303
+ dropout=0.0 if not self.training else self.attention_dropout,
304
+ scaling=self.scaling,
305
+ sliding_window=self.sliding_window, # diff with Llama
306
+ is_causal=self.is_causal,
307
+ **kwargs,
308
+ )
309
+
310
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
311
+ attn_output = self.o_proj(attn_output)
312
+ return attn_output, attn_weights
313
+
314
+
315
+ class Qwen3DecoderLayer(GradientCheckpointingLayer):
316
+ def __init__(self, config: EfficientDLMConfig, layer_idx: int):
317
+ super().__init__()
318
+ self.hidden_size = config.hidden_size
319
+ if hasattr(config, 'attn_class'):
320
+ attn_class = config.attn_class
321
+ else:
322
+ attn_class = Qwen3Attention
323
+
324
+ self.layer_idx = layer_idx
325
+
326
+ self.self_attn = attn_class(config=config, layer_idx=layer_idx)
327
+ self.mlp = Qwen3MLP(config)
328
+ self.input_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
329
+ self.post_attention_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
330
+ if (
331
+ config.sliding_window and config._attn_implementation != "flash_attention_2"
332
+ ): # diff with Llama is this warning
333
+ logger.warning_once(
334
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
335
+ "unexpected results may be encountered."
336
+ )
337
+
338
+
339
+ def forward(
340
+ self,
341
+ hidden_states: torch.Tensor,
342
+ attention_mask: Optional[torch.Tensor] = None,
343
+ position_ids: Optional[torch.LongTensor] = None,
344
+ past_key_value: Optional[Cache] = None,
345
+ output_attentions: Optional[bool] = False,
346
+ use_cache: Optional[bool] = False,
347
+ cache_position: Optional[torch.LongTensor] = None,
348
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
349
+ replace_position: Optional[torch.Tensor] = None,
350
+ is_training: bool = True,
351
+ **kwargs: Unpack[FlashAttentionKwargs],
352
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
353
+ residual = hidden_states
354
+ hidden_states = self.input_layernorm(hidden_states)
355
+
356
+ # Self Attention
357
+ hidden_states, self_attn_weights = self.self_attn(
358
+ hidden_states=hidden_states,
359
+ attention_mask=attention_mask,
360
+ position_ids=position_ids,
361
+ past_key_value=past_key_value,
362
+ output_attentions=output_attentions,
363
+ use_cache=use_cache,
364
+ cache_position=cache_position,
365
+ position_embeddings=position_embeddings,
366
+ replace_position=replace_position,
367
+ is_training=is_training,
368
+ **kwargs,
369
+ )
370
+ hidden_states = residual + hidden_states
371
+
372
+ # Fully Connected
373
+ residual = hidden_states
374
+ hidden_states = self.post_attention_layernorm(hidden_states)
375
+ hidden_states = self.mlp(hidden_states)
376
+ hidden_states = residual + hidden_states
377
+
378
+ outputs = (hidden_states,)
379
+ if output_attentions:
380
+ outputs += (self_attn_weights,)
381
+
382
+ return outputs
383
+
384
+
385
+ @auto_docstring
386
+ class Qwen3PreTrainedModel(PreTrainedModel):
387
+ config_class = EfficientDLMConfig
388
+ base_model_prefix = "model"
389
+ supports_gradient_checkpointing = True
390
+ _no_split_modules = ["Qwen3DecoderLayer"]
391
+ _skip_keys_device_placement = ["past_key_values"]
392
+ _supports_flash_attn_2 = True
393
+ _supports_sdpa = True
394
+ _supports_flex_attn = True
395
+ _supports_cache_class = True
396
+ _supports_quantized_cache = True
397
+ _supports_static_cache = True
398
+ _supports_attention_backend = True
399
+
400
+ def _init_weights(self, module):
401
+ std = self.config.initializer_range
402
+ if isinstance(module, nn.Linear):
403
+ module.weight.data.normal_(mean=0.0, std=std)
404
+ if module.bias is not None:
405
+ module.bias.data.zero_()
406
+ elif isinstance(module, nn.Embedding):
407
+ module.weight.data.normal_(mean=0.0, std=std)
408
+ if module.padding_idx is not None:
409
+ module.weight.data[module.padding_idx].zero_()
410
+ elif isinstance(module, Qwen3RMSNorm):
411
+ module.weight.data.fill_(1.0)
412
+
413
+
414
+ class Qwen3RotaryEmbedding(nn.Module):
415
+ def __init__(self, config: EfficientDLMConfig, device=None):
416
+ super().__init__()
417
+ # BC: "rope_type" was originally "type"
418
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
419
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
420
+ else:
421
+ self.rope_type = "default"
422
+ self.max_seq_len_cached = config.max_position_embeddings
423
+ self.original_max_seq_len = config.max_position_embeddings
424
+
425
+ self.config = config
426
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
427
+
428
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
429
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
430
+ self.original_inv_freq = self.inv_freq
431
+
432
+ @torch.no_grad()
433
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
434
+ def forward(self, x, position_ids):
435
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
436
+ position_ids_expanded = position_ids[:, None, :].float()
437
+
438
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
439
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
440
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
441
+ emb = torch.cat((freqs, freqs), dim=-1)
442
+ cos = emb.cos() * self.attention_scaling
443
+ sin = emb.sin() * self.attention_scaling
444
+
445
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
446
+
447
+
448
+ @auto_docstring
449
+ class Qwen3Model(Qwen3PreTrainedModel):
450
+ def __init__(self, config: EfficientDLMConfig):
451
+ super().__init__(config)
452
+ self.config = config
453
+
454
+ self.padding_idx = config.pad_token_id
455
+ self.vocab_size = config.vocab_size
456
+
457
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
458
+ self.layers = nn.ModuleList(
459
+ [Qwen3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
460
+ )
461
+ self.norm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
462
+ self.rotary_emb = Qwen3RotaryEmbedding(config=config)
463
+ self.gradient_checkpointing = False
464
+
465
+ # Initialize weights and apply final processing
466
+ self.post_init()
467
+
468
+ def get_input_embeddings(self):
469
+ return self.embed_tokens
470
+
471
+ def set_input_embeddings(self, value):
472
+ self.embed_tokens = value
473
+
474
+ @can_return_tuple
475
+ @auto_docstring
476
+ def forward(
477
+ self,
478
+ input_ids: Optional[torch.LongTensor] = None,
479
+ attention_mask: Optional[torch.Tensor] = None,
480
+ position_ids: Optional[torch.LongTensor] = None,
481
+ past_key_values: Optional[Cache] = None,
482
+ inputs_embeds: Optional[torch.FloatTensor] = None,
483
+ use_cache: Optional[bool] = None,
484
+ output_attentions: Optional[bool] = None,
485
+ output_hidden_states: Optional[bool] = None,
486
+ cache_position: Optional[torch.LongTensor] = None,
487
+ replace_position: Optional[torch.Tensor] = None,
488
+ is_training: bool = True,
489
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
490
+ ) -> BaseModelOutputWithPast:
491
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
492
+ output_hidden_states = (
493
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
494
+ )
495
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
496
+
497
+ if (input_ids is None) ^ (inputs_embeds is not None):
498
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
499
+
500
+ if self.gradient_checkpointing and self.training and use_cache:
501
+ logger.warning_once(
502
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
503
+ )
504
+ use_cache = False
505
+
506
+ # Allow both Cache objects and legacy tuple format for compatibility
507
+ if past_key_values is not None and not isinstance(past_key_values, Cache):
508
+ # Convert legacy tuple format to DynamicCache if needed
509
+ if isinstance(past_key_values, (list, tuple)):
510
+ # This is likely a legacy format - convert to DynamicCache
511
+ legacy_cache = past_key_values
512
+ past_key_values = DynamicCache()
513
+ for layer_idx, layer_cache in enumerate(legacy_cache):
514
+ if isinstance(layer_cache, (list, tuple)) and len(layer_cache) == 2:
515
+ key_cache, value_cache = layer_cache
516
+ past_key_values.update(key_cache, value_cache, layer_idx)
517
+ else:
518
+ raise ValueError("The `past_key_values` should be either a `Cache` object, list/tuple of layer caches, or `None`.")
519
+
520
+ # # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
521
+ # if not isinstance(past_key_values, (type(None), Cache)):
522
+ # raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
523
+
524
+ if inputs_embeds is None:
525
+ inputs_embeds = self.embed_tokens(input_ids)
526
+
527
+ if use_cache and past_key_values is None:
528
+ past_key_values = DynamicCache()
529
+
530
+ if cache_position is None:
531
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
532
+ cache_position = torch.arange(
533
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
534
+ )
535
+
536
+ if position_ids is None:
537
+ position_ids = cache_position.unsqueeze(0)
538
+
539
+ causal_mask = self._update_causal_mask(
540
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
541
+ )
542
+
543
+ hidden_states = inputs_embeds
544
+
545
+ # create position embeddings to be shared across the decoder layers
546
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
547
+
548
+ # decoder layers
549
+ all_hidden_states = () if output_hidden_states else None
550
+ all_self_attns = () if output_attentions else None
551
+
552
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
553
+ if output_hidden_states:
554
+ all_hidden_states += (hidden_states,)
555
+
556
+ layer_outputs = decoder_layer(
557
+ hidden_states,
558
+ attention_mask=causal_mask,
559
+ position_ids=position_ids,
560
+ past_key_value=past_key_values,
561
+ output_attentions=output_attentions,
562
+ use_cache=use_cache,
563
+ cache_position=cache_position,
564
+ position_embeddings=position_embeddings,
565
+ replace_position=replace_position,
566
+ is_training=is_training,
567
+ **flash_attn_kwargs,
568
+ )
569
+
570
+ hidden_states = layer_outputs[0]
571
+
572
+ if output_attentions:
573
+ all_self_attns += (layer_outputs[1],)
574
+
575
+ hidden_states = self.norm(hidden_states)
576
+
577
+ # add hidden states from the last decoder layer
578
+ if output_hidden_states:
579
+ all_hidden_states += (hidden_states,)
580
+
581
+ past_key_values_output = None
582
+ if use_cache and past_key_values is not None:
583
+ if isinstance(past_key_values, Cache):
584
+ # Convert Cache to list of tuples format: [(key, value), (key, value), ...]
585
+ past_key_values_output = []
586
+ if hasattr(past_key_values, 'key_cache') and hasattr(past_key_values, 'value_cache'):
587
+ # DynamicCache format
588
+ for layer_idx in range(len(past_key_values.key_cache)):
589
+ past_key_values_output.append((
590
+ past_key_values.key_cache[layer_idx],
591
+ past_key_values.value_cache[layer_idx]
592
+ ))
593
+ else:
594
+ # Fallback - return as is
595
+ past_key_values_output = past_key_values
596
+ else:
597
+ past_key_values_output = past_key_values
598
+
599
+ return BaseModelOutputWithPast(
600
+ last_hidden_state=hidden_states,
601
+ past_key_values=past_key_values_output,
602
+ hidden_states=all_hidden_states,
603
+ attentions=all_self_attns,
604
+ )
605
+
606
+ def _update_causal_mask(
607
+ self,
608
+ attention_mask: Union[torch.Tensor, "BlockMask"],
609
+ input_tensor: torch.Tensor,
610
+ cache_position: torch.Tensor,
611
+ past_key_values: Cache,
612
+ output_attentions: bool = False,
613
+ ):
614
+ if self.config._attn_implementation == "flash_attention_2":
615
+ if attention_mask is not None and past_key_values is not None:
616
+ is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
617
+ if is_padding_right:
618
+ raise ValueError(
619
+ "You are attempting to perform batched generation with padding_side='right'"
620
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen3. Make sure to "
621
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
622
+ )
623
+ if attention_mask is not None and 0.0 in attention_mask:
624
+ return attention_mask
625
+ return None
626
+ if self.config._attn_implementation == "flex_attention":
627
+ if isinstance(attention_mask, torch.Tensor):
628
+ attention_mask = make_flex_block_causal_mask(attention_mask)
629
+ return attention_mask
630
+
631
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
632
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
633
+ # to infer the attention mask.
634
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
635
+ using_static_cache = isinstance(past_key_values, StaticCache)
636
+ using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
637
+
638
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
639
+ if (
640
+ self.config._attn_implementation == "sdpa"
641
+ and not (using_static_cache or using_sliding_window_cache)
642
+ and not output_attentions
643
+ ):
644
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
645
+ attention_mask,
646
+ inputs_embeds=input_tensor,
647
+ past_key_values_length=past_seen_tokens,
648
+ sliding_window=self.config.sliding_window,
649
+ is_training=self.training,
650
+ ):
651
+ return None
652
+
653
+ dtype = input_tensor.dtype
654
+ min_dtype = torch.finfo(dtype).min
655
+ sequence_length = input_tensor.shape[1]
656
+ # SlidingWindowCache or StaticCache
657
+ if using_sliding_window_cache or using_static_cache:
658
+ target_length = past_key_values.get_max_cache_shape()
659
+ # DynamicCache or no cache
660
+ else:
661
+ target_length = (
662
+ attention_mask.shape[-1]
663
+ if isinstance(attention_mask, torch.Tensor)
664
+ else past_seen_tokens + sequence_length + 1
665
+ )
666
+
667
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
668
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
669
+ attention_mask,
670
+ sequence_length=sequence_length,
671
+ target_length=target_length,
672
+ dtype=dtype,
673
+ cache_position=cache_position,
674
+ batch_size=input_tensor.shape[0],
675
+ config=self.config,
676
+ past_key_values=past_key_values,
677
+ )
678
+
679
+ if (
680
+ self.config._attn_implementation == "sdpa"
681
+ and attention_mask is not None
682
+ and attention_mask.device.type in ["cuda", "xpu", "npu"]
683
+ and not output_attentions
684
+ ):
685
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
686
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
687
+ # Details: https://github.com/pytorch/pytorch/issues/110213
688
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
689
+
690
+ return causal_mask
691
+
692
+ @staticmethod
693
+ def _prepare_4d_causal_attention_mask_with_cache_position(
694
+ attention_mask: torch.Tensor,
695
+ sequence_length: int,
696
+ target_length: int,
697
+ dtype: torch.dtype,
698
+ cache_position: torch.Tensor,
699
+ batch_size: int,
700
+ config: EfficientDLMConfig,
701
+ past_key_values: Cache,
702
+ ):
703
+ """
704
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
705
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
706
+
707
+ Args:
708
+ attention_mask (`torch.Tensor`):
709
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
710
+ sequence_length (`int`):
711
+ The sequence length being processed.
712
+ target_length (`int`):
713
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
714
+ dtype (`torch.dtype`):
715
+ The dtype to use for the 4D attention mask.
716
+ cache_position (`torch.Tensor`):
717
+ Indices depicting the position of the input sequence tokens in the sequence.
718
+ batch_size (`torch.Tensor`):
719
+ Batch size.
720
+ config (`EfficientDLMConfig`):
721
+ The model's configuration class
722
+ past_key_values (`Cache`):
723
+ The cache class that is being used currently to generate
724
+ """
725
+ if attention_mask is not None and attention_mask.dim() == 4:
726
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
727
+ causal_mask = attention_mask
728
+ else:
729
+ min_dtype = torch.finfo(dtype).min
730
+ causal_mask = torch.full(
731
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
732
+ )
733
+ diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
734
+ -1, 1
735
+ )
736
+ text_config = config.get_text_config()
737
+ if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
738
+ # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
739
+ # the check is needed to verify is current checkpoint was trained with sliding window or not
740
+ if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
741
+ sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
742
+ cache_position.reshape(-1, 1) - text_config.sliding_window
743
+ )
744
+ diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
745
+ causal_mask *= diagonal_attend_mask
746
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
747
+ if attention_mask is not None:
748
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
749
+ if attention_mask.shape[-1] > target_length:
750
+ attention_mask = attention_mask[:, :target_length]
751
+ mask_length = attention_mask.shape[-1]
752
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
753
+ causal_mask.device
754
+ )
755
+ padding_mask = padding_mask == 0
756
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
757
+ padding_mask, min_dtype
758
+ )
759
+ return causal_mask
760
+
761
+
762
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
763
+
764
+
765
+ @auto_docstring
766
+ class Qwen3ForCausalLM(Qwen3PreTrainedModel, GenerationMixin):
767
+ _tied_weights_keys = ["lm_head.weight"]
768
+ _tp_plan = {"lm_head": "colwise_rep"}
769
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
770
+
771
+ def __init__(self, config):
772
+ super().__init__(config)
773
+
774
+ config._attn_implementation = config.attn_implementation  # mirror the config's requested attention backend into the private field used by PreTrainedModel
775
+
776
+ self.model = Qwen3Model(config)
777
+ self.vocab_size = config.vocab_size
778
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
779
+
780
+ # Initialize weights and apply final processing
781
+ self.post_init()
782
+
783
+ def get_input_embeddings(self):
784
+ return self.model.embed_tokens
785
+
786
+ def set_input_embeddings(self, value):
787
+ self.model.embed_tokens = value
788
+
789
+ def get_output_embeddings(self):
790
+ return self.lm_head
791
+
792
+ def set_output_embeddings(self, new_embeddings):
793
+ self.lm_head = new_embeddings
794
+
795
+ def set_decoder(self, decoder):
796
+ self.model = decoder
797
+
798
+ def get_decoder(self):
799
+ return self.model
800
+
801
+ @can_return_tuple
802
+ @auto_docstring
803
+ def forward(
804
+ self,
805
+ input_ids: Optional[torch.LongTensor] = None,
806
+ attention_mask: Optional[torch.Tensor] = None,
807
+ position_ids: Optional[torch.LongTensor] = None,
808
+ past_key_values: Optional[Cache] = None,
809
+ inputs_embeds: Optional[torch.FloatTensor] = None,
810
+ labels: Optional[torch.LongTensor] = None,
811
+ use_cache: Optional[bool] = None,
812
+ output_attentions: Optional[bool] = None,
813
+ output_hidden_states: Optional[bool] = None,
814
+ cache_position: Optional[torch.LongTensor] = None,
815
+ logits_to_keep: Union[int, torch.Tensor] = 0,
816
+ replace_position: Optional[torch.Tensor] = None,
817
+ **kwargs: Unpack[KwargsForCausalLM],
818
+ ) -> CausalLMOutputWithPast:
819
+ r"""
820
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
821
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
822
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
823
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
824
+
825
+ Example:
826
+
827
+ ```python
828
+ >>> from transformers import AutoTokenizer, Qwen3ForCausalLM
829
+
830
+ >>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
831
+ >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
832
+
833
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
834
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
835
+
836
+ >>> # Generate
837
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
838
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
839
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
840
+ ```"""
841
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
842
+ output_hidden_states = (
843
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
844
+ )
845
+
846
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
847
+ outputs: BaseModelOutputWithPast = self.model(
848
+ input_ids=input_ids,
849
+ attention_mask=attention_mask,
850
+ position_ids=position_ids,
851
+ past_key_values=past_key_values,
852
+ inputs_embeds=inputs_embeds,
853
+ use_cache=use_cache,
854
+ output_attentions=output_attentions,
855
+ output_hidden_states=output_hidden_states,
856
+ cache_position=cache_position,
857
+ replace_position=replace_position,
858
+ **kwargs,
859
+ )
860
+
861
+ hidden_states = outputs.last_hidden_state
862
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
863
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
864
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
865
+
866
+ loss = None
867
+ if labels is not None:
868
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
869
+
870
+ return CausalLMOutputWithPast(
871
+ loss=loss,
872
+ logits=logits,
873
+ past_key_values=outputs.past_key_values,
874
+ hidden_states=outputs.hidden_states,
875
+ attentions=outputs.attentions,
876
+ )
877
+
878
+
879
+ @auto_docstring(
880
+ custom_intro="""
881
+ The Qwen3 Model transformer with a sequence classification head on top (linear layer).
882
+
883
+ [`Qwen3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
884
+ (e.g. GPT-2) do.
885
+
886
+ Since it does classification on the last token, it needs to know the position of the last token. If a
887
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
888
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
889
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
890
+ each row of the batch).
891
+ """
892
+ )
893
+ class Qwen3ForSequenceClassification(Qwen3PreTrainedModel):
894
+ def __init__(self, config):
895
+ super().__init__(config)
896
+ self.num_labels = config.num_labels
897
+ self.model = Qwen3Model(config)
898
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
899
+
900
+ # Initialize weights and apply final processing
901
+ self.post_init()
902
+
903
+ def get_input_embeddings(self):
904
+ return self.model.embed_tokens
905
+
906
+ def set_input_embeddings(self, value):
907
+ self.model.embed_tokens = value
908
+
909
+ @can_return_tuple
910
+ @auto_docstring
911
+ def forward(
912
+ self,
913
+ input_ids: Optional[torch.LongTensor] = None,
914
+ attention_mask: Optional[torch.Tensor] = None,
915
+ position_ids: Optional[torch.LongTensor] = None,
916
+ past_key_values: Optional[Cache] = None,
917
+ inputs_embeds: Optional[torch.FloatTensor] = None,
918
+ labels: Optional[torch.LongTensor] = None,
919
+ use_cache: Optional[bool] = None,
920
+ output_attentions: Optional[bool] = None,
921
+ output_hidden_states: Optional[bool] = None,
922
+ ) -> SequenceClassifierOutputWithPast:
923
+ r"""
924
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
925
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
926
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
927
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
928
+ """
929
+
930
+ transformer_outputs: BaseModelOutputWithPast = self.model(
931
+ input_ids,
932
+ attention_mask=attention_mask,
933
+ position_ids=position_ids,
934
+ past_key_values=past_key_values,
935
+ inputs_embeds=inputs_embeds,
936
+ use_cache=use_cache,
937
+ output_attentions=output_attentions,
938
+ output_hidden_states=output_hidden_states,
939
+ )
940
+ hidden_states = transformer_outputs.last_hidden_state
941
+ logits = self.score(hidden_states)
942
+
943
+ if input_ids is not None:
944
+ batch_size = input_ids.shape[0]
945
+ else:
946
+ batch_size = inputs_embeds.shape[0]
947
+
948
+ if self.config.pad_token_id is None and batch_size != 1:
949
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
950
+ if self.config.pad_token_id is None:
951
+ last_non_pad_token = -1
952
+ elif input_ids is not None:
953
+ # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
954
+ non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
955
+ token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
956
+ last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
957
+ else:
958
+ last_non_pad_token = -1
959
+ logger.warning_once(
960
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
961
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
962
+ )
963
+
964
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
965
+
966
+ loss = None
967
+ if labels is not None:
968
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
969
+
970
+ return SequenceClassifierOutputWithPast(
971
+ loss=loss,
972
+ logits=pooled_logits,
973
+ past_key_values=transformer_outputs.past_key_values,
974
+ hidden_states=transformer_outputs.hidden_states,
975
+ attentions=transformer_outputs.attentions,
976
+ )
977
+
978
+
979
+ @auto_docstring
980
+ class Qwen3ForTokenClassification(Qwen3PreTrainedModel):
981
+ def __init__(self, config):
982
+ super().__init__(config)
983
+ self.num_labels = config.num_labels
984
+ self.model = Qwen3Model(config)
985
+ if getattr(config, "classifier_dropout", None) is not None:
986
+ classifier_dropout = config.classifier_dropout
987
+ elif getattr(config, "hidden_dropout", None) is not None:
988
+ classifier_dropout = config.hidden_dropout
989
+ else:
990
+ classifier_dropout = 0.1
991
+ self.dropout = nn.Dropout(classifier_dropout)
992
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
993
+
994
+ # Initialize weights and apply final processing
995
+ self.post_init()
996
+
997
+ def get_input_embeddings(self):
998
+ return self.model.embed_tokens
999
+
1000
+ def set_input_embeddings(self, value):
1001
+ self.model.embed_tokens = value
1002
+
1003
+ @can_return_tuple
1004
+ @auto_docstring
1005
+ def forward(
1006
+ self,
1007
+ input_ids: Optional[torch.LongTensor] = None,
1008
+ attention_mask: Optional[torch.Tensor] = None,
1009
+ position_ids: Optional[torch.LongTensor] = None,
1010
+ past_key_values: Optional[Cache] = None,
1011
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1012
+ labels: Optional[torch.LongTensor] = None,
1013
+ use_cache: Optional[bool] = None,
1014
+ output_attentions: Optional[bool] = None,
1015
+ output_hidden_states: Optional[bool] = None,
1016
+ ) -> TokenClassifierOutput:
1017
+ r"""
1018
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1019
+ Labels for computing the token classification loss (one label per token). Indices should be in
1020
+ `[0, ..., config.num_labels - 1]`; the loss is a cross-entropy computed over the labeled token
1021
+ positions.
1022
+ """
1023
+
1024
+ outputs: BaseModelOutputWithPast = self.model(
1025
+ input_ids,
1026
+ attention_mask=attention_mask,
1027
+ position_ids=position_ids,
1028
+ past_key_values=past_key_values,
1029
+ inputs_embeds=inputs_embeds,
1030
+ use_cache=use_cache,
1031
+ output_attentions=output_attentions,
1032
+ output_hidden_states=output_hidden_states,
1033
+ )
1034
+ sequence_output = outputs.last_hidden_state
1035
+ sequence_output = self.dropout(sequence_output)
1036
+ logits = self.score(sequence_output)
1037
+
1038
+ loss = None
1039
+ if labels is not None:
1040
+ loss = self.loss_function(logits, labels, self.config)
1041
+
1042
+ return TokenClassifierOutput(
1043
+ loss=loss,
1044
+ logits=logits,
1045
+ hidden_states=outputs.hidden_states,
1046
+ attentions=outputs.attentions,
1047
+ )
1048
+
1049
+
1050
+ @auto_docstring
1051
+ class Qwen3ForQuestionAnswering(Qwen3PreTrainedModel):
1052
+ base_model_prefix = "transformer"
1053
+
1054
+ def __init__(self, config):
1055
+ super().__init__(config)
1056
+ self.transformer = Qwen3Model(config)
1057
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1058
+
1059
+ # Initialize weights and apply final processing
1060
+ self.post_init()
1061
+
1062
+ def get_input_embeddings(self):
1063
+ return self.transformer.embed_tokens
1064
+
1065
+ def set_input_embeddings(self, value):
1066
+ self.transformer.embed_tokens = value
1067
+
1068
+ @can_return_tuple
1069
+ @auto_docstring
1070
+ def forward(
1071
+ self,
1072
+ input_ids: Optional[torch.LongTensor] = None,
1073
+ attention_mask: Optional[torch.Tensor] = None,
1074
+ position_ids: Optional[torch.LongTensor] = None,
1075
+ past_key_values: Optional[Cache] = None,
1076
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1077
+ start_positions: Optional[torch.LongTensor] = None,
1078
+ end_positions: Optional[torch.LongTensor] = None,
1079
+ output_attentions: Optional[bool] = None,
1080
+ output_hidden_states: Optional[bool] = None,
1081
+ **kwargs,
1082
+ ) -> QuestionAnsweringModelOutput:
1083
+ outputs: BaseModelOutputWithPast = self.transformer(
1084
+ input_ids,
1085
+ attention_mask=attention_mask,
1086
+ position_ids=position_ids,
1087
+ past_key_values=past_key_values,
1088
+ inputs_embeds=inputs_embeds,
1089
+ output_attentions=output_attentions,
1090
+ output_hidden_states=output_hidden_states,
1091
+ )
1092
+
1093
+ sequence_output = outputs.last_hidden_state
1094
+
1095
+ logits = self.qa_outputs(sequence_output)
1096
+ start_logits, end_logits = logits.split(1, dim=-1)
1097
+ start_logits = start_logits.squeeze(-1).contiguous()
1098
+ end_logits = end_logits.squeeze(-1).contiguous()
1099
+
1100
+ loss = None
1101
+ if start_positions is not None and end_positions is not None:
1102
+ loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs)
1103
+
1104
+ return QuestionAnsweringModelOutput(
1105
+ loss=loss,
1106
+ start_logits=start_logits,
1107
+ end_logits=end_logits,
1108
+ hidden_states=outputs.hidden_states,
1109
+ attentions=outputs.attentions,
1110
+ )
1111
+
1112
+
1113
+ class Qwen3DiffusionLM(Qwen3ForCausalLM):
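+ """Masked-diffusion language model head on top of the Qwen3 backbone.
+
+ Training corrupts `input_ids` via `forward_process` and minimizes a cross-entropy loss over the
+ masked positions, re-weighted by each token's masking probability."""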
1114
+ def __init__(self, config):
1115
+ super().__init__(config)
1116
+ self.mask_token_id = 151662  # [MASK] token ID (reuses the <|fim_pad|> token from the Qwen tokenizer)
1117
+
1118
+ def forward_process(self, input_ids, eps=1e-3):
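+ # Sample one noise level t ~ U(0, 1) per sequence and mask each token independently with
+ # probability p = (1 - eps) * t + eps; returns the corrupted batch, the boolean mask, and the
+ # per-token masking probability used to re-weight the loss.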
1119
+ b, l = input_ids.shape
1120
+ t = torch.rand(b, device=input_ids.device)
1121
+ p_mask = (1 - eps) * t + eps
1122
+ p_mask = p_mask[:, None].repeat(1, l)
1123
+
1124
+ # Generate masked indices
1125
+ masked_indices = torch.rand((b, l), device=input_ids.device) < p_mask
1126
+
1127
+ noisy_batch = torch.where(masked_indices, self.mask_token_id, input_ids)
1128
+
1129
+ return noisy_batch, masked_indices, p_mask
1130
+
1131
+ @can_return_tuple
1132
+ @auto_docstring
1133
+ def forward(
1134
+ self,
1135
+ input_ids: Optional[torch.LongTensor] = None,
1136
+ attention_mask: Optional[torch.Tensor] = None,
1137
+ position_ids: Optional[torch.LongTensor] = None,
1138
+ past_key_values: Optional[Cache] = None,
1139
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1140
+ labels: Optional[torch.LongTensor] = None,
1141
+ use_cache: Optional[bool] = None,
1142
+ output_attentions: Optional[bool] = None,
1143
+ output_hidden_states: Optional[bool] = None,
1144
+ cache_position: Optional[torch.LongTensor] = None,
1145
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1146
+ replace_position: Optional[torch.Tensor] = None,
1147
+ eps: float = 1e-3,
1148
+ output_last_hidden_states_only: bool = False,
1149
+ **kwargs: Unpack[KwargsForCausalLM],
1150
+ ) -> CausalLMOutputWithPast:
1151
+ # Apply random length truncation with 1% probability
1152
+ # if torch.rand(1) < 0.01:
1153
+ # random_length = torch.randint(1, input_ids.shape[1] + 1, (1,))
1154
+ # input_ids = input_ids[:, :random_length]
1155
+ # if attention_mask is not None:
1156
+ # attention_mask = attention_mask[:, :random_length]
1157
+
1158
+ # Apply forward process for diffusion with shifted masking
1159
+
1160
+ if labels is not None:
1161
+ if self.config.random_length_prob is not None:
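+ # With probability config.random_length_prob, truncate the batch to a random length (at least 2
+ # tokens) so training also covers shorter sequences.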
1162
+ if torch.rand(1) < self.config.random_length_prob:
1163
+ random_length = torch.randint(2, input_ids.shape[1] + 1, (1,))
1164
+ input_ids = input_ids[:, :random_length]
1165
+ labels = labels[:, :random_length]
1166
+
1167
+ if attention_mask is not None:
1168
+ attention_mask = attention_mask[:, :random_length]
1169
+ if position_ids is not None:
1170
+ position_ids = position_ids[:, :random_length]
1171
+
1172
+ noisy_batch, masked_indices, p_mask = self.forward_process(input_ids, eps)
1173
+ else:
1174
+ noisy_batch = input_ids
1175
+ masked_indices = None
1176
+ p_mask = None
1177
+
1178
+ # Get model outputs
1179
+ outputs: BaseModelOutputWithPast = self.model(
1180
+ input_ids=noisy_batch,
1181
+ attention_mask=attention_mask,
1182
+ position_ids=position_ids,
1183
+ past_key_values=past_key_values,
1184
+ inputs_embeds=inputs_embeds,
1185
+ use_cache=use_cache,
1186
+ output_attentions=output_attentions,
1187
+ output_hidden_states=output_hidden_states,
1188
+ cache_position=cache_position,
1189
+ replace_position=replace_position,
1190
+ **kwargs,
1191
+ )
1192
+
1193
+ hidden_states = outputs.last_hidden_state
1194
+ if output_last_hidden_states_only:
1195
+ return BaseModelOutput(
1196
+ last_hidden_state=hidden_states
1197
+ )
1198
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1199
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1200
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1201
+
1202
+ loss = None
1203
+ if labels is not None:
1204
+ if self.config.dlm_type == 'dream':
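+ # Dream-style shifted prediction: the logit at position i predicts token i + 1, so logits,
+ # labels, and masks are shifted by one before computing the loss.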
1205
+ logits = logits[..., :-1, :].contiguous()
1206
+ labels = labels[..., 1:].contiguous()
1207
+ masked_indices = masked_indices[:, 1:]
1208
+ p_mask = p_mask[:, 1:]
1209
+
1210
+ # Calculate token-wise cross entropy loss for masked positions
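+ # Dividing by p_mask re-weights each masked token by the inverse of its masking probability,
+ # following the usual masked-diffusion training objective.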
1211
+ token_loss = torch.nn.functional.cross_entropy(
1212
+ logits[masked_indices],
1213
+ labels[masked_indices],
1214
+ reduction='none'
1215
+ ) / p_mask[masked_indices]
1216
+
1217
+ # Average loss over masked tokens only
1218
+ loss = token_loss.sum() / masked_indices.sum()
1219
+
1220
+ # loss = None
1221
+ # if labels is not None:
1222
+ # loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
1223
+
1224
+ return CausalLMOutputWithPast(
1225
+ loss=loss,
1226
+ logits=logits,
1227
+ past_key_values=outputs.past_key_values,
1228
+ hidden_states=outputs.hidden_states,
1229
+ attentions=outputs.attentions,
1230
+ )
1231
+
1232
+
1233
+ __all__ = [
1234
+ "Qwen3ForCausalLM",
1235
+ "Qwen3ForQuestionAnswering",
1236
+ "Qwen3Model",
1237
+ "Qwen3PreTrainedModel",
1238
+ "Qwen3ForSequenceClassification",
1239
+ "Qwen3ForTokenClassification",
1240
+ "Qwen3DiffusionLM",
1241
+ ]
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
3
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff