Commit ·
d28c316
0
Parent(s):
Initial release of Efficient-DLM-4B
Browse files- .gitattributes +38 -0
- README.md +71 -0
- added_tokens.json +28 -0
- chat_template.jinja +89 -0
- chat_utils.py +225 -0
- config.json +49 -0
- configuration_edlm.py +248 -0
- generation_config.json +5 -0
- images/result.png +3 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- modeling_edlm.py +496 -0
- modeling_qwen3.py +1235 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +239 -0
- vocab.json +0 -0
.gitattributes
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*.pdf filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
library_name: transformers
|
| 3 |
+
license: other
|
| 4 |
+
license_name: cc-by-nc-4.0
|
| 5 |
+
pipeline_tag: text-generation
|
| 6 |
+
---
|
| 7 |
+
|
| 8 |
+
# Efficient-DLM-4B
|
| 9 |
+
|
| 10 |
+
<p align="center">
|
| 11 |
+
📄 <a href="https://arxiv.org/pdf/2512.14067">Tech Report</a>   |   🤗 <a href="https://huggingface.co/nvidia/Efficient-DLM-4B">Efficient-DLM-4B</a>   |   🤗 <a href="https://huggingface.co/nvidia/Efficient-DLM-8B">Efficient-DLM-8B</a>
|
| 12 |
+
</p>
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
## Model Overview
|
| 16 |
+
|
| 17 |
+
Efficient-DLM-4B is a base diffusion language model designed for parallel generation. It converts pretrained AR LMs into diffusion LMs through efficient continuous pretraining, enabling faster decoding while preserving the task accuracy of strong AR models. Efficient-DLM features block-wise attention with clean-context conditioning for KV-cache-friendly decoding, as well as position-dependent token masking to reduce the training–test mismatch in diffusion generation. See our [paper](https://arxiv.org/abs/2512.14067) for more technical details.
|
| 18 |
+
|
| 19 |
+
<div align="center">
|
| 20 |
+
<img src="https://huggingface.co/nvidia/Efficient-DLM-4B/resolve/main/images/result.png" alt="Accuracy vs throughput Pareto curve" width="500">
|
| 21 |
+
</div>
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
## Environment
|
| 25 |
+
|
| 26 |
+
```bash
|
| 27 |
+
transformers>=4.52.2
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
## Chat with Efficient-DLM-4B
|
| 32 |
+
|
| 33 |
+
```python
|
| 34 |
+
from transformers import AutoModel, AutoTokenizer
|
| 35 |
+
import torch
|
| 36 |
+
|
| 37 |
+
repo_name = "nvidia/Efficient-DLM-4B"
|
| 38 |
+
|
| 39 |
+
tokenizer = AutoTokenizer.from_pretrained(repo_name, trust_remote_code=True)
|
| 40 |
+
model = AutoModel.from_pretrained(repo_name, trust_remote_code=True)
|
| 41 |
+
model = model.cuda().to(torch.bfloat16)
|
| 42 |
+
|
| 43 |
+
user_input = input("User: ").strip()
|
| 44 |
+
|
| 45 |
+
prompt_ids = tokenizer(user_input, return_tensors="pt").input_ids.to(device="cuda")
|
| 46 |
+
out_ids, nfe = model.generate(
|
| 47 |
+
prompt_ids,
|
| 48 |
+
max_new_tokens=128,
|
| 49 |
+
steps=128,
|
| 50 |
+
block_length=32,
|
| 51 |
+
shift_logits=False,
|
| 52 |
+
temperature=0.7,
|
| 53 |
+
threshold=0.9,
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
response = tokenizer.batch_decode(out_ids[:, prompt_ids.shape[1]:], skip_special_tokens=True)[0]
|
| 57 |
+
print(f"Model: {response}")
|
| 58 |
+
print(f"[Num Function Eval (NFE)={nfe}]")
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
## Citation
|
| 63 |
+
|
| 64 |
+
```
|
| 65 |
+
@article{fu2025efficient,
|
| 66 |
+
title={Efficient-dlm: From autoregressive to diffusion language models, and beyond in speed},
|
| 67 |
+
author={Fu, Yonggan and Whalen, Lexington and Ye, Zhifan and Dong, Xin and Diao, Shizhe and Liu, Jingyu and Wu, Chengyue and Zhang, Hao and Xie, Enze and Han, Song and others},
|
| 68 |
+
journal={arXiv preprint arXiv:2512.14067},
|
| 69 |
+
year={2025}
|
| 70 |
+
}
|
| 71 |
+
```
|
added_tokens.json
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"</think>": 151668,
|
| 3 |
+
"</tool_call>": 151658,
|
| 4 |
+
"</tool_response>": 151666,
|
| 5 |
+
"<think>": 151667,
|
| 6 |
+
"<tool_call>": 151657,
|
| 7 |
+
"<tool_response>": 151665,
|
| 8 |
+
"<|box_end|>": 151649,
|
| 9 |
+
"<|box_start|>": 151648,
|
| 10 |
+
"<|endoftext|>": 151643,
|
| 11 |
+
"<|file_sep|>": 151664,
|
| 12 |
+
"<|fim_middle|>": 151660,
|
| 13 |
+
"<|fim_pad|>": 151662,
|
| 14 |
+
"<|fim_prefix|>": 151659,
|
| 15 |
+
"<|fim_suffix|>": 151661,
|
| 16 |
+
"<|im_end|>": 151645,
|
| 17 |
+
"<|im_start|>": 151644,
|
| 18 |
+
"<|image_pad|>": 151655,
|
| 19 |
+
"<|object_ref_end|>": 151647,
|
| 20 |
+
"<|object_ref_start|>": 151646,
|
| 21 |
+
"<|quad_end|>": 151651,
|
| 22 |
+
"<|quad_start|>": 151650,
|
| 23 |
+
"<|repo_name|>": 151663,
|
| 24 |
+
"<|video_pad|>": 151656,
|
| 25 |
+
"<|vision_end|>": 151653,
|
| 26 |
+
"<|vision_pad|>": 151654,
|
| 27 |
+
"<|vision_start|>": 151652
|
| 28 |
+
}
|
chat_template.jinja
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{%- if tools %}
|
| 2 |
+
{{- '<|im_start|>system\n' }}
|
| 3 |
+
{%- if messages[0].role == 'system' %}
|
| 4 |
+
{{- messages[0].content + '\n\n' }}
|
| 5 |
+
{%- endif %}
|
| 6 |
+
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
|
| 7 |
+
{%- for tool in tools %}
|
| 8 |
+
{{- "\n" }}
|
| 9 |
+
{{- tool | tojson }}
|
| 10 |
+
{%- endfor %}
|
| 11 |
+
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
|
| 12 |
+
{%- else %}
|
| 13 |
+
{%- if messages[0].role == 'system' %}
|
| 14 |
+
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
|
| 15 |
+
{%- endif %}
|
| 16 |
+
{%- endif %}
|
| 17 |
+
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
|
| 18 |
+
{%- for message in messages[::-1] %}
|
| 19 |
+
{%- set index = (messages|length - 1) - loop.index0 %}
|
| 20 |
+
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
|
| 21 |
+
{%- set ns.multi_step_tool = false %}
|
| 22 |
+
{%- set ns.last_query_index = index %}
|
| 23 |
+
{%- endif %}
|
| 24 |
+
{%- endfor %}
|
| 25 |
+
{%- for message in messages %}
|
| 26 |
+
{%- if message.content is string %}
|
| 27 |
+
{%- set content = message.content %}
|
| 28 |
+
{%- else %}
|
| 29 |
+
{%- set content = '' %}
|
| 30 |
+
{%- endif %}
|
| 31 |
+
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
|
| 32 |
+
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
|
| 33 |
+
{%- elif message.role == "assistant" %}
|
| 34 |
+
{%- set reasoning_content = '' %}
|
| 35 |
+
{%- if message.reasoning_content is string %}
|
| 36 |
+
{%- set reasoning_content = message.reasoning_content %}
|
| 37 |
+
{%- else %}
|
| 38 |
+
{%- if '</think>' in content %}
|
| 39 |
+
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
|
| 40 |
+
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
|
| 41 |
+
{%- endif %}
|
| 42 |
+
{%- endif %}
|
| 43 |
+
{%- if loop.index0 > ns.last_query_index %}
|
| 44 |
+
{%- if loop.last or (not loop.last and reasoning_content) %}
|
| 45 |
+
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
|
| 46 |
+
{%- else %}
|
| 47 |
+
{{- '<|im_start|>' + message.role + '\n' + content }}
|
| 48 |
+
{%- endif %}
|
| 49 |
+
{%- else %}
|
| 50 |
+
{{- '<|im_start|>' + message.role + '\n' + content }}
|
| 51 |
+
{%- endif %}
|
| 52 |
+
{%- if message.tool_calls %}
|
| 53 |
+
{%- for tool_call in message.tool_calls %}
|
| 54 |
+
{%- if (loop.first and content) or (not loop.first) %}
|
| 55 |
+
{{- '\n' }}
|
| 56 |
+
{%- endif %}
|
| 57 |
+
{%- if tool_call.function %}
|
| 58 |
+
{%- set tool_call = tool_call.function %}
|
| 59 |
+
{%- endif %}
|
| 60 |
+
{{- '<tool_call>\n{"name": "' }}
|
| 61 |
+
{{- tool_call.name }}
|
| 62 |
+
{{- '", "arguments": ' }}
|
| 63 |
+
{%- if tool_call.arguments is string %}
|
| 64 |
+
{{- tool_call.arguments }}
|
| 65 |
+
{%- else %}
|
| 66 |
+
{{- tool_call.arguments | tojson }}
|
| 67 |
+
{%- endif %}
|
| 68 |
+
{{- '}\n</tool_call>' }}
|
| 69 |
+
{%- endfor %}
|
| 70 |
+
{%- endif %}
|
| 71 |
+
{{- '<|im_end|>\n' }}
|
| 72 |
+
{%- elif message.role == "tool" %}
|
| 73 |
+
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
|
| 74 |
+
{{- '<|im_start|>user' }}
|
| 75 |
+
{%- endif %}
|
| 76 |
+
{{- '\n<tool_response>\n' }}
|
| 77 |
+
{{- content }}
|
| 78 |
+
{{- '\n</tool_response>' }}
|
| 79 |
+
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
|
| 80 |
+
{{- '<|im_end|>\n' }}
|
| 81 |
+
{%- endif %}
|
| 82 |
+
{%- endif %}
|
| 83 |
+
{%- endfor %}
|
| 84 |
+
{%- if add_generation_prompt %}
|
| 85 |
+
{{- '<|im_start|>assistant\n' }}
|
| 86 |
+
{%- if enable_thinking is defined and enable_thinking is false %}
|
| 87 |
+
{{- '<think>\n\n</think>\n\n' }}
|
| 88 |
+
{%- endif %}
|
| 89 |
+
{%- endif %}
|
chat_utils.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def add_gumbel_noise(logits, temperature):
|
| 7 |
+
'''
|
| 8 |
+
The Gumbel max is a method for sampling categorical distributions.
|
| 9 |
+
According to arXiv:2409.02908, for MDM, low-precision Gumbel Max improves perplexity score but reduces generation quality.
|
| 10 |
+
Thus, we use float64.
|
| 11 |
+
'''
|
| 12 |
+
if temperature == 0:
|
| 13 |
+
return logits
|
| 14 |
+
logits = logits.to(torch.float64)
|
| 15 |
+
noise = torch.rand_like(logits, dtype=torch.float64)
|
| 16 |
+
gumbel_noise = (- torch.log(noise)) ** temperature
|
| 17 |
+
return logits.exp() / gumbel_noise
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def get_transfer_index(logits, temperature, remasking, mask_index, x, num_transfer_tokens, threshold=None, neg_entropy=False):
|
| 21 |
+
logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
|
| 22 |
+
x0 = torch.argmax(logits_with_noise, dim=-1)
|
| 23 |
+
|
| 24 |
+
if remasking == 'low_confidence':
|
| 25 |
+
# p = F.softmax(logits.to(torch.float64), dim=-1)
|
| 26 |
+
p = F.softmax(logits, dim=-1)
|
| 27 |
+
x0_p = torch.squeeze(
|
| 28 |
+
torch.gather(p, dim=-1, index=torch.unsqueeze(x0, -1)), -1) # b, l
|
| 29 |
+
elif remasking == 'top_p_margin':
|
| 30 |
+
# Compute probabilities
|
| 31 |
+
p = F.softmax(logits, dim=-1) # (B, L, V)
|
| 32 |
+
# Top-2 per position
|
| 33 |
+
top2 = torch.topk(p, k=2, dim=-1).values # (B, L, 2)
|
| 34 |
+
margin = top2[..., 0] - top2[..., 1] # (B, L)
|
| 35 |
+
|
| 36 |
+
# Normalize margin to [0,1] over MASKED positions per row
|
| 37 |
+
plus_inf = torch.full_like(margin, float('inf'))
|
| 38 |
+
minus_inf = torch.full_like(margin, float('-inf'))
|
| 39 |
+
masked_for_min = torch.where(mask_index, margin, plus_inf)
|
| 40 |
+
masked_for_max = torch.where(mask_index, margin, minus_inf)
|
| 41 |
+
row_min = masked_for_min.amin(dim=1, keepdim=True) # (B, 1)
|
| 42 |
+
row_max = masked_for_max.amax(dim=1, keepdim=True) # (B, 1)
|
| 43 |
+
denom = (row_max - row_min)
|
| 44 |
+
|
| 45 |
+
# If denom==0 (all equal), set normalized=1 on masked; 0 elsewhere by default
|
| 46 |
+
normalized = torch.zeros_like(margin)
|
| 47 |
+
nonzero = denom > 0
|
| 48 |
+
normalized = torch.where(
|
| 49 |
+
mask_index & nonzero,
|
| 50 |
+
(margin - row_min) / (denom + 1e-12),
|
| 51 |
+
normalized
|
| 52 |
+
)
|
| 53 |
+
normalized = torch.where(
|
| 54 |
+
mask_index & (~nonzero),
|
| 55 |
+
torch.ones_like(normalized),
|
| 56 |
+
normalized
|
| 57 |
+
)
|
| 58 |
+
x0_p = normalized # ∈ [0,1] on masked positions
|
| 59 |
+
elif remasking == 'random':
|
| 60 |
+
x0_p = torch.rand((x0.shape[0], x0.shape[1]), device=x0.device)
|
| 61 |
+
else:
|
| 62 |
+
raise NotImplementedError(remasking)
|
| 63 |
+
|
| 64 |
+
# Calculate negative entropy if requested
|
| 65 |
+
if neg_entropy:
|
| 66 |
+
# p = F.softmax(logits.to(torch.float64), dim=-1)
|
| 67 |
+
p = F.softmax(logits, dim=-1)
|
| 68 |
+
epsilon = 1e-10
|
| 69 |
+
log_probs = torch.log(p + epsilon)
|
| 70 |
+
confidence_scores = torch.sum(p * log_probs, dim=-1) # negative entropy per position
|
| 71 |
+
else:
|
| 72 |
+
confidence_scores = x0_p
|
| 73 |
+
|
| 74 |
+
x0 = torch.where(mask_index, x0, x)
|
| 75 |
+
confidence = torch.where(mask_index, confidence_scores, -np.inf)
|
| 76 |
+
|
| 77 |
+
transfer_index = torch.zeros_like(x0, dtype=torch.bool, device=x0.device)
|
| 78 |
+
if threshold is not None:
|
| 79 |
+
num_transfer_tokens = mask_index.sum(dim=1, keepdim=True)
|
| 80 |
+
# print(f'confidence: {confidence}')
|
| 81 |
+
for j in range(confidence.shape[0]):
|
| 82 |
+
_, select_index = torch.topk(confidence[j], k=num_transfer_tokens[j])
|
| 83 |
+
transfer_index[j, select_index] = True
|
| 84 |
+
if threshold is not None:
|
| 85 |
+
for k in range(1, num_transfer_tokens[j]):
|
| 86 |
+
if confidence[j, select_index[k]] < threshold:
|
| 87 |
+
transfer_index[j, select_index[k]] = False
|
| 88 |
+
return x0, transfer_index
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def get_num_transfer_tokens(mask_index, steps: int):
|
| 92 |
+
mask_num = mask_index.sum(dim=1, keepdim=True)
|
| 93 |
+
base = mask_num // steps
|
| 94 |
+
remainder = mask_num % steps
|
| 95 |
+
num_transfer_tokens = torch.zeros(mask_num.size(0), steps, device=mask_index.device, dtype=torch.int64) + base
|
| 96 |
+
for i in range(mask_num.size(0)):
|
| 97 |
+
num_transfer_tokens[i, : int(remainder[i])] += 1
|
| 98 |
+
return num_transfer_tokens
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
@torch.no_grad()
|
| 102 |
+
def generate_with_prefix_cache_block_diff(
|
| 103 |
+
model,
|
| 104 |
+
prompt,
|
| 105 |
+
steps=128,
|
| 106 |
+
gen_length=128,
|
| 107 |
+
block_length=128,
|
| 108 |
+
temperature=0.,
|
| 109 |
+
remasking='low_confidence',
|
| 110 |
+
mask_id=126336,
|
| 111 |
+
threshold=None,
|
| 112 |
+
factor=None,
|
| 113 |
+
shift_logits=False,
|
| 114 |
+
neg_entropy=False,
|
| 115 |
+
):
|
| 116 |
+
dream_style=shift_logits
|
| 117 |
+
# Initialize the accumulator
|
| 118 |
+
x_accum = prompt.clone()
|
| 119 |
+
|
| 120 |
+
assert gen_length % block_length == 0
|
| 121 |
+
num_blocks = gen_length // block_length
|
| 122 |
+
|
| 123 |
+
assert steps % num_blocks == 0
|
| 124 |
+
steps_per_block = steps // num_blocks
|
| 125 |
+
|
| 126 |
+
nfe = 0
|
| 127 |
+
|
| 128 |
+
# Compute KV cache for the prompt initially
|
| 129 |
+
output = model(prompt, use_cache=True)
|
| 130 |
+
past_key_values = output.past_key_values
|
| 131 |
+
|
| 132 |
+
# For dream_style: store the "next token logit" of the context
|
| 133 |
+
next_logits_context = None
|
| 134 |
+
if dream_style:
|
| 135 |
+
next_logits_context = output.logits[:, -1:, :] # (B, 1, V)
|
| 136 |
+
|
| 137 |
+
for num_block in range(num_blocks):
|
| 138 |
+
# Create a new block with mask tokens (no seeding)
|
| 139 |
+
mask_block = torch.ones(
|
| 140 |
+
(prompt.shape[0], block_length),
|
| 141 |
+
dtype=prompt.dtype,
|
| 142 |
+
device=prompt.device
|
| 143 |
+
) * mask_id
|
| 144 |
+
|
| 145 |
+
# Append the block of masks
|
| 146 |
+
x_accum = torch.cat([x_accum, mask_block], dim=1)
|
| 147 |
+
current_block_start = prompt.size(1) + num_block * block_length
|
| 148 |
+
block_slice = slice(current_block_start, current_block_start + block_length)
|
| 149 |
+
|
| 150 |
+
# Build the initial mask for this block
|
| 151 |
+
mask_block_idx0 = (x_accum[:, block_slice] == mask_id) # (B, Lb)
|
| 152 |
+
|
| 153 |
+
# Precompute the transfer schedule for this block
|
| 154 |
+
if dream_style:
|
| 155 |
+
# still denoise *all* positions (0..Lb-1), since none are seeded
|
| 156 |
+
schedule_mask = mask_block_idx0
|
| 157 |
+
else:
|
| 158 |
+
schedule_mask = mask_block_idx0
|
| 159 |
+
|
| 160 |
+
num_transfer_tokens = get_num_transfer_tokens(schedule_mask, steps_per_block) # (B, steps)
|
| 161 |
+
|
| 162 |
+
# Denoise the current block
|
| 163 |
+
for i in range(steps_per_block):
|
| 164 |
+
mask_block_idx = (x_accum[:, block_slice] == mask_id) # (B, Lb)
|
| 165 |
+
if mask_block_idx.sum() == 0:
|
| 166 |
+
break
|
| 167 |
+
|
| 168 |
+
nfe += 1
|
| 169 |
+
|
| 170 |
+
# Forward only the current noisy block using cached context
|
| 171 |
+
logits_block = model(
|
| 172 |
+
x_accum[:, block_slice],
|
| 173 |
+
past_key_values=past_key_values,
|
| 174 |
+
use_cache=False
|
| 175 |
+
).logits
|
| 176 |
+
|
| 177 |
+
if dream_style:
|
| 178 |
+
# Align logits so that each masked position has a predictor:
|
| 179 |
+
# prepend context-next logit, then use logits_block[:-1]
|
| 180 |
+
if block_length == 1:
|
| 181 |
+
logits_use = next_logits_context # (B, 1, V)
|
| 182 |
+
else:
|
| 183 |
+
logits_use = torch.cat(
|
| 184 |
+
[next_logits_context, logits_block[:, :-1, :]],
|
| 185 |
+
dim=1
|
| 186 |
+
) # (B, Lb, V)
|
| 187 |
+
|
| 188 |
+
mask_use = mask_block_idx # (B, Lb)
|
| 189 |
+
x_use = x_accum[:, block_slice] # (B, Lb)
|
| 190 |
+
|
| 191 |
+
x0, transfer_idx = get_transfer_index(
|
| 192 |
+
logits_use, temperature, remasking, mask_use, x_use,
|
| 193 |
+
num_transfer_tokens=num_transfer_tokens[:, i],
|
| 194 |
+
threshold=threshold, neg_entropy=neg_entropy
|
| 195 |
+
)
|
| 196 |
+
cur = x_accum[:, block_slice].clone()
|
| 197 |
+
cur[transfer_idx] = x0[transfer_idx]
|
| 198 |
+
x_accum[:, block_slice] = cur
|
| 199 |
+
|
| 200 |
+
else:
|
| 201 |
+
# non-AR (same-position) case
|
| 202 |
+
x0, transfer_idx = get_transfer_index(
|
| 203 |
+
logits_block, temperature, remasking, mask_block_idx,
|
| 204 |
+
x_accum[:, block_slice],
|
| 205 |
+
num_transfer_tokens=num_transfer_tokens[:, i],
|
| 206 |
+
threshold=threshold, neg_entropy=neg_entropy
|
| 207 |
+
)
|
| 208 |
+
cur = x_accum[:, block_slice].clone()
|
| 209 |
+
cur[transfer_idx] = x0[transfer_idx]
|
| 210 |
+
x_accum[:, block_slice] = cur
|
| 211 |
+
|
| 212 |
+
# after block is fully denoised, update KV cache
|
| 213 |
+
output = model(
|
| 214 |
+
x_accum[:, block_slice],
|
| 215 |
+
past_key_values=past_key_values,
|
| 216 |
+
use_cache=True
|
| 217 |
+
)
|
| 218 |
+
past_key_values = output.past_key_values
|
| 219 |
+
nfe += 1
|
| 220 |
+
|
| 221 |
+
if dream_style and num_block < num_blocks - 1:
|
| 222 |
+
# refresh context-next logit for the next block
|
| 223 |
+
next_logits_context = output.logits[:, -1:, :] # (B, 1, V)
|
| 224 |
+
|
| 225 |
+
return x_accum, nfe
|
config.json
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"adaptive_mask_rate": false,
|
| 3 |
+
"architectures": [
|
| 4 |
+
"EfficientDLM"
|
| 5 |
+
],
|
| 6 |
+
"attention_bias": false,
|
| 7 |
+
"attention_dropout": 0.0,
|
| 8 |
+
"attn_implementation": "sdpa",
|
| 9 |
+
"auto_map": {
|
| 10 |
+
"AutoConfig": "configuration_edlm.EfficientDLMConfig",
|
| 11 |
+
"AutoModel": "modeling_edlm.EfficientDLM"
|
| 12 |
+
},
|
| 13 |
+
"block_size": 32,
|
| 14 |
+
"diff_loss_weight": 1,
|
| 15 |
+
"disable_qk_norm": false,
|
| 16 |
+
"dlm_arch": "encoder",
|
| 17 |
+
"dlm_paradigm": "bidirectional",
|
| 18 |
+
"dlm_type": "llada",
|
| 19 |
+
"enforce_mask": false,
|
| 20 |
+
"head_dim": 128,
|
| 21 |
+
"hidden_act": "silu",
|
| 22 |
+
"hidden_size": 2560,
|
| 23 |
+
"initializer_range": 0.02,
|
| 24 |
+
"intermediate_size": 9728,
|
| 25 |
+
"intl_mask": false,
|
| 26 |
+
"mask_token_id": 151662,
|
| 27 |
+
"max_position_embeddings": 32768,
|
| 28 |
+
"max_window_layers": 28,
|
| 29 |
+
"model_type": "qwen3",
|
| 30 |
+
"multi_sampling": null,
|
| 31 |
+
"num_ar_layers": 0,
|
| 32 |
+
"num_attention_heads": 32,
|
| 33 |
+
"num_diffusion_layers": 0,
|
| 34 |
+
"num_hidden_layers": 36,
|
| 35 |
+
"num_key_value_heads": 8,
|
| 36 |
+
"prefix_ratio": 0.8,
|
| 37 |
+
"random_length_prob": 0,
|
| 38 |
+
"rms_norm_eps": 1e-06,
|
| 39 |
+
"rope_scaling": null,
|
| 40 |
+
"rope_theta": 1000000,
|
| 41 |
+
"sliding_window": null,
|
| 42 |
+
"tie_word_embeddings": false,
|
| 43 |
+
"tok_mask_half_life_ratio": null,
|
| 44 |
+
"torch_dtype": "bfloat16",
|
| 45 |
+
"transformers_version": "4.52.2",
|
| 46 |
+
"use_cache": false,
|
| 47 |
+
"use_sliding_window": false,
|
| 48 |
+
"vocab_size": 151936
|
| 49 |
+
}
|
configuration_edlm.py
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Qwen3 model configuration"""
|
| 16 |
+
|
| 17 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 18 |
+
from transformers.modeling_rope_utils import rope_config_validation
|
| 19 |
+
from transformers.utils import logging
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = logging.get_logger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class EfficientDLMConfig(PretrainedConfig):
|
| 26 |
+
r"""
|
| 27 |
+
This is the configuration class to store the configuration of a [`Qwen3Model`]. It is used to instantiate a
|
| 28 |
+
Qwen3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
|
| 29 |
+
with the defaults will yield a similar configuration to that of
|
| 30 |
+
Qwen3-8B [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B).
|
| 31 |
+
|
| 32 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 33 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
vocab_size (`int`, *optional*, defaults to 151936):
|
| 38 |
+
Vocabulary size of the Qwen3 model. Defines the number of different tokens that can be represented by the
|
| 39 |
+
`inputs_ids` passed when calling [`Qwen3Model`]
|
| 40 |
+
hidden_size (`int`, *optional*, defaults to 4096):
|
| 41 |
+
Dimension of the hidden representations.
|
| 42 |
+
intermediate_size (`int`, *optional*, defaults to 22016):
|
| 43 |
+
Dimension of the MLP representations.
|
| 44 |
+
num_hidden_layers (`int`, *optional*, defaults to 32):
|
| 45 |
+
Number of hidden layers in the Transformer encoder.
|
| 46 |
+
num_attention_heads (`int`, *optional*, defaults to 32):
|
| 47 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 48 |
+
num_key_value_heads (`int`, *optional*, defaults to 32):
|
| 49 |
+
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
| 50 |
+
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
| 51 |
+
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
|
| 52 |
+
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
| 53 |
+
by meanpooling all the original heads within that group. For more details checkout [this
|
| 54 |
+
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
|
| 55 |
+
head_dim (`int`, *optional*, defaults to 128):
|
| 56 |
+
The attention head dimension.
|
| 57 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
| 58 |
+
The non-linear activation function (function or string) in the decoder.
|
| 59 |
+
max_position_embeddings (`int`, *optional*, defaults to 32768):
|
| 60 |
+
The maximum sequence length that this model might ever be used with.
|
| 61 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
| 62 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| 63 |
+
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
|
| 64 |
+
The epsilon used by the rms normalization layers.
|
| 65 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
| 66 |
+
Whether or not the model should return the last key/values attentions (not used by all models). Only
|
| 67 |
+
relevant if `config.is_decoder=True`.
|
| 68 |
+
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
| 69 |
+
Whether the model's input and output word embeddings should be tied.
|
| 70 |
+
rope_theta (`float`, *optional*, defaults to 10000.0):
|
| 71 |
+
The base period of the RoPE embeddings.
|
| 72 |
+
rope_scaling (`Dict`, *optional*):
|
| 73 |
+
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
|
| 74 |
+
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
|
| 75 |
+
accordingly.
|
| 76 |
+
Expected contents:
|
| 77 |
+
`rope_type` (`str`):
|
| 78 |
+
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
|
| 79 |
+
'llama3'], with 'default' being the original RoPE implementation.
|
| 80 |
+
`factor` (`float`, *optional*):
|
| 81 |
+
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
|
| 82 |
+
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
|
| 83 |
+
original maximum pre-trained length.
|
| 84 |
+
`original_max_position_embeddings` (`int`, *optional*):
|
| 85 |
+
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
|
| 86 |
+
pretraining.
|
| 87 |
+
`attention_factor` (`float`, *optional*):
|
| 88 |
+
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
|
| 89 |
+
computation. If unspecified, it defaults to value recommended by the implementation, using the
|
| 90 |
+
`factor` field to infer the suggested value.
|
| 91 |
+
`beta_fast` (`float`, *optional*):
|
| 92 |
+
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
|
| 93 |
+
ramp function. If unspecified, it defaults to 32.
|
| 94 |
+
`beta_slow` (`float`, *optional*):
|
| 95 |
+
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
|
| 96 |
+
ramp function. If unspecified, it defaults to 1.
|
| 97 |
+
`short_factor` (`List[float]`, *optional*):
|
| 98 |
+
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
|
| 99 |
+
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
|
| 100 |
+
size divided by the number of attention heads divided by 2
|
| 101 |
+
`long_factor` (`List[float]`, *optional*):
|
| 102 |
+
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
|
| 103 |
+
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
|
| 104 |
+
size divided by the number of attention heads divided by 2
|
| 105 |
+
`low_freq_factor` (`float`, *optional*):
|
| 106 |
+
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
|
| 107 |
+
`high_freq_factor` (`float`, *optional*):
|
| 108 |
+
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
|
| 109 |
+
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
|
| 110 |
+
Whether to use a bias in the query, key, value and output projection layers during self-attention.
|
| 111 |
+
use_sliding_window (`bool`, *optional*, defaults to `False`):
|
| 112 |
+
Whether to use sliding window attention.
|
| 113 |
+
sliding_window (`int`, *optional*, defaults to 4096):
|
| 114 |
+
Sliding window attention (SWA) window size. If not specified, will default to `4096`.
|
| 115 |
+
max_window_layers (`int`, *optional*, defaults to 28):
|
| 116 |
+
The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
|
| 117 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
| 118 |
+
The dropout ratio for the attention probabilities.
|
| 119 |
+
|
| 120 |
+
```python
|
| 121 |
+
>>> from transformers import Qwen3Model, Qwen3Config
|
| 122 |
+
|
| 123 |
+
>>> # Initializing a Qwen3 style configuration
|
| 124 |
+
>>> configuration = Qwen3Config()
|
| 125 |
+
|
| 126 |
+
>>> # Initializing a model from the Qwen3-8B style configuration
|
| 127 |
+
>>> model = Qwen3Model(configuration)
|
| 128 |
+
|
| 129 |
+
>>> # Accessing the model configuration
|
| 130 |
+
>>> configuration = model.config
|
| 131 |
+
```"""
|
| 132 |
+
|
| 133 |
+
model_type = "qwen3"
|
| 134 |
+
keys_to_ignore_at_inference = ["past_key_values"]
|
| 135 |
+
|
| 136 |
+
# Default tensor parallel plan for base model `Qwen3`
|
| 137 |
+
base_model_tp_plan = {
|
| 138 |
+
"layers.*.self_attn.q_proj": "colwise",
|
| 139 |
+
"layers.*.self_attn.k_proj": "colwise",
|
| 140 |
+
"layers.*.self_attn.v_proj": "colwise",
|
| 141 |
+
"layers.*.self_attn.o_proj": "rowwise",
|
| 142 |
+
"layers.*.mlp.gate_proj": "colwise",
|
| 143 |
+
"layers.*.mlp.up_proj": "colwise",
|
| 144 |
+
"layers.*.mlp.down_proj": "rowwise",
|
| 145 |
+
}
|
| 146 |
+
base_model_pp_plan = {
|
| 147 |
+
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
|
| 148 |
+
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
|
| 149 |
+
"norm": (["hidden_states"], ["hidden_states"]),
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
def __init__(
|
| 153 |
+
self,
|
| 154 |
+
vocab_size=151936,
|
| 155 |
+
hidden_size=4096,
|
| 156 |
+
intermediate_size=22016,
|
| 157 |
+
num_hidden_layers=32,
|
| 158 |
+
num_attention_heads=32,
|
| 159 |
+
num_key_value_heads=32,
|
| 160 |
+
head_dim=128,
|
| 161 |
+
hidden_act="silu",
|
| 162 |
+
max_position_embeddings=32768,
|
| 163 |
+
initializer_range=0.02,
|
| 164 |
+
rms_norm_eps=1e-6,
|
| 165 |
+
use_cache=True,
|
| 166 |
+
tie_word_embeddings=False,
|
| 167 |
+
rope_theta=10000.0,
|
| 168 |
+
rope_scaling=None,
|
| 169 |
+
attention_bias=False,
|
| 170 |
+
use_sliding_window=False,
|
| 171 |
+
sliding_window=4096,
|
| 172 |
+
max_window_layers=28,
|
| 173 |
+
attention_dropout=0.0,
|
| 174 |
+
attn_implementation="sdpa",
|
| 175 |
+
mask_token_id=-1,
|
| 176 |
+
dlm_type='llada',
|
| 177 |
+
random_length_prob=None,
|
| 178 |
+
num_ar_layers=4,
|
| 179 |
+
num_diffusion_layers=4,
|
| 180 |
+
diff_loss_weight=1,
|
| 181 |
+
enforce_mask=False,
|
| 182 |
+
prefix_ratio=0.8,
|
| 183 |
+
dlm_paradigm='bidirectional',
|
| 184 |
+
dlm_arch='encoder',
|
| 185 |
+
block_size=32,
|
| 186 |
+
disable_qk_norm=False,
|
| 187 |
+
intl_mask=False,
|
| 188 |
+
tok_mask_half_life_ratio=None,
|
| 189 |
+
adaptive_mask_rate=False,
|
| 190 |
+
multi_sampling=None,
|
| 191 |
+
**kwargs,
|
| 192 |
+
):
|
| 193 |
+
self.vocab_size = vocab_size
|
| 194 |
+
self.max_position_embeddings = max_position_embeddings
|
| 195 |
+
self.hidden_size = hidden_size
|
| 196 |
+
self.intermediate_size = intermediate_size
|
| 197 |
+
self.num_hidden_layers = num_hidden_layers
|
| 198 |
+
self.num_attention_heads = num_attention_heads
|
| 199 |
+
self.use_sliding_window = use_sliding_window
|
| 200 |
+
self.sliding_window = sliding_window # we check `use_sliding_window` in the modeling code
|
| 201 |
+
self.max_window_layers = max_window_layers
|
| 202 |
+
|
| 203 |
+
# for backward compatibility
|
| 204 |
+
if num_key_value_heads is None:
|
| 205 |
+
num_key_value_heads = num_attention_heads
|
| 206 |
+
|
| 207 |
+
self.num_key_value_heads = num_key_value_heads
|
| 208 |
+
self.head_dim = head_dim
|
| 209 |
+
self.hidden_act = hidden_act
|
| 210 |
+
self.initializer_range = initializer_range
|
| 211 |
+
self.rms_norm_eps = rms_norm_eps
|
| 212 |
+
self.use_cache = use_cache
|
| 213 |
+
self.rope_theta = rope_theta
|
| 214 |
+
self.rope_scaling = rope_scaling
|
| 215 |
+
self.attention_bias = attention_bias
|
| 216 |
+
self.attention_dropout = attention_dropout
|
| 217 |
+
# Validate the correctness of rotary position embeddings parameters
|
| 218 |
+
# BC: if there is a 'type' field, move it to 'rope_type'.
|
| 219 |
+
if self.rope_scaling is not None and "type" in self.rope_scaling:
|
| 220 |
+
self.rope_scaling["rope_type"] = self.rope_scaling["type"]
|
| 221 |
+
rope_config_validation(self)
|
| 222 |
+
|
| 223 |
+
self.attn_implementation = attn_implementation
|
| 224 |
+
|
| 225 |
+
self.mask_token_id = mask_token_id
|
| 226 |
+
self.dlm_type = dlm_type
|
| 227 |
+
self.random_length_prob = random_length_prob
|
| 228 |
+
self.num_ar_layers = num_ar_layers
|
| 229 |
+
self.num_diffusion_layers = num_diffusion_layers
|
| 230 |
+
self.diff_loss_weight = diff_loss_weight
|
| 231 |
+
self.enforce_mask = enforce_mask
|
| 232 |
+
self.prefix_ratio = prefix_ratio
|
| 233 |
+
self.dlm_paradigm = dlm_paradigm
|
| 234 |
+
self.dlm_arch = dlm_arch
|
| 235 |
+
self.block_size = block_size
|
| 236 |
+
self.disable_qk_norm = disable_qk_norm
|
| 237 |
+
self.intl_mask = intl_mask
|
| 238 |
+
self.tok_mask_half_life_ratio = tok_mask_half_life_ratio
|
| 239 |
+
self.adaptive_mask_rate = adaptive_mask_rate
|
| 240 |
+
self.multi_sampling = multi_sampling
|
| 241 |
+
|
| 242 |
+
super().__init__(
|
| 243 |
+
tie_word_embeddings=tie_word_embeddings,
|
| 244 |
+
**kwargs,
|
| 245 |
+
)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
__all__ = ["EfficientDLMConfig"]
|
generation_config.json
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"_from_model_config": true,
|
| 3 |
+
"transformers_version": "4.52.2",
|
| 4 |
+
"use_cache": false
|
| 5 |
+
}
|
images/result.png
ADDED
|
Git LFS Details
|
merges.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:77c83e52654fd49874f6b09cf78b739da454c8320dd54c6970c3e5f88dc5e7c4
|
| 3 |
+
size 8822895320
|
modeling_edlm.py
ADDED
|
@@ -0,0 +1,496 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
from typing import Callable, Optional, Tuple, Union
|
| 3 |
+
import random
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
from torch import nn
|
| 8 |
+
from transformers.modeling_outputs import CausalLMOutputWithPast
|
| 9 |
+
|
| 10 |
+
from torch.nn.attention.flex_attention import flex_attention, create_block_mask
|
| 11 |
+
|
| 12 |
+
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
|
| 13 |
+
|
| 14 |
+
from transformers.processing_utils import Unpack
|
| 15 |
+
|
| 16 |
+
from transformers.cache_utils import Cache, DynamicCache
|
| 17 |
+
|
| 18 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| 19 |
+
|
| 20 |
+
from transformers.generation import GenerationMixin
|
| 21 |
+
|
| 22 |
+
import math
|
| 23 |
+
|
| 24 |
+
from .modeling_qwen3 import Qwen3Model, Qwen3PreTrainedModel, Qwen3Attention, apply_rotary_pos_emb, repeat_kv
|
| 25 |
+
from .configuration_edlm import EfficientDLMConfig
|
| 26 |
+
from .chat_utils import generate_with_prefix_cache_block_diff
|
| 27 |
+
|
| 28 |
+
# @torch.compile(dynamic=True, mode="reduce-overhead")
|
| 29 |
+
# @torch.compile(mode="default")
|
| 30 |
+
# @torch.compile(fullgraph=True, mode="reduce-overhead", dynamic=False)
|
| 31 |
+
@torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs", dynamic=False)
|
| 32 |
+
def fused_flex_attention(q, k, v, block_mask=None):
|
| 33 |
+
return flex_attention(q, k, v, block_mask=block_mask)
|
| 34 |
+
|
| 35 |
+
# with reference to https://github.com/pytorch-labs/attention-gym/blob/main/examples/flex_attn.ipynb
|
| 36 |
+
class Qwen3FlexAttention(Qwen3Attention):
|
| 37 |
+
def __init__(self, *args, **kwargs):
|
| 38 |
+
super().__init__(*args, **kwargs)
|
| 39 |
+
|
| 40 |
+
self.block_size = self.block_size_orig = self.config.block_size
|
| 41 |
+
|
| 42 |
+
self.bidirectional_mask = None
|
| 43 |
+
if self.config.dlm_paradigm == 'bidirectional':
|
| 44 |
+
self.bidirectional_mask = self.compute_block_mask(mode='bidirectional')
|
| 45 |
+
elif self.config.dlm_paradigm == 'block_diff':
|
| 46 |
+
self.block_diff_mask = None
|
| 47 |
+
else:
|
| 48 |
+
raise ValueError(f"Unknown attention mode: {self.config.dlm_paradigm}")
|
| 49 |
+
|
| 50 |
+
self.mode = 'bidirectional'
|
| 51 |
+
|
| 52 |
+
import torch._dynamo.config as dcfg
|
| 53 |
+
dcfg.cache_size_limit = 512
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def set_attention_mode(self, mode, block_size=None):
|
| 57 |
+
self.mode = mode
|
| 58 |
+
self.block_size = block_size
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def compute_block_mask(self, mode, q_len, block_size=None):
|
| 62 |
+
|
| 63 |
+
def bidirectional_mask(b, h, q, kv):
|
| 64 |
+
return (q >= kv) | (q < kv)
|
| 65 |
+
|
| 66 |
+
def block_diff_mask(block_size, b, h, q_idx, kv_idx, n):
|
| 67 |
+
"""
|
| 68 |
+
Constructs the specialized block diffusion attention mask for training
|
| 69 |
+
composed of three masks:
|
| 70 |
+
- **Block Diagonal Mask (M_BD)**: Self-attention within noised blocks
|
| 71 |
+
- **Offset Block Causal Mask (M_OBC)**: Cross-attention for conditional context
|
| 72 |
+
- **Block Causal Mask (M_BC)**: Attention to update x0
|
| 73 |
+
Args:
|
| 74 |
+
b, h: Batch and head indices (ignored for mask logic).
|
| 75 |
+
q_idx, kv_idx: Query and Key indices.
|
| 76 |
+
seq_len: Total sequence length.
|
| 77 |
+
block_size: Defines the block structure.
|
| 78 |
+
Returns:
|
| 79 |
+
A boolean attention mask.
|
| 80 |
+
"""
|
| 81 |
+
|
| 82 |
+
# Indicate whether token belongs to xt or x0
|
| 83 |
+
x0_flag_q = (q_idx >= n)
|
| 84 |
+
x0_flag_kv = (kv_idx >= n)
|
| 85 |
+
|
| 86 |
+
# Compute block indices
|
| 87 |
+
block_q = torch.where(x0_flag_q == 1,
|
| 88 |
+
(q_idx - n) // block_size,
|
| 89 |
+
q_idx // block_size)
|
| 90 |
+
block_kv = torch.where(x0_flag_kv == 1,
|
| 91 |
+
(kv_idx - n) // block_size,
|
| 92 |
+
kv_idx // block_size)
|
| 93 |
+
|
| 94 |
+
# **1. Block Diagonal Mask (M_BD) **
|
| 95 |
+
block_diagonal = (block_q == block_kv) & (x0_flag_q == x0_flag_kv)
|
| 96 |
+
|
| 97 |
+
# **2. Offset Block-Causal Mask (M_OBC) **
|
| 98 |
+
offset_block_causal = (
|
| 99 |
+
(block_q > block_kv)
|
| 100 |
+
& (x0_flag_kv == 1)
|
| 101 |
+
& (x0_flag_q == 0)
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
# **3. Block-Causal Mask (M_BC) **
|
| 105 |
+
block_causal = (block_q >= block_kv) & (x0_flag_kv == 1) & (x0_flag_q == 1)
|
| 106 |
+
|
| 107 |
+
# **4. Combine Masks **
|
| 108 |
+
return block_diagonal | offset_block_causal | block_causal
|
| 109 |
+
|
| 110 |
+
if mode == 'bidirectional':
|
| 111 |
+
attn_mask = bidirectional_mask
|
| 112 |
+
elif mode == 'block_diff':
|
| 113 |
+
assert block_size is not None
|
| 114 |
+
attn_mask = lambda b, h, q, kv: block_diff_mask(block_size, b, h, q, kv, q_len//2)
|
| 115 |
+
else:
|
| 116 |
+
raise ValueError(f"Unknown attention mode: {mode}")
|
| 117 |
+
|
| 118 |
+
block_mask = create_block_mask(
|
| 119 |
+
attn_mask, B=None, H=None, Q_LEN=q_len, KV_LEN=q_len
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
return block_mask
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def forward(
|
| 126 |
+
self,
|
| 127 |
+
hidden_states: torch.Tensor,
|
| 128 |
+
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
|
| 129 |
+
attention_mask: Optional[torch.Tensor],
|
| 130 |
+
past_key_value: Optional[Cache] = None,
|
| 131 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 132 |
+
is_training: bool = True,
|
| 133 |
+
**kwargs: Unpack[FlashAttentionKwargs],
|
| 134 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 135 |
+
bsz, q_len, _ = hidden_states.size()
|
| 136 |
+
input_shape = hidden_states.shape[:-1]
|
| 137 |
+
hidden_shape = (*input_shape, -1, self.head_dim)
|
| 138 |
+
|
| 139 |
+
query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
|
| 140 |
+
key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
|
| 141 |
+
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
|
| 142 |
+
|
| 143 |
+
cos, sin = position_embeddings
|
| 144 |
+
|
| 145 |
+
if self.mode == 'block_diff' and is_training:
|
| 146 |
+
# Split query and key states in half along sequence length dimension
|
| 147 |
+
q1, q2 = query_states.chunk(2, dim=2)
|
| 148 |
+
k1, k2 = key_states.chunk(2, dim=2)
|
| 149 |
+
|
| 150 |
+
# Apply RoPE independently to each half
|
| 151 |
+
q1, k1 = apply_rotary_pos_emb(q1, k1, cos, sin)
|
| 152 |
+
q2, k2 = apply_rotary_pos_emb(q2, k2, cos, sin)
|
| 153 |
+
|
| 154 |
+
# Recombine the halves
|
| 155 |
+
query_states = torch.cat([q1, q2], dim=2)
|
| 156 |
+
key_states = torch.cat([k1, k2], dim=2)
|
| 157 |
+
else:
|
| 158 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
| 159 |
+
|
| 160 |
+
if past_key_value is not None:
|
| 161 |
+
# sin and cos are specific to RoPE models; cache_position needed for the static cache
|
| 162 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
| 163 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 164 |
+
|
| 165 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| 166 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| 167 |
+
|
| 168 |
+
if self.mode == 'bidirectional':
|
| 169 |
+
if self.bidirectional_mask is None or q_len != self.bidirectional_mask.shape[-2]:
|
| 170 |
+
block_mask = self.compute_block_mask(mode='bidirectional', q_len=q_len)
|
| 171 |
+
else:
|
| 172 |
+
block_mask = self.bidirectional_mask
|
| 173 |
+
elif self.mode == 'block_diff':
|
| 174 |
+
if self.block_diff_mask is None or self.block_size != self.block_size_orig or q_len != self.block_diff_mask.shape[-2]:
|
| 175 |
+
block_mask = self.compute_block_mask(mode='block_diff', block_size=self.block_size, q_len=q_len)
|
| 176 |
+
else:
|
| 177 |
+
block_mask = self.block_diff_mask
|
| 178 |
+
else:
|
| 179 |
+
raise ValueError(f"Unknown attention mode: {self.mode}")
|
| 180 |
+
|
| 181 |
+
attn_output = fused_flex_attention(query_states, key_states, value_states, block_mask=block_mask)
|
| 182 |
+
attn_output = attn_output.transpose(1, 2).reshape(*input_shape, -1).contiguous()
|
| 183 |
+
|
| 184 |
+
attn_output = self.o_proj(attn_output)
|
| 185 |
+
|
| 186 |
+
return attn_output, None
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def gumbel_topk(log_w: torch.Tensor, k: int) -> torch.Tensor:
|
| 190 |
+
"""Return a Bool mask of length len(log_w) with exactly k True."""
|
| 191 |
+
g = -torch.log(-torch.log(torch.rand_like(log_w) + 1e-9) + 1e-9)
|
| 192 |
+
topk = torch.topk(log_w + g, k).indices
|
| 193 |
+
mask = torch.zeros_like(log_w, dtype=torch.bool)
|
| 194 |
+
mask[topk] = True
|
| 195 |
+
return mask
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
class EfficientDLM(Qwen3PreTrainedModel, GenerationMixin):
|
| 199 |
+
"""
|
| 200 |
+
A single model with:
|
| 201 |
+
- a bidirectional encoder + diffusion‐LM head over A
|
| 202 |
+
- a causal decoder + LM head over B, conditioned on F_A
|
| 203 |
+
"""
|
| 204 |
+
|
| 205 |
+
def __init__(self, config: EfficientDLMConfig):
|
| 206 |
+
super().__init__(config)
|
| 207 |
+
|
| 208 |
+
self.mask_token_id = config.mask_token_id
|
| 209 |
+
|
| 210 |
+
diffusion_config = copy.deepcopy(config)
|
| 211 |
+
diffusion_config.diffusion_lm = True
|
| 212 |
+
|
| 213 |
+
if config.dlm_paradigm in ['block_diff']:
|
| 214 |
+
diffusion_config.attn_class = Qwen3FlexAttention
|
| 215 |
+
elif config.dlm_paradigm in ['bidirectional', 'autoregressive']:
|
| 216 |
+
diffusion_config.attn_class = Qwen3Attention
|
| 217 |
+
|
| 218 |
+
if config.dlm_paradigm == 'autoregressive':
|
| 219 |
+
diffusion_config.diffusion_lm = False
|
| 220 |
+
else:
|
| 221 |
+
raise ValueError(f"Unsupported DLM paradigm: {config.dlm_paradigm}")
|
| 222 |
+
|
| 223 |
+
self.encoder = Qwen3Model(diffusion_config)
|
| 224 |
+
self.diffusion_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 225 |
+
self.vocab_size = config.vocab_size
|
| 226 |
+
|
| 227 |
+
self.post_init()
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def forward_process(self, input_ids, eps=1e-3, block_size=None, loss_mask=None):
|
| 231 |
+
b, l = input_ids.shape
|
| 232 |
+
device = input_ids.device
|
| 233 |
+
|
| 234 |
+
t = torch.rand(b, device=device)
|
| 235 |
+
|
| 236 |
+
p_mask = (1 - eps) * t + eps # shape: (b,)
|
| 237 |
+
p_mask = p_mask[:, None].expand(-1, l) # shape: (b, l)
|
| 238 |
+
|
| 239 |
+
masked_indices = torch.rand((b, l), device=device) < p_mask
|
| 240 |
+
|
| 241 |
+
if loss_mask is not None:
|
| 242 |
+
masked_indices[loss_mask == 0] = 0
|
| 243 |
+
|
| 244 |
+
noisy_batch = torch.where(masked_indices, self.mask_token_id, input_ids)
|
| 245 |
+
|
| 246 |
+
return noisy_batch, masked_indices, p_mask
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def forward_process_exp(
|
| 250 |
+
self,
|
| 251 |
+
input_ids: torch.Tensor,
|
| 252 |
+
eps: float = 1e-3,
|
| 253 |
+
block_size: int | None = None,
|
| 254 |
+
half_life_ratio: float = 0.25, # λ = ln 2 / (half_life_ratio·L)
|
| 255 |
+
loss_mask: Optional[torch.Tensor] = None,
|
| 256 |
+
):
|
| 257 |
+
"""
|
| 258 |
+
Two-stage corruption with optional per-block sampling.
|
| 259 |
+
• Stage 1: m ~ U(eps, 1) → k = round(m · len) (exact budget).
|
| 260 |
+
• Stage 2: sample exactly k positions with weights
|
| 261 |
+
w_i(m) = exp[ λ · (1−m) · i ] (late-heavy when m→0,
|
| 262 |
+
uniform when m→1).
|
| 263 |
+
If `block_size` is given, the procedure is run *independently*
|
| 264 |
+
inside each contiguous block of that length (last block may be shorter).
|
| 265 |
+
When block_size is provided, m is sampled per-block and p_mask is per-block.
|
| 266 |
+
Args
|
| 267 |
+
----
|
| 268 |
+
input_ids : (B, L) LongTensor
|
| 269 |
+
eps : minimum corruption ratio
|
| 270 |
+
block_size: if not None, operate block-wise with per-block m sampling
|
| 271 |
+
half_life_ratio : controls steepness when m→0
|
| 272 |
+
"""
|
| 273 |
+
B, L = input_ids.shape
|
| 274 |
+
device = input_ids.device
|
| 275 |
+
dtype = torch.float32
|
| 276 |
+
|
| 277 |
+
masked_indices = torch.zeros((B, L), dtype=torch.bool, device=device)
|
| 278 |
+
p_mask = torch.zeros((B, L), dtype=dtype, device=device)
|
| 279 |
+
|
| 280 |
+
# ---------- Stage 1 & 2: whole-sentence or block-wise -------------------
|
| 281 |
+
for b in range(B):
|
| 282 |
+
if block_size is None:
|
| 283 |
+
# ---------- Per-batch sampling (original behavior) ----------
|
| 284 |
+
m = eps + (1.0 - eps) * torch.rand(1, device=device).item() # scalar
|
| 285 |
+
k_tot = int(round(m * L))
|
| 286 |
+
k_tot = max(1, min(k_tot, L)) # clamp to [1, L]
|
| 287 |
+
|
| 288 |
+
# Fill p_mask for this batch
|
| 289 |
+
p_mask[b, :] = m
|
| 290 |
+
|
| 291 |
+
slope = 1.0 - m # ∈ [0,1]; 0 ⇒ uniform, 1 ⇒ late-heavy
|
| 292 |
+
|
| 293 |
+
# ------- single pool over the whole sentence -------------
|
| 294 |
+
lam_base = math.log(2.0) / (half_life_ratio * L) # base decay rate (λ when slope=1)
|
| 295 |
+
|
| 296 |
+
pos = torch.arange(L, device=device, dtype=dtype)
|
| 297 |
+
log_w = (lam_base * slope * pos).clone()
|
| 298 |
+
|
| 299 |
+
masked_indices[b] = gumbel_topk(log_w, k_tot)
|
| 300 |
+
|
| 301 |
+
else:
|
| 302 |
+
# ---------- Per-block sampling ----------
|
| 303 |
+
num_blocks = math.ceil(L / block_size)
|
| 304 |
+
lam_base = math.log(2.0) / (half_life_ratio * block_size) # base decay rate (λ when slope=1)
|
| 305 |
+
|
| 306 |
+
for blk in range(num_blocks):
|
| 307 |
+
start = blk * block_size
|
| 308 |
+
end = min((blk + 1) * block_size, L)
|
| 309 |
+
blk_len = end - start
|
| 310 |
+
|
| 311 |
+
# Sample m per block
|
| 312 |
+
m_blk = eps + (1.0 - eps) * torch.rand(1, device=device).item()
|
| 313 |
+
|
| 314 |
+
# Fill p_mask for this block
|
| 315 |
+
p_mask[b, start:end] = m_blk
|
| 316 |
+
|
| 317 |
+
# per-block budget
|
| 318 |
+
k_blk = int(round(m_blk * blk_len))
|
| 319 |
+
k_blk = max(0, min(k_blk, blk_len))
|
| 320 |
+
if k_blk == 0:
|
| 321 |
+
continue
|
| 322 |
+
|
| 323 |
+
slope = 1.0 - m_blk # ∈ [0,1]; 0 ⇒ uniform, 1 ⇒ late-heavy
|
| 324 |
+
|
| 325 |
+
pos = torch.arange(blk_len, device=device, dtype=dtype)
|
| 326 |
+
log_w = lam_base * slope * pos
|
| 327 |
+
|
| 328 |
+
blk_mask = gumbel_topk(log_w, k_blk)
|
| 329 |
+
masked_indices[b, start:end] = blk_mask
|
| 330 |
+
|
| 331 |
+
if loss_mask is not None:
|
| 332 |
+
masked_indices[loss_mask == 0] = 0
|
| 333 |
+
|
| 334 |
+
noisy_batch = torch.where(masked_indices, self.mask_token_id, input_ids)
|
| 335 |
+
return noisy_batch, masked_indices, p_mask
|
| 336 |
+
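# A standalone sketch of the two-stage scheme documented above, for one
# sequence without blocks (illustrative; `gumbel_topk` in this file is assumed
# to return a boolean mask over the k entries with the largest
# log_w + Gumbel(0, 1) values, which samples k positions with probabilities
# proportional to exp(log_w)):
#
#   import math
#   import torch
#   L, eps, half_life_ratio = 16, 1e-3, 0.25
#   m = eps + (1.0 - eps) * torch.rand(1).item()         # Stage 1: corruption ratio
#   k = max(1, min(int(round(m * L)), L))                # exact token budget
#   lam = math.log(2.0) / (half_life_ratio * L)          # base decay rate (λ at slope=1)
#   log_w = lam * (1.0 - m) * torch.arange(L, dtype=torch.float32)
#   gumbel = -torch.log(-torch.log(torch.rand(L)))       # Gumbel(0, 1) noise
#   mask = torch.zeros(L, dtype=torch.bool)
#   mask[torch.topk(log_w + gumbel, k).indices] = True   # Stage 2: k weighted positions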
|
| 337 |
+
|
| 338 |
+
def forward(
|
| 339 |
+
self,
|
| 340 |
+
input_ids: torch.LongTensor,
|
| 341 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 342 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 343 |
+
labels: Optional[torch.LongTensor] = None,
|
| 344 |
+
split_len: Optional[int] = None,
|
| 345 |
+
past_key_values: Optional[Cache] = None,
|
| 346 |
+
block_size: Optional[int] = None,
|
| 347 |
+
block_diff_ppl: bool = False,
|
| 348 |
+
eps: float = 1e-3,
|
| 349 |
+
is_teacher: bool = False,
|
| 350 |
+
masked_indices: Optional[torch.Tensor] = None,
|
| 351 |
+
p_mask: Optional[torch.Tensor] = None,
|
| 352 |
+
loss_mask: Optional[torch.Tensor] = None,
|
| 353 |
+
skip_loss: bool = False,
|
| 354 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 355 |
+
**kwargs,
|
| 356 |
+
) -> CausalLMOutputWithPast:
|
| 357 |
+
|
| 358 |
+
if inputs_embeds is not None:
|
| 359 |
+
noisy_inputs = None
|
| 360 |
+
else:
|
| 361 |
+
batch_size, seq_len = input_ids.shape
|
| 362 |
+
|
| 363 |
+
if self.config.dlm_paradigm == 'bidirectional':
|
| 364 |
+
if labels is not None and torch.rand(1) < self.config.random_length_prob:
|
| 365 |
+
random_length = torch.randint(2, input_ids.shape[1] + 1, (1,))
|
| 366 |
+
input_ids = input_ids[:, :random_length]
|
| 367 |
+
labels = labels[:, :random_length]
|
| 368 |
+
|
| 369 |
+
if attention_mask is not None:
|
| 370 |
+
attention_mask = attention_mask[:, :random_length]
|
| 371 |
+
if position_ids is not None:
|
| 372 |
+
position_ids = position_ids[:, :random_length]
|
| 373 |
+
|
| 374 |
+
elif self.config.dlm_paradigm == 'block_diff':
|
| 375 |
+
if labels is not None and block_size is None:
|
| 376 |
+
if torch.rand(1) < self.config.random_length_prob:
|
| 377 |
+
block_size = torch.randint(1, 8, (1,)).item() * 4 ## in {4, 8, ..., 28}, divisible by 4
|
| 378 |
+
else:
|
| 379 |
+
block_size = self.config.block_size
|
| 380 |
+
|
| 381 |
+
if labels is not None and self.config.dlm_paradigm != 'autoregressive':
|
| 382 |
+
if masked_indices is not None:
|
| 383 |
+
#assert p_mask is not None
|
| 384 |
+
|
| 385 |
+
if loss_mask is not None:
|
| 386 |
+
masked_indices[loss_mask == 0] = 0
|
| 387 |
+
|
| 388 |
+
noisy_inputs = torch.where(masked_indices, self.mask_token_id, input_ids)
|
| 389 |
+
|
| 390 |
+
else:
|
| 391 |
+
if self.config.tok_mask_half_life_ratio is not None:
|
| 392 |
+
noisy_inputs, masked_indices, p_mask = self.forward_process_exp(input_ids, eps=eps, block_size=block_size, half_life_ratio=self.config.tok_mask_half_life_ratio, loss_mask=loss_mask)
|
| 393 |
+
else:
|
| 394 |
+
noisy_inputs, masked_indices, p_mask = self.forward_process(input_ids, eps=eps, block_size=block_size, loss_mask=loss_mask)
|
| 395 |
+
|
| 396 |
+
else:
|
| 397 |
+
noisy_inputs = input_ids
|
| 398 |
+
masked_indices = None
|
| 399 |
+
p_mask = None
|
| 400 |
+
|
| 401 |
+
if self.config.dlm_paradigm in ['block_diff']:
|
| 402 |
+
for layer in self.encoder.layers:
|
| 403 |
+
if hasattr(layer.self_attn, 'set_attention_mode'):
|
| 404 |
+
layer.self_attn.set_attention_mode(self.config.dlm_paradigm, block_size=block_size)
|
| 405 |
+
|
| 406 |
+
input_ids_len = noisy_inputs.shape[1]
|
| 407 |
+
if labels is not None and self.config.dlm_paradigm == 'block_diff':
|
| 408 |
+
if position_ids is None:
|
| 409 |
+
position_ids = torch.arange(input_ids_len, device=noisy_inputs.device).unsqueeze(0)
|
| 410 |
+
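# Block-diffusion training concatenates the noisy sequence with the clean
# sequence along the length dimension; the block-wise attention mode set above
# controls how the two halves may attend to each other, and the logits are
# sliced back to the first `input_ids_len` positions after the encoder call.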
noisy_inputs = torch.cat([noisy_inputs, input_ids], dim=1)
|
| 411 |
+
|
| 412 |
+
if block_diff_ppl:
|
| 413 |
+
if position_ids is None:
|
| 414 |
+
position_ids = torch.arange(input_ids_len // 2, device=noisy_inputs.device).unsqueeze(0)
|
| 415 |
+
|
| 416 |
+
enc_out = self.encoder(
|
| 417 |
+
past_key_values=past_key_values,
|
| 418 |
+
input_ids=noisy_inputs,
|
| 419 |
+
inputs_embeds=inputs_embeds,
|
| 420 |
+
attention_mask=attention_mask,
|
| 421 |
+
position_ids=position_ids,
|
| 422 |
+
is_training=(labels is not None) or (block_diff_ppl),
|
| 423 |
+
**kwargs,
|
| 424 |
+
)
|
| 425 |
+
|
| 426 |
+
logits = self.diffusion_head(enc_out.last_hidden_state) # (batch, len_B, vocab)
|
| 427 |
+
|
| 428 |
+
if labels is not None and self.config.dlm_paradigm == 'block_diff':
|
| 429 |
+
logits = logits[:, :input_ids_len]
|
| 430 |
+
|
| 431 |
+
loss = None
|
| 432 |
+
if labels is not None and not skip_loss:
|
| 433 |
+
if self.config.dlm_paradigm == 'autoregressive':
|
| 434 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 435 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 436 |
+
|
| 437 |
+
if loss_mask is None:
|
| 438 |
+
loss_fct = CrossEntropyLoss()
|
| 439 |
+
shift_logits = shift_logits.view(-1, shift_logits.size(-1))
|
| 440 |
+
shift_labels = shift_labels.view(-1)
|
| 441 |
+
loss = loss_fct(shift_logits, shift_labels)
|
| 442 |
+
|
| 443 |
+
else:
|
| 444 |
+
loss_mask = loss_mask[..., 1:].contiguous()
|
| 445 |
+
|
| 446 |
+
loss_fct = CrossEntropyLoss(reduction='none')
|
| 447 |
+
shift_logits = shift_logits.view(-1, shift_logits.size(-1))
|
| 448 |
+
shift_labels = shift_labels.view(-1)
|
| 449 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
| 450 |
+
|
| 451 |
+
token_losses = loss_fct(shift_logits, shift_labels)
|
| 452 |
+
|
| 453 |
+
loss = token_losses[loss_mask].sum() / loss_mask.sum()
|
| 454 |
+
|
| 455 |
+
else:
|
| 456 |
+
# Handle DREAM vs LLADA style losses
|
| 457 |
+
if hasattr(self.config, 'dlm_type') and self.config.dlm_type == 'dream':
|
| 458 |
+
logits = logits[..., :-1, :].contiguous()
|
| 459 |
+
labels = labels[..., 1:].contiguous()
|
| 460 |
+
masked_indices = masked_indices[:, 1:]
|
| 461 |
+
p_mask = p_mask[:, 1:]
|
| 462 |
+
|
| 463 |
+
# Calculate token-wise cross entropy loss for masked positions in B
|
| 464 |
+
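# Dividing each masked token's cross entropy by its masking probability p_mask
# is the usual importance weight in masked-diffusion objectives: tokens masked
# at low noise levels are sampled rarely, so up-weighting them keeps the Monte
# Carlo estimate of the loss comparable across noise levels.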
token_loss = torch.nn.functional.cross_entropy(
|
| 465 |
+
logits[masked_indices],
|
| 466 |
+
labels[masked_indices],
|
| 467 |
+
reduction='none'
|
| 468 |
+
) / p_mask[masked_indices]
|
| 469 |
+
|
| 470 |
+
loss = token_loss.sum() / masked_indices.sum()
|
| 471 |
+
|
| 472 |
+
return CausalLMOutputWithPast(
|
| 473 |
+
loss=loss if not is_teacher else logits,
|
| 474 |
+
logits=logits,
|
| 475 |
+
past_key_values=enc_out.past_key_values,
|
| 476 |
+
hidden_states=enc_out.last_hidden_state,
|
| 477 |
+
attentions=None,
|
| 478 |
+
)
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def generate(self, prompt_ids, max_new_tokens, steps, block_length, shift_logits, threshold, temperature=0):
|
| 482 |
+
out_ids, nfe = generate_with_prefix_cache_block_diff(
|
| 483 |
+
model=self,
|
| 484 |
+
prompt=prompt_ids,
|
| 485 |
+
gen_length=max_new_tokens,
|
| 486 |
+
steps=steps,
|
| 487 |
+
block_length=block_length,
|
| 488 |
+
remasking="low_confidence",
|
| 489 |
+
mask_id=self.mask_token_id,
|
| 490 |
+
threshold=threshold,
|
| 491 |
+
shift_logits=shift_logits,
|
| 492 |
+
temperature=temperature,
|
| 493 |
+
neg_entropy=False,
|
| 494 |
+
)
|
| 495 |
+
|
| 496 |
+
return out_ids, nfe
|
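A minimal usage sketch of the `generate` wrapper defined above; the prompt, the decoding values, and the loading route are illustrative assumptions rather than settings documented in this repository.

# Assumes `model` is the diffusion LM defined above (already loaded, e.g. with
# trust_remote_code=True) and `tokenizer` is this repository's tokenizer.
prompt_ids = tokenizer("Write a haiku about GPUs.", return_tensors="pt").input_ids.to(model.device)
out_ids, nfe = model.generate(
    prompt_ids,
    max_new_tokens=128,   # illustrative values, not tuned defaults
    steps=64,             # number of denoising steps
    block_length=32,      # decode in blocks of 32 tokens
    shift_logits=False,   # set according to the repo's recommended decoding config
    threshold=0.9,        # confidence threshold for low-confidence remasking
)
print(tokenizer.decode(out_ids[0], skip_special_tokens=True), nfe)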
modeling_qwen3.py
ADDED
|
@@ -0,0 +1,1235 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
from typing import Callable, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
from torch import nn
|
| 20 |
+
|
| 21 |
+
from transformers.activations import ACT2FN
|
| 22 |
+
from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
|
| 23 |
+
from transformers.generation import GenerationMixin
|
| 24 |
+
from transformers.integrations import use_kernel_forward_from_hub
|
| 25 |
+
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
|
| 26 |
+
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
|
| 27 |
+
from transformers.modeling_layers import GradientCheckpointingLayer
|
| 28 |
+
from transformers.modeling_outputs import (
|
| 29 |
+
BaseModelOutputWithPast,
|
| 30 |
+
CausalLMOutputWithPast,
|
| 31 |
+
QuestionAnsweringModelOutput,
|
| 32 |
+
SequenceClassifierOutputWithPast,
|
| 33 |
+
TokenClassifierOutput,
|
| 34 |
+
)
|
| 35 |
+
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
|
| 36 |
+
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
|
| 37 |
+
from transformers.processing_utils import Unpack
|
| 38 |
+
from transformers.utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
|
| 39 |
+
try:
|
| 40 |
+
from transformers.utils import TransformersKwargs
|
| 41 |
+
except ImportError:
|
| 42 |
+
from typing import TypedDict
|
| 43 |
+
class TransformersKwargs(TypedDict, total=False):
|
| 44 |
+
pass
|
| 45 |
+
from .configuration_edlm import EfficientDLMConfig
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
if is_torch_flex_attn_available():
|
| 49 |
+
from torch.nn.attention.flex_attention import BlockMask
|
| 50 |
+
|
| 51 |
+
from transformers.integrations.flex_attention import make_flex_block_causal_mask
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
logger = logging.get_logger(__name__)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@use_kernel_forward_from_hub("RMSNorm")
|
| 58 |
+
class Qwen3RMSNorm(nn.Module):
|
| 59 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 60 |
+
"""
|
| 61 |
+
Qwen3RMSNorm is equivalent to T5LayerNorm
|
| 62 |
+
"""
|
| 63 |
+
super().__init__()
|
| 64 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 65 |
+
self.variance_epsilon = eps
|
| 66 |
+
|
| 67 |
+
def forward(self, hidden_states):
|
| 68 |
+
input_dtype = hidden_states.dtype
|
| 69 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 70 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 71 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 72 |
+
return self.weight * hidden_states.to(input_dtype)
|
| 73 |
+
|
| 74 |
+
def extra_repr(self):
|
| 75 |
+
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
| 76 |
+
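# Qwen3RMSNorm above computes y = weight * x / sqrt(mean(x**2, dim=-1) + eps):
# no mean subtraction and no bias term, unlike standard LayerNorm.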
|
| 77 |
+
|
| 78 |
+
class Qwen3MLP(nn.Module):
|
| 79 |
+
def __init__(self, config):
|
| 80 |
+
super().__init__()
|
| 81 |
+
self.config = config
|
| 82 |
+
self.hidden_size = config.hidden_size
|
| 83 |
+
self.intermediate_size = config.intermediate_size
|
| 84 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| 85 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| 86 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
| 87 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
| 88 |
+
|
| 89 |
+
def forward(self, x):
|
| 90 |
+
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
| 91 |
+
return down_proj
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def rotate_half(x):
|
| 95 |
+
"""Rotates half the hidden dims of the input."""
|
| 96 |
+
x1 = x[..., : x.shape[-1] // 2]
|
| 97 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
| 98 |
+
return torch.cat((-x2, x1), dim=-1)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
|
| 102 |
+
"""Applies Rotary Position Embedding to the query and key tensors.
|
| 103 |
+
|
| 104 |
+
Args:
|
| 105 |
+
q (`torch.Tensor`): The query tensor.
|
| 106 |
+
k (`torch.Tensor`): The key tensor.
|
| 107 |
+
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
| 108 |
+
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
| 109 |
+
position_ids (`torch.Tensor`, *optional*):
|
| 110 |
+
Deprecated and unused.
|
| 111 |
+
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
| 112 |
+
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
| 113 |
+
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
| 114 |
+
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
| 115 |
+
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
| 116 |
+
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
| 117 |
+
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
| 118 |
+
Returns:
|
| 119 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
| 120 |
+
"""
|
| 121 |
+
cos = cos.unsqueeze(unsqueeze_dim)
|
| 122 |
+
sin = sin.unsqueeze(unsqueeze_dim)
|
| 123 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
| 124 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
| 125 |
+
return q_embed, k_embed
|
| 126 |
+
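# Shape note for apply_rotary_pos_emb (illustrative): q and k arrive as
# (batch, num_heads, seq_len, head_dim) while cos and sin from the rotary
# embedding are (batch, seq_len, head_dim); unsqueeze_dim=1 inserts the head
# axis so they broadcast, and q_embed / k_embed keep the input shapes.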
|
| 127 |
+
|
| 128 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
| 129 |
+
"""
|
| 130 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
| 131 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
| 132 |
+
"""
|
| 133 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
| 134 |
+
if n_rep == 1:
|
| 135 |
+
return hidden_states
|
| 136 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
| 137 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def eager_attention_forward(
|
| 141 |
+
module: nn.Module,
|
| 142 |
+
query: torch.Tensor,
|
| 143 |
+
key: torch.Tensor,
|
| 144 |
+
value: torch.Tensor,
|
| 145 |
+
attention_mask: Optional[torch.Tensor],
|
| 146 |
+
scaling: float,
|
| 147 |
+
dropout: float = 0.0,
|
| 148 |
+
**kwargs,
|
| 149 |
+
):
|
| 150 |
+
key_states = repeat_kv(key, module.num_key_value_groups)
|
| 151 |
+
value_states = repeat_kv(value, module.num_key_value_groups)
|
| 152 |
+
|
| 153 |
+
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
|
| 154 |
+
if attention_mask is not None:
|
| 155 |
+
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
| 156 |
+
attn_weights = attn_weights + causal_mask
|
| 157 |
+
|
| 158 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
|
| 159 |
+
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
|
| 160 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
| 161 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 162 |
+
|
| 163 |
+
return attn_output, attn_weights
|
| 164 |
+
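# eager_attention_forward above is the reference attention path: key/value
# heads are repeated for grouped-query attention, scores are computed as
# softmax(q @ k^T * scaling + mask) in float32, and the output is transposed
# back to (batch, seq_len, num_heads, head_dim) before the caller flattens the
# head dimensions.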
|
| 165 |
+
|
| 166 |
+
class Qwen3Attention(nn.Module):
|
| 167 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
| 168 |
+
|
| 169 |
+
def __init__(self, config: EfficientDLMConfig, layer_idx: int):
|
| 170 |
+
super().__init__()
|
| 171 |
+
self.config = config
|
| 172 |
+
|
| 173 |
+
self.layer_idx = layer_idx
|
| 174 |
+
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
|
| 175 |
+
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
|
| 176 |
+
self.scaling = self.head_dim**-0.5
|
| 177 |
+
self.attention_dropout = config.attention_dropout
|
| 178 |
+
|
| 179 |
+
self.diffusion_lm = config.diffusion_lm
|
| 180 |
+
|
| 181 |
+
self.is_causal = None if not self.diffusion_lm else False
|
| 182 |
+
|
| 183 |
+
self.q_proj = nn.Linear(
|
| 184 |
+
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
|
| 185 |
+
)
|
| 186 |
+
self.k_proj = nn.Linear(
|
| 187 |
+
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
|
| 188 |
+
)
|
| 189 |
+
self.v_proj = nn.Linear(
|
| 190 |
+
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
|
| 191 |
+
)
|
| 192 |
+
self.o_proj = nn.Linear(
|
| 193 |
+
config.num_attention_heads * self.head_dim, config.hidden_size, bias=False # config.attention_bias
|
| 194 |
+
)
|
| 195 |
+
|
| 196 |
+
if not config.disable_qk_norm:
|
| 197 |
+
self.q_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim!
|
| 198 |
+
self.k_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # thus post q_norm does not need reshape
|
| 199 |
+
else:
|
| 200 |
+
self.q_norm = nn.Identity()
|
| 201 |
+
self.k_norm = nn.Identity()
|
| 202 |
+
|
| 203 |
+
self.sliding_window = config.sliding_window
|
| 204 |
+
if not (
|
| 205 |
+
self.config.use_sliding_window
|
| 206 |
+
and getattr(self.config, "sliding_window", None) is not None
|
| 207 |
+
and self.layer_idx >= self.config.max_window_layers
|
| 208 |
+
):
|
| 209 |
+
self.sliding_window = None
|
| 210 |
+
|
| 211 |
+
def forward(
|
| 212 |
+
self,
|
| 213 |
+
hidden_states: torch.Tensor,
|
| 214 |
+
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
|
| 215 |
+
attention_mask: Optional[torch.Tensor],
|
| 216 |
+
past_key_value: Optional[Cache] = None,
|
| 217 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 218 |
+
replace_position: Optional[torch.Tensor] = None,
|
| 219 |
+
is_training: bool = True,
|
| 220 |
+
use_cache: bool = False,
|
| 221 |
+
**kwargs: Unpack[FlashAttentionKwargs],
|
| 222 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 223 |
+
input_shape = hidden_states.shape[:-1]
|
| 224 |
+
hidden_shape = (*input_shape, -1, self.head_dim)
|
| 225 |
+
|
| 226 |
+
query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
|
| 227 |
+
key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
|
| 228 |
+
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
|
| 229 |
+
|
| 230 |
+
cos, sin = position_embeddings
|
| 231 |
+
|
| 232 |
+
if replace_position is not None:
|
| 233 |
+
# Get the indices that need to be replaced
|
| 234 |
+
replace_indices = replace_position.nonzero(as_tuple=True)[1] # [selected_length]
|
| 235 |
+
block_end_index = replace_indices.max() + 1 if len(replace_indices) > 0 else query_states.shape[-2]
|
| 236 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids=None, unsqueeze_dim=1)
|
| 237 |
+
else:
|
| 238 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
| 239 |
+
|
| 240 |
+
if past_key_value is not None:
|
| 241 |
+
if replace_position is None:
|
| 242 |
+
# Normal cache behavior - append new keys/values
|
| 243 |
+
if use_cache:
|
| 244 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
| 245 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 246 |
+
else: ## if use_cache == False, do not update cache
|
| 247 |
+
old_k, old_v = past_key_value[self.layer_idx]
|
| 248 |
+
key_states = torch.cat([old_k, key_states], dim=-2)
|
| 249 |
+
value_states = torch.cat([old_v, value_states], dim=-2)
|
| 250 |
+
else:
|
| 251 |
+
# Replace specific positions in the cache
|
| 252 |
+
# Extract past keys and values from cache
|
| 253 |
+
if hasattr(past_key_value, 'key_cache') and hasattr(past_key_value, 'value_cache'):
|
| 254 |
+
# Get past keys and values for this layer
|
| 255 |
+
past_key = past_key_value.key_cache[self.layer_idx] # Shape: [B, n_kv_h, L, hs]
|
| 256 |
+
past_value = past_key_value.value_cache[self.layer_idx] # Shape: [B, n_kv_h, L, hs]
|
| 257 |
+
|
| 258 |
+
# Get the indices that need to be replaced in the full sequence
|
| 259 |
+
replace_indices = replace_position.nonzero(as_tuple=True)[1] # [selected_length]
|
| 260 |
+
|
| 261 |
+
# key_states and value_states are only for the current block (selected_length)
|
| 262 |
+
# We need to replace the positions indicated by replace_indices with these new values
|
| 263 |
+
if len(replace_indices) == key_states.shape[-2]:
|
| 264 |
+
# Replace selected positions in past_key with new key_states
|
| 265 |
+
past_key = past_key.clone() # Make a copy to avoid in-place modification
|
| 266 |
+
past_value = past_value.clone()
|
| 267 |
+
past_key[:, :, replace_indices] = key_states
|
| 268 |
+
past_value[:, :, replace_indices] = value_states
|
| 269 |
+
|
| 270 |
+
# Update the cache with modified keys/values
|
| 271 |
+
past_key_value.key_cache[self.layer_idx] = past_key
|
| 272 |
+
past_key_value.value_cache[self.layer_idx] = past_value
|
| 273 |
+
|
| 274 |
+
key_states = past_key
|
| 275 |
+
value_states = past_value
|
| 276 |
+
else:
|
| 277 |
+
print("length mismatch")
|
| 278 |
+
# Fallback - length mismatch, use normal cache update
|
| 279 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
| 280 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 281 |
+
else:
|
| 282 |
+
# Fallback to normal behavior if cache structure is unexpected
|
| 283 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
| 284 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
| 285 |
+
|
| 286 |
+
attention_interface: Callable = eager_attention_forward
|
| 287 |
+
if self.config._attn_implementation != "eager":
|
| 288 |
+
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
|
| 289 |
+
logger.warning_once(
|
| 290 |
+
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
|
| 291 |
+
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
| 292 |
+
)
|
| 293 |
+
else:
|
| 294 |
+
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
|
| 295 |
+
|
| 296 |
+
attn_output, attn_weights = attention_interface(
|
| 297 |
+
self,
|
| 298 |
+
query_states,
|
| 299 |
+
key_states,
|
| 300 |
+
value_states,
|
| 301 |
+
attention_mask if not self.diffusion_lm else None,
|
| 302 |
+
dropout=0.0 if not self.training else self.attention_dropout,
|
| 303 |
+
scaling=self.scaling,
|
| 304 |
+
sliding_window=self.sliding_window, # diff with Llama
|
| 305 |
+
is_causal=self.is_causal,
|
| 306 |
+
**kwargs,
|
| 307 |
+
)
|
| 308 |
+
|
| 309 |
+
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
|
| 310 |
+
attn_output = self.o_proj(attn_output)
|
| 311 |
+
return attn_output, attn_weights
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
class Qwen3DecoderLayer(GradientCheckpointingLayer):
|
| 315 |
+
def __init__(self, config: EfficientDLMConfig, layer_idx: int):
|
| 316 |
+
super().__init__()
|
| 317 |
+
self.hidden_size = config.hidden_size
|
| 318 |
+
if hasattr(config, 'attn_class'):
|
| 319 |
+
attn_class = config.attn_class
|
| 320 |
+
else:
|
| 321 |
+
attn_class = Qwen3Attention
|
| 322 |
+
|
| 323 |
+
self.layer_idx = layer_idx
|
| 324 |
+
|
| 325 |
+
self.self_attn = attn_class(config=config, layer_idx=layer_idx)
|
| 326 |
+
self.mlp = Qwen3MLP(config)
|
| 327 |
+
self.input_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 328 |
+
self.post_attention_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 329 |
+
if (
|
| 330 |
+
config.sliding_window and config._attn_implementation != "flash_attention_2"
|
| 331 |
+
): # diff with Llama is this warning
|
| 332 |
+
logger.warning_once(
|
| 333 |
+
f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
|
| 334 |
+
"unexpected results may be encountered."
|
| 335 |
+
)
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def forward(
|
| 339 |
+
self,
|
| 340 |
+
hidden_states: torch.Tensor,
|
| 341 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 342 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 343 |
+
past_key_value: Optional[Cache] = None,
|
| 344 |
+
output_attentions: Optional[bool] = False,
|
| 345 |
+
use_cache: Optional[bool] = False,
|
| 346 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 347 |
+
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
| 348 |
+
replace_position: Optional[torch.Tensor] = None,
|
| 349 |
+
is_training: bool = True,
|
| 350 |
+
**kwargs: Unpack[FlashAttentionKwargs],
|
| 351 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| 352 |
+
residual = hidden_states
|
| 353 |
+
hidden_states = self.input_layernorm(hidden_states)
|
| 354 |
+
|
| 355 |
+
# Self Attention
|
| 356 |
+
hidden_states, self_attn_weights = self.self_attn(
|
| 357 |
+
hidden_states=hidden_states,
|
| 358 |
+
attention_mask=attention_mask,
|
| 359 |
+
position_ids=position_ids,
|
| 360 |
+
past_key_value=past_key_value,
|
| 361 |
+
output_attentions=output_attentions,
|
| 362 |
+
use_cache=use_cache,
|
| 363 |
+
cache_position=cache_position,
|
| 364 |
+
position_embeddings=position_embeddings,
|
| 365 |
+
replace_position=replace_position,
|
| 366 |
+
is_training=is_training,
|
| 367 |
+
**kwargs,
|
| 368 |
+
)
|
| 369 |
+
hidden_states = residual + hidden_states
|
| 370 |
+
|
| 371 |
+
# Fully Connected
|
| 372 |
+
residual = hidden_states
|
| 373 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
| 374 |
+
hidden_states = self.mlp(hidden_states)
|
| 375 |
+
hidden_states = residual + hidden_states
|
| 376 |
+
|
| 377 |
+
outputs = (hidden_states,)
|
| 378 |
+
if output_attentions:
|
| 379 |
+
outputs += (self_attn_weights,)
|
| 380 |
+
|
| 381 |
+
return outputs
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
@auto_docstring
|
| 385 |
+
class Qwen3PreTrainedModel(PreTrainedModel):
|
| 386 |
+
config_class = EfficientDLMConfig
|
| 387 |
+
base_model_prefix = "model"
|
| 388 |
+
supports_gradient_checkpointing = True
|
| 389 |
+
_no_split_modules = ["Qwen3DecoderLayer"]
|
| 390 |
+
_skip_keys_device_placement = ["past_key_values"]
|
| 391 |
+
_supports_flash_attn_2 = True
|
| 392 |
+
_supports_sdpa = True
|
| 393 |
+
_supports_flex_attn = True
|
| 394 |
+
_supports_cache_class = True
|
| 395 |
+
_supports_quantized_cache = True
|
| 396 |
+
_supports_static_cache = True
|
| 397 |
+
_supports_attention_backend = True
|
| 398 |
+
|
| 399 |
+
def _init_weights(self, module):
|
| 400 |
+
std = self.config.initializer_range
|
| 401 |
+
if isinstance(module, nn.Linear):
|
| 402 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 403 |
+
if module.bias is not None:
|
| 404 |
+
module.bias.data.zero_()
|
| 405 |
+
elif isinstance(module, nn.Embedding):
|
| 406 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 407 |
+
if module.padding_idx is not None:
|
| 408 |
+
module.weight.data[module.padding_idx].zero_()
|
| 409 |
+
elif isinstance(module, Qwen3RMSNorm):
|
| 410 |
+
module.weight.data.fill_(1.0)
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
class Qwen3RotaryEmbedding(nn.Module):
|
| 414 |
+
def __init__(self, config: EfficientDLMConfig, device=None):
|
| 415 |
+
super().__init__()
|
| 416 |
+
# BC: "rope_type" was originally "type"
|
| 417 |
+
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
|
| 418 |
+
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
|
| 419 |
+
else:
|
| 420 |
+
self.rope_type = "default"
|
| 421 |
+
self.max_seq_len_cached = config.max_position_embeddings
|
| 422 |
+
self.original_max_seq_len = config.max_position_embeddings
|
| 423 |
+
|
| 424 |
+
self.config = config
|
| 425 |
+
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
|
| 426 |
+
|
| 427 |
+
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
|
| 428 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
| 429 |
+
self.original_inv_freq = self.inv_freq
|
| 430 |
+
|
| 431 |
+
@torch.no_grad()
|
| 432 |
+
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
|
| 433 |
+
def forward(self, x, position_ids):
|
| 434 |
+
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
|
| 435 |
+
position_ids_expanded = position_ids[:, None, :].float()
|
| 436 |
+
|
| 437 |
+
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
|
| 438 |
+
with torch.autocast(device_type=device_type, enabled=False): # Force float32
|
| 439 |
+
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
|
| 440 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
| 441 |
+
cos = emb.cos() * self.attention_scaling
|
| 442 |
+
sin = emb.sin() * self.attention_scaling
|
| 443 |
+
|
| 444 |
+
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
@auto_docstring
|
| 448 |
+
class Qwen3Model(Qwen3PreTrainedModel):
|
| 449 |
+
def __init__(self, config: EfficientDLMConfig):
|
| 450 |
+
super().__init__(config)
|
| 451 |
+
self.config = config
|
| 452 |
+
|
| 453 |
+
self.padding_idx = config.pad_token_id
|
| 454 |
+
self.vocab_size = config.vocab_size
|
| 455 |
+
|
| 456 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| 457 |
+
self.layers = nn.ModuleList(
|
| 458 |
+
[Qwen3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
| 459 |
+
)
|
| 460 |
+
self.norm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 461 |
+
self.rotary_emb = Qwen3RotaryEmbedding(config=config)
|
| 462 |
+
self.gradient_checkpointing = False
|
| 463 |
+
|
| 464 |
+
# Initialize weights and apply final processing
|
| 465 |
+
self.post_init()
|
| 466 |
+
|
| 467 |
+
def get_input_embeddings(self):
|
| 468 |
+
return self.embed_tokens
|
| 469 |
+
|
| 470 |
+
def set_input_embeddings(self, value):
|
| 471 |
+
self.embed_tokens = value
|
| 472 |
+
|
| 473 |
+
@can_return_tuple
|
| 474 |
+
@auto_docstring
|
| 475 |
+
def forward(
|
| 476 |
+
self,
|
| 477 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 478 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 479 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 480 |
+
past_key_values: Optional[Cache] = None,
|
| 481 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 482 |
+
use_cache: Optional[bool] = None,
|
| 483 |
+
output_attentions: Optional[bool] = None,
|
| 484 |
+
output_hidden_states: Optional[bool] = None,
|
| 485 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 486 |
+
replace_position: Optional[torch.Tensor] = None,
|
| 487 |
+
is_training: bool = True,
|
| 488 |
+
**flash_attn_kwargs: Unpack[FlashAttentionKwargs],
|
| 489 |
+
) -> BaseModelOutputWithPast:
|
| 490 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 491 |
+
output_hidden_states = (
|
| 492 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 493 |
+
)
|
| 494 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 495 |
+
|
| 496 |
+
if (input_ids is None) ^ (inputs_embeds is not None):
|
| 497 |
+
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
|
| 498 |
+
|
| 499 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
| 500 |
+
logger.warning_once(
|
| 501 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
|
| 502 |
+
)
|
| 503 |
+
use_cache = False
|
| 504 |
+
|
| 505 |
+
# Allow both Cache objects and legacy tuple format for compatibility
|
| 506 |
+
if past_key_values is not None and not isinstance(past_key_values, Cache):
|
| 507 |
+
# Convert legacy tuple format to DynamicCache if needed
|
| 508 |
+
if isinstance(past_key_values, (list, tuple)):
|
| 509 |
+
# This is likely a legacy format - convert to DynamicCache
|
| 510 |
+
legacy_cache = past_key_values
|
| 511 |
+
past_key_values = DynamicCache()
|
| 512 |
+
for layer_idx, layer_cache in enumerate(legacy_cache):
|
| 513 |
+
if isinstance(layer_cache, (list, tuple)) and len(layer_cache) == 2:
|
| 514 |
+
key_cache, value_cache = layer_cache
|
| 515 |
+
past_key_values.update(key_cache, value_cache, layer_idx)
|
| 516 |
+
else:
|
| 517 |
+
raise ValueError("The `past_key_values` should be either a `Cache` object, list/tuple of layer caches, or `None`.")
|
| 518 |
+
|
| 519 |
+
# # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
|
| 520 |
+
# if not isinstance(past_key_values, (type(None), Cache)):
|
| 521 |
+
# raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
|
| 522 |
+
|
| 523 |
+
if inputs_embeds is None:
|
| 524 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
| 525 |
+
|
| 526 |
+
if use_cache and past_key_values is None:
|
| 527 |
+
past_key_values = DynamicCache()
|
| 528 |
+
|
| 529 |
+
if cache_position is None:
|
| 530 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
| 531 |
+
cache_position = torch.arange(
|
| 532 |
+
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
| 533 |
+
)
|
| 534 |
+
|
| 535 |
+
if position_ids is None:
|
| 536 |
+
position_ids = cache_position.unsqueeze(0)
|
| 537 |
+
|
| 538 |
+
causal_mask = self._update_causal_mask(
|
| 539 |
+
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
|
| 540 |
+
)
|
| 541 |
+
|
| 542 |
+
hidden_states = inputs_embeds
|
| 543 |
+
|
| 544 |
+
# create position embeddings to be shared across the decoder layers
|
| 545 |
+
position_embeddings = self.rotary_emb(hidden_states, position_ids)
|
| 546 |
+
|
| 547 |
+
# decoder layers
|
| 548 |
+
all_hidden_states = () if output_hidden_states else None
|
| 549 |
+
all_self_attns = () if output_attentions else None
|
| 550 |
+
|
| 551 |
+
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
|
| 552 |
+
if output_hidden_states:
|
| 553 |
+
all_hidden_states += (hidden_states,)
|
| 554 |
+
|
| 555 |
+
layer_outputs = decoder_layer(
|
| 556 |
+
hidden_states,
|
| 557 |
+
attention_mask=causal_mask,
|
| 558 |
+
position_ids=position_ids,
|
| 559 |
+
past_key_value=past_key_values,
|
| 560 |
+
output_attentions=output_attentions,
|
| 561 |
+
use_cache=use_cache,
|
| 562 |
+
cache_position=cache_position,
|
| 563 |
+
position_embeddings=position_embeddings,
|
| 564 |
+
replace_position=replace_position,
|
| 565 |
+
is_training=is_training,
|
| 566 |
+
**flash_attn_kwargs,
|
| 567 |
+
)
|
| 568 |
+
|
| 569 |
+
hidden_states = layer_outputs[0]
|
| 570 |
+
|
| 571 |
+
if output_attentions:
|
| 572 |
+
all_self_attns += (layer_outputs[1],)
|
| 573 |
+
|
| 574 |
+
hidden_states = self.norm(hidden_states)
|
| 575 |
+
|
| 576 |
+
# add hidden states from the last decoder layer
|
| 577 |
+
if output_hidden_states:
|
| 578 |
+
all_hidden_states += (hidden_states,)
|
| 579 |
+
|
| 580 |
+
past_key_values_output = None
|
| 581 |
+
if use_cache and past_key_values is not None:
|
| 582 |
+
if isinstance(past_key_values, Cache):
|
| 583 |
+
# Convert Cache to list of tuples format: [(key, value), (key, value), ...]
|
| 584 |
+
past_key_values_output = []
|
| 585 |
+
if hasattr(past_key_values, 'key_cache') and hasattr(past_key_values, 'value_cache'):
|
| 586 |
+
# DynamicCache format
|
| 587 |
+
for layer_idx in range(len(past_key_values.key_cache)):
|
| 588 |
+
past_key_values_output.append((
|
| 589 |
+
past_key_values.key_cache[layer_idx],
|
| 590 |
+
past_key_values.value_cache[layer_idx]
|
| 591 |
+
))
|
| 592 |
+
else:
|
| 593 |
+
# Fallback - return as is
|
| 594 |
+
past_key_values_output = past_key_values
|
| 595 |
+
else:
|
| 596 |
+
past_key_values_output = past_key_values
|
| 597 |
+
|
| 598 |
+
return BaseModelOutputWithPast(
|
| 599 |
+
last_hidden_state=hidden_states,
|
| 600 |
+
past_key_values=past_key_values_output,
|
| 601 |
+
hidden_states=all_hidden_states,
|
| 602 |
+
attentions=all_self_attns,
|
| 603 |
+
)
|
| 604 |
+
|
| 605 |
+
def _update_causal_mask(
|
| 606 |
+
self,
|
| 607 |
+
attention_mask: Union[torch.Tensor, "BlockMask"],
|
| 608 |
+
input_tensor: torch.Tensor,
|
| 609 |
+
cache_position: torch.Tensor,
|
| 610 |
+
past_key_values: Cache,
|
| 611 |
+
output_attentions: bool = False,
|
| 612 |
+
):
|
| 613 |
+
if self.config._attn_implementation == "flash_attention_2":
|
| 614 |
+
if attention_mask is not None and past_key_values is not None:
|
| 615 |
+
is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
|
| 616 |
+
if is_padding_right:
|
| 617 |
+
raise ValueError(
|
| 618 |
+
"You are attempting to perform batched generation with padding_side='right'"
|
| 619 |
+
" this may lead to unexpected behaviour for Flash Attention version of Qwen3. Make sure to "
|
| 620 |
+
" call `tokenizer.padding_side = 'left'` before tokenizing the input. "
|
| 621 |
+
)
|
| 622 |
+
if attention_mask is not None and 0.0 in attention_mask:
|
| 623 |
+
return attention_mask
|
| 624 |
+
return None
|
| 625 |
+
if self.config._attn_implementation == "flex_attention":
|
| 626 |
+
if isinstance(attention_mask, torch.Tensor):
|
| 627 |
+
attention_mask = make_flex_block_causal_mask(attention_mask)
|
| 628 |
+
return attention_mask
|
| 629 |
+
|
| 630 |
+
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
|
| 631 |
+
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
|
| 632 |
+
# to infer the attention mask.
|
| 633 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
| 634 |
+
using_static_cache = isinstance(past_key_values, StaticCache)
|
| 635 |
+
using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
|
| 636 |
+
|
| 637 |
+
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
|
| 638 |
+
if (
|
| 639 |
+
self.config._attn_implementation == "sdpa"
|
| 640 |
+
and not (using_static_cache or using_sliding_window_cache)
|
| 641 |
+
and not output_attentions
|
| 642 |
+
):
|
| 643 |
+
if AttentionMaskConverter._ignore_causal_mask_sdpa(
|
| 644 |
+
attention_mask,
|
| 645 |
+
inputs_embeds=input_tensor,
|
| 646 |
+
past_key_values_length=past_seen_tokens,
|
| 647 |
+
sliding_window=self.config.sliding_window,
|
| 648 |
+
is_training=self.training,
|
| 649 |
+
):
|
| 650 |
+
return None
|
| 651 |
+
|
| 652 |
+
dtype = input_tensor.dtype
|
| 653 |
+
min_dtype = torch.finfo(dtype).min
|
| 654 |
+
sequence_length = input_tensor.shape[1]
|
| 655 |
+
# SlidingWindowCache or StaticCache
|
| 656 |
+
if using_sliding_window_cache or using_static_cache:
|
| 657 |
+
target_length = past_key_values.get_max_cache_shape()
|
| 658 |
+
# DynamicCache or no cache
|
| 659 |
+
else:
|
| 660 |
+
target_length = (
|
| 661 |
+
attention_mask.shape[-1]
|
| 662 |
+
if isinstance(attention_mask, torch.Tensor)
|
| 663 |
+
else past_seen_tokens + sequence_length + 1
|
| 664 |
+
)
|
| 665 |
+
|
| 666 |
+
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
|
| 667 |
+
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
|
| 668 |
+
attention_mask,
|
| 669 |
+
sequence_length=sequence_length,
|
| 670 |
+
target_length=target_length,
|
| 671 |
+
dtype=dtype,
|
| 672 |
+
cache_position=cache_position,
|
| 673 |
+
batch_size=input_tensor.shape[0],
|
| 674 |
+
config=self.config,
|
| 675 |
+
past_key_values=past_key_values,
|
| 676 |
+
)
|
| 677 |
+
|
| 678 |
+
if (
|
| 679 |
+
self.config._attn_implementation == "sdpa"
|
| 680 |
+
and attention_mask is not None
|
| 681 |
+
and attention_mask.device.type in ["cuda", "xpu", "npu"]
|
| 682 |
+
and not output_attentions
|
| 683 |
+
):
|
| 684 |
+
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
|
| 685 |
+
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
|
| 686 |
+
# Details: https://github.com/pytorch/pytorch/issues/110213
|
| 687 |
+
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
|
| 688 |
+
|
| 689 |
+
return causal_mask
|
| 690 |
+
|
| 691 |
+
@staticmethod
|
| 692 |
+
def _prepare_4d_causal_attention_mask_with_cache_position(
|
| 693 |
+
attention_mask: torch.Tensor,
|
| 694 |
+
sequence_length: int,
|
| 695 |
+
target_length: int,
|
| 696 |
+
dtype: torch.dtype,
|
| 697 |
+
cache_position: torch.Tensor,
|
| 698 |
+
batch_size: int,
|
| 699 |
+
config: EfficientDLMConfig,
|
| 700 |
+
past_key_values: Cache,
|
| 701 |
+
):
|
| 702 |
+
"""
|
| 703 |
+
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
|
| 704 |
+
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
|
| 705 |
+
|
| 706 |
+
Args:
|
| 707 |
+
attention_mask (`torch.Tensor`):
|
| 708 |
+
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
|
| 709 |
+
sequence_length (`int`):
|
| 710 |
+
The sequence length being processed.
|
| 711 |
+
target_length (`int`):
|
| 712 |
+
The target length: when generating with static cache, the mask should be as long as the static cache, to account for the zero padding (the part of the cache that is not yet filled).
|
| 713 |
+
dtype (`torch.dtype`):
|
| 714 |
+
The dtype to use for the 4D attention mask.
|
| 715 |
+
cache_position (`torch.Tensor`):
|
| 716 |
+
Indices depicting the position of the input sequence tokens in the sequence.
|
| 717 |
+
batch_size (`torch.Tensor`):
|
| 718 |
+
Batch size.
|
| 719 |
+
config (`EfficientDLMConfig`):
|
| 720 |
+
The model's configuration class
|
| 721 |
+
past_key_values (`Cache`):
|
| 722 |
+
The cache class that is being used currently to generate
|
| 723 |
+
"""
|
| 724 |
+
if attention_mask is not None and attention_mask.dim() == 4:
|
| 725 |
+
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
|
| 726 |
+
causal_mask = attention_mask
|
| 727 |
+
else:
|
| 728 |
+
min_dtype = torch.finfo(dtype).min
|
| 729 |
+
causal_mask = torch.full(
|
| 730 |
+
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
|
| 731 |
+
)
|
| 732 |
+
diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
|
| 733 |
+
-1, 1
|
| 734 |
+
)
|
| 735 |
+
text_config = config.get_text_config()
|
| 736 |
+
if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
|
| 737 |
+
# if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
|
| 738 |
+
# the check is needed to verify if the current checkpoint was trained with sliding window or not
|
| 739 |
+
if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
|
| 740 |
+
sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
|
| 741 |
+
cache_position.reshape(-1, 1) - text_config.sliding_window
|
| 742 |
+
)
|
| 743 |
+
diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
|
| 744 |
+
causal_mask *= diagonal_attend_mask
|
| 745 |
+
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
|
| 746 |
+
if attention_mask is not None:
|
| 747 |
+
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
|
| 748 |
+
if attention_mask.shape[-1] > target_length:
|
| 749 |
+
attention_mask = attention_mask[:, :target_length]
|
| 750 |
+
mask_length = attention_mask.shape[-1]
|
| 751 |
+
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
|
| 752 |
+
causal_mask.device
|
| 753 |
+
)
|
| 754 |
+
padding_mask = padding_mask == 0
|
| 755 |
+
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
|
| 756 |
+
padding_mask, min_dtype
|
| 757 |
+
)
|
| 758 |
+
return causal_mask
|
| 759 |
+
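# Worked example (illustrative): with sequence_length = target_length = 3, no
# padding and no sliding window, the additive mask built above has shape
# (batch_size, 1, 3, 3) and each query row attends only to itself and earlier
# positions:
#
#   [[   0, -inf, -inf],
#    [   0,    0, -inf],
#    [   0,    0,    0]]   # "-inf" stands for torch.finfo(dtype).min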
|
| 760 |
+
|
| 761 |
+
class KwargsForCausalLM(FlashAttentionKwargs, TransformersKwargs): ...
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
@auto_docstring
|
| 765 |
+
class Qwen3ForCausalLM(Qwen3PreTrainedModel, GenerationMixin):
|
| 766 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 767 |
+
_tp_plan = {"lm_head": "colwise_rep"}
|
| 768 |
+
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
|
| 769 |
+
|
| 770 |
+
def __init__(self, config):
|
| 771 |
+
super().__init__(config)
|
| 772 |
+
|
| 773 |
+
config._attn_implementation = config.attn_implementation
|
| 774 |
+
|
| 775 |
+
self.model = Qwen3Model(config)
|
| 776 |
+
self.vocab_size = config.vocab_size
|
| 777 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 778 |
+
|
| 779 |
+
# Initialize weights and apply final processing
|
| 780 |
+
self.post_init()
|
| 781 |
+
|
| 782 |
+
def get_input_embeddings(self):
|
| 783 |
+
return self.model.embed_tokens
|
| 784 |
+
|
| 785 |
+
def set_input_embeddings(self, value):
|
| 786 |
+
self.model.embed_tokens = value
|
| 787 |
+
|
| 788 |
+
def get_output_embeddings(self):
|
| 789 |
+
return self.lm_head
|
| 790 |
+
|
| 791 |
+
def set_output_embeddings(self, new_embeddings):
|
| 792 |
+
self.lm_head = new_embeddings
|
| 793 |
+
|
| 794 |
+
def set_decoder(self, decoder):
|
| 795 |
+
self.model = decoder
|
| 796 |
+
|
| 797 |
+
def get_decoder(self):
|
| 798 |
+
return self.model
|
| 799 |
+
|
| 800 |
+
@can_return_tuple
|
| 801 |
+
@auto_docstring
|
| 802 |
+
def forward(
|
| 803 |
+
self,
|
| 804 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 805 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 806 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 807 |
+
past_key_values: Optional[Cache] = None,
|
| 808 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 809 |
+
labels: Optional[torch.LongTensor] = None,
|
| 810 |
+
use_cache: Optional[bool] = None,
|
| 811 |
+
output_attentions: Optional[bool] = None,
|
| 812 |
+
output_hidden_states: Optional[bool] = None,
|
| 813 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 814 |
+
logits_to_keep: Union[int, torch.Tensor] = 0,
|
| 815 |
+
replace_position: Optional[torch.Tensor] = None,
|
| 816 |
+
**kwargs: Unpack[KwargsForCausalLM],
|
| 817 |
+
) -> CausalLMOutputWithPast:
|
| 818 |
+
r"""
|
| 819 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 820 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 821 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 822 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 823 |
+
|
| 824 |
+
Example:
|
| 825 |
+
|
| 826 |
+
```python
|
| 827 |
+
>>> from transformers import AutoTokenizer, Qwen3ForCausalLM
|
| 828 |
+
|
| 829 |
+
>>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
|
| 830 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
|
| 831 |
+
|
| 832 |
+
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| 833 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
| 834 |
+
|
| 835 |
+
>>> # Generate
|
| 836 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| 837 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 838 |
+
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| 839 |
+
```"""
|
| 840 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 841 |
+
output_hidden_states = (
|
| 842 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 843 |
+
)
|
| 844 |
+
|
| 845 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
| 846 |
+
outputs: BaseModelOutputWithPast = self.model(
|
| 847 |
+
input_ids=input_ids,
|
| 848 |
+
attention_mask=attention_mask,
|
| 849 |
+
position_ids=position_ids,
|
| 850 |
+
past_key_values=past_key_values,
|
| 851 |
+
inputs_embeds=inputs_embeds,
|
| 852 |
+
use_cache=use_cache,
|
| 853 |
+
output_attentions=output_attentions,
|
| 854 |
+
output_hidden_states=output_hidden_states,
|
| 855 |
+
cache_position=cache_position,
|
| 856 |
+
replace_position=replace_position,
|
| 857 |
+
**kwargs,
|
| 858 |
+
)
|
| 859 |
+
|
| 860 |
+
hidden_states = outputs.last_hidden_state
|
| 861 |
+
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
| 862 |
+
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
|
| 863 |
+
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
| 864 |
+
|
| 865 |
+
loss = None
|
| 866 |
+
if labels is not None:
|
| 867 |
+
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
|
| 868 |
+
|
| 869 |
+
return CausalLMOutputWithPast(
|
| 870 |
+
loss=loss,
|
| 871 |
+
logits=logits,
|
| 872 |
+
past_key_values=outputs.past_key_values,
|
| 873 |
+
hidden_states=outputs.hidden_states,
|
| 874 |
+
attentions=outputs.attentions,
|
| 875 |
+
)
|
| 876 |
+
|
| 877 |
+
|
| 878 |
+
@auto_docstring(
|
| 879 |
+
custom_intro="""
|
| 880 |
+
The Qwen3 Model transformer with a sequence classification head on top (linear layer).
|
| 881 |
+
|
| 882 |
+
[`Qwen3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
| 883 |
+
(e.g. GPT-2) do.
|
| 884 |
+
|
| 885 |
+
Since it does classification on the last token, it needs to know the position of the last token. If a
|
| 886 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
| 887 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
| 888 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
| 889 |
+
each row of the batch).
|
| 890 |
+
"""
|
| 891 |
+
)
|
| 892 |
+
class Qwen3ForSequenceClassification(Qwen3PreTrainedModel):
|
| 893 |
+
def __init__(self, config):
|
| 894 |
+
super().__init__(config)
|
| 895 |
+
self.num_labels = config.num_labels
|
| 896 |
+
self.model = Qwen3Model(config)
|
| 897 |
+
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
| 898 |
+
|
| 899 |
+
# Initialize weights and apply final processing
|
| 900 |
+
self.post_init()
|
| 901 |
+
|
| 902 |
+
def get_input_embeddings(self):
|
| 903 |
+
return self.model.embed_tokens
|
| 904 |
+
|
| 905 |
+
def set_input_embeddings(self, value):
|
| 906 |
+
self.model.embed_tokens = value
|
| 907 |
+
|
| 908 |
+
@can_return_tuple
|
| 909 |
+
@auto_docstring
|
| 910 |
+
def forward(
|
| 911 |
+
self,
|
| 912 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 913 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 914 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 915 |
+
past_key_values: Optional[Cache] = None,
|
| 916 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 917 |
+
labels: Optional[torch.LongTensor] = None,
|
| 918 |
+
use_cache: Optional[bool] = None,
|
| 919 |
+
output_attentions: Optional[bool] = None,
|
| 920 |
+
output_hidden_states: Optional[bool] = None,
|
| 921 |
+
) -> SequenceClassifierOutputWithPast:
|
| 922 |
+
r"""
|
| 923 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 924 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 925 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 926 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 927 |
+
"""
|
| 928 |
+
|
| 929 |
+
transformer_outputs: BaseModelOutputWithPast = self.model(
|
| 930 |
+
input_ids,
|
| 931 |
+
attention_mask=attention_mask,
|
| 932 |
+
position_ids=position_ids,
|
| 933 |
+
past_key_values=past_key_values,
|
| 934 |
+
inputs_embeds=inputs_embeds,
|
| 935 |
+
use_cache=use_cache,
|
| 936 |
+
output_attentions=output_attentions,
|
| 937 |
+
output_hidden_states=output_hidden_states,
|
| 938 |
+
)
|
| 939 |
+
hidden_states = transformer_outputs.last_hidden_state
|
| 940 |
+
logits = self.score(hidden_states)
|
| 941 |
+
|
| 942 |
+
if input_ids is not None:
|
| 943 |
+
batch_size = input_ids.shape[0]
|
| 944 |
+
else:
|
| 945 |
+
batch_size = inputs_embeds.shape[0]
|
| 946 |
+
|
| 947 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
| 948 |
+
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
| 949 |
+
if self.config.pad_token_id is None:
|
| 950 |
+
last_non_pad_token = -1
|
| 951 |
+
elif input_ids is not None:
|
| 952 |
+
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
|
| 953 |
+
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
|
| 954 |
+
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
|
| 955 |
+
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
|
| 956 |
+
else:
|
| 957 |
+
last_non_pad_token = -1
|
| 958 |
+
logger.warning_once(
|
| 959 |
+
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
|
| 960 |
+
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
|
| 961 |
+
)
|
| 962 |
+
|
| 963 |
+
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
|
| 964 |
+
|
| 965 |
+
loss = None
|
| 966 |
+
if labels is not None:
|
| 967 |
+
loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
|
| 968 |
+
|
| 969 |
+
return SequenceClassifierOutputWithPast(
|
| 970 |
+
loss=loss,
|
| 971 |
+
logits=pooled_logits,
|
| 972 |
+
past_key_values=transformer_outputs.past_key_values,
|
| 973 |
+
hidden_states=transformer_outputs.hidden_states,
|
| 974 |
+
attentions=transformer_outputs.attentions,
|
| 975 |
+
)
|
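The last-non-pad-token pooling described in the docstring above can be checked in isolation. The snippet below is a minimal sketch, not part of the released files; the `pad_token_id` and the toy batch are assumptions chosen only to show how the argmax trick in `Qwen3ForSequenceClassification.forward` finds the rightmost non-padding position under both left- and right-padding.

```python
import torch

pad_token_id = 0  # assumed value for this sketch
input_ids = torch.tensor([
    [5, 9, 3, 0, 0],   # right-padded: last real token at index 2
    [0, 0, 7, 2, 4],   # left-padded:  last real token at index 4
])

# Multiply each position index by a 0/1 non-pad mask; the argmax is then the
# rightmost index whose token is not padding.
non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
print(last_non_pad_token)  # tensor([2, 4])

# forward() then gathers the classification logits at these positions:
# pooled_logits = logits[torch.arange(batch_size), last_non_pad_token]
```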
| 976 |
+
|
| 977 |
+
|
| 978 |
+
@auto_docstring
|
| 979 |
+
class Qwen3ForTokenClassification(Qwen3PreTrainedModel):
|
| 980 |
+
def __init__(self, config):
|
| 981 |
+
super().__init__(config)
|
| 982 |
+
self.num_labels = config.num_labels
|
| 983 |
+
self.model = Qwen3Model(config)
|
| 984 |
+
if getattr(config, "classifier_dropout", None) is not None:
|
| 985 |
+
classifier_dropout = config.classifier_dropout
|
| 986 |
+
elif getattr(config, "hidden_dropout", None) is not None:
|
| 987 |
+
classifier_dropout = config.hidden_dropout
|
| 988 |
+
else:
|
| 989 |
+
classifier_dropout = 0.1
|
| 990 |
+
self.dropout = nn.Dropout(classifier_dropout)
|
| 991 |
+
self.score = nn.Linear(config.hidden_size, config.num_labels)
|
| 992 |
+
|
| 993 |
+
# Initialize weights and apply final processing
|
| 994 |
+
self.post_init()
|
| 995 |
+
|
| 996 |
+
def get_input_embeddings(self):
|
| 997 |
+
return self.model.embed_tokens
|
| 998 |
+
|
| 999 |
+
def set_input_embeddings(self, value):
|
| 1000 |
+
self.model.embed_tokens = value
|
| 1001 |
+
|
| 1002 |
+
@can_return_tuple
|
| 1003 |
+
@auto_docstring
|
| 1004 |
+
def forward(
|
| 1005 |
+
self,
|
| 1006 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1007 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1008 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1009 |
+
past_key_values: Optional[Cache] = None,
|
| 1010 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1011 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1012 |
+
use_cache: Optional[bool] = None,
|
| 1013 |
+
output_attentions: Optional[bool] = None,
|
| 1014 |
+
output_hidden_states: Optional[bool] = None,
|
| 1015 |
+
) -> TokenClassifierOutput:
|
| 1016 |
+
r"""
|
| 1017 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1018 |
+
Labels for computing the token classification loss. Indices should be in `[0, ...,
|
| 1019 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1020 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1021 |
+
"""
|
| 1022 |
+
|
| 1023 |
+
outputs: BaseModelOutputWithPast = self.model(
|
| 1024 |
+
input_ids,
|
| 1025 |
+
attention_mask=attention_mask,
|
| 1026 |
+
position_ids=position_ids,
|
| 1027 |
+
past_key_values=past_key_values,
|
| 1028 |
+
inputs_embeds=inputs_embeds,
|
| 1029 |
+
use_cache=use_cache,
|
| 1030 |
+
output_attentions=output_attentions,
|
| 1031 |
+
output_hidden_states=output_hidden_states,
|
| 1032 |
+
)
|
| 1033 |
+
sequence_output = outputs.last_hidden_state
|
| 1034 |
+
sequence_output = self.dropout(sequence_output)
|
| 1035 |
+
logits = self.score(sequence_output)
|
| 1036 |
+
|
| 1037 |
+
loss = None
|
| 1038 |
+
if labels is not None:
|
| 1039 |
+
loss = self.loss_function(logits, labels, self.config)
|
| 1040 |
+
|
| 1041 |
+
return TokenClassifierOutput(
|
| 1042 |
+
loss=loss,
|
| 1043 |
+
logits=logits,
|
| 1044 |
+
hidden_states=outputs.hidden_states,
|
| 1045 |
+
attentions=outputs.attentions,
|
| 1046 |
+
)
|
| 1047 |
+
|
| 1048 |
+
|
| 1049 |
+
@auto_docstring
|
| 1050 |
+
class Qwen3ForQuestionAnswering(Qwen3PreTrainedModel):
|
| 1051 |
+
base_model_prefix = "transformer"
|
| 1052 |
+
|
| 1053 |
+
def __init__(self, config):
|
| 1054 |
+
super().__init__(config)
|
| 1055 |
+
self.transformer = Qwen3Model(config)
|
| 1056 |
+
self.qa_outputs = nn.Linear(config.hidden_size, 2)
|
| 1057 |
+
|
| 1058 |
+
# Initialize weights and apply final processing
|
| 1059 |
+
self.post_init()
|
| 1060 |
+
|
| 1061 |
+
def get_input_embeddings(self):
|
| 1062 |
+
return self.transformer.embed_tokens
|
| 1063 |
+
|
| 1064 |
+
def set_input_embeddings(self, value):
|
| 1065 |
+
self.transformer.embed_tokens = value
|
| 1066 |
+
|
| 1067 |
+
@can_return_tuple
|
| 1068 |
+
@auto_docstring
|
| 1069 |
+
def forward(
|
| 1070 |
+
self,
|
| 1071 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1072 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1073 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1074 |
+
past_key_values: Optional[Cache] = None,
|
| 1075 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1076 |
+
start_positions: Optional[torch.LongTensor] = None,
|
| 1077 |
+
end_positions: Optional[torch.LongTensor] = None,
|
| 1078 |
+
output_attentions: Optional[bool] = None,
|
| 1079 |
+
output_hidden_states: Optional[bool] = None,
|
| 1080 |
+
**kwargs,
|
| 1081 |
+
) -> QuestionAnsweringModelOutput:
|
| 1082 |
+
outputs: BaseModelOutputWithPast = self.transformer(
|
| 1083 |
+
input_ids,
|
| 1084 |
+
attention_mask=attention_mask,
|
| 1085 |
+
position_ids=position_ids,
|
| 1086 |
+
past_key_values=past_key_values,
|
| 1087 |
+
inputs_embeds=inputs_embeds,
|
| 1088 |
+
output_attentions=output_attentions,
|
| 1089 |
+
output_hidden_states=output_hidden_states,
|
| 1090 |
+
)
|
| 1091 |
+
|
| 1092 |
+
sequence_output = outputs.last_hidden_state
|
| 1093 |
+
|
| 1094 |
+
logits = self.qa_outputs(sequence_output)
|
| 1095 |
+
start_logits, end_logits = logits.split(1, dim=-1)
|
| 1096 |
+
start_logits = start_logits.squeeze(-1).contiguous()
|
| 1097 |
+
end_logits = end_logits.squeeze(-1).contiguous()
|
| 1098 |
+
|
| 1099 |
+
loss = None
|
| 1100 |
+
if start_positions is not None and end_positions is not None:
|
| 1101 |
+
loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs)
|
| 1102 |
+
|
| 1103 |
+
return QuestionAnsweringModelOutput(
|
| 1104 |
+
loss=loss,
|
| 1105 |
+
start_logits=start_logits,
|
| 1106 |
+
end_logits=end_logits,
|
| 1107 |
+
hidden_states=outputs.hidden_states,
|
| 1108 |
+
attentions=outputs.attentions,
|
| 1109 |
+
)
|
| 1110 |
+
|
| 1111 |
+
|
| 1112 |
+
class Qwen3DiffusionLM(Qwen3ForCausalLM):
|
| 1113 |
+
def __init__(self, config):
|
| 1114 |
+
super().__init__(config)
|
| 1115 |
+
self.mask_token_id = 151662  # mask token ID (<|fim_pad|> in this tokenizer, repurposed as the diffusion mask token)
|
| 1116 |
+
|
| 1117 |
+
def forward_process(self, input_ids, eps=1e-3):
|
| 1118 |
+
b, l = input_ids.shape
|
| 1119 |
+
t = torch.rand(b, device=input_ids.device)  # one diffusion timestep t ~ U(0, 1) per sequence
|
| 1120 |
+
p_mask = (1 - eps) * t + eps  # per-sequence masking probability: eps at t=0, 1.0 at t=1
|
| 1121 |
+
p_mask = p_mask[:, None].repeat(1, l)
|
| 1122 |
+
|
| 1123 |
+
# Generate masked indices
|
| 1124 |
+
masked_indices = torch.rand((b, l), device=input_ids.device) < p_mask
|
| 1125 |
+
|
| 1126 |
+
noisy_batch = torch.where(masked_indices, self.mask_token_id, input_ids)
|
| 1127 |
+
|
| 1128 |
+
return noisy_batch, masked_indices, p_mask
|
| 1129 |
+
|
| 1130 |
+
@can_return_tuple
|
| 1131 |
+
@auto_docstring
|
| 1132 |
+
def forward(
|
| 1133 |
+
self,
|
| 1134 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1135 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1136 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1137 |
+
past_key_values: Optional[Cache] = None,
|
| 1138 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1139 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1140 |
+
use_cache: Optional[bool] = None,
|
| 1141 |
+
output_attentions: Optional[bool] = None,
|
| 1142 |
+
output_hidden_states: Optional[bool] = None,
|
| 1143 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 1144 |
+
logits_to_keep: Union[int, torch.Tensor] = 0,
|
| 1145 |
+
replace_position: Optional[torch.Tensor] = None,
|
| 1146 |
+
eps: float = 1e-3,
|
| 1147 |
+
**kwargs: Unpack[KwargsForCausalLM],
|
| 1148 |
+
) -> CausalLMOutputWithPast:
|
| 1149 |
+
# Apply random length truncation with 1% probability
|
| 1150 |
+
# if torch.rand(1) < 0.01:
|
| 1151 |
+
# random_length = torch.randint(1, input_ids.shape[1] + 1, (1,))
|
| 1152 |
+
# input_ids = input_ids[:, :random_length]
|
| 1153 |
+
# if attention_mask is not None:
|
| 1154 |
+
# attention_mask = attention_mask[:, :random_length]
|
| 1155 |
+
|
| 1156 |
+
# Apply forward process for diffusion with shifted masking
|
| 1157 |
+
|
| 1158 |
+
if labels is not None:
|
| 1159 |
+
if self.config.random_length_prob is not None:
|
| 1160 |
+
if torch.rand(1) < self.config.random_length_prob:
|
| 1161 |
+
random_length = torch.randint(2, input_ids.shape[1] + 1, (1,))
|
| 1162 |
+
input_ids = input_ids[:, :random_length]
|
| 1163 |
+
labels = labels[:, :random_length]
|
| 1164 |
+
|
| 1165 |
+
if attention_mask is not None:
|
| 1166 |
+
attention_mask = attention_mask[:, :random_length]
|
| 1167 |
+
if position_ids is not None:
|
| 1168 |
+
position_ids = position_ids[:, :random_length]
|
| 1169 |
+
|
| 1170 |
+
noisy_batch, masked_indices, p_mask = self.forward_process(input_ids, eps)
|
| 1171 |
+
else:
|
| 1172 |
+
noisy_batch = input_ids
|
| 1173 |
+
masked_indices = None
|
| 1174 |
+
p_mask = None
|
| 1175 |
+
|
| 1176 |
+
# Get model outputs
|
| 1177 |
+
outputs: BaseModelOutputWithPast = self.model(
|
| 1178 |
+
input_ids=noisy_batch,
|
| 1179 |
+
attention_mask=attention_mask,
|
| 1180 |
+
position_ids=position_ids,
|
| 1181 |
+
past_key_values=past_key_values,
|
| 1182 |
+
inputs_embeds=inputs_embeds,
|
| 1183 |
+
use_cache=use_cache,
|
| 1184 |
+
output_attentions=output_attentions,
|
| 1185 |
+
output_hidden_states=output_hidden_states,
|
| 1186 |
+
cache_position=cache_position,
|
| 1187 |
+
replace_position=replace_position,
|
| 1188 |
+
**kwargs,
|
| 1189 |
+
)
|
| 1190 |
+
|
| 1191 |
+
hidden_states = outputs.last_hidden_state
|
| 1192 |
+
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
| 1193 |
+
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
|
| 1194 |
+
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
| 1195 |
+
|
| 1196 |
+
loss = None
|
| 1197 |
+
if labels is not None:
|
| 1198 |
+
if self.config.dlm_type == 'dream':
|
| 1199 |
+
logits = logits[..., :-1, :].contiguous()
|
| 1200 |
+
labels = labels[..., 1:].contiguous()
|
| 1201 |
+
masked_indices = masked_indices[:, 1:]
|
| 1202 |
+
p_mask = p_mask[:, 1:]
|
| 1203 |
+
|
| 1204 |
+
# Calculate token-wise cross entropy loss for masked positions
|
| 1205 |
+
token_loss = torch.nn.functional.cross_entropy(
|
| 1206 |
+
logits[masked_indices],
|
| 1207 |
+
labels[masked_indices],
|
| 1208 |
+
reduction='none'
|
| 1209 |
+
) / p_mask[masked_indices]
|
| 1210 |
+
|
| 1211 |
+
# Average loss over masked tokens only
|
| 1212 |
+
loss = token_loss.sum() / masked_indices.sum()
|
| 1213 |
+
|
| 1214 |
+
# loss = None
|
| 1215 |
+
# if labels is not None:
|
| 1216 |
+
# loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
|
| 1217 |
+
|
| 1218 |
+
return CausalLMOutputWithPast(
|
| 1219 |
+
loss=loss,
|
| 1220 |
+
logits=logits,
|
| 1221 |
+
past_key_values=outputs.past_key_values,
|
| 1222 |
+
hidden_states=outputs.hidden_states,
|
| 1223 |
+
attentions=outputs.attentions,
|
| 1224 |
+
)
|
| 1225 |
+
|
| 1226 |
+
|
| 1227 |
+
__all__ = [
|
| 1228 |
+
"Qwen3ForCausalLM",
|
| 1229 |
+
"Qwen3ForQuestionAnswering",
|
| 1230 |
+
"Qwen3Model",
|
| 1231 |
+
"Qwen3PreTrainedModel",
|
| 1232 |
+
"Qwen3ForSequenceClassification",
|
| 1233 |
+
"Qwen3ForTokenClassification",
|
| 1234 |
+
"Qwen3DiffusionLM",
|
| 1235 |
+
]
|
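The masked-diffusion objective implemented by `Qwen3DiffusionLM` above can be summarized outside the modeling code. The following is an illustrative sketch, not part of the released files: it re-implements the `forward_process` masking and the importance-weighted cross-entropy from `forward` on a toy vocabulary, with a stand-in `mask_id` and random logits in place of `lm_head(hidden_states)`. It mirrors the non-shifted case; when `config.dlm_type == 'dream'`, the logits and labels are additionally shifted by one position before the loss.

```python
import torch
import torch.nn.functional as F

def forward_process(input_ids, mask_token_id, eps=1e-3):
    """Mirror of Qwen3DiffusionLM.forward_process: sample a timestep t per sequence,
    then mask each token independently with probability p_mask = (1 - eps) * t + eps."""
    b, l = input_ids.shape
    t = torch.rand(b, device=input_ids.device)
    p_mask = ((1 - eps) * t + eps)[:, None].repeat(1, l)
    masked_indices = torch.rand(b, l, device=input_ids.device) < p_mask
    noisy_batch = torch.where(masked_indices, torch.full_like(input_ids, mask_token_id), input_ids)
    return noisy_batch, masked_indices, p_mask

vocab_size, mask_id = 10, 9              # toy values for this sketch
input_ids = torch.randint(0, mask_id, (2, 8))
noisy, masked, p_mask = forward_process(input_ids, mask_id)

logits = torch.randn(2, 8, vocab_size)   # stand-in for lm_head(hidden_states)

# As in Qwen3DiffusionLM.forward: cross-entropy only on masked positions,
# re-weighted by 1 / p_mask, averaged over the number of masked tokens.
if masked.any():
    token_loss = F.cross_entropy(logits[masked], input_ids[masked], reduction="none") / p_mask[masked]
    loss = token_loss.sum() / masked.sum()
    print(float(loss))
```

The 1 / p_mask re-weighting is the usual importance weight in masked-diffusion language-model training: tokens masked at low noise levels are sampled rarely, so their loss is scaled up to keep the objective comparable across timesteps.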
special_tokens_map.json
ADDED
|
@@ -0,0 +1,31 @@
|
| 1 |
+
{
|
| 2 |
+
"additional_special_tokens": [
|
| 3 |
+
"<|im_start|>",
|
| 4 |
+
"<|im_end|>",
|
| 5 |
+
"<|object_ref_start|>",
|
| 6 |
+
"<|object_ref_end|>",
|
| 7 |
+
"<|box_start|>",
|
| 8 |
+
"<|box_end|>",
|
| 9 |
+
"<|quad_start|>",
|
| 10 |
+
"<|quad_end|>",
|
| 11 |
+
"<|vision_start|>",
|
| 12 |
+
"<|vision_end|>",
|
| 13 |
+
"<|vision_pad|>",
|
| 14 |
+
"<|image_pad|>",
|
| 15 |
+
"<|video_pad|>"
|
| 16 |
+
],
|
| 17 |
+
"eos_token": {
|
| 18 |
+
"content": "<|im_end|>",
|
| 19 |
+
"lstrip": false,
|
| 20 |
+
"normalized": false,
|
| 21 |
+
"rstrip": false,
|
| 22 |
+
"single_word": false
|
| 23 |
+
},
|
| 24 |
+
"pad_token": {
|
| 25 |
+
"content": "<|endoftext|>",
|
| 26 |
+
"lstrip": false,
|
| 27 |
+
"normalized": false,
|
| 28 |
+
"rstrip": false,
|
| 29 |
+
"single_word": false
|
| 30 |
+
}
|
| 31 |
+
}
|
tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
|
| 3 |
+
size 11422654
|
tokenizer_config.json
ADDED
|
@@ -0,0 +1,239 @@
|
| 1 |
+
{
|
| 2 |
+
"add_bos_token": false,
|
| 3 |
+
"add_prefix_space": false,
|
| 4 |
+
"added_tokens_decoder": {
|
| 5 |
+
"151643": {
|
| 6 |
+
"content": "<|endoftext|>",
|
| 7 |
+
"lstrip": false,
|
| 8 |
+
"normalized": false,
|
| 9 |
+
"rstrip": false,
|
| 10 |
+
"single_word": false,
|
| 11 |
+
"special": true
|
| 12 |
+
},
|
| 13 |
+
"151644": {
|
| 14 |
+
"content": "<|im_start|>",
|
| 15 |
+
"lstrip": false,
|
| 16 |
+
"normalized": false,
|
| 17 |
+
"rstrip": false,
|
| 18 |
+
"single_word": false,
|
| 19 |
+
"special": true
|
| 20 |
+
},
|
| 21 |
+
"151645": {
|
| 22 |
+
"content": "<|im_end|>",
|
| 23 |
+
"lstrip": false,
|
| 24 |
+
"normalized": false,
|
| 25 |
+
"rstrip": false,
|
| 26 |
+
"single_word": false,
|
| 27 |
+
"special": true
|
| 28 |
+
},
|
| 29 |
+
"151646": {
|
| 30 |
+
"content": "<|object_ref_start|>",
|
| 31 |
+
"lstrip": false,
|
| 32 |
+
"normalized": false,
|
| 33 |
+
"rstrip": false,
|
| 34 |
+
"single_word": false,
|
| 35 |
+
"special": true
|
| 36 |
+
},
|
| 37 |
+
"151647": {
|
| 38 |
+
"content": "<|object_ref_end|>",
|
| 39 |
+
"lstrip": false,
|
| 40 |
+
"normalized": false,
|
| 41 |
+
"rstrip": false,
|
| 42 |
+
"single_word": false,
|
| 43 |
+
"special": true
|
| 44 |
+
},
|
| 45 |
+
"151648": {
|
| 46 |
+
"content": "<|box_start|>",
|
| 47 |
+
"lstrip": false,
|
| 48 |
+
"normalized": false,
|
| 49 |
+
"rstrip": false,
|
| 50 |
+
"single_word": false,
|
| 51 |
+
"special": true
|
| 52 |
+
},
|
| 53 |
+
"151649": {
|
| 54 |
+
"content": "<|box_end|>",
|
| 55 |
+
"lstrip": false,
|
| 56 |
+
"normalized": false,
|
| 57 |
+
"rstrip": false,
|
| 58 |
+
"single_word": false,
|
| 59 |
+
"special": true
|
| 60 |
+
},
|
| 61 |
+
"151650": {
|
| 62 |
+
"content": "<|quad_start|>",
|
| 63 |
+
"lstrip": false,
|
| 64 |
+
"normalized": false,
|
| 65 |
+
"rstrip": false,
|
| 66 |
+
"single_word": false,
|
| 67 |
+
"special": true
|
| 68 |
+
},
|
| 69 |
+
"151651": {
|
| 70 |
+
"content": "<|quad_end|>",
|
| 71 |
+
"lstrip": false,
|
| 72 |
+
"normalized": false,
|
| 73 |
+
"rstrip": false,
|
| 74 |
+
"single_word": false,
|
| 75 |
+
"special": true
|
| 76 |
+
},
|
| 77 |
+
"151652": {
|
| 78 |
+
"content": "<|vision_start|>",
|
| 79 |
+
"lstrip": false,
|
| 80 |
+
"normalized": false,
|
| 81 |
+
"rstrip": false,
|
| 82 |
+
"single_word": false,
|
| 83 |
+
"special": true
|
| 84 |
+
},
|
| 85 |
+
"151653": {
|
| 86 |
+
"content": "<|vision_end|>",
|
| 87 |
+
"lstrip": false,
|
| 88 |
+
"normalized": false,
|
| 89 |
+
"rstrip": false,
|
| 90 |
+
"single_word": false,
|
| 91 |
+
"special": true
|
| 92 |
+
},
|
| 93 |
+
"151654": {
|
| 94 |
+
"content": "<|vision_pad|>",
|
| 95 |
+
"lstrip": false,
|
| 96 |
+
"normalized": false,
|
| 97 |
+
"rstrip": false,
|
| 98 |
+
"single_word": false,
|
| 99 |
+
"special": true
|
| 100 |
+
},
|
| 101 |
+
"151655": {
|
| 102 |
+
"content": "<|image_pad|>",
|
| 103 |
+
"lstrip": false,
|
| 104 |
+
"normalized": false,
|
| 105 |
+
"rstrip": false,
|
| 106 |
+
"single_word": false,
|
| 107 |
+
"special": true
|
| 108 |
+
},
|
| 109 |
+
"151656": {
|
| 110 |
+
"content": "<|video_pad|>",
|
| 111 |
+
"lstrip": false,
|
| 112 |
+
"normalized": false,
|
| 113 |
+
"rstrip": false,
|
| 114 |
+
"single_word": false,
|
| 115 |
+
"special": true
|
| 116 |
+
},
|
| 117 |
+
"151657": {
|
| 118 |
+
"content": "<tool_call>",
|
| 119 |
+
"lstrip": false,
|
| 120 |
+
"normalized": false,
|
| 121 |
+
"rstrip": false,
|
| 122 |
+
"single_word": false,
|
| 123 |
+
"special": false
|
| 124 |
+
},
|
| 125 |
+
"151658": {
|
| 126 |
+
"content": "</tool_call>",
|
| 127 |
+
"lstrip": false,
|
| 128 |
+
"normalized": false,
|
| 129 |
+
"rstrip": false,
|
| 130 |
+
"single_word": false,
|
| 131 |
+
"special": false
|
| 132 |
+
},
|
| 133 |
+
"151659": {
|
| 134 |
+
"content": "<|fim_prefix|>",
|
| 135 |
+
"lstrip": false,
|
| 136 |
+
"normalized": false,
|
| 137 |
+
"rstrip": false,
|
| 138 |
+
"single_word": false,
|
| 139 |
+
"special": false
|
| 140 |
+
},
|
| 141 |
+
"151660": {
|
| 142 |
+
"content": "<|fim_middle|>",
|
| 143 |
+
"lstrip": false,
|
| 144 |
+
"normalized": false,
|
| 145 |
+
"rstrip": false,
|
| 146 |
+
"single_word": false,
|
| 147 |
+
"special": false
|
| 148 |
+
},
|
| 149 |
+
"151661": {
|
| 150 |
+
"content": "<|fim_suffix|>",
|
| 151 |
+
"lstrip": false,
|
| 152 |
+
"normalized": false,
|
| 153 |
+
"rstrip": false,
|
| 154 |
+
"single_word": false,
|
| 155 |
+
"special": false
|
| 156 |
+
},
|
| 157 |
+
"151662": {
|
| 158 |
+
"content": "<|fim_pad|>",
|
| 159 |
+
"lstrip": false,
|
| 160 |
+
"normalized": false,
|
| 161 |
+
"rstrip": false,
|
| 162 |
+
"single_word": false,
|
| 163 |
+
"special": false
|
| 164 |
+
},
|
| 165 |
+
"151663": {
|
| 166 |
+
"content": "<|repo_name|>",
|
| 167 |
+
"lstrip": false,
|
| 168 |
+
"normalized": false,
|
| 169 |
+
"rstrip": false,
|
| 170 |
+
"single_word": false,
|
| 171 |
+
"special": false
|
| 172 |
+
},
|
| 173 |
+
"151664": {
|
| 174 |
+
"content": "<|file_sep|>",
|
| 175 |
+
"lstrip": false,
|
| 176 |
+
"normalized": false,
|
| 177 |
+
"rstrip": false,
|
| 178 |
+
"single_word": false,
|
| 179 |
+
"special": false
|
| 180 |
+
},
|
| 181 |
+
"151665": {
|
| 182 |
+
"content": "<tool_response>",
|
| 183 |
+
"lstrip": false,
|
| 184 |
+
"normalized": false,
|
| 185 |
+
"rstrip": false,
|
| 186 |
+
"single_word": false,
|
| 187 |
+
"special": false
|
| 188 |
+
},
|
| 189 |
+
"151666": {
|
| 190 |
+
"content": "</tool_response>",
|
| 191 |
+
"lstrip": false,
|
| 192 |
+
"normalized": false,
|
| 193 |
+
"rstrip": false,
|
| 194 |
+
"single_word": false,
|
| 195 |
+
"special": false
|
| 196 |
+
},
|
| 197 |
+
"151667": {
|
| 198 |
+
"content": "<think>",
|
| 199 |
+
"lstrip": false,
|
| 200 |
+
"normalized": false,
|
| 201 |
+
"rstrip": false,
|
| 202 |
+
"single_word": false,
|
| 203 |
+
"special": false
|
| 204 |
+
},
|
| 205 |
+
"151668": {
|
| 206 |
+
"content": "</think>",
|
| 207 |
+
"lstrip": false,
|
| 208 |
+
"normalized": false,
|
| 209 |
+
"rstrip": false,
|
| 210 |
+
"single_word": false,
|
| 211 |
+
"special": false
|
| 212 |
+
}
|
| 213 |
+
},
|
| 214 |
+
"additional_special_tokens": [
|
| 215 |
+
"<|im_start|>",
|
| 216 |
+
"<|im_end|>",
|
| 217 |
+
"<|object_ref_start|>",
|
| 218 |
+
"<|object_ref_end|>",
|
| 219 |
+
"<|box_start|>",
|
| 220 |
+
"<|box_end|>",
|
| 221 |
+
"<|quad_start|>",
|
| 222 |
+
"<|quad_end|>",
|
| 223 |
+
"<|vision_start|>",
|
| 224 |
+
"<|vision_end|>",
|
| 225 |
+
"<|vision_pad|>",
|
| 226 |
+
"<|image_pad|>",
|
| 227 |
+
"<|video_pad|>"
|
| 228 |
+
],
|
| 229 |
+
"bos_token": null,
|
| 230 |
+
"clean_up_tokenization_spaces": false,
|
| 231 |
+
"eos_token": "<|im_end|>",
|
| 232 |
+
"errors": "replace",
|
| 233 |
+
"extra_special_tokens": {},
|
| 234 |
+
"model_max_length": 131072,
|
| 235 |
+
"pad_token": "<|endoftext|>",
|
| 236 |
+
"split_special_tokens": false,
|
| 237 |
+
"tokenizer_class": "Qwen2Tokenizer",
|
| 238 |
+
"unk_token": null
|
| 239 |
+
}
|
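For completeness: the tokenizer files above declare `<|im_end|>` (id 151645) as the end-of-sequence token, `<|endoftext|>` (id 151643) as the padding token, and no BOS token. The quick check below is a sketch; the repository id is a placeholder, and it assumes `AutoTokenizer` resolves the `Qwen2Tokenizer` class declared in `tokenizer_config.json`.

```python
from transformers import AutoTokenizer

# "your-org/Efficient-DLM-4B" is a placeholder repo id for this sketch.
tok = AutoTokenizer.from_pretrained("your-org/Efficient-DLM-4B", trust_remote_code=True)

print(tok.eos_token, tok.eos_token_id)    # expected: <|im_end|> 151645
print(tok.pad_token, tok.pad_token_id)    # expected: <|endoftext|> 151643
print(tok.bos_token)                      # expected: None
print(tok.convert_ids_to_tokens(151662))  # expected: <|fim_pad|>, used as the diffusion mask id
```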
vocab.json
ADDED
|
The diff for this file is too large to render.
|