drawais committed on
Commit
1663034
·
verified ·
1 Parent(s): a0cb6f3

Initial upload of DeepSeek-R1-Distill-Qwen-32B-NVFP4

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2024 DeepSeek
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
NOTICE ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ NOTICE
2
+
3
+ This artifact is a derivative work of deepseek-ai/DeepSeek-R1-Distill-Qwen-32B, distributed under the MIT License.
4
+ The full license text is in the LICENSE file at the root of this repository.
5
+
6
+ Source model: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
README.md ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ license_link: https://opensource.org/license/mit
4
+ base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
5
+ tags:
6
+ - quantized
7
+ - 4-bit
8
+ - fp4
9
+ - nvfp4
10
+ language:
11
+ - en
12
+ library_name: transformers
13
+ pipeline_tag: text-generation
14
+ ---
15
+
16
+ # DeepSeek-R1-Distill-Qwen-32B-NVFP4
17
+
18
+ NVFP4 (W4A4) quantization of [`deepseek-ai/DeepSeek-R1-Distill-Qwen-32B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B).
19
+
20
+ DeepSeek-R1-Distill-Qwen-32B in NVFP4 W4A4. Native vLLM compressed-tensors. About 20.7 GB on disk.
21
+
22
+ | Property | Value |
23
+ |---|---|
24
+ | Base model | [deepseek-ai/DeepSeek-R1-Distill-Qwen-32B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B) |
25
+ | Quantization | NVFP4 (W4A4, compressed-tensors) |
26
+ | Approx. on-disk size | ~20.7 GB |
27
+ | License | MIT License |
28
+ | Languages | English |
29
+
30
+ ## Load (vLLM)
31
+
32
+ ```bash
33
+ vllm serve drawais/DeepSeek-R1-Distill-Qwen-32B-NVFP4 \
34
+ --max-model-len 32768 \
35
+ --gpu-memory-utilization 0.94
36
+ ```
37
+
38
+ ```python
39
+ from vllm import LLM, SamplingParams
40
+ llm = LLM(model="drawais/DeepSeek-R1-Distill-Qwen-32B-NVFP4", max_model_len=32768)
41
+ print(llm.generate(["Hello!"], SamplingParams(max_tokens=128))[0].outputs[0].text)
42
+ ```
43
+
44
+ ## Footprint
45
+
46
+ ~20.7 GB on disk. Recommended VRAM: enough headroom for KV cache.
47
+
48
+ ## License & attribution
49
+
50
+ This artifact is a derivative work of [`deepseek-ai/DeepSeek-R1-Distill-Qwen-32B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B),
51
+ released by its original authors under the **MIT License**.
52
+
53
+ This artifact is distributed under the same license. The full license text is
54
+ included in [`LICENSE`](LICENSE), and required attribution is in [`NOTICE`](NOTICE).
55
+
56
+ License text: https://opensource.org/license/mit
57
+ Source model: https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
chat_template.jinja ADDED
@@ -0,0 +1 @@
 
 
1
+ {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% 
endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\n'}}{% endif %}
config.json ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen2ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 151643,
7
+ "dtype": "bfloat16",
8
+ "eos_token_id": 151643,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 5120,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 27648,
13
+ "layer_types": [
14
+ "full_attention",
15
+ "full_attention",
16
+ "full_attention",
17
+ "full_attention",
18
+ "full_attention",
19
+ "full_attention",
20
+ "full_attention",
21
+ "full_attention",
22
+ "full_attention",
23
+ "full_attention",
24
+ "full_attention",
25
+ "full_attention",
26
+ "full_attention",
27
+ "full_attention",
28
+ "full_attention",
29
+ "full_attention",
30
+ "full_attention",
31
+ "full_attention",
32
+ "full_attention",
33
+ "full_attention",
34
+ "full_attention",
35
+ "full_attention",
36
+ "full_attention",
37
+ "full_attention",
38
+ "full_attention",
39
+ "full_attention",
40
+ "full_attention",
41
+ "full_attention",
42
+ "full_attention",
43
+ "full_attention",
44
+ "full_attention",
45
+ "full_attention",
46
+ "full_attention",
47
+ "full_attention",
48
+ "full_attention",
49
+ "full_attention",
50
+ "full_attention",
51
+ "full_attention",
52
+ "full_attention",
53
+ "full_attention",
54
+ "full_attention",
55
+ "full_attention",
56
+ "full_attention",
57
+ "full_attention",
58
+ "full_attention",
59
+ "full_attention",
60
+ "full_attention",
61
+ "full_attention",
62
+ "full_attention",
63
+ "full_attention",
64
+ "full_attention",
65
+ "full_attention",
66
+ "full_attention",
67
+ "full_attention",
68
+ "full_attention",
69
+ "full_attention",
70
+ "full_attention",
71
+ "full_attention",
72
+ "full_attention",
73
+ "full_attention",
74
+ "full_attention",
75
+ "full_attention",
76
+ "full_attention",
77
+ "full_attention"
78
+ ],
79
+ "max_position_embeddings": 131072,
80
+ "max_window_layers": 64,
81
+ "model_type": "qwen2",
82
+ "num_attention_heads": 40,
83
+ "num_hidden_layers": 64,
84
+ "num_key_value_heads": 8,
85
+ "pad_token_id": null,
86
+ "quantization_config": {
87
+ "config_groups": {
88
+ "group_0": {
89
+ "format": "nvfp4-pack-quantized",
90
+ "input_activations": {
91
+ "actorder": null,
92
+ "block_structure": null,
93
+ "dynamic": "local",
94
+ "group_size": 16,
95
+ "num_bits": 4,
96
+ "observer": "static_minmax",
97
+ "observer_kwargs": {},
98
+ "scale_dtype": "torch.float8_e4m3fn",
99
+ "strategy": "tensor_group",
100
+ "symmetric": true,
101
+ "type": "float",
102
+ "zp_dtype": null
103
+ },
104
+ "output_activations": null,
105
+ "targets": [
106
+ "Linear"
107
+ ],
108
+ "weights": {
109
+ "actorder": null,
110
+ "block_structure": null,
111
+ "dynamic": false,
112
+ "group_size": 16,
113
+ "num_bits": 4,
114
+ "observer": "memoryless_minmax",
115
+ "observer_kwargs": {},
116
+ "scale_dtype": "torch.float8_e4m3fn",
117
+ "strategy": "tensor_group",
118
+ "symmetric": true,
119
+ "type": "float",
120
+ "zp_dtype": null
121
+ }
122
+ }
123
+ },
124
+ "format": "nvfp4-pack-quantized",
125
+ "global_compression_ratio": null,
126
+ "ignore": [
127
+ "lm_head"
128
+ ],
129
+ "kv_cache_scheme": null,
130
+ "quant_method": "compressed-tensors",
131
+ "quantization_status": "compressed",
132
+ "sparsity_config": {},
133
+ "transform_config": {},
134
+ "version": "0.15.1.a20260428"
135
+ },
136
+ "rms_norm_eps": 1e-05,
137
+ "rope_parameters": {
138
+ "rope_theta": 1000000.0,
139
+ "rope_type": "default"
140
+ },
141
+ "sliding_window": null,
142
+ "tie_word_embeddings": false,
143
+ "transformers_version": "5.8.0.dev0",
144
+ "use_cache": true,
145
+ "use_sliding_window": false,
146
+ "vocab_size": 152064
147
+ }
generation_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 151646,
4
+ "do_sample": true,
5
+ "eos_token_id": 151643,
6
+ "temperature": 0.6,
7
+ "top_p": 0.95,
8
+ "transformers_version": "5.8.0.dev0"
9
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1562c55fbd2faaaaa08b8d76f5434cec91b259498b8d7f01cf21e423402face3
3
+ size 20669918528
recipe.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ default_stage:
2
+ default_modifiers:
3
+ QuantizationModifier:
4
+ targets: [Linear]
5
+ ignore: [lm_head, 're:.*embed.*', 're:.*router.*']
6
+ scheme: NVFP4
7
+ bypass_divisibility_checks: false
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28a58d07e59d765b325afb1027588b403441a5f534726372918a5f3cfcdf7e87
3
+ size 11421883
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": null,
3
+ "backend": "tokenizers",
4
+ "bos_token": "<|begin▁of▁sentence|>",
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|end▁of▁sentence|>",
7
+ "is_local": false,
8
+ "local_files_only": false,
9
+ "model_max_length": 16384,
10
+ "pad_token": "<|end▁of▁sentence|>",
11
+ "sp_model_kwargs": {},
12
+ "tokenizer_class": "LlamaTokenizer",
13
+ "unk_token": null,
14
+ "use_default_system_prompt": false
15
+ }