drawais committed on
Commit
334e2e5
·
verified ·
1 Parent(s): 7893f39

Initial upload of Qwen3-Embedding-4B-AWQ-INT4

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+
4
+ base_model: Qwen/Qwen3-Embedding-4B
5
+ tags:
6
+ - quantized
7
+ - 4-bit
8
+ - int4
9
+ - awq
10
+ language:
11
+ - en
12
+ library_name: transformers
13
+ pipeline_tag: feature-extraction
14
+ ---
15
+
16
+ # Qwen3-Embedding-4B-AWQ-INT4
17
+
18
+ INT4 weight-only quantization of [`Qwen/Qwen3-Embedding-4B`](https://huggingface.co/Qwen/Qwen3-Embedding-4B).
19
+
20
+ Qwen 3 Embedding 4B in INT4. Drop-in for any embedding stack. Fits on a 6 GB consumer GPU.
21
+
22
+ | Property | Value |
23
+ |---|---|
24
+ | Base model | [Qwen/Qwen3-Embedding-4B](https://huggingface.co/Qwen/Qwen3-Embedding-4B) |
25
+ | Quantization | INT4 weight-only |
26
+ | Approx. on-disk size | ~2.7 GB |
27
+ | Languages | English |
28
+
29
+ ## Load (vLLM)
30
+
31
+ ```bash
32
+ vllm serve drawais/Qwen3-Embedding-4B-AWQ-INT4 --task embed \
33
+ --max-model-len 32768 \
34
+ --gpu-memory-utilization 0.94
35
+ ```
36
+
37
+ ```python
38
+ from vllm import LLM
39
+ llm = LLM(model="drawais/Qwen3-Embedding-4B-AWQ-INT4", task="embed", max_model_len=32768)
40
+ print(llm.embed(["Hello!"])[0].outputs.embedding[:8])
41
+ ```
42
+
43
+ ## Footprint
44
+
45
+ ~2.7 GB on disk. Recommended VRAM: at least 6 GB — ~2.7 GB for the weights plus headroom for the KV cache.
46
+
47
+ ## License
48
+
49
+ This artifact follows the license of the base model: **apache-2.0**. See [`LICENSE`](LICENSE) for terms.
50
+
51
+ ## Acknowledgements
52
+
53
+ Source model © its respective authors.
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
config.json ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen3ForCausalLM"
4
+ ],
5
+ "attention_bias": false,
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 151643,
8
+ "dtype": "bfloat16",
9
+ "eos_token_id": 151645,
10
+ "head_dim": 128,
11
+ "hidden_act": "silu",
12
+ "hidden_size": 2560,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 9728,
15
+ "layer_types": [
16
+ "full_attention",
17
+ "full_attention",
18
+ "full_attention",
19
+ "full_attention",
20
+ "full_attention",
21
+ "full_attention",
22
+ "full_attention",
23
+ "full_attention",
24
+ "full_attention",
25
+ "full_attention",
26
+ "full_attention",
27
+ "full_attention",
28
+ "full_attention",
29
+ "full_attention",
30
+ "full_attention",
31
+ "full_attention",
32
+ "full_attention",
33
+ "full_attention",
34
+ "full_attention",
35
+ "full_attention",
36
+ "full_attention",
37
+ "full_attention",
38
+ "full_attention",
39
+ "full_attention",
40
+ "full_attention",
41
+ "full_attention",
42
+ "full_attention",
43
+ "full_attention",
44
+ "full_attention",
45
+ "full_attention",
46
+ "full_attention",
47
+ "full_attention",
48
+ "full_attention",
49
+ "full_attention",
50
+ "full_attention",
51
+ "full_attention"
52
+ ],
53
+ "max_position_embeddings": 40960,
54
+ "max_window_layers": 36,
55
+ "model_type": "qwen3",
56
+ "num_attention_heads": 32,
57
+ "num_hidden_layers": 36,
58
+ "num_key_value_heads": 8,
59
+ "pad_token_id": null,
60
+ "quantization_config": {
61
+ "config_groups": {
62
+ "group_0": {
63
+ "format": "pack-quantized",
64
+ "input_activations": null,
65
+ "output_activations": null,
66
+ "targets": [
67
+ "Linear"
68
+ ],
69
+ "weights": {
70
+ "actorder": null,
71
+ "block_structure": null,
72
+ "dynamic": false,
73
+ "group_size": 128,
74
+ "num_bits": 4,
75
+ "observer": "memoryless_minmax",
76
+ "observer_kwargs": {},
77
+ "scale_dtype": null,
78
+ "strategy": "group",
79
+ "symmetric": true,
80
+ "type": "int",
81
+ "zp_dtype": null
82
+ }
83
+ }
84
+ },
85
+ "format": "pack-quantized",
86
+ "global_compression_ratio": null,
87
+ "ignore": [
88
+ "lm_head"
89
+ ],
90
+ "kv_cache_scheme": null,
91
+ "quant_method": "compressed-tensors",
92
+ "quantization_status": "compressed",
93
+ "sparsity_config": {},
94
+ "transform_config": {},
95
+ "version": "0.15.1.a20260428"
96
+ },
97
+ "rms_norm_eps": 1e-06,
98
+ "rope_parameters": {
99
+ "rope_theta": 1000000,
100
+ "rope_type": "default"
101
+ },
102
+ "sliding_window": null,
103
+ "tie_word_embeddings": true,
104
+ "transformers_version": "5.8.0.dev0",
105
+ "use_cache": true,
106
+ "use_sliding_window": false,
107
+ "vocab_size": 151665
108
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "eos_token_id": 151643,
4
+ "max_new_tokens": 2048,
5
+ "transformers_version": "5.8.0.dev0"
6
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d621114fcdefdb7f3ad088dea7223f435d20323665a35b8423c83f48509782f
3
+ size 2650452048
recipe.yaml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ default_stage:
2
+ default_modifiers:
3
+ AWQModifier:
4
+ mappings:
5
+ - smooth_layer: re:.*input_layernorm$
6
+ balance_layers: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$']
7
+ activation_hook_target: null
8
+ - smooth_layer: re:.*v_proj$
9
+ balance_layers: ['re:.*o_proj$']
10
+ activation_hook_target: null
11
+ - smooth_layer: re:.*post_attention_layernorm$
12
+ balance_layers: ['re:.*gate_proj$', 're:.*up_proj$']
13
+ activation_hook_target: null
14
+ - smooth_layer: re:.*up_proj$
15
+ balance_layers: ['re:.*down_proj$']
16
+ activation_hook_target: null
17
+ duo_scaling: true
18
+ n_grid: 20
19
+ QuantizationModifier:
20
+ targets: [Linear]
21
+ ignore: [lm_head, 're:.*embed.*', 're:.*router.*', 're:.*\.gate$']
22
+ scheme: W4A16
23
+ bypass_divisibility_checks: false
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae3f2376d5fafe6691e99ba7ba64f50b091328710febe6a70d698adade8ba3b1
3
+ size 11423042
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": null,
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|im_end|>",
7
+ "errors": "replace",
8
+ "is_local": false,
9
+ "local_files_only": false,
10
+ "model_max_length": 131072,
11
+ "pad_token": "<|endoftext|>",
12
+ "split_special_tokens": false,
13
+ "tokenizer_class": "Qwen2Tokenizer",
14
+ "unk_token": null
15
+ }