drawais committed (verified)
Commit eb57d38 · Parent: f130bad

Initial upload of Qwen3-Reranker-4B-AWQ-INT4

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: apache-2.0
+
+ base_model: Qwen/Qwen3-Reranker-4B
+ tags:
+ - quantized
+ - 4-bit
+ - int4
+ - awq
+ language:
+ - en
+ library_name: transformers
+ pipeline_tag: text-generation
+ ---
+
+ # Qwen3-Reranker-4B-AWQ-INT4
+
+ INT4 weight-only quantization of [`Qwen/Qwen3-Reranker-4B`](https://huggingface.co/Qwen/Qwen3-Reranker-4B).
+
+ Pair it with the companion Qwen3 Embedding model for a fully local retrieval stack.
+
+ | Property | Value |
+ |---|---|
+ | Base model | [Qwen/Qwen3-Reranker-4B](https://huggingface.co/Qwen/Qwen3-Reranker-4B) |
+ | Quantization | INT4 weight-only |
+ | Approx. on-disk size | ~2.7 GB |
+ | Languages | English |
+
+ ## Load (vLLM)
+
+ ```bash
+ vllm serve drawais/Qwen3-Reranker-4B-AWQ-INT4 \
+   --max-model-len 32768 \
+   --gpu-memory-utilization 0.94
+ ```
+
+ ```python
+ from vllm import LLM, SamplingParams
+ llm = LLM(model="drawais/Qwen3-Reranker-4B-AWQ-INT4", max_model_len=32768)
+ print(llm.generate(["Hello!"], SamplingParams(max_tokens=128))[0].outputs[0].text)
+ ```
+
+ ## Footprint
+
+ ~2.7 GB on disk (`model.safetensors` is ~2.65 GB). Budget VRAM for the weights plus KV-cache headroom; the cache grows with `--max-model-len` and the number of concurrent sequences.
+
+ ## License
+
+ This artifact follows the license of the base model: **apache-2.0**. See [`LICENSE`](LICENSE) for terms.
+
+ ## Acknowledgements
+
+ Source model © its respective authors.
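
The Python snippet in the README is only a smoke test. In practice a reranker is asked for a single "yes"/"no" verdict token, and the relevance score is the normalized probability of "yes". Below is a minimal scoring sketch with vLLM, assuming the prompt layout from `chat_template.jinja` added in this commit and the usual Qwen3-Reranker "yes"/"no" token lookup (verify the token ids against this tokenizer):

```python
import math
from vllm import LLM, SamplingParams

llm = LLM(model="drawais/Qwen3-Reranker-4B-AWQ-INT4", max_model_len=32768)
tok = llm.get_tokenizer()
YES_ID = tok.convert_tokens_to_ids("yes")
NO_ID = tok.convert_tokens_to_ids("no")

# Mirrors chat_template.jinja: judge preamble, Instruct/Query/Document,
# then an empty <think> block so the next token is the verdict.
PROMPT = (
    "<|im_start|>system\n"
    "Judge whether the Document meets the requirements based on the Query and "
    'the Instruct provided. Note that the answer can only be "yes" or "no".<|im_end|>\n'
    "<|im_start|>user\n"
    "<Instruct>: {instruction}\n<Query>: {query}\n<Document>: {document}<|im_end|>\n"
    "<|im_start|>assistant\n<think>\n\n</think>\n\n"
)

def score(query: str, document: str,
          instruction: str = "Given a web search query, retrieve relevant passages that answer the query") -> float:
    # Generate exactly one token and read its top log-probs.
    params = SamplingParams(temperature=0.0, max_tokens=1, logprobs=20)
    prompt = PROMPT.format(instruction=instruction, query=query, document=document)
    lp = llm.generate([prompt], params)[0].outputs[0].logprobs[0]
    yes = math.exp(lp[YES_ID].logprob) if YES_ID in lp else 0.0
    no = math.exp(lp[NO_ID].logprob) if NO_ID in lp else 0.0
    # Normalized P("yes") over the two verdict tokens.
    return yes / (yes + no) if (yes + no) > 0 else 0.0

print(score("what is AWQ?", "AWQ is an activation-aware weight-only quantization method."))
```

The empty `<think>` block matches the template below and keeps the verdict at the first generated token.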
chat_template.jinja ADDED
@@ -0,0 +1,15 @@
+ {%- set instruction = messages | selectattr("role", "eq", "system") | map(attribute="content") | first | default("Given a web search query, retrieve relevant passages that answer the query") -%}
+ {%- set query_text = messages | selectattr("role", "eq", "query") | map(attribute="content") | first -%}
+ {%- set document_text = messages | selectattr("role", "eq", "document") | map(attribute="content") | first -%}
+ <|im_start|>system
+ Judge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be "yes" or "no".<|im_end|>
+ <|im_start|>user
+ <Instruct>: {{ instruction }}
+ <Query>: {{ query_text }}
+ <Document>: {{ document_text }}<|im_end|>
+ <|im_start|>assistant
+ <think>
+
+ </think>
+
+
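
Note that this template consumes non-standard roles: it selects the `query` and `document` messages (plus an optional `system` instruction, falling back to a web-search default) rather than user/assistant turns. A quick rendering sketch with transformers, assuming a version recent enough to load a repo-level `chat_template.jinja`:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("drawais/Qwen3-Reranker-4B-AWQ-INT4")
messages = [
    # A {"role": "system", ...} entry here would override the default instruction.
    {"role": "query", "content": "what is AWQ?"},
    {"role": "document", "content": "AWQ is an activation-aware weight-only quantization method."},
]
text = tok.apply_chat_template(messages, tokenize=False)
print(text)  # judge preamble, <Instruct>/<Query>/<Document>, empty <think> block
```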
config.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "architectures": [
+     "Qwen3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "dtype": "bfloat16",
+   "eos_token_id": 151645,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 2560,
+   "initializer_range": 0.02,
+   "intermediate_size": 9728,
+   "layer_types": [
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention"
+   ],
+   "max_position_embeddings": 40960,
+   "max_window_layers": 36,
+   "model_type": "qwen3",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 36,
+   "num_key_value_heads": 8,
+   "pad_token_id": null,
+   "quantization_config": {
+     "config_groups": {
+       "group_0": {
+         "format": "pack-quantized",
+         "input_activations": null,
+         "output_activations": null,
+         "targets": [
+           "Linear"
+         ],
+         "weights": {
+           "actorder": null,
+           "block_structure": null,
+           "dynamic": false,
+           "group_size": 128,
+           "num_bits": 4,
+           "observer": "memoryless_minmax",
+           "observer_kwargs": {},
+           "scale_dtype": null,
+           "strategy": "group",
+           "symmetric": true,
+           "type": "int",
+           "zp_dtype": null
+         }
+       }
+     },
+     "format": "pack-quantized",
+     "global_compression_ratio": null,
+     "ignore": [
+       "lm_head"
+     ],
+     "kv_cache_scheme": null,
+     "quant_method": "compressed-tensors",
+     "quantization_status": "compressed",
+     "sparsity_config": {},
+     "transform_config": {},
+     "version": "0.15.1.a20260428"
+   },
+   "rms_norm_eps": 1e-06,
+   "rope_parameters": {
+     "rope_theta": 1000000,
+     "rope_type": "default"
+   },
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "transformers_version": "5.8.0.dev0",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151669
+ }
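
Since `quant_method` is `compressed-tensors`, the checkpoint should also load directly in transformers when the `compressed-tensors` package is installed; a minimal sketch (untested against this exact repo):

```python
# pip install compressed-tensors
from transformers import AutoModelForCausalLM, AutoTokenizer

# transformers reads quantization_config from config.json and unpacks the
# pack-quantized W4A16 weights at load time.
model = AutoModelForCausalLM.from_pretrained(
    "drawais/Qwen3-Reranker-4B-AWQ-INT4",
    device_map="auto",
)
tok = AutoTokenizer.from_pretrained("drawais/Qwen3-Reranker-4B-AWQ-INT4")
```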
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.6,
+   "top_k": 20,
+   "top_p": 0.95,
+   "transformers_version": "5.8.0.dev0"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e221de455aa1a946f7e0c7a457913973ddf3df921aa74750633d82fb74a255e0
+ size 2650472528
recipe.yaml ADDED
@@ -0,0 +1,23 @@
+ default_stage:
+   default_modifiers:
+     AWQModifier:
+       mappings:
+       - smooth_layer: re:.*input_layernorm$
+         balance_layers: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$']
+         activation_hook_target: null
+       - smooth_layer: re:.*v_proj$
+         balance_layers: ['re:.*o_proj$']
+         activation_hook_target: null
+       - smooth_layer: re:.*post_attention_layernorm$
+         balance_layers: ['re:.*gate_proj$', 're:.*up_proj$']
+         activation_hook_target: null
+       - smooth_layer: re:.*up_proj$
+         balance_layers: ['re:.*down_proj$']
+         activation_hook_target: null
+       duo_scaling: true
+       n_grid: 20
+     QuantizationModifier:
+       targets: [Linear]
+       ignore: [lm_head, 're:.*embed.*', 're:.*router.*', 're:.*\.gate$']
+       scheme: W4A16
+       bypass_divisibility_checks: false
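
This recipe follows llm-compressor's `AWQModifier`/`QuantizationModifier` schema, so the quantization should be reproducible with its `oneshot` entry point. A hedged sketch; the calibration dataset, sequence length, and sample count below are placeholders, not values recorded in this commit:

```python
# Assumes a recent llm-compressor (pip install llmcompressor); older releases
# exposed oneshot under llmcompressor.transformers instead.
from llmcompressor import oneshot

oneshot(
    model="Qwen/Qwen3-Reranker-4B",   # base model being quantized
    recipe="recipe.yaml",             # the recipe added in this commit
    dataset="open_platypus",          # placeholder calibration set
    max_seq_length=2048,              # placeholder
    num_calibration_samples=256,      # placeholder
    output_dir="Qwen3-Reranker-4B-AWQ-INT4",
)
```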
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e946ac23b6a68f7a2abbe7b3c22190673c6d3d159b85305268db51b2729ac68a
+ size 11422749
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "is_local": false,
+   "local_files_only": false,
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }