helcig committed · verified
Commit e9a164c · Parent: 745bcc4

Add general-25-nonuniform

Files changed (40)
  1. .gitattributes +1 -0
  2. general-25-nonuniform/LOAD_VLLM.md +43 -0
  3. general-25-nonuniform/added_tokens.json +28 -0
  4. general-25-nonuniform/chat_template.jinja +117 -0
  5. general-25-nonuniform/config.json +145 -0
  6. general-25-nonuniform/generation_config.json +12 -0
  7. general-25-nonuniform/merges.txt +0 -0
  8. general-25-nonuniform/model-00001-of-00025.safetensors +3 -0
  9. general-25-nonuniform/model-00002-of-00025.safetensors +3 -0
  10. general-25-nonuniform/model-00003-of-00025.safetensors +3 -0
  11. general-25-nonuniform/model-00004-of-00025.safetensors +3 -0
  12. general-25-nonuniform/model-00005-of-00025.safetensors +3 -0
  13. general-25-nonuniform/model-00006-of-00025.safetensors +3 -0
  14. general-25-nonuniform/model-00007-of-00025.safetensors +3 -0
  15. general-25-nonuniform/model-00008-of-00025.safetensors +3 -0
  16. general-25-nonuniform/model-00009-of-00025.safetensors +3 -0
  17. general-25-nonuniform/model-00010-of-00025.safetensors +3 -0
  18. general-25-nonuniform/model-00011-of-00025.safetensors +3 -0
  19. general-25-nonuniform/model-00012-of-00025.safetensors +3 -0
  20. general-25-nonuniform/model-00013-of-00025.safetensors +3 -0
  21. general-25-nonuniform/model-00014-of-00025.safetensors +3 -0
  22. general-25-nonuniform/model-00015-of-00025.safetensors +3 -0
  23. general-25-nonuniform/model-00016-of-00025.safetensors +3 -0
  24. general-25-nonuniform/model-00017-of-00025.safetensors +3 -0
  25. general-25-nonuniform/model-00018-of-00025.safetensors +3 -0
  26. general-25-nonuniform/model-00019-of-00025.safetensors +3 -0
  27. general-25-nonuniform/model-00020-of-00025.safetensors +3 -0
  28. general-25-nonuniform/model-00021-of-00025.safetensors +3 -0
  29. general-25-nonuniform/model-00022-of-00025.safetensors +3 -0
  30. general-25-nonuniform/model-00023-of-00025.safetensors +3 -0
  31. general-25-nonuniform/model-00024-of-00025.safetensors +3 -0
  32. general-25-nonuniform/model-00025-of-00025.safetensors +3 -0
  33. general-25-nonuniform/model.safetensors.index.json +0 -0
  34. general-25-nonuniform/pruned_metadata.json +60 -0
  35. general-25-nonuniform/sitecustomize.py +12 -0
  36. general-25-nonuniform/special_tokens_map.json +31 -0
  37. general-25-nonuniform/tokenizer.json +3 -0
  38. general-25-nonuniform/tokenizer_config.json +239 -0
  39. general-25-nonuniform/vllm_pruned_patch.py +88 -0
  40. general-25-nonuniform/vocab.json +0 -0
.gitattributes CHANGED
@@ -38,3 +38,4 @@ coding-25-nonuniform/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  coding-50-uniform/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  coding-50-nonuniform/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  general-25-uniform/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ general-25-nonuniform/tokenizer.json filter=lfs diff=lfs merge=lfs -text
general-25-nonuniform/LOAD_VLLM.md ADDED
@@ -0,0 +1,43 @@
+ # Loading this variant with vLLM
+
+ The checkpoint has heterogeneous per-layer expert counts. vLLM's stock
+ `Qwen3NextSparseMoeBlock` builds every layer with `config.num_experts`; our
+ bundled `vllm_pruned_patch.py` overrides each layer to use its own count from
+ `config.per_layer_num_experts`.
+
+ ## One-liner
+
+ ```bash
+ PYTHONPATH=$(pwd):${PYTHONPATH:-} python -c "
+ from vllm import LLM, SamplingParams
+ llm = LLM(model='.', tensor_parallel_size=4, dtype='bfloat16',
+           gpu_memory_utilization=0.85, trust_remote_code=True,
+           enforce_eager=True)
+ print(llm.generate(['def fib(n):'], SamplingParams(max_tokens=128))[0].outputs[0].text)
+ "
+ ```
+
+ ## Why PYTHONPATH?
+
+ vLLM spawns worker subprocesses via `multiprocessing` with the `spawn` start
+ method (safe with CUDA). Those workers re-import `vllm` fresh, so any
+ monkey-patch you applied in the parent process is gone. Python's
+ `sitecustomize.py` mechanism runs automatically in **every** interpreter that
+ has the relevant directory on `sys.path`. Putting this variant folder on
+ `PYTHONPATH` is enough.
+
+ ## Tensor parallelism notes
+
+ - **TP (tensor parallel)** works fine with heterogeneous counts, since TP
+   shards hidden dimensions inside each expert.
+ - **EP (expert parallel)** assumes experts shard evenly across ranks, which
+   breaks with heterogeneous counts. Keep `--enable-eplb` off.
+
+ ## With lm-eval-harness
+
+ ```bash
+ PYTHONPATH=$(pwd):${PYTHONPATH:-} \
+ lm_eval --model vllm \
+   --model_args "pretrained=.,tensor_parallel_size=4,dtype=bfloat16,gpu_memory_utilization=0.85,max_model_len=4096,trust_remote_code=True,enforce_eager=True" \
+   --tasks humaneval,mbpp \
+   --batch_size auto
+ ```
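
A quick way to sanity-check the `sitecustomize` mechanism described in this file, before paying for a full model load: spawn a child process the same way vLLM does and confirm the patch module is visible there. This is a minimal sketch of ours, not part of the committed files; it assumes you run it from inside the variant folder.

```python
# Sketch (not part of the repo): a spawn-started child should pick up
# sitecustomize.py via PYTHONPATH and import vllm_pruned_patch at startup.
import multiprocessing as mp
import os
import sys


def report(queue):
    # sitecustomize has already run during child interpreter startup if the
    # variant folder was on PYTHONPATH when the child was spawned.
    queue.put("vllm_pruned_patch" in sys.modules)


if __name__ == "__main__":
    os.environ["PYTHONPATH"] = os.getcwd()  # assumption: cwd = variant folder
    ctx = mp.get_context("spawn")           # same start method vLLM uses
    q = ctx.Queue()
    p = ctx.Process(target=report, args=(q,))
    p.start()
    p.join()
    print("patch module visible in spawned child:", q.get())
```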
general-25-nonuniform/added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
general-25-nonuniform/chat_template.jinja ADDED
@@ -0,0 +1,117 @@
+ {% macro render_extra_keys(json_dict, handled_keys) %}
+ {%- if json_dict is mapping %}
+ {%- for json_key in json_dict if json_key not in handled_keys %}
+ {%- if json_dict[json_key] is string %}
+ {{- '\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | string) ~ '</' ~ json_key ~ '>' }}
+ {%- else %}
+ {{- '\n<' ~ json_key ~ '>' ~ (json_dict[json_key] | tojson | safe) ~ '</' ~ json_key ~ '>' }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {%- endmacro %}
+
+ {%- if messages[0]["role"] == "system" %}
+ {%- set system_message = messages[0]["content"] %}
+ {%- set loop_messages = messages[1:] %}
+ {%- else %}
+ {%- set loop_messages = messages %}
+ {%- endif %}
+
+ {%- if not tools is defined %}
+ {%- set tools = [] %}
+ {%- endif %}
+
+ {%- if system_message is defined %}
+ {{- "<|im_start|>system\n" + system_message }}
+ {%- else %}
+ {%- if tools is iterable and tools | length > 0 %}
+ {{- "<|im_start|>system\nYou are Qwen, a helpful AI assistant that can interact with a computer to solve tasks." }}
+ {%- endif %}
+ {%- endif %}
+ {%- if tools is iterable and tools | length > 0 %}
+ {{- "\n\n# Tools\n\nYou have access to the following functions:\n\n" }}
+ {{- "<tools>" }}
+ {%- for tool in tools %}
+ {%- if tool.function is defined %}
+ {%- set tool = tool.function %}
+ {%- endif %}
+ {{- "\n<function>\n<name>" ~ tool.name ~ "</name>" }}
+ {%- if tool.description is defined %}
+ {{- '\n<description>' ~ (tool.description | trim) ~ '</description>' }}
+ {%- endif %}
+ {{- '\n<parameters>' }}
+ {%- if tool.parameters is defined and tool.parameters is mapping and tool.parameters.properties is defined and tool.parameters.properties is mapping %}
+ {%- for param_name, param_fields in tool.parameters.properties|items %}
+ {{- '\n<parameter>' }}
+ {{- '\n<name>' ~ param_name ~ '</name>' }}
+ {%- if param_fields.type is defined %}
+ {{- '\n<type>' ~ (param_fields.type | string) ~ '</type>' }}
+ {%- endif %}
+ {%- if param_fields.description is defined %}
+ {{- '\n<description>' ~ (param_fields.description | trim) ~ '</description>' }}
+ {%- endif %}
+ {%- set handled_keys = ['name', 'type', 'description'] %}
+ {{- render_extra_keys(param_fields, handled_keys) }}
+ {{- '\n</parameter>' }}
+ {%- endfor %}
+ {%- endif %}
+ {%- set handled_keys = ['type', 'properties'] %}
+ {{- render_extra_keys(tool.parameters, handled_keys) }}
+ {{- '\n</parameters>' }}
+ {%- set handled_keys = ['type', 'name', 'description', 'parameters'] %}
+ {{- render_extra_keys(tool, handled_keys) }}
+ {{- '\n</function>' }}
+ {%- endfor %}
+ {{- "\n</tools>" }}
+ {{- '\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n- Required parameters MUST be specified\n- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n</IMPORTANT>' }}
+ {%- endif %}
+ {%- if system_message is defined %}
+ {{- '<|im_end|>\n' }}
+ {%- else %}
+ {%- if tools is iterable and tools | length > 0 %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in loop_messages %}
+ {%- if message.role == "assistant" and message.tool_calls is defined and message.tool_calls is iterable and message.tool_calls | length > 0 %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}
+ {{- '\n' + message.content | trim + '\n' }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n<function=' + tool_call.name + '>\n' }}
+ {%- if tool_call.arguments is defined %}
+ {%- for args_name, args_value in tool_call.arguments|items %}
+ {{- '<parameter=' + args_name + '>\n' }}
+ {%- set args_value = args_value if args_value is string else args_value | tojson | safe %}
+ {{- args_value }}
+ {{- '\n</parameter>\n' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '</function>\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "user" or message.role == "system" or message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.previtem and loop.previtem.role != "tool" %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if not loop.last and loop.nextitem.role != "tool" %}
+ {{- '<|im_end|>\n' }}
+ {%- elif loop.last %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
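
To see what the template above actually produces, here is a hedged smoke test of ours (not part of the commit). It assumes a transformers version recent enough to pick up `chat_template.jinja` from the model folder automatically (the config pins 4.57.6, which does).

```python
# Render the chat template for a trivial conversation; no model weights needed.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("general-25-nonuniform")
messages = [
    {"role": "system", "content": "You are Qwen."},
    {"role": "user", "content": "What is 2 + 2?"},
]
rendered = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(rendered)
# Expected shape per the template: <|im_start|>system ... <|im_end|>, then the
# user turn, then a trailing '<|im_start|>assistant\n' generation prompt.
```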
general-25-nonuniform/config.json ADDED
@@ -0,0 +1,145 @@
+ {
+   "architectures": [
+     "Qwen3NextForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0,
+   "bos_token_id": 151643,
+   "decoder_sparse_step": 1,
+   "dtype": "bfloat16",
+   "eos_token_id": 151645,
+   "full_attention_interval": 4,
+   "head_dim": 256,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_types": [
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention",
+     "linear_attention",
+     "linear_attention",
+     "linear_attention",
+     "full_attention"
+   ],
+   "linear_conv_kernel_dim": 4,
+   "linear_key_head_dim": 128,
+   "linear_num_key_heads": 16,
+   "linear_num_value_heads": 32,
+   "linear_value_head_dim": 128,
+   "max_position_embeddings": 262144,
+   "mlp_only_layers": [],
+   "model_type": "qwen3_next",
+   "moe_intermediate_size": 512,
+   "norm_topk_prob": true,
+   "num_attention_heads": 16,
+   "num_experts": 512,
+   "num_experts_per_tok": 10,
+   "num_hidden_layers": 48,
+   "num_key_value_heads": 2,
+   "original_num_experts": 512,
+   "output_router_logits": false,
+   "partial_rotary_factor": 0.25,
+   "per_layer_num_experts": [
+     404,
+     340,
+     439,
+     406,
+     394,
+     413,
+     420,
+     399,
+     384,
+     362,
+     382,
+     384,
+     365,
+     368,
+     377,
+     375,
+     385,
+     383,
+     406,
+     396,
+     383,
+     348,
+     379,
+     376,
+     378,
+     385,
+     385,
+     385,
+     385,
+     407,
+     406,
+     405,
+     405,
+     356,
+     366,
+     375,
+     361,
+     391,
+     391,
+     385,
+     406,
+     382,
+     371,
+     379,
+     367,
+     372,
+     354,
+     367
+   ],
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 5000000,
+   "router_aux_loss_coef": 0.001,
+   "shared_expert_intermediate_size": 512,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.57.6",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
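
The config above encodes the invariants the patch relies on. A small consistency check of ours (not repo tooling) that re-derives them:

```python
# Sanity-check config.json: per-layer counts line up with the layer count,
# every layer keeps at least top-k experts, and the attention pattern
# matches full_attention_interval.
import json

with open("general-25-nonuniform/config.json") as f:
    cfg = json.load(f)

per_layer = cfg["per_layer_num_experts"]
assert len(per_layer) == cfg["num_hidden_layers"] == 48
# Routing stays valid only if each layer keeps >= num_experts_per_tok experts.
assert all(n >= cfg["num_experts_per_tok"] for n in per_layer)
# full_attention appears on every full_attention_interval-th layer.
interval = cfg["full_attention_interval"]
for i, kind in enumerate(cfg["layer_types"]):
    expected = "full_attention" if (i + 1) % interval == 0 else "linear_attention"
    assert kind == expected, (i, kind)
print("config invariants hold:", min(per_layer), "<= experts/layer <=", max(per_layer))
```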
general-25-nonuniform/generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "top_k": 40,
+   "top_p": 0.95,
+   "transformers_version": "4.57.6"
+ }
general-25-nonuniform/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
general-25-nonuniform/model-00001-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cad5ba3942f0e1980ad4045ec2f3b64864d247219d6893927cbecbb883fc3bf2
+ size 4998860248
general-25-nonuniform/model-00002-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a6b2059ca70f6c21a8cb84436fd8066a4fd91eb35b93b4f3233f2de3880b684
+ size 4999520576
general-25-nonuniform/model-00003-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4d89fbe6194290a98a56bf2a2e217bc7c8b3e2917c3240f151f6e239235f17f
+ size 4999691344
general-25-nonuniform/model-00004-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7f6f828fb2cd89345bd15c1641009608d979a20d20bbfcffd5cd31b4f6a1e02
+ size 4999414032
general-25-nonuniform/model-00005-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b3e9131a19b3dafa3dc6bf6fa1bba7a21e742004c00218b8b7bab6d405ad1e5
+ size 4999441440
general-25-nonuniform/model-00006-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f52adb2f27f558a9cf04dc0ca39a5e57e014e8faf9c618b45425e08b4e037c9a
+ size 4999198768
general-25-nonuniform/model-00007-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d251eec072dc30319db1e7405dbed6562f13c95d68c33b40050bd4badfb8083
+ size 4999390536
general-25-nonuniform/model-00008-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cdd722d4270bf839ff197f96bb6d5cb3720c9c89ab62c1cee562baf3282972d
+ size 4999141936
general-25-nonuniform/model-00009-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f412301784c765afdc7084e1e2409cabad7c5c56fc0fc41a69e7d9ee0abc6531
+ size 4999533936
general-25-nonuniform/model-00010-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:722c87d86febc7db4c00627771d04853ed13378e669723fbb9cedeaec64f5eb7
+ size 4999346792
general-25-nonuniform/model-00011-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44853fa6235628377631572c8c595330c2db92dab714407ac8ea6f0972a3754b
+ size 4999382344
general-25-nonuniform/model-00012-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed7929cc1f4bd230ee0298a3524bef4950f67f294691e906b4e5fad7b8fceafc
+ size 4999154280
general-25-nonuniform/model-00013-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4717e0194552a50e3d6bd65cc412959a25cb1273b869355b2feacb63fdab3ec5
+ size 4999513456
general-25-nonuniform/model-00014-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46ffd491ae830922564d3826c860fbc8d2a97772644cde9f8bfec78e5f6b4525
+ size 4999215720
general-25-nonuniform/model-00015-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f5d6cdd65ace031ba4e30d1278a80e5153e8622c0c59e78a998f3937b8f6541
+ size 4999632240
general-25-nonuniform/model-00016-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62fbd4f642b3209925f0e92116c5d8cc5a08226956094e53cdec927455b0cc98
+ size 4999383656
general-25-nonuniform/model-00017-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65f42aa7726b3c7652a281bf8d1ba5005272b399c83163a741740d15c1b7037a
+ size 4999505224
general-25-nonuniform/model-00018-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eddae4fbce0c6d97f8d64d9218314c7f32c394060117e08276b309b0b26958b0
+ size 4999096936
general-25-nonuniform/model-00019-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f535cd82f527c79316687c4535e3696036fd307ef317b80b3a8928135d87382
+ size 4999468400
general-25-nonuniform/model-00020-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dec2c5b88da0f17b3665a0a8f9db8e0899515d8b03d5f9d0907882b71414cd01
+ size 4999240296
general-25-nonuniform/model-00021-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c5b43cae75b29d530b30cce77d528c8e85819169a33b5f756f04ec9f48ae6af
+ size 4999615856
general-25-nonuniform/model-00022-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:deedaff296bca9875820145487ddae9f9328945f2dfe290ebb861b9ea816f876
+ size 4999133800
general-25-nonuniform/model-00023-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2065569d101f086004aceb6250de98a1bdb6de61827996e46288fc71684d95f
+ size 4999415152
general-25-nonuniform/model-00024-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5e5d7d68b01ec29b5da10709f0cadbe2e73af01cffa35bb8812de86739426d2
+ size 4999015016
general-25-nonuniform/model-00025-of-00025.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ade7358d4f0944c31d3d568f469f267ad1d5a1ac87369674bd616dbfc01fc221
+ size 691556896
general-25-nonuniform/model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
general-25-nonuniform/pruned_metadata.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "base_model": "Qwen/Qwen3-Coder-Next",
+   "prune_mask": "results/coder_next/mask_general_nonuniform_25pct.pt",
+   "allocation": "nonuniform",
+   "original_num_experts": 512,
+   "num_hidden_layers": 48,
+   "num_experts_per_tok": 10,
+   "per_layer_num_experts": [
+     404,
+     340,
+     439,
+     406,
+     394,
+     413,
+     420,
+     399,
+     384,
+     362,
+     382,
+     384,
+     365,
+     368,
+     377,
+     375,
+     385,
+     383,
+     406,
+     396,
+     383,
+     348,
+     379,
+     376,
+     378,
+     385,
+     385,
+     385,
+     385,
+     407,
+     406,
+     405,
+     405,
+     356,
+     366,
+     375,
+     361,
+     391,
+     391,
+     385,
+     406,
+     382,
+     371,
+     379,
+     367,
+     372,
+     354,
+     367
+   ],
+   "total_pruned": 6144,
+   "sparsity": 0.25
+ }
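
The metadata above is internally consistent, and that is worth checking mechanically after any re-export. A small reconciliation sketch of ours (not repo tooling):

```python
# Cross-check pruned_metadata.json: kept-expert counts must reconcile with
# total_pruned and the reported sparsity.
import json

with open("general-25-nonuniform/pruned_metadata.json") as f:
    meta = json.load(f)

kept = sum(meta["per_layer_num_experts"])                          # 18432
total = meta["original_num_experts"] * meta["num_hidden_layers"]   # 512 * 48 = 24576
assert total - kept == meta["total_pruned"] == 6144
assert abs(meta["total_pruned"] / total - meta["sparsity"]) < 1e-9  # exactly 0.25
print(f"kept {kept}/{total} experts ({meta['sparsity']:.0%} pruned)")
```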
general-25-nonuniform/sitecustomize.py ADDED
@@ -0,0 +1,12 @@
+ """Applied automatically in every Python process on PYTHONPATH.
+ vLLM spawns worker subprocesses that re-import modules; without this file
+ the monkey-patch would be missing in workers and weight loading would fail
+ for the heterogeneous per-layer expert counts.
+ """
+ import os
+ if os.environ.get('VLLM_PRUNED_PATCH_DISABLE') != '1':
+     try:
+         import vllm_pruned_patch
+         vllm_pruned_patch.apply()
+     except Exception:
+         pass
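
The `VLLM_PRUNED_PATCH_DISABLE` check above doubles as a kill switch when bisecting a failure. A hypothetical way to exercise it (the expected outcome is our reading of the docstring's claim, not something the repo ships):

```python
# Hypothetical: attempt a load with the patch disabled; per the docstring
# above, weight loading should then fail on expert-count/shape mismatches.
import os
import subprocess

env = dict(os.environ,
           VLLM_PRUNED_PATCH_DISABLE="1",
           PYTHONPATH=os.getcwd())  # assumption: cwd = variant folder
subprocess.run(
    ["python", "-c", "from vllm import LLM; LLM(model='.', enforce_eager=True)"],
    env=env,
    check=False,  # a non-zero exit is the expected result here
)
```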
general-25-nonuniform/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
general-25-nonuniform/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
general-25-nonuniform/tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151665": {
+       "content": "<tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151666": {
+       "content": "</tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151667": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151668": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 1048576,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
general-25-nonuniform/vllm_pruned_patch.py ADDED
@@ -0,0 +1,88 @@
+ """Monkey-patch vLLM's Qwen3-Next MoE block to support heterogeneous per-layer expert counts.
+
+ Use:
+     import vllm_pruned_patch
+     vllm_pruned_patch.apply()
+     # ...then any vLLM import / engine / lcb_runner call
+
+ The patch reads `config.per_layer_num_experts` (a list of length num_hidden_layers)
+ written by `prune_and_save.py`. If that field is missing the patch is a no-op,
+ so it's safe to apply unconditionally.
+
+ Tested against vLLM 0.16.0 / Qwen3NextSparseMoeBlock.
+ EP (expert parallelism) is NOT supported with heterogeneous counts;
+ keep --enable-eplb off (the default) and stick to TP-only.
+ """
+
+ from __future__ import annotations
+
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ _PATCHED = False
+
+
+ def apply():
+     """Idempotently install the patch. Safe to call multiple times."""
+     global _PATCHED
+     if _PATCHED:
+         return
+
+     from vllm.model_executor.models import qwen3_next as qm
+     from vllm.model_executor.models.utils import extract_layer_index
+
+     OriginalBlock = qm.Qwen3NextSparseMoeBlock
+     original_init = OriginalBlock.__init__
+
+     def patched_init(self, vllm_config, prefix: str = ""):
+         cfg = vllm_config.model_config.hf_config
+         per_layer = getattr(cfg, "per_layer_num_experts", None)
+         if per_layer is None:
+             # No pruning metadata; fall through to original behavior.
+             return original_init(self, vllm_config, prefix=prefix)
+
+         layer_idx = extract_layer_index(prefix)
+         n_local = int(per_layer[layer_idx])
+         original_n = cfg.num_experts
+
+         # Temporarily override num_experts on the (shared) hf_config so the
+         # original __init__ constructs gate + FusedMoE with the right size.
+         # Restore immediately after to avoid bleeding into other layers.
+         try:
+             cfg.num_experts = n_local
+             original_init(self, vllm_config, prefix=prefix)
+         finally:
+             cfg.num_experts = original_n
+
+     qm.Qwen3NextSparseMoeBlock.__init__ = patched_init
+
+     # Patch get_expert_mapping to use the max kept count (smaller mapping table).
+     OriginalLM = qm.Qwen3NextForCausalLM
+     original_get_expert_mapping = OriginalLM.get_expert_mapping
+
+     def patched_get_expert_mapping(self):
+         per_layer = getattr(self.config, "per_layer_num_experts", None)
+         if per_layer is None:
+             return original_get_expert_mapping(self)
+         from vllm.model_executor.layers.fused_moe.shared_fused_moe import (
+             SharedFusedMoE,
+         )
+         # Saved checkpoint always has expert ids 0..max-1 across layers
+         # (each layer is re-indexed densely in prune_and_save.py).
+         max_n = max(int(x) for x in per_layer)
+         return SharedFusedMoE.make_expert_params_mapping(
+             self,
+             ckpt_gate_proj_name="gate_proj",
+             ckpt_down_proj_name="down_proj",
+             ckpt_up_proj_name="up_proj",
+             num_experts=max_n,
+             num_redundant_experts=self.num_redundant_experts,
+         )
+
+     qm.Qwen3NextForCausalLM.get_expert_mapping = patched_get_expert_mapping
+
+     _PATCHED = True
+     logger.info(
+         "Applied Qwen3-Next pruned-model patch (heterogeneous per-layer expert counts)."
+     )
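
The core trick in `patched_init` is the override-and-restore on a shared config object. Shown in isolation with toy stand-in classes of ours (not vLLM's), so the pattern can be verified without loading vLLM at all:

```python
# Each constructor call sees its own num_experts, and the shared config is
# always restored, so no other layer (or later code) observes the override.
class Cfg:
    num_experts = 512
    per_layer_num_experts = [404, 340, 439]


class Block:
    def __init__(self, cfg, layer_idx):
        # Stands in for the stock __init__, which only reads cfg.num_experts.
        self.n_experts = cfg.num_experts


def build(cfg, layer_idx):
    n_local = cfg.per_layer_num_experts[layer_idx]
    original = cfg.num_experts
    try:
        cfg.num_experts = n_local      # override only for this constructor call
        return Block(cfg, layer_idx)
    finally:
        cfg.num_experts = original     # never bleeds into other layers


blocks = [build(Cfg, i) for i in range(3)]
assert [b.n_experts for b in blocks] == [404, 340, 439]
assert Cfg.num_experts == 512  # shared config restored
```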
general-25-nonuniform/vocab.json ADDED
The diff for this file is too large to render. See raw diff