Commit
48ba0b2
1
Parent(s): b610afe
add mlp to targets for vllm inference
Browse files- config.json +3 -2
config.json
CHANGED
|
@@ -113,7 +113,8 @@
|
|
| 113 |
"targets": [
|
| 114 |
"re:.*block_sparse_moe\\.experts\\.\\d+\\.w1$",
|
| 115 |
"re:.*block_sparse_moe\\.experts\\.\\d+\\.w2$",
|
| 116 |
-
"re:.*block_sparse_moe\\.experts\\.\\d+\\.w3$"
|
|
|
|
| 117 |
],
|
| 118 |
"weights": {
|
| 119 |
"actorder": null,
|
|
@@ -468,4 +469,4 @@
|
|
| 468 |
"use_qk_norm": true,
|
| 469 |
"use_routing_bias": true,
|
| 470 |
"vocab_size": 200064
|
| 471 |
-
}
|
|
|
|
| 113 |
"targets": [
|
| 114 |
"re:.*block_sparse_moe\\.experts\\.\\d+\\.w1$",
|
| 115 |
"re:.*block_sparse_moe\\.experts\\.\\d+\\.w2$",
|
| 116 |
+
"re:.*block_sparse_moe\\.experts\\.\\d+\\.w3$",
|
| 117 |
+
"re:.*mlp\\.experts\\.\\d+\\.(gate|up|gate_up|down)_proj$"
|
| 118 |
],
|
| 119 |
"weights": {
|
| 120 |
"actorder": null,
|
|
|
|
| 469 |
"use_qk_norm": true,
|
| 470 |
"use_routing_bias": true,
|
| 471 |
"vocab_size": 200064
|
| 472 |
+
}
|