feat: point AutoTokenizer to VoxCPM2Tokenizer via auto_map
Browse files — tokenizer_config.json (+9 −3)
tokenizer_config.json
CHANGED
|
@@ -97,7 +97,7 @@
|  97 |     "rstrip": false,
|  98 |     "single_word": false,
|  99 |     "special": true
| 100 | -   },
| 101 |   "110": {
| 102 |     "content": "<|/speaker_id|>",
| 103 |     "lstrip": false,
|
@@ -205,8 +205,14 @@
| 205 |   "pad_token": null,
| 206 |   "sp_model_kwargs": {},
| 207 |   "spaces_between_special_tokens": false,
| 208 | - "tokenizer_class": "
| 209 |   "unk_token": "<unk>",
| 210 |   "use_default_system_prompt": false,
| 211 | - "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
| 212 | }
|
|
|
|  97 |     "rstrip": false,
|  98 |     "single_word": false,
|  99 |     "special": true
| 100 | +   },
| 101 |   "110": {
| 102 |     "content": "<|/speaker_id|>",
| 103 |     "lstrip": false,
|
|
| 205 |   "pad_token": null,
| 206 |   "sp_model_kwargs": {},
| 207 |   "spaces_between_special_tokens": false,
| 208 | + "tokenizer_class": "VoxCPM2Tokenizer",
| 209 |   "unk_token": "<unk>",
| 210 |   "use_default_system_prompt": false,
| 211 | + "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
| 212 | + "auto_map": {
| 213 | +   "AutoTokenizer": [
| 214 | +     "tokenization_voxcpm2.VoxCPM2Tokenizer",
| 215 | +     null
| 216 | +   ]
| 217 | + }
| 218 | }