Upload tokenizer
Files changed:
- .gitattributes +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +0 -1
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
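This LFS entry is what the Hub records when a large tokenizer file is pushed through the API rather than committed as plain text. As a minimal sketch of how a commit like this is typically produced, assuming a placeholder target repo id ("your-org/your-model") and a hypothetical Qwen2-family source checkpoint, neither of which appears in this commit:

```python
# Sketch: pushing a tokenizer to the Hub. The default commit message for
# this call is "Upload tokenizer", matching the commit title above.
from transformers import AutoTokenizer

# Placeholder source checkpoint; any Qwen2-family tokenizer would do.
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B")

# Uploads tokenizer.json, tokenizer_config.json, etc. in a single commit;
# the Hub stores the large tokenizer.json via Git LFS, which is what the
# .gitattributes line added above tracks.
tok.push_to_hub("your-org/your-model")
```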
tokenizer.json
CHANGED
The diff for this file is too large to render. See raw diff.
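Since the diff is too large to render, the uploaded file can be inspected locally instead. A sketch, again using a placeholder repo id:

```python
# Sketch: download the LFS-backed tokenizer.json and peek at its contents.
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="your-org/your-model",  # placeholder repo id
                       filename="tokenizer.json")
with open(path, encoding="utf-8") as f:
    tok_spec = json.load(f)

# tokenizer.json serializes the full fast-tokenizer pipeline; for a
# Qwen2-style tokenizer the model section is a byte-level BPE vocabulary.
print(tok_spec["model"]["type"])
```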
tokenizer_config.json
CHANGED
@@ -233,7 +233,6 @@
   "extra_special_tokens": {},
   "model_max_length": 131072,
   "pad_token": "<|endoftext|>",
-  "padding_side": "right",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
   "unk_token": null
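Dropping "padding_side" from the config means loaders fall back to the tokenizer class default (which is "right" in transformers by default), so behavior should be unchanged for most callers; anyone who depends on a specific side can still pin it at load time. A sketch with a placeholder repo id:

```python
# Sketch: pin padding_side explicitly now that the config no longer sets it.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model",  # placeholder
                                    padding_side="right")

# With padding enabled, the shorter sequence is padded on the right
# using the configured pad token, "<|endoftext|>".
batch = tok(["short", "a somewhat longer example"], padding=True)
print(batch["input_ids"])
```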