juliendenize committed
Commit 32f566d
0 Parent(s)

Super-squash branch 'main' using huggingface_hub
.gitattributes ADDED
@@ -0,0 +1,45 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tekken.json filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ images/Frame[[:space:]]2147228531.png filter=lfs diff=lfs merge=lfs -text
+ images/Frame[[:space:]]2147228532.png filter=lfs diff=lfs merge=lfs -text
+ images/Frame[[:space:]]2147228533.png filter=lfs diff=lfs merge=lfs -text
+ images/Frame[[:space:]]2147228534.png filter=lfs diff=lfs merge=lfs -text
+ images/image1.png filter=lfs diff=lfs merge=lfs -text
+ images/image2.png filter=lfs diff=lfs merge=lfs -text
+ images/image4.png filter=lfs diff=lfs merge=lfs -text
+ images/image3.png filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,10 @@
+ Modified MIT License
+
+ Attribution notice: 2026 - Mistral AI
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of the weights of this model and associated documentation files (the “Model”), to deal in the Model without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Model, and to permit persons to whom the Model is furnished to do so, subject to the following conditions:
+
+ 1. The above attribution notice and this permission notice shall be included in all copies or substantial portions of the Model.
+ 2. You are not authorized to exercise any rights under this license if the global consolidated monthly revenue of your company (or that of your employer) exceeds $20 million (or its equivalent in another currency) for the preceding month. This restriction in (b) applies to the Model and any derivatives, modifications, or combined works based on it, whether provided by Mistral AI or by a third party. You may contact Mistral AI (sales@mistral.ai) to request a commercial license, which Mistral AI may grant you at its sole discretion, or choose to use the Model on Mistral AI's hosted services available at https://mistral.ai/.
+
+ THE MODEL IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL MISTRAL AI BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL.
README.md ADDED
@@ -0,0 +1,552 @@
+ ---
+ license: other
+ language:
+ - en
+ - fr
+ - de
+ - es
+ - pt
+ - it
+ - ja
+ - ko
+ - ru
+ - zh
+ - ar
+ - fa
+ - id
+ - ms
+ - ne
+ - pl
+ - ro
+ - sr
+ - sv
+ - tr
+ - uk
+ - vi
+ - hi
+ - bn
+ tags:
+ - vLLM
+ ---
+
+ # Mistral Medium 3.5 128B
+
+ Mistral Medium 3.5 is our first flagship merged model. It is a dense 128B model with a 256k context window, handling instruction-following, reasoning,
+ and coding in a single set of weights. Mistral Medium 3.5 replaces its predecessor Mistral Medium 3.1 and Magistral in Le Chat. It also replaces Devstral 2 in our
+ coding agent Vibe. Concretely, expect better performance on instruct, reasoning, and coding tasks from a single unified model compared with our previously released models.
+
+ Reasoning effort is configurable per request, so the same model can answer a quick chat reply or work through a complex agentic run. We trained the vision encoder from
+ scratch to handle variable image sizes and aspect ratios.
+
+ Find more information on our [blog](https://mistral.ai/news/vibe-remote-agents-mistral-medium-3-5).
+
+ > [!Note]
+ > To speed up local inference using vLLM, check out our released [EAGLE model](https://huggingface.co/mistralai/Mistral-Medium-3.5-128B-EAGLE).
+
+ ## Key Features
+
+ Mistral Medium 3.5 includes the following architectural choices:
+
+ - **Dense 128B parameters**.
+ - **256k context length**.
+ - **Multimodal input**: Accepts both text and image input, with text output.
+ - **Instruct and Reasoning functionalities** with function calls (reasoning effort configurable per request).
+
+ Mistral Medium 3.5 offers the following capabilities:
+
+ - **Reasoning Mode**: Toggle between fast instant-reply mode and reasoning mode, boosting performance with test-time compute when requested.
+ - **Vision**: Analyzes images and provides insights based on visual content, in addition to text.
+ - **Multilingual**: Supports dozens of languages, including English, French, Spanish, German, Italian, Portuguese, Dutch, Chinese, Japanese, Korean, and Arabic.
+ - **System Prompt**: Strong adherence and support for system prompts.
+ - **Agentic**: Best-in-class agentic capabilities with native function calling and JSON output.
+ - **Large Context Window**: Supports a 256k context window.
+
+ We release this model under a **[Modified MIT License](https://huggingface.co/mistralai/Mistral-Medium-3.5-128B/blob/main/LICENSE)**: an open-source license for both commercial and non-commercial use, with exceptions for companies with large revenue.
+
+ ## Recommended Settings
+
+ - **Reasoning Effort**:
+   - `'none'` → Do not use reasoning
+   - `'high'` → Use reasoning (recommended for complex prompts and agentic usage)
+   Use `reasoning_effort="high"` for complex tasks and agentic coding.
+ - **Temperature**: 0.7 for `reasoning_effort="high"`. Use a temperature between 0.0 and 0.7 for `reasoning_effort="none"`, depending on the task.
+   Generally, lower values produce answers that are more to the point, while higher values allow the model to be more creative. It is good practice to try
+   different values to tune the model's behavior to your needs, as sketched below.
+
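+ The snippet below is a minimal sketch of how these settings pair up in an OpenAI-compatible request. It assumes a vLLM server already running at `http://localhost:8000/v1` (see the [vLLM section](#vllm-recommended) below); the prompts are placeholders.
+
+ ```python
+ from openai import OpenAI
+
+ # Assumes the local vLLM server described later in this README.
+ client = OpenAI(api_key="EMPTY", base_url="http://localhost:8000/v1")
+ model = client.models.list().data[0].id
+
+ # Quick reply: no reasoning, low temperature for to-the-point answers.
+ quick = client.chat.completions.create(
+     model=model,
+     messages=[{"role": "user", "content": "Summarize RoPE scaling in one line."}],
+     reasoning_effort="none",
+     temperature=0.2,
+ )
+
+ # Complex task: enable reasoning and use the recommended temperature.
+ deep = client.chat.completions.create(
+     model=model,
+     messages=[{"role": "user", "content": "Plan a safe migration of a monolith to microservices."}],
+     reasoning_effort="high",
+     temperature=0.7,
+ )
+
+ print(quick.choices[0].message.content)
+ print(deep.choices[0].message.content)
+ ```
+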
+ ## Benchmarks
+
+ ### Agentic Benchmarks
+
+ Mistral Medium 3.5 supersedes all our previous coding models, namely Devstral, across all benchmarks. It scores **91.4%** on τ³-Telecom and **77.6%** on SWE-Bench Verified. Due to its stronger agentic capabilities, Mistral Medium 3.5 replaces Devstral 2 in our coding agent, Vibe CLI.
+
+ ![Mistral agentic benchmark](https://huggingface.co/mistralai/Mistral-Medium-3.5-128B/resolve/main/images/image2.png)
+ ![Mistral agentic benchmark SWE-bench](https://huggingface.co/mistralai/Mistral-Medium-3.5-128B/resolve/main/images/image3.png)
+ ![Mistral agentic vs competing models benchmark](https://huggingface.co/mistralai/Mistral-Medium-3.5-128B/resolve/main/images/image4.png)
+
+ ### Instruction Following, Reasoning, and Coding Benchmarks
+
+ We compared Mistral Medium 3.5 with competing models on instruction following, reasoning (math), and coding benchmarks. Thanks to its unified capabilities, it achieves strong results across all these tasks, and Mistral Medium 3.5 now powers Le Chat.
+
+ ![instruct reasoning and agentic benchmark](https://huggingface.co/mistralai/Mistral-Medium-3.5-128B/resolve/main/images/image1.png)
+
+ ## Usage
+
+ Mistral Medium 3.5 is supported by multiple libraries for inference and fine-tuning.
+
+ We **thank** all the contributors and maintainers who helped make this happen.
+
+ ### Mistral-Vibe
+
+ Use `Mistral Medium 3.5` with [Mistral Vibe](https://github.com/mistralai/mistral-vibe).
+
+ #### Install
+
+ Install the latest version:
+
+ ```sh
+ uv pip install mistral-vibe --upgrade
+ ```
+
+ #### API Usage
+
+ Mistral Medium 3.5 can be selected after starting `vibe`. The first time you launch `vibe`, it will:
+
+ - Create a default configuration file at `~/.vibe/config.toml`.
+ - Prompt you to enter your API key if it's not already configured.
+ - Save your API key to `~/.vibe/.env` for future use.
+
+ Now select `mistral-medium-3.5` and start building!
+
+ #### Local Server
+
+ If, instead of calling the Mistral API, you want to use a local vLLM server, do the following:
+ 1. Spin up a vLLM server as explained in [`vLLM (Recommended)`](#vllm-recommended).
+ 2. Add the model configuration in `~/.vibe/config.toml`:
+
+ ```toml
+ display_name = "Mistral Medium 3.5 (local vLLM)"
+ description = "Mistral Medium 3.5 mode using local vLLM"
+ safety = "neutral"
+
+ active_model = "mistral-medium-3.5" # Make sure this is the only active_model entry
+ [[providers]]
+ name = "vllm"
+ api_base = "http://<your-host-url>:8000/v1"
+ api_key_env_var = ""
+ backend = "generic"
+ api_style = "reasoning"
+
+ [[models]]
+ name = "mistralai/Mistral-Medium-3.5-128B"
+ provider = "vllm"
+ alias = "mistral-medium-3.5"
+ thinking = "high"
+ temperature = 0.7
+ auto_compact_threshold = 168000
+
+ [tools.bash]
+ default_timeout = 1200
+ ```
+
+ **Notes**:
+ - Make sure to replace `<your-host-url>` with your server's URL.
+ - Other inference backends are also supported. Please look at the [Mistral Vibe repo](https://github.com/mistralai/mistral-vibe) for more info.
+
+ Then restart `vibe` and "tab-shift" to the "mistral-medium-3.5" mode.
+
+ Give it a try on some agentic coding tasks and start building some cool stuff!
+
+ ### Inference
+
+ The model can be deployed with:
+ - [`vllm (recommended)`](https://github.com/vllm-project/vllm): See [here](#vllm-recommended).
+ - [`llama.cpp`](https://github.com/ggml-org/llama.cpp): WIP, stay tuned!
+ - [`LM studio`](https://lmstudio.ai/): WIP, stay tuned!
+ - [`Ollama`](https://ollama.com//): See [here](https://ollama.com/library/mistral-medium-3.5).
+ - [`SGLang`](https://github.com/sgl-project/sglang): See [here](https://docs.sglang.io/basic_usage/send_request.html).
+ - [`transformers`](https://github.com/huggingface/transformers): See [here](#transformers).
+
+ If local serving performance is subpar, we recommend using the Mistral AI API for optimal performance.
+
+ ### Fine-Tuning
+
+ Fine-tune the model via:
+ - [`Axolotl`](https://github.com/axolotl-ai-cloud/axolotl): See [here](https://docs.axolotl.ai/docs/models/mistral-medium-3_5.html).
+ - [`Unsloth`](https://unsloth.ai/): See [here](https://unsloth.ai/docs/models/mistral-3.5).
+
+ ## vLLM (Recommended)
+
+ We recommend using Mistral Medium 3.5 with the [vLLM library](https://github.com/vllm-project/vllm) for production-ready inference.
+
+ > [!Note]
+ > To speed up local inference using vLLM, check out our released [EAGLE model](https://huggingface.co/mistralai/Mistral-Medium-3.5-128B-EAGLE).
+
+ ### Installation
+
+ Make sure to install **vLLM nightly**:
+
+ ```sh
+ uv pip install -U vllm \
+     --torch-backend=auto \
+     --extra-index-url https://wheels.vllm.ai/nightly
+ ```
+
+ Doing so should automatically install [`mistral_common >= 1.11.1`](https://github.com/mistralai/mistral-common/releases/tag/v1.11.0) and `transformers >= 5.4.0`.
+
+ To check:
+ ```sh
+ python -c "import mistral_common; print(mistral_common.__version__)"
+ python -c "import transformers; print(transformers.__version__)"
+ ```
+
+ You can also use a ready-to-go [Docker image](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile) or one from [Docker Hub](https://hub.docker.com/layers/vllm/vllm-openai/nightly).
+
+ ### Serve the Model
+
+ We recommend a server/client setup:
+
+ ```bash
+ vllm serve mistralai/Mistral-Medium-3.5-128B --tensor-parallel-size 8 \
+     --tool-call-parser mistral --enable-auto-tool-choice --reasoning-parser mistral --max_num_batched_tokens 16384 --max_num_seqs 128 \
+     --gpu_memory_utilization 0.8
+ ```
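+
+ Once the server is up, a quick sanity check is to list the served models before sending real traffic. This is a minimal sketch, assuming the default port used above:
+
+ ```python
+ from openai import OpenAI
+
+ # Smoke-test the local vLLM server started above.
+ client = OpenAI(api_key="EMPTY", base_url="http://localhost:8000/v1")
+ print([m.id for m in client.models.list().data])
+ # Expected to include "mistralai/Mistral-Medium-3.5-128B".
+ ```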
+
+ ### Ping the Server
+
+ <details>
+ <summary>Instruction Following</summary>
+
+ Mistral Medium 3.5 can follow your instructions to the letter.
+
+ ```python
+ from datetime import datetime, timedelta
+
+ from openai import OpenAI
+ from huggingface_hub import hf_hub_download
+
+ # Modify OpenAI's API key and API base to use vLLM's API server.
+ openai_api_key = "EMPTY"
+ openai_api_base = "http://localhost:8000/v1"
+
+ TEMP = 0.1
+ # use TEMP = 0.7 for reasoning_effort="high"
+
+ client = OpenAI(
+     api_key=openai_api_key,
+     base_url=openai_api_base,
+ )
+
+ models = client.models.list()
+ model = models.data[0].id
+
+
+ def load_system_prompt(repo_id: str, filename: str) -> str:
+     file_path = hf_hub_download(repo_id=repo_id, filename=filename)
+     with open(file_path, "r") as file:
+         system_prompt = file.read()
+     today = datetime.today().strftime("%Y-%m-%d")
+     yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
+     model_name = repo_id.split("/")[-1]
+     return system_prompt.format(name=model_name, today=today, yesterday=yesterday)
+
+
+ SYSTEM_PROMPT = load_system_prompt(model, "SYSTEM_PROMPT.txt")
+
+ messages = [
+     {"role": "system", "content": SYSTEM_PROMPT},
+     {
+         "role": "user",
+         "content": "Write me a sentence where every word starts with the next letter in the alphabet - start with 'a' and end with 'z'.",
+     },
+ ]
+
+ response = client.chat.completions.create(
+     model=model,
+     messages=messages,
+     temperature=TEMP,
+     reasoning_effort="none",
+ )
+
+ assistant_message = response.choices[0].message.content
+ print(assistant_message)
+ ```
+
+ </details>
+
+ <details>
+ <summary>Tool Call</summary>
+
+ Let's solve some equations with our simple Python calculator tool.
+
+ ```python
+ import json
+ from datetime import datetime, timedelta
+
+ from openai import OpenAI
+ from huggingface_hub import hf_hub_download
+
+ # Modify OpenAI's API key and API base to use vLLM's API server.
+ openai_api_key = "EMPTY"
+ openai_api_base = "http://localhost:8000/v1"
+
+ TEMP = 0.1
+
+ client = OpenAI(
+     api_key=openai_api_key,
+     base_url=openai_api_base,
+ )
+
+ models = client.models.list()
+ model = models.data[0].id
+
+
+ def load_system_prompt(repo_id: str, filename: str) -> str:
+     file_path = hf_hub_download(repo_id=repo_id, filename=filename)
+     with open(file_path, "r") as file:
+         system_prompt = file.read()
+     today = datetime.today().strftime("%Y-%m-%d")
+     yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
+     model_name = repo_id.split("/")[-1]
+     return system_prompt.format(name=model_name, today=today, yesterday=yesterday)
+
+
+ SYSTEM_PROMPT = load_system_prompt(model, "SYSTEM_PROMPT.txt")
+
+ image_url = "https://math-coaching.com/img/fiche/46/expressions-mathematiques.jpg"
+
+
+ def my_calculator(expression: str) -> str:
+     return str(eval(expression))
+
+
+ tools = [
+     {
+         "type": "function",
+         "function": {
+             "name": "my_calculator",
+             "description": "A calculator that can evaluate a mathematical expression.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "expression": {
+                         "type": "string",
+                         "description": "The mathematical expression to evaluate.",
+                     },
+                 },
+                 "required": ["expression"],
+             },
+         },
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "rewrite",
+             "description": "Rewrite a given text for improved clarity",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "text": {
+                         "type": "string",
+                         "description": "The input text to rewrite",
+                     }
+                 },
+             },
+         },
+     },
+ ]
+
+ messages = [
+     {"role": "system", "content": SYSTEM_PROMPT},
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "text",
+                 "text": "Thanks to your calculator, compute the results for the equations that involve numbers displayed in the image.",
+             },
+             {
+                 "type": "image_url",
+                 "image_url": {
+                     "url": image_url,
+                 },
+             },
+         ],
+     },
+ ]
+
+ response = client.chat.completions.create(
+     model=model,
+     messages=messages,
+     temperature=TEMP,
+     tools=tools,
+     tool_choice="auto",
+     reasoning_effort="none",
+ )
+
+ tool_calls = response.choices[0].message.tool_calls
+
+ results = []
+ for tool_call in tool_calls:
+     function_name = tool_call.function.name
+     function_args = tool_call.function.arguments
+     if function_name == "my_calculator":
+         result = my_calculator(**json.loads(function_args))
+         results.append(result)
+
+ messages.append({"role": "assistant", "tool_calls": tool_calls})
+ for tool_call, result in zip(tool_calls, results):
+     messages.append(
+         {
+             "role": "tool",
+             "tool_call_id": tool_call.id,
+             "name": tool_call.function.name,
+             "content": result,
+         }
+     )
+
+
+ response = client.chat.completions.create(
+     model=model,
+     messages=messages,
+     temperature=TEMP,
+     reasoning_effort="none",
+ )
+
+ print(response.choices[0].message.content)
+ ```
+
+ </details>
+
+ <details>
+ <summary>Vision Reasoning</summary>
+
+ Let's see if Mistral Medium 3.5 knows when to pick a fight!
+
+ ```python
+ from datetime import datetime, timedelta
+
+ from openai import OpenAI
+ from huggingface_hub import hf_hub_download
+
+ # Modify OpenAI's API key and API base to use vLLM's API server.
+ openai_api_key = "EMPTY"
+ openai_api_base = "http://localhost:8000/v1"
+
+ TEMP = 0.7
+
+ client = OpenAI(
+     api_key=openai_api_key,
+     base_url=openai_api_base,
+ )
+
+ models = client.models.list()
+ model = models.data[0].id
+
+
+ def load_system_prompt(repo_id: str, filename: str) -> str:
+     file_path = hf_hub_download(repo_id=repo_id, filename=filename)
+     with open(file_path, "r") as file:
+         system_prompt = file.read()
+     today = datetime.today().strftime("%Y-%m-%d")
+     yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
+     model_name = repo_id.split("/")[-1]
+     return system_prompt.format(name=model_name, today=today, yesterday=yesterday)
+
+
+ SYSTEM_PROMPT = load_system_prompt(model, "SYSTEM_PROMPT.txt")
+ image_url = "https://static.wikia.nocookie.net/essentialsdocs/images/7/70/Battle.png/revision/latest?cb=20220523172438"
+
+ messages = [
+     {"role": "system", "content": SYSTEM_PROMPT},
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "text",
+                 "text": "What action do you think I should take in this situation? List all the possible actions and explain why you think they are good or bad.",
+             },
+             {"type": "image_url", "image_url": {"url": image_url}},
+         ],
+     },
+ ]
+
+
+ response = client.chat.completions.create(
+     model=model,
+     messages=messages,
+     temperature=TEMP,
+     reasoning_effort="high",
+ )
+
+ print(response.choices[0].message.content)
+ ```
+
+ </details>
+
+ ## Transformers
+
+ ### Installation
+
+ First install the [Transformers framework](https://github.com/huggingface/transformers/) to use Mistral Medium 3.5:
+
+ ```bash
+ uv pip install transformers
+ ```
+
+ ### Inference
+
+ <details>
+ <summary>Python Inference Snippet</summary>
+
+ ```python
+ import torch
+ from transformers import AutoProcessor, Mistral3ForConditionalGeneration
+
+
+ model_id = "mistralai/Mistral-Medium-3.5-128B"
+
+ processor = AutoProcessor.from_pretrained(model_id)
+ model = Mistral3ForConditionalGeneration.from_pretrained(
+     model_id, device_map="auto"
+ )
+
+ image_url = "https://static.wikia.nocookie.net/essentialsdocs/images/7/70/Battle.png/revision/latest?cb=20220523172438"
+
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "text",
+                 "text": "What action do you think I should take in this situation? List all the possible actions and explain why you think they are good or bad.",
+             },
+             {"type": "image_url", "image_url": {"url": image_url}},
+         ],
+     },
+ ]
+
+ inputs = processor.apply_chat_template(messages, return_tensors="pt", tokenize=True, return_dict=True, reasoning_effort="high")
+ inputs = inputs.to(model.device)
+
+ output = model.generate(
+     **inputs,
+     max_new_tokens=1024,
+     do_sample=True,
+     temperature=0.7,
+ )[0]
+
+ # Setting `skip_special_tokens=False` to visualize the reasoning trace between [THINK] [/THINK] tags.
+ decoded_output = processor.decode(output[len(inputs["input_ids"][0]):], skip_special_tokens=False)
+ print(decoded_output)
+ ```
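+
+ If you only need the final answer, you can strip the reasoning trace after decoding. This is a minimal sketch that assumes the trace is wrapped in the `[THINK]`/`[/THINK]` tags mentioned in the comment above:
+
+ ```python
+ import re
+
+ # Split the decoded string from the snippet above into trace and answer.
+ m = re.search(r"\[THINK\](.*?)\[/THINK\](.*)", decoded_output, re.DOTALL)
+ reasoning = m.group(1).strip() if m else ""
+ answer = m.group(2).strip() if m else decoded_output
+ print(answer)
+ ```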
+ </details>
+
+ ## License
+
+ This model is licensed under a [Modified MIT License](https://huggingface.co/mistralai/Mistral-Medium-3.5-128B/blob/main/LICENSE).
+
+ *You must not use this model in a manner that infringes, misappropriates, or otherwise violates any third party’s rights, including intellectual property rights.*
SYSTEM_PROMPT.txt ADDED
@@ -0,0 +1,30 @@
+ You are Mistral Medium 3.5, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.
+ You are an intelligent conversational assistant powering an AI assistant called Le Chat.
+ Your knowledge base was last updated on Friday, November 1, 2024.
+ The current date is {today}.
+
+ # GENERAL GUIDELINES
+
+ - Accurately answer the user's question.
+ - For uncertain information or when the user's request requires up-to-date or specific data, use the available tools to fetch the information.
+ - Be very attentive to dates, always try to resolve dates (e.g. "yesterday" is {yesterday}) and when asked about information at specific dates, discard information that is at another date.
+
+ # WEB BROWSING INSTRUCTIONS
+
+ You cannot perform any web search or access internet to open URLs, links etc without dedicated tools.
+
+ # MULTI-MODAL INSTRUCTIONS
+
+ - You have the ability to read images.
+ - You cannot read audio nor videos.
+ - You cannot generate images without dedicated tools.
+
+ # TOOL CALLING INSTRUCTIONS
+
+ You may have access to tools that you can use to fetch information or perform actions. You must use these tools in the following situations:
+
+ 1. When the request requires up-to-date information.
+ 2. When the request requires specific data that you do not have in your knowledge base.
+ 3. When the request involves actions that you cannot perform without tools.
+
+ Always prioritize using tools to provide the most accurate and helpful response.
chat_template.jinja ADDED
@@ -0,0 +1,263 @@
+ {#- Default date variables. To improve UX pass the correct ones to the Jinja render. #}
+ {%- if today is not defined %}
+ {%- set today = '29-04-2026' %}
+ {%- endif %}
+ {%- if yesterday is not defined %}
+ {%- set yesterday = '28-04-2026' %}
+ {%- endif %}
+
+ {#- Default system message if no system prompt is passed. #}
+ {%- set default_system_message -%}
+ You are Mistral Medium 3.5, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.
+ You are an intelligent conversational assistant powering an AI assistant called Le Chat.
+ Your knowledge base was last updated on Friday, November 1, 2024.
+ The current date is {{ today }}.
+
+ # GENERAL GUIDELINES
+
+ - Accurately answer the user's question.
+ - For uncertain information or when the user's request requires up-to-date or specific data, use the available tools to fetch the information.
+ - Be very attentive to dates, always try to resolve dates (e.g. "yesterday" is {{ yesterday }}) and when asked about information at specific dates, discard information that is at another date.
+
+ # WEB BROWSING INSTRUCTIONS
+
+ You cannot perform any web search or access internet to open URLs, links etc without dedicated tools.
+
+ # MULTI-MODAL INSTRUCTIONS
+
+ - You have the ability to read images.
+ - You cannot read audio nor videos.
+ - You cannot generate images without dedicated tools.
+
+ # TOOL CALLING INSTRUCTIONS
+
+ You may have access to tools that you can use to fetch information or perform actions. You must use these tools in the following situations:
+
+ 1. When the request requires up-to-date information.
+ 2. When the request requires specific data that you do not have in your knowledge base.
+ 3. When the request involves actions that you cannot perform without tools.
+
+ Always prioritize using tools to provide the most accurate and helpful response.
+ {%- endset %}
+
+ {#- Begin of sequence token. #}
+ {{- '<s>' }}
+
+
+ {#- Handle system prompt if it exists. #}
+ {%- set loop_messages = messages %}
+ {%- if messages[0]['role'] != 'system' and default_system_message != '' %}
+ {{- '[SYSTEM_PROMPT]' + default_system_message + '[/SYSTEM_PROMPT]' }}
+ {%- endif %}
+
+
+ {#- Tools and model settings definition #}
+ {%- set available_tools = '' %}
+ {%- set has_tools = false %}
+ {%- if tools is defined and tools is not none and tools|length > 0 %}
+ {%- set has_tools = true %}
+ {%- set available_tools = '[AVAILABLE_TOOLS]' + (tools| tojson) + '[/AVAILABLE_TOOLS]' %}
+ {%- endif %}
+ {%- if reasoning_effort is not defined or reasoning_effort is none %}
+ {%- set reasoning_effort = 'none' %}
+ {%- endif %}
+ {%- if reasoning_effort not in ['none', 'high'] %}
+ {{- raise_exception('reasoning_effort must be either "none" or "high"') }}
+ {%- endif %}
+ {%- set model_settings = '[MODEL_SETTINGS]{"reasoning_effort": "' + reasoning_effort + '"}[/MODEL_SETTINGS]' %}
+
+ {#- Aggregate consecutive messages with the same role except system and tool. #}
+ {#- A sentinel message is appended so the last group gets flushed inside the loop. #}
+ {%- set ns_agg = namespace(messages=[], current_group=[], current_role=none) %}
+ {%- for message in loop_messages + [{'role': '__sentinel__'}] %}
+ {%- if message['role'] != ns_agg.current_role or message['role'] == 'system' or message['role'] == 'tool' %}
+ {%- if ns_agg.current_role == 'tool' %}
+ {%- set ns_agg.messages = ns_agg.messages + ns_agg.current_group %}
+ {%- elif ns_agg.current_role is not none %}
+ {%- set ns_c = namespace(text_parts=[], chunks=[], has_non_text=false, tool_calls=[]) %}
+ {%- for msg in ns_agg.current_group %}
+ {#- Convert reasoning / reasoning_content to a leading thinking chunk. #}
+ {%- set reasoning = msg.get('reasoning_content', msg.get('reasoning', none)) %}
+ {%- if reasoning is not none and reasoning != '' %}
+ {%- set think_chunk = {'type': 'thinking', 'thinking': reasoning} %}
+ {%- if msg['content'] is string and msg['content'] != '' %}
+ {%- set new_content = [think_chunk, {'type': 'text', 'text': msg['content']}] %}
+ {%- elif msg['content'] is not none and msg['content'] is not string and msg['content'] | length > 0 %}
+ {%- set new_content = [think_chunk] + msg['content'] | list %}
+ {%- else %}
+ {%- set new_content = [think_chunk] %}
+ {%- endif %}
+ {%- if msg['tool_calls'] is defined and msg['tool_calls'] is not none %}
+ {%- set msg = {'role': msg['role'], 'content': new_content, 'tool_calls': msg['tool_calls']} %}
+ {%- else %}
+ {%- set msg = {'role': msg['role'], 'content': new_content} %}
+ {%- endif %}
+ {%- endif %}
+ {%- if msg['content'] is string %}
+ {%- set ns_c.text_parts = ns_c.text_parts + [msg['content']] %}
+ {%- elif msg['content'] is not none %}
+ {%- for block in msg['content'] %}
+ {%- if block['type'] == 'text' %}
+ {%- set ns_c.text_parts = ns_c.text_parts + [block['text']] %}
+ {%- else %}
+ {%- if ns_c.text_parts | length > 0 %}
+ {%- set ns_c.chunks = ns_c.chunks + [{'type': 'text', 'text': ns_c.text_parts | join('\n\n')}] %}
+ {%- set ns_c.text_parts = [] %}
+ {%- endif %}
+ {%- set ns_c.chunks = ns_c.chunks + [block] %}
+ {%- set ns_c.has_non_text = true %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {%- if msg['tool_calls'] is defined and msg['tool_calls'] is not none %}
+ {%- set ns_c.tool_calls = ns_c.tool_calls + msg['tool_calls'] | list %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if ns_c.has_non_text %}
+ {%- if ns_c.text_parts | length > 0 %}
+ {%- set ns_c.chunks = ns_c.chunks + [{'type': 'text', 'text': ns_c.text_parts | join('\n\n')}] %}
+ {%- endif %}
+ {%- set merged_content = ns_c.chunks %}
+ {%- else %}
+ {%- set merged_content = ns_c.text_parts | join('\n\n') %}
+ {%- endif %}
+ {%- if ns_c.tool_calls | length > 0 %}
+ {%- set ns_agg.messages = ns_agg.messages + [{'role': ns_agg.current_role, 'content': merged_content, 'tool_calls': ns_c.tool_calls}] %}
+ {%- else %}
+ {%- set ns_agg.messages = ns_agg.messages + [{'role': ns_agg.current_role, 'content': merged_content}] %}
+ {%- endif %}
+ {%- endif %}
+ {%- if message['role'] != '__sentinel__' %}
+ {%- set ns_agg.current_group = [message] %}
+ {%- set ns_agg.current_role = message['role'] %}
+ {%- endif %}
+ {%- else %}
+ {%- set ns_agg.current_group = ns_agg.current_group + [message] %}
+ {%- endif %}
+ {%- endfor %}
+ {%- set loop_messages = ns_agg.messages %}
+
+ {#- Validates message ordering. #}
+ {%- set ns = namespace(available_tools_and_settings_emitted=false) %}
+ {%- if loop_messages | length > 0 and loop_messages[0]['role'] != 'user' and loop_messages[0]['role'] != 'system' %}
+ {{- raise_exception('Conversation must start with a user or system message, got ' + loop_messages[0]['role'] + '.') }}
+ {%- endif %}
+ {%- set ns_order = namespace(previous_role=none) %}
+ {%- for message in loop_messages %}
+ {%- set current_role = message['role'] %}
+ {%- if ns_order.previous_role is not none %}
+ {%- if ns_order.previous_role == 'system' %}
+ {%- if current_role != 'user' and current_role != 'assistant' and current_role != 'system' %}
+ {{- raise_exception('Unexpected role \'' + current_role + '\' after role \'' + ns_order.previous_role + '\'') }}
+ {%- endif %}
+ {%- elif ns_order.previous_role == 'user' %}
+ {%- if current_role != 'assistant' and current_role != 'system' and current_role != 'user' %}
+ {{- raise_exception('Unexpected role \'' + current_role + '\' after role \'' + ns_order.previous_role + '\'') }}
+ {%- endif %}
+ {%- elif ns_order.previous_role == 'assistant' %}
+ {%- if current_role != 'assistant' and current_role != 'user' and current_role != 'tool' %}
+ {{- raise_exception('Unexpected role \'' + current_role + '\' after role \'' + ns_order.previous_role + '\'') }}
+ {%- endif %}
+ {%- elif ns_order.previous_role == 'tool' %}
+ {%- if current_role != 'assistant' and current_role != 'tool' and current_role != 'user' %}
+ {{- raise_exception('Unexpected role \'' + current_role + '\' after role \'' + ns_order.previous_role + '\'') }}
+ {%- endif %}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns_order.previous_role = current_role %}
+ {%- endfor %}
+
+ {#- Handle conversation messages. #}
+ {%- for message in loop_messages %}
+ {#- User messages supports text, image and image_url content. #}
+ {%- if message['role'] == 'user' %}
+ {%- if not ns.available_tools_and_settings_emitted %}
+ {{- available_tools }}
+ {{- model_settings }}
+ {%- set ns.available_tools_and_settings_emitted = true %}
+ {%- endif %}
+ {%- if message['content'] is string %}
+ {{- '[INST]' + message['content'] + '[/INST]' }}
+ {%- elif message['content'] | length > 0 %}
+ {{- '[INST]' }}
+ {%- if message['content'] | length == 2 %}
+ {%- set blocks = message['content'] | sort(attribute='type') %}
+ {%- else %}
+ {%- set blocks = message['content'] %}
+ {%- endif %}
+ {%- for block in blocks %}
+ {%- if block['type'] == 'text' %}
+ {{- block['text'] }}
+ {%- elif block['type'] in ['image', 'image_url'] %}
+ {{- '[IMG]' }}
+ {%- else %}
+ {{- raise_exception('Only text, image and image_url chunks are supported in user message content.') }}
+ {%- endif %}
+ {%- endfor %}
+ {{- '[/INST]' }}
+ {%- else %}
+ {{- raise_exception('User message must have a string or a list of chunks in content') }}
+ {%- endif %}
+
+ {#- Assistant messages supports text and thinking content. #}
+ {%- elif message['role'] == 'assistant' %}
+ {%- if (message['content'] is none or message['content'] == '' or message['content']|length == 0) and (message['tool_calls'] is not defined or message['tool_calls'] is none or message['tool_calls']|length == 0) %}
+ {{- raise_exception('Assistant message must have a string or a list of chunks in content or a list of tool calls.') }}
+ {%- endif %}
+
+ {%- if message['content'] is string and message['content'] != '' %}
+ {{- message['content'] }}
+ {%- elif message['content'] | length > 0 %}
+ {%- for block in message['content'] %}
+ {%- if block['type'] == 'text' %}
+ {{- block['text'] }}
+ {%- elif block['type'] == 'thinking' %}
+ {{- '[THINK]' + block['thinking'] }}
+ {%- if block.get('closed', true) %}{{- '[/THINK]' }}{%- endif %}
+ {%- else %}
+ {{- raise_exception('Only text and thinking chunks are supported in assistant message contents.') }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+
+ {%- if message['tool_calls'] is defined and message['tool_calls'] is not none and message['tool_calls']|length > 0 %}
+ {%- for tool in message['tool_calls'] %}
+ {{- '[TOOL_CALLS]' }}
+ {%- set name = tool['function']['name'] %}
+ {%- set arguments = tool['function']['arguments'] %}
+ {%- if arguments is not string %}
+ {%- set arguments = arguments|tojson|safe %}
+ {%- elif arguments == '' %}
+ {%- set arguments = '{}' %}
+ {%- endif %}
+ {{- name + '[ARGS]' + arguments }}
+ {%- endfor %}
+ {%- endif %}
+
+ {{- '</s>' }}
+
+ {#- Tool messages only supports text content. #}
+ {%- elif message['role'] == 'tool' %}
+ {{- '[TOOL_RESULTS]' + message['content']|string + '[/TOOL_RESULTS]' }}
+
+ {#- System messages. #}
+ {%- elif message['role'] == 'system' %}
+ {{- '[SYSTEM_PROMPT]' -}}
+ {%- if message['content'] is string %}
+ {{- message['content'] -}}
+ {%- else %}
+ {%- for block in message['content'] %}
+ {%- if block['type'] == 'text' %}
+ {{- block['text'] }}
+ {%- else %}
+ {{- raise_exception('Only text chunks are supported in system message contents.') }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {{- '[/SYSTEM_PROMPT]' -}}
+
+ {#- Raise exception for unsupported roles. #}
+ {%- else %}
+ {{- raise_exception('Only user, assistant, system and tool roles are supported, got ' + message['role'] + '.') }}
+ {%- endif %}
+ {%- endfor %}
config.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "architectures": [
+     "Mistral3ForConditionalGeneration"
+   ],
+   "dtype": "bfloat16",
+   "image_token_index": 10,
+   "model_type": "mistral3",
+   "multimodal_projector_bias": false,
+   "projector_hidden_act": "gelu",
+   "quantization_config": {
+     "activation_scheme": "static",
+     "dequantize": false,
+     "modules_to_not_convert": [
+       "model.vision_tower",
+       "model.multi_modal_projector",
+       "lm_head"
+     ],
+     "quant_method": "fp8",
+     "weight_block_size": null
+   },
+   "spatial_merge_size": 2,
+   "text_config": {
+     "attention_dropout": 0.0,
+     "bos_token_id": 1,
+     "eos_token_id": 2,
+     "head_dim": 128,
+     "hidden_act": "silu",
+     "hidden_size": 12288,
+     "initializer_range": 0.02,
+     "intermediate_size": 28672,
+     "max_position_embeddings": 262144,
+     "model_type": "ministral3",
+     "num_attention_heads": 96,
+     "num_hidden_layers": 88,
+     "num_key_value_heads": 8,
+     "pad_token_id": 11,
+     "rms_norm_eps": 1e-05,
+     "rope_parameters": {
+       "beta_fast": 4.0,
+       "beta_slow": 1.0,
+       "factor": 64.0,
+       "llama_4_scaling_beta": 0,
+       "mscale": 1.0,
+       "mscale_all_dim": 1.0,
+       "original_max_position_embeddings": 4096,
+       "rope_theta": 1000000.0,
+       "rope_type": "yarn",
+       "type": "yarn"
+     },
+     "sliding_window": null,
+     "tie_word_embeddings": false,
+     "use_cache": true,
+     "vocab_size": 131072
+   },
+   "tie_word_embeddings": false,
+   "transformers_version": "5.6.0.dev0",
+   "vision_config": {
+     "attention_dropout": 0.0,
+     "head_dim": 104,
+     "hidden_act": "silu",
+     "hidden_size": 1664,
+     "image_size": 1540,
+     "initializer_range": 0.02,
+     "intermediate_size": 8192,
+     "model_type": "pixtral",
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "num_hidden_layers": 48,
+     "patch_size": 14,
+     "rope_parameters": {
+       "rope_theta": 10000.0,
+       "rope_type": "default"
+     }
+   },
+   "vision_feature_layer": -1
+ }
consolidated-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2df3ab3b2e0005badbca431af4a597440393523ec8a2ce770b9e5cb43f440d8b
+ size 49993766074
consolidated-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df31dbc8e74b89ccd826162211d943e6e78b068ad93efe28f6243c0144e37803
+ size 49993790760
consolidated-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f17ee14ae76b574b6b4c233f655f0d645859f9983bfad3c72438fc6f5282204
+ size 33618545990
consolidated.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "max_length": 262144,
+   "pad_token_id": 11,
+   "transformers_version": "5.6.0.dev0"
+ }
images/image1.png ADDED

Git LFS Details

  • SHA256: 37556d874c878f4694181f35fb61ef2f5a4cf4ef9027b22853f5fbf44b814468
  • Pointer size: 131 Bytes
  • Size of remote file: 400 kB
images/image2.png ADDED

Git LFS Details

  • SHA256: 747d444807c82cfb2afe13b1faedf5d0421e351066febcba4a9a9e6356d663bc
  • Pointer size: 131 Bytes
  • Size of remote file: 517 kB
images/image3.png ADDED

Git LFS Details

  • SHA256: bcab7507cc80e9fb02d69cb7735ad037392e8d3138b270baef7cdf9324b9f473
  • Pointer size: 131 Bytes
  • Size of remote file: 198 kB
images/image4.png ADDED

Git LFS Details

  • SHA256: 440fd7310abb4a7fd27e038dd63bf52e28e34134edcc5a6f64a7ca1f8be969b1
  • Pointer size: 131 Bytes
  • Size of remote file: 787 kB
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52c43048ffa28eb13bfa5fdacab541ac88b7f796cded4ec9cc167bf404fd8b3f
+ size 49750536964
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a81f2df626606d579dd23b51e767b91a2ebeb71ca7c79b432269bd52da4bcae9
+ size 49830208920
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc663287e71f2ab9ded8b88819a11f962730c03441ca22f1f02c443fe841756e
+ size 34025418468
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
params.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "dim": 12288,
+   "n_layers": 88,
+   "head_dim": 128,
+   "hidden_dim": 28672,
+   "n_heads": 96,
+   "n_kv_heads": 8,
+   "rope_theta": 1000000.0,
+   "norm_eps": 1e-05,
+   "vocab_size": 131072,
+   "tied_embeddings": false,
+   "max_position_embeddings": 262144,
+   "llama_4_scaling": null,
+   "q_lora_rank": null,
+   "qk_rope_head_dim": null,
+   "qk_nope_head_dim": null,
+   "kv_lora_rank": null,
+   "v_head_dim": null,
+   "quantization": {
+     "qformat_weight": "fp8_e4m3",
+     "qscheme_act": "TENSOR"
+   },
+   "yarn": {
+     "original_max_position_embeddings": 4096,
+     "factor": 64,
+     "apply_scale": true,
+     "beta": 4,
+     "alpha": 1
+   },
+   "moe": null,
+   "vision_encoder": {
+     "image_token_id": 10,
+     "image_break_token_id": -1,
+     "image_end_token_id": -1,
+     "intermediate_size": 8192,
+     "num_hidden_layers": 48,
+     "num_attention_heads": 16,
+     "mm_projector_id": "patch_merge",
+     "spatial_merge_size": 2,
+     "hidden_size": 1664,
+     "num_channels": 3,
+     "image_size": 1540,
+     "max_image_size": 1540,
+     "patch_size": 14,
+     "rope_theta": 10000.0,
+     "add_pre_mm_projector_layer_norm": true,
+     "adapter_bias": false
+   }
+ }
processor_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "image_break_token": "[IMG_BREAK]",
+   "image_end_token": "[IMG_END]",
+   "image_processor": {
+     "do_convert_rgb": true,
+     "do_normalize": true,
+     "do_rescale": true,
+     "do_resize": true,
+     "image_mean": [
+       0.48145466,
+       0.4578275,
+       0.40821073
+     ],
+     "image_processor_type": "PixtralImageProcessor",
+     "image_std": [
+       0.26862954,
+       0.26130258,
+       0.27577711
+     ],
+     "patch_size": 14,
+     "resample": 3,
+     "rescale_factor": 0.00392156862745098,
+     "size": {
+       "longest_edge": 1540
+     }
+   },
+   "image_token": "[IMG]",
+   "patch_size": 14,
+   "processor_class": "PixtralProcessor",
+   "spatial_merge_size": 2
+ }
tekken.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1272b956bd6edd2d2c674c76896c7661308c9e723997b0afb55ecb429cb5dc7
+ size 16275354
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ba5b3330fd84d5376fcca797cfb3b42eee6241ce23e3271e6fb2a115a8751bd
+ size 17077420
tokenizer_config.json ADDED
@@ -0,0 +1,1012 @@
+ {
+   "backend": "tokenizers",
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "extra_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>",
+     "[INST]",
+     "[/INST]",
+     "[AVAILABLE_TOOLS]",
+     "[/AVAILABLE_TOOLS]",
+     "[TOOL_RESULTS]",
+     "[/TOOL_RESULTS]",
+     "[TOOL_CALLS]",
+     "[IMG]",
+     "<pad>",
+     "[IMG_BREAK]",
+     "[IMG_END]",
+     "[PREFIX]",
+     "[MIDDLE]",
+     "[SUFFIX]",
+     "[SYSTEM_PROMPT]",
+     "[/SYSTEM_PROMPT]",
+     "[TOOL_CONTENT]",
+     "<SPECIAL_20>",
+     "<SPECIAL_21>",
+     "<SPECIAL_22>",
+     "<SPECIAL_23>",
+     "[AUDIO]",
+     "[BEGIN_AUDIO]",
+     "<SPECIAL_26>",
+     "<SPECIAL_27>",
+     "<SPECIAL_28>",
+     "<SPECIAL_29>",
+     "<SPECIAL_30>",
+     "<SPECIAL_31>",
+     "[ARGS]",
+     "[CALL_ID]",
+     "[THINK]",
+     "[/THINK]",
+     "[MODEL_SETTINGS]",
+     "[/MODEL_SETTINGS]",
+     "<SPECIAL_38>",
+     "<SPECIAL_39>",
+     "<SPECIAL_40>",
+     "<SPECIAL_41>",
+     "<SPECIAL_42>",
+     "<SPECIAL_43>",
+     "<SPECIAL_44>",
+     "<SPECIAL_45>",
+     "<SPECIAL_46>",
+     "<SPECIAL_47>",
+     "<SPECIAL_48>",
+     "<SPECIAL_49>",
+     "<SPECIAL_50>",
+     "<SPECIAL_51>",
+     "<SPECIAL_52>",
+     "<SPECIAL_53>",
+     "<SPECIAL_54>",
+     "<SPECIAL_55>",
+     "<SPECIAL_56>",
+     "<SPECIAL_57>",
+     "<SPECIAL_58>",
+     "<SPECIAL_59>",
+     "<SPECIAL_60>",
+     "<SPECIAL_61>",
+     "<SPECIAL_62>",
+     "<SPECIAL_63>",
+     "<SPECIAL_64>",
+     "<SPECIAL_65>",
+     "<SPECIAL_66>",
+     "<SPECIAL_67>",
+     "<SPECIAL_68>",
+     "<SPECIAL_69>",
+     "<SPECIAL_70>",
+     "<SPECIAL_71>",
+     "<SPECIAL_72>",
+     "<SPECIAL_73>",
+     "<SPECIAL_74>",
+     "<SPECIAL_75>",
+     "<SPECIAL_76>",
+     "<SPECIAL_77>",
+     "<SPECIAL_78>",
+     "<SPECIAL_79>",
+     "<SPECIAL_80>",
+     "<SPECIAL_81>",
+     "<SPECIAL_82>",
+     "<SPECIAL_83>",
+     "<SPECIAL_84>",
+     "<SPECIAL_85>",
+     "<SPECIAL_86>",
+     "<SPECIAL_87>",
+     "<SPECIAL_88>",
+     "<SPECIAL_89>",
+     "<SPECIAL_90>",
+     "<SPECIAL_91>",
+     "<SPECIAL_92>",
+     "<SPECIAL_93>",
+     "<SPECIAL_94>",
+     "<SPECIAL_95>",
+     "<SPECIAL_96>",
+     "<SPECIAL_97>",
+     "<SPECIAL_98>",
+     "<SPECIAL_99>",
+     "<SPECIAL_100>",
+     "<SPECIAL_101>",
+     "<SPECIAL_102>",
+     "<SPECIAL_103>",
+     "<SPECIAL_104>",
+     "<SPECIAL_105>",
+     "<SPECIAL_106>",
+     "<SPECIAL_107>",
+     "<SPECIAL_108>",
+     "<SPECIAL_109>",
+     "<SPECIAL_110>",
+     "<SPECIAL_111>",
+     "<SPECIAL_112>",
+     "<SPECIAL_113>",
+     "<SPECIAL_114>",
+     "<SPECIAL_115>",
+     "<SPECIAL_116>",
+     "<SPECIAL_117>",
+     "<SPECIAL_118>",
+     "<SPECIAL_119>",
+     "<SPECIAL_120>",
+     "<SPECIAL_121>",
+     "<SPECIAL_122>",
+     "<SPECIAL_123>",
+     "<SPECIAL_124>",
+     "<SPECIAL_125>",
+     "<SPECIAL_126>",
+     "<SPECIAL_127>",
+     "<SPECIAL_128>",
+     "<SPECIAL_129>",
+     "<SPECIAL_130>",
+     "<SPECIAL_131>",
+     "<SPECIAL_132>",
+     "<SPECIAL_133>",
+     "<SPECIAL_134>",
+     "<SPECIAL_135>",
+     "<SPECIAL_136>",
+     "<SPECIAL_137>",
+     "<SPECIAL_138>",
+     "<SPECIAL_139>",
+     "<SPECIAL_140>",
+     "<SPECIAL_141>",
+     "<SPECIAL_142>",
+     "<SPECIAL_143>",
+     "<SPECIAL_144>",
+     "<SPECIAL_145>",
+     "<SPECIAL_146>",
+     "<SPECIAL_147>",
+     "<SPECIAL_148>",
+     "<SPECIAL_149>",
+     "<SPECIAL_150>",
+     "<SPECIAL_151>",
+     "<SPECIAL_152>",
+     "<SPECIAL_153>",
+     "<SPECIAL_154>",
+     "<SPECIAL_155>",
+     "<SPECIAL_156>",
+     "<SPECIAL_157>",
+     "<SPECIAL_158>",
+     "<SPECIAL_159>",
+     "<SPECIAL_160>",
+     "<SPECIAL_161>",
+     "<SPECIAL_162>",
+     "<SPECIAL_163>",
+     "<SPECIAL_164>",
+     "<SPECIAL_165>",
+     "<SPECIAL_166>",
+     "<SPECIAL_167>",
+     "<SPECIAL_168>",
+     "<SPECIAL_169>",
+     "<SPECIAL_170>",
+     "<SPECIAL_171>",
+     "<SPECIAL_172>",
+     "<SPECIAL_173>",
+     "<SPECIAL_174>",
+     "<SPECIAL_175>",
+     "<SPECIAL_176>",
+     "<SPECIAL_177>",
+     "<SPECIAL_178>",
+     "<SPECIAL_179>",
+     "<SPECIAL_180>",
+     "<SPECIAL_181>",
+     "<SPECIAL_182>",
+     "<SPECIAL_183>",
+     "<SPECIAL_184>",
+     "<SPECIAL_185>",
+     "<SPECIAL_186>",
+     "<SPECIAL_187>",
+     "<SPECIAL_188>",
+     "<SPECIAL_189>",
+     "<SPECIAL_190>",
+     "<SPECIAL_191>",
+     "<SPECIAL_192>",
+     "<SPECIAL_193>",
+     "<SPECIAL_194>",
+     "<SPECIAL_195>",
+     "<SPECIAL_196>",
+     "<SPECIAL_197>",
+     "<SPECIAL_198>",
+     "<SPECIAL_199>",
+     "<SPECIAL_200>",
+     "<SPECIAL_201>",
+     "<SPECIAL_202>",
+     "<SPECIAL_203>",
+     "<SPECIAL_204>",
+     "<SPECIAL_205>",
+     "<SPECIAL_206>",
+     "<SPECIAL_207>",
+     "<SPECIAL_208>",
+     "<SPECIAL_209>",
+     "<SPECIAL_210>",
+     "<SPECIAL_211>",
+     "<SPECIAL_212>",
+     "<SPECIAL_213>",
+     "<SPECIAL_214>",
+     "<SPECIAL_215>",
+     "<SPECIAL_216>",
+     "<SPECIAL_217>",
+     "<SPECIAL_218>",
+     "<SPECIAL_219>",
+     "<SPECIAL_220>",
+     "<SPECIAL_221>",
+     "<SPECIAL_222>",
+     "<SPECIAL_223>",
+     "<SPECIAL_224>",
+     "<SPECIAL_225>",
+     "<SPECIAL_226>",
+     "<SPECIAL_227>",
+     "<SPECIAL_228>",
+     "<SPECIAL_229>",
+     "<SPECIAL_230>",
+     "<SPECIAL_231>",
+     "<SPECIAL_232>",
+     "<SPECIAL_233>",
+     "<SPECIAL_234>",
+     "<SPECIAL_235>",
+     "<SPECIAL_236>",
+     "<SPECIAL_237>",
+     "<SPECIAL_238>",
+     "<SPECIAL_239>",
+     "<SPECIAL_240>",
+     "<SPECIAL_241>",
+     "<SPECIAL_242>",
+     "<SPECIAL_243>",
+     "<SPECIAL_244>",
+     "<SPECIAL_245>",
+     "<SPECIAL_246>",
+     "<SPECIAL_247>",
+     "<SPECIAL_248>",
+     "<SPECIAL_249>",
+     "<SPECIAL_250>",
+     "<SPECIAL_251>",
+     "<SPECIAL_252>",
+     "<SPECIAL_253>",
+     "<SPECIAL_254>",
+     "<SPECIAL_255>",
+     "<SPECIAL_256>",
+     "<SPECIAL_257>",
+     "<SPECIAL_258>",
+     "<SPECIAL_259>",
+     "<SPECIAL_260>",
+     "<SPECIAL_261>",
+     "<SPECIAL_262>",
+     "<SPECIAL_263>",
+     "<SPECIAL_264>",
+     "<SPECIAL_265>",
+     "<SPECIAL_266>",
+     "<SPECIAL_267>",
+     "<SPECIAL_268>",
+     "<SPECIAL_269>",
+     "<SPECIAL_270>",
+     "<SPECIAL_271>",
+     "<SPECIAL_272>",
+     "<SPECIAL_273>",
+     "<SPECIAL_274>",
+     "<SPECIAL_275>",
+     "<SPECIAL_276>",
+     "<SPECIAL_277>",
+     "<SPECIAL_278>",
+     "<SPECIAL_279>",
+     "<SPECIAL_280>",
+     "<SPECIAL_281>",
+     "<SPECIAL_282>",
+     "<SPECIAL_283>",
+     "<SPECIAL_284>",
+     "<SPECIAL_285>",
+     "<SPECIAL_286>",
+     "<SPECIAL_287>",
+     "<SPECIAL_288>",
+     "<SPECIAL_289>",
+     "<SPECIAL_290>",
+     "<SPECIAL_291>",
+     "<SPECIAL_292>",
+     "<SPECIAL_293>",
+     "<SPECIAL_294>",
+     "<SPECIAL_295>",
+     "<SPECIAL_296>",
+     "<SPECIAL_297>",
+     "<SPECIAL_298>",
+     "<SPECIAL_299>",
+     "<SPECIAL_300>",
+     "<SPECIAL_301>",
+     "<SPECIAL_302>",
+     "<SPECIAL_303>",
+     "<SPECIAL_304>",
+     "<SPECIAL_305>",
+     "<SPECIAL_306>",
+     "<SPECIAL_307>",
+     "<SPECIAL_308>",
+     "<SPECIAL_309>",
+     "<SPECIAL_310>",
+     "<SPECIAL_311>",
+     "<SPECIAL_312>",
+     "<SPECIAL_313>",
+     "<SPECIAL_314>",
+     "<SPECIAL_315>",
+     "<SPECIAL_316>",
+     "<SPECIAL_317>",
+     "<SPECIAL_318>",
+     "<SPECIAL_319>",
+     "<SPECIAL_320>",
+     "<SPECIAL_321>",
+     "<SPECIAL_322>",
+     "<SPECIAL_323>",
+     "<SPECIAL_324>",
+     "<SPECIAL_325>",
+     "<SPECIAL_326>",
+     "<SPECIAL_327>",
+     "<SPECIAL_328>",
+     "<SPECIAL_329>",
+     "<SPECIAL_330>",
+     "<SPECIAL_331>",
+     "<SPECIAL_332>",
+     "<SPECIAL_333>",
+     "<SPECIAL_334>",
+     "<SPECIAL_335>",
+     "<SPECIAL_336>",
+     "<SPECIAL_337>",
+     "<SPECIAL_338>",
+     "<SPECIAL_339>",
+     "<SPECIAL_340>",
+     "<SPECIAL_341>",
+     "<SPECIAL_342>",
+     "<SPECIAL_343>",
+     "<SPECIAL_344>",
+     "<SPECIAL_345>",
+     "<SPECIAL_346>",
+     "<SPECIAL_347>",
+     "<SPECIAL_348>",
+     "<SPECIAL_349>",
+     "<SPECIAL_350>",
+     "<SPECIAL_351>",
+     "<SPECIAL_352>",
+     "<SPECIAL_353>",
+     "<SPECIAL_354>",
+     "<SPECIAL_355>",
+     "<SPECIAL_356>",
+     "<SPECIAL_357>",
+     "<SPECIAL_358>",
+     "<SPECIAL_359>",
+     "<SPECIAL_360>",
+     "<SPECIAL_361>",
+     "<SPECIAL_362>",
+     "<SPECIAL_363>",
+     "<SPECIAL_364>",
+     "<SPECIAL_365>",
+     "<SPECIAL_366>",
+     "<SPECIAL_367>",
+     "<SPECIAL_368>",
+     "<SPECIAL_369>",
+     "<SPECIAL_370>",
+     "<SPECIAL_371>",
+     "<SPECIAL_372>",
+     "<SPECIAL_373>",
+     "<SPECIAL_374>",
+     "<SPECIAL_375>",
+     "<SPECIAL_376>",
+     "<SPECIAL_377>",
+     "<SPECIAL_378>",
+     "<SPECIAL_379>",
+     "<SPECIAL_380>",
+     "<SPECIAL_381>",
+     "<SPECIAL_382>",
+     "<SPECIAL_383>",
+     "<SPECIAL_384>",
+     "<SPECIAL_385>",
+     "<SPECIAL_386>",
+     "<SPECIAL_387>",
+     "<SPECIAL_388>",
+     "<SPECIAL_389>",
+     "<SPECIAL_390>",
+     "<SPECIAL_391>",
+     "<SPECIAL_392>",
+     "<SPECIAL_393>",
+     "<SPECIAL_394>",
+     "<SPECIAL_395>",
+     "<SPECIAL_396>",
+     "<SPECIAL_397>",
+     "<SPECIAL_398>",
+     "<SPECIAL_399>",
+     "<SPECIAL_400>",
+     "<SPECIAL_401>",
+     "<SPECIAL_402>",
+     "<SPECIAL_403>",
+     "<SPECIAL_404>",
+     "<SPECIAL_405>",
+     "<SPECIAL_406>",
+     "<SPECIAL_407>",
+     "<SPECIAL_408>",
+     "<SPECIAL_409>",
+     "<SPECIAL_410>",
+     "<SPECIAL_411>",
+     "<SPECIAL_412>",
+     "<SPECIAL_413>",
+     "<SPECIAL_414>",
+     "<SPECIAL_415>",
+     "<SPECIAL_416>",
+     "<SPECIAL_417>",
+     "<SPECIAL_418>",
+     "<SPECIAL_419>",
+     "<SPECIAL_420>",
+     "<SPECIAL_421>",
+     "<SPECIAL_422>",
+     "<SPECIAL_423>",
+     "<SPECIAL_424>",
+     "<SPECIAL_425>",
+     "<SPECIAL_426>",
+     "<SPECIAL_427>",
+     "<SPECIAL_428>",
+     "<SPECIAL_429>",
+     "<SPECIAL_430>",
+     "<SPECIAL_431>",
+     "<SPECIAL_432>",
+     "<SPECIAL_433>",
+     "<SPECIAL_434>",
+     "<SPECIAL_435>",
+     "<SPECIAL_436>",
+     "<SPECIAL_437>",
+     "<SPECIAL_438>",
+     "<SPECIAL_439>",
+     "<SPECIAL_440>",
+     "<SPECIAL_441>",
+     "<SPECIAL_442>",
+     "<SPECIAL_443>",
+     "<SPECIAL_444>",
+     "<SPECIAL_445>",
+     "<SPECIAL_446>",
+     "<SPECIAL_447>",
+     "<SPECIAL_448>",
+     "<SPECIAL_449>",
+     "<SPECIAL_450>",
+     "<SPECIAL_451>",
+     "<SPECIAL_452>",
+     "<SPECIAL_453>",
+     "<SPECIAL_454>",
+     "<SPECIAL_455>",
+     "<SPECIAL_456>",
+     "<SPECIAL_457>",
+     "<SPECIAL_458>",
+     "<SPECIAL_459>",
+     "<SPECIAL_460>",
+     "<SPECIAL_461>",
+     "<SPECIAL_462>",
+     "<SPECIAL_463>",
+     "<SPECIAL_464>",
+     "<SPECIAL_465>",
+     "<SPECIAL_466>",
+     "<SPECIAL_467>",
+     "<SPECIAL_468>",
+     "<SPECIAL_469>",
+     "<SPECIAL_470>",
+     "<SPECIAL_471>",
+     "<SPECIAL_472>",
+     "<SPECIAL_473>",
+     "<SPECIAL_474>",
+     "<SPECIAL_475>",
+     "<SPECIAL_476>",
+     "<SPECIAL_477>",
+     "<SPECIAL_478>",
+     "<SPECIAL_479>",
+     "<SPECIAL_480>",
+     "<SPECIAL_481>",
+     "<SPECIAL_482>",
+     "<SPECIAL_483>",
+     "<SPECIAL_484>",
+     "<SPECIAL_485>",
+     "<SPECIAL_486>",
+     "<SPECIAL_487>",
+     "<SPECIAL_488>",
+     "<SPECIAL_489>",
+     "<SPECIAL_490>",
+     "<SPECIAL_491>",
+     "<SPECIAL_492>",
+     "<SPECIAL_493>",
+     "<SPECIAL_494>",
+     "<SPECIAL_495>",
+     "<SPECIAL_496>",
+     "<SPECIAL_497>",
+     "<SPECIAL_498>",
+     "<SPECIAL_499>",
+     "<SPECIAL_500>",
+     "<SPECIAL_501>",
+     "<SPECIAL_502>",
+     "<SPECIAL_503>",
+     "<SPECIAL_504>",
+     "<SPECIAL_505>",
+     "<SPECIAL_506>",
+     "<SPECIAL_507>",
+     "<SPECIAL_508>",
+     "<SPECIAL_509>",
+     "<SPECIAL_510>",
+     "<SPECIAL_511>",
+     "<SPECIAL_512>",
+     "<SPECIAL_513>",
520
+ "<SPECIAL_514>",
521
+ "<SPECIAL_515>",
522
+ "<SPECIAL_516>",
523
+ "<SPECIAL_517>",
524
+ "<SPECIAL_518>",
525
+ "<SPECIAL_519>",
526
+ "<SPECIAL_520>",
527
+ "<SPECIAL_521>",
528
+ "<SPECIAL_522>",
529
+ "<SPECIAL_523>",
530
+ "<SPECIAL_524>",
531
+ "<SPECIAL_525>",
532
+ "<SPECIAL_526>",
533
+ "<SPECIAL_527>",
534
+ "<SPECIAL_528>",
535
+ "<SPECIAL_529>",
536
+ "<SPECIAL_530>",
537
+ "<SPECIAL_531>",
538
+ "<SPECIAL_532>",
539
+ "<SPECIAL_533>",
540
+ "<SPECIAL_534>",
541
+ "<SPECIAL_535>",
542
+ "<SPECIAL_536>",
543
+ "<SPECIAL_537>",
544
+ "<SPECIAL_538>",
545
+ "<SPECIAL_539>",
546
+ "<SPECIAL_540>",
547
+ "<SPECIAL_541>",
548
+ "<SPECIAL_542>",
549
+ "<SPECIAL_543>",
550
+ "<SPECIAL_544>",
551
+ "<SPECIAL_545>",
552
+ "<SPECIAL_546>",
553
+ "<SPECIAL_547>",
554
+ "<SPECIAL_548>",
555
+ "<SPECIAL_549>",
556
+ "<SPECIAL_550>",
557
+ "<SPECIAL_551>",
558
+ "<SPECIAL_552>",
559
+ "<SPECIAL_553>",
560
+ "<SPECIAL_554>",
561
+ "<SPECIAL_555>",
562
+ "<SPECIAL_556>",
563
+ "<SPECIAL_557>",
564
+ "<SPECIAL_558>",
565
+ "<SPECIAL_559>",
566
+ "<SPECIAL_560>",
567
+ "<SPECIAL_561>",
568
+ "<SPECIAL_562>",
569
+ "<SPECIAL_563>",
570
+ "<SPECIAL_564>",
571
+ "<SPECIAL_565>",
572
+ "<SPECIAL_566>",
573
+ "<SPECIAL_567>",
574
+ "<SPECIAL_568>",
575
+ "<SPECIAL_569>",
576
+ "<SPECIAL_570>",
577
+ "<SPECIAL_571>",
578
+ "<SPECIAL_572>",
579
+ "<SPECIAL_573>",
580
+ "<SPECIAL_574>",
581
+ "<SPECIAL_575>",
582
+ "<SPECIAL_576>",
583
+ "<SPECIAL_577>",
584
+ "<SPECIAL_578>",
585
+ "<SPECIAL_579>",
586
+ "<SPECIAL_580>",
587
+ "<SPECIAL_581>",
588
+ "<SPECIAL_582>",
589
+ "<SPECIAL_583>",
590
+ "<SPECIAL_584>",
591
+ "<SPECIAL_585>",
592
+ "<SPECIAL_586>",
593
+ "<SPECIAL_587>",
594
+ "<SPECIAL_588>",
595
+ "<SPECIAL_589>",
596
+ "<SPECIAL_590>",
597
+ "<SPECIAL_591>",
598
+ "<SPECIAL_592>",
599
+ "<SPECIAL_593>",
600
+ "<SPECIAL_594>",
601
+ "<SPECIAL_595>",
602
+ "<SPECIAL_596>",
603
+ "<SPECIAL_597>",
604
+ "<SPECIAL_598>",
605
+ "<SPECIAL_599>",
606
+ "<SPECIAL_600>",
607
+ "<SPECIAL_601>",
608
+ "<SPECIAL_602>",
609
+ "<SPECIAL_603>",
610
+ "<SPECIAL_604>",
611
+ "<SPECIAL_605>",
612
+ "<SPECIAL_606>",
613
+ "<SPECIAL_607>",
614
+ "<SPECIAL_608>",
615
+ "<SPECIAL_609>",
616
+ "<SPECIAL_610>",
617
+ "<SPECIAL_611>",
618
+ "<SPECIAL_612>",
619
+ "<SPECIAL_613>",
620
+ "<SPECIAL_614>",
621
+ "<SPECIAL_615>",
622
+ "<SPECIAL_616>",
623
+ "<SPECIAL_617>",
624
+ "<SPECIAL_618>",
625
+ "<SPECIAL_619>",
626
+ "<SPECIAL_620>",
627
+ "<SPECIAL_621>",
628
+ "<SPECIAL_622>",
629
+ "<SPECIAL_623>",
630
+ "<SPECIAL_624>",
631
+ "<SPECIAL_625>",
632
+ "<SPECIAL_626>",
633
+ "<SPECIAL_627>",
634
+ "<SPECIAL_628>",
635
+ "<SPECIAL_629>",
636
+ "<SPECIAL_630>",
637
+ "<SPECIAL_631>",
638
+ "<SPECIAL_632>",
639
+ "<SPECIAL_633>",
640
+ "<SPECIAL_634>",
641
+ "<SPECIAL_635>",
642
+ "<SPECIAL_636>",
643
+ "<SPECIAL_637>",
644
+ "<SPECIAL_638>",
645
+ "<SPECIAL_639>",
646
+ "<SPECIAL_640>",
647
+ "<SPECIAL_641>",
648
+ "<SPECIAL_642>",
649
+ "<SPECIAL_643>",
650
+ "<SPECIAL_644>",
651
+ "<SPECIAL_645>",
652
+ "<SPECIAL_646>",
653
+ "<SPECIAL_647>",
654
+ "<SPECIAL_648>",
655
+ "<SPECIAL_649>",
656
+ "<SPECIAL_650>",
657
+ "<SPECIAL_651>",
658
+ "<SPECIAL_652>",
659
+ "<SPECIAL_653>",
660
+ "<SPECIAL_654>",
661
+ "<SPECIAL_655>",
662
+ "<SPECIAL_656>",
663
+ "<SPECIAL_657>",
664
+ "<SPECIAL_658>",
665
+ "<SPECIAL_659>",
666
+ "<SPECIAL_660>",
667
+ "<SPECIAL_661>",
668
+ "<SPECIAL_662>",
669
+ "<SPECIAL_663>",
670
+ "<SPECIAL_664>",
671
+ "<SPECIAL_665>",
672
+ "<SPECIAL_666>",
673
+ "<SPECIAL_667>",
674
+ "<SPECIAL_668>",
675
+ "<SPECIAL_669>",
676
+ "<SPECIAL_670>",
677
+ "<SPECIAL_671>",
678
+ "<SPECIAL_672>",
679
+ "<SPECIAL_673>",
680
+ "<SPECIAL_674>",
681
+ "<SPECIAL_675>",
682
+ "<SPECIAL_676>",
683
+ "<SPECIAL_677>",
684
+ "<SPECIAL_678>",
685
+ "<SPECIAL_679>",
686
+ "<SPECIAL_680>",
687
+ "<SPECIAL_681>",
688
+ "<SPECIAL_682>",
689
+ "<SPECIAL_683>",
690
+ "<SPECIAL_684>",
691
+ "<SPECIAL_685>",
692
+ "<SPECIAL_686>",
693
+ "<SPECIAL_687>",
694
+ "<SPECIAL_688>",
695
+ "<SPECIAL_689>",
696
+ "<SPECIAL_690>",
697
+ "<SPECIAL_691>",
698
+ "<SPECIAL_692>",
699
+ "<SPECIAL_693>",
700
+ "<SPECIAL_694>",
701
+ "<SPECIAL_695>",
702
+ "<SPECIAL_696>",
703
+ "<SPECIAL_697>",
704
+ "<SPECIAL_698>",
705
+ "<SPECIAL_699>",
706
+ "<SPECIAL_700>",
707
+ "<SPECIAL_701>",
708
+ "<SPECIAL_702>",
709
+ "<SPECIAL_703>",
710
+ "<SPECIAL_704>",
711
+ "<SPECIAL_705>",
712
+ "<SPECIAL_706>",
713
+ "<SPECIAL_707>",
714
+ "<SPECIAL_708>",
715
+ "<SPECIAL_709>",
716
+ "<SPECIAL_710>",
717
+ "<SPECIAL_711>",
718
+ "<SPECIAL_712>",
719
+ "<SPECIAL_713>",
720
+ "<SPECIAL_714>",
721
+ "<SPECIAL_715>",
722
+ "<SPECIAL_716>",
723
+ "<SPECIAL_717>",
724
+ "<SPECIAL_718>",
725
+ "<SPECIAL_719>",
726
+ "<SPECIAL_720>",
727
+ "<SPECIAL_721>",
728
+ "<SPECIAL_722>",
729
+ "<SPECIAL_723>",
730
+ "<SPECIAL_724>",
731
+ "<SPECIAL_725>",
732
+ "<SPECIAL_726>",
733
+ "<SPECIAL_727>",
734
+ "<SPECIAL_728>",
735
+ "<SPECIAL_729>",
736
+ "<SPECIAL_730>",
737
+ "<SPECIAL_731>",
738
+ "<SPECIAL_732>",
739
+ "<SPECIAL_733>",
740
+ "<SPECIAL_734>",
741
+ "<SPECIAL_735>",
742
+ "<SPECIAL_736>",
743
+ "<SPECIAL_737>",
744
+ "<SPECIAL_738>",
745
+ "<SPECIAL_739>",
746
+ "<SPECIAL_740>",
747
+ "<SPECIAL_741>",
748
+ "<SPECIAL_742>",
749
+ "<SPECIAL_743>",
750
+ "<SPECIAL_744>",
751
+ "<SPECIAL_745>",
752
+ "<SPECIAL_746>",
753
+ "<SPECIAL_747>",
754
+ "<SPECIAL_748>",
755
+ "<SPECIAL_749>",
756
+ "<SPECIAL_750>",
757
+ "<SPECIAL_751>",
758
+ "<SPECIAL_752>",
759
+ "<SPECIAL_753>",
760
+ "<SPECIAL_754>",
761
+ "<SPECIAL_755>",
762
+ "<SPECIAL_756>",
763
+ "<SPECIAL_757>",
764
+ "<SPECIAL_758>",
765
+ "<SPECIAL_759>",
766
+ "<SPECIAL_760>",
767
+ "<SPECIAL_761>",
768
+ "<SPECIAL_762>",
769
+ "<SPECIAL_763>",
770
+ "<SPECIAL_764>",
771
+ "<SPECIAL_765>",
772
+ "<SPECIAL_766>",
773
+ "<SPECIAL_767>",
774
+ "<SPECIAL_768>",
775
+ "<SPECIAL_769>",
776
+ "<SPECIAL_770>",
777
+ "<SPECIAL_771>",
778
+ "<SPECIAL_772>",
779
+ "<SPECIAL_773>",
780
+ "<SPECIAL_774>",
781
+ "<SPECIAL_775>",
782
+ "<SPECIAL_776>",
783
+ "<SPECIAL_777>",
784
+ "<SPECIAL_778>",
785
+ "<SPECIAL_779>",
786
+ "<SPECIAL_780>",
787
+ "<SPECIAL_781>",
788
+ "<SPECIAL_782>",
789
+ "<SPECIAL_783>",
790
+ "<SPECIAL_784>",
791
+ "<SPECIAL_785>",
792
+ "<SPECIAL_786>",
793
+ "<SPECIAL_787>",
794
+ "<SPECIAL_788>",
795
+ "<SPECIAL_789>",
796
+ "<SPECIAL_790>",
797
+ "<SPECIAL_791>",
798
+ "<SPECIAL_792>",
799
+ "<SPECIAL_793>",
800
+ "<SPECIAL_794>",
801
+ "<SPECIAL_795>",
802
+ "<SPECIAL_796>",
803
+ "<SPECIAL_797>",
804
+ "<SPECIAL_798>",
805
+ "<SPECIAL_799>",
806
+ "<SPECIAL_800>",
807
+ "<SPECIAL_801>",
808
+ "<SPECIAL_802>",
809
+ "<SPECIAL_803>",
810
+ "<SPECIAL_804>",
811
+ "<SPECIAL_805>",
812
+ "<SPECIAL_806>",
813
+ "<SPECIAL_807>",
814
+ "<SPECIAL_808>",
815
+ "<SPECIAL_809>",
816
+ "<SPECIAL_810>",
817
+ "<SPECIAL_811>",
818
+ "<SPECIAL_812>",
819
+ "<SPECIAL_813>",
820
+ "<SPECIAL_814>",
821
+ "<SPECIAL_815>",
822
+ "<SPECIAL_816>",
823
+ "<SPECIAL_817>",
824
+ "<SPECIAL_818>",
825
+ "<SPECIAL_819>",
826
+ "<SPECIAL_820>",
827
+ "<SPECIAL_821>",
828
+ "<SPECIAL_822>",
829
+ "<SPECIAL_823>",
830
+ "<SPECIAL_824>",
831
+ "<SPECIAL_825>",
832
+ "<SPECIAL_826>",
833
+ "<SPECIAL_827>",
834
+ "<SPECIAL_828>",
835
+ "<SPECIAL_829>",
836
+ "<SPECIAL_830>",
837
+ "<SPECIAL_831>",
838
+ "<SPECIAL_832>",
839
+ "<SPECIAL_833>",
840
+ "<SPECIAL_834>",
841
+ "<SPECIAL_835>",
842
+ "<SPECIAL_836>",
843
+ "<SPECIAL_837>",
844
+ "<SPECIAL_838>",
845
+ "<SPECIAL_839>",
846
+ "<SPECIAL_840>",
847
+ "<SPECIAL_841>",
848
+ "<SPECIAL_842>",
849
+ "<SPECIAL_843>",
850
+ "<SPECIAL_844>",
851
+ "<SPECIAL_845>",
852
+ "<SPECIAL_846>",
853
+ "<SPECIAL_847>",
854
+ "<SPECIAL_848>",
855
+ "<SPECIAL_849>",
856
+ "<SPECIAL_850>",
857
+ "<SPECIAL_851>",
858
+ "<SPECIAL_852>",
859
+ "<SPECIAL_853>",
860
+ "<SPECIAL_854>",
861
+ "<SPECIAL_855>",
862
+ "<SPECIAL_856>",
863
+ "<SPECIAL_857>",
864
+ "<SPECIAL_858>",
865
+ "<SPECIAL_859>",
866
+ "<SPECIAL_860>",
867
+ "<SPECIAL_861>",
868
+ "<SPECIAL_862>",
869
+ "<SPECIAL_863>",
870
+ "<SPECIAL_864>",
871
+ "<SPECIAL_865>",
872
+ "<SPECIAL_866>",
873
+ "<SPECIAL_867>",
874
+ "<SPECIAL_868>",
875
+ "<SPECIAL_869>",
876
+ "<SPECIAL_870>",
877
+ "<SPECIAL_871>",
878
+ "<SPECIAL_872>",
879
+ "<SPECIAL_873>",
880
+ "<SPECIAL_874>",
881
+ "<SPECIAL_875>",
882
+ "<SPECIAL_876>",
883
+ "<SPECIAL_877>",
884
+ "<SPECIAL_878>",
885
+ "<SPECIAL_879>",
886
+ "<SPECIAL_880>",
887
+ "<SPECIAL_881>",
888
+ "<SPECIAL_882>",
889
+ "<SPECIAL_883>",
890
+ "<SPECIAL_884>",
891
+ "<SPECIAL_885>",
892
+ "<SPECIAL_886>",
893
+ "<SPECIAL_887>",
894
+ "<SPECIAL_888>",
895
+ "<SPECIAL_889>",
896
+ "<SPECIAL_890>",
897
+ "<SPECIAL_891>",
898
+ "<SPECIAL_892>",
899
+ "<SPECIAL_893>",
900
+ "<SPECIAL_894>",
901
+ "<SPECIAL_895>",
902
+ "<SPECIAL_896>",
903
+ "<SPECIAL_897>",
904
+ "<SPECIAL_898>",
905
+ "<SPECIAL_899>",
906
+ "<SPECIAL_900>",
907
+ "<SPECIAL_901>",
908
+ "<SPECIAL_902>",
909
+ "<SPECIAL_903>",
910
+ "<SPECIAL_904>",
911
+ "<SPECIAL_905>",
912
+ "<SPECIAL_906>",
913
+ "<SPECIAL_907>",
914
+ "<SPECIAL_908>",
915
+ "<SPECIAL_909>",
916
+ "<SPECIAL_910>",
917
+ "<SPECIAL_911>",
918
+ "<SPECIAL_912>",
919
+ "<SPECIAL_913>",
920
+ "<SPECIAL_914>",
921
+ "<SPECIAL_915>",
922
+ "<SPECIAL_916>",
923
+ "<SPECIAL_917>",
924
+ "<SPECIAL_918>",
925
+ "<SPECIAL_919>",
926
+ "<SPECIAL_920>",
927
+ "<SPECIAL_921>",
928
+ "<SPECIAL_922>",
929
+ "<SPECIAL_923>",
930
+ "<SPECIAL_924>",
931
+ "<SPECIAL_925>",
932
+ "<SPECIAL_926>",
933
+ "<SPECIAL_927>",
934
+ "<SPECIAL_928>",
935
+ "<SPECIAL_929>",
936
+ "<SPECIAL_930>",
937
+ "<SPECIAL_931>",
938
+ "<SPECIAL_932>",
939
+ "<SPECIAL_933>",
940
+ "<SPECIAL_934>",
941
+ "<SPECIAL_935>",
942
+ "<SPECIAL_936>",
943
+ "<SPECIAL_937>",
944
+ "<SPECIAL_938>",
945
+ "<SPECIAL_939>",
946
+ "<SPECIAL_940>",
947
+ "<SPECIAL_941>",
948
+ "<SPECIAL_942>",
949
+ "<SPECIAL_943>",
950
+ "<SPECIAL_944>",
951
+ "<SPECIAL_945>",
952
+ "<SPECIAL_946>",
953
+ "<SPECIAL_947>",
954
+ "<SPECIAL_948>",
955
+ "<SPECIAL_949>",
956
+ "<SPECIAL_950>",
957
+ "<SPECIAL_951>",
958
+ "<SPECIAL_952>",
959
+ "<SPECIAL_953>",
960
+ "<SPECIAL_954>",
961
+ "<SPECIAL_955>",
962
+ "<SPECIAL_956>",
963
+ "<SPECIAL_957>",
964
+ "<SPECIAL_958>",
965
+ "<SPECIAL_959>",
966
+ "<SPECIAL_960>",
967
+ "<SPECIAL_961>",
968
+ "<SPECIAL_962>",
969
+ "<SPECIAL_963>",
970
+ "<SPECIAL_964>",
971
+ "<SPECIAL_965>",
972
+ "<SPECIAL_966>",
973
+ "<SPECIAL_967>",
974
+ "<SPECIAL_968>",
975
+ "<SPECIAL_969>",
976
+ "<SPECIAL_970>",
977
+ "<SPECIAL_971>",
978
+ "<SPECIAL_972>",
979
+ "<SPECIAL_973>",
980
+ "<SPECIAL_974>",
981
+ "<SPECIAL_975>",
982
+ "<SPECIAL_976>",
983
+ "<SPECIAL_977>",
984
+ "<SPECIAL_978>",
985
+ "<SPECIAL_979>",
986
+ "<SPECIAL_980>",
987
+ "<SPECIAL_981>",
988
+ "<SPECIAL_982>",
989
+ "<SPECIAL_983>",
990
+ "<SPECIAL_984>",
991
+ "<SPECIAL_985>",
992
+ "<SPECIAL_986>",
993
+ "<SPECIAL_987>",
994
+ "<SPECIAL_988>",
995
+ "<SPECIAL_989>",
996
+ "<SPECIAL_990>",
997
+ "<SPECIAL_991>",
998
+ "<SPECIAL_992>",
999
+ "<SPECIAL_993>",
1000
+ "<SPECIAL_994>",
1001
+ "<SPECIAL_995>",
1002
+ "<SPECIAL_996>",
1003
+ "<SPECIAL_997>",
1004
+ "<SPECIAL_998>",
1005
+ "<SPECIAL_999>"
1006
+ ],
1007
+ "model_max_length": 1000000000000000019884624838656,
1008
+ "pad_token": "<pad>",
1009
+ "processor_class": "PixtralProcessor",
1010
+ "tokenizer_class": "TokenizersBackend",
1011
+ "unk_token": "<unk>"
1012
+ }
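A minimal usage sketch, not part of this commit: the tokenizer_config.json added above declares <pad> and <unk> tokens and a long run of reserved <SPECIAL_N> placeholders. Assuming these placeholders are registered as special tokens (as this config suggests) and that the repository has been downloaded locally, the path below is hypothetical and stands in for the actual repo id or checkout directory.

from transformers import AutoTokenizer

# Hypothetical path; substitute the real repo id or local checkout.
tokenizer = AutoTokenizer.from_pretrained("./path/to/this/repo")

# The config declares <pad> and <unk>; both should resolve here.
print(tokenizer.pad_token, tokenizer.unk_token)  # expected: <pad> <unk>

# A reserved placeholder should encode to a single token id, and it should
# disappear when decoding with skip_special_tokens=True, assuming it is
# registered as a special token as the list above suggests.
ids = tokenizer.encode("<SPECIAL_999>", add_special_tokens=False)
print(ids)
print(repr(tokenizer.decode(ids, skip_special_tokens=True)))  # expected: ''

Reserving a block of unused <SPECIAL_N> ids like this is a common design choice: it keeps the vocabulary size fixed so that future control tokens can be assigned without resizing model embeddings.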