joerowell committed f82b43d (0 parents)

Laguna XS.2 upload
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
LICENSE.md ADDED
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2026 Poolside
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
README.md ADDED
@@ -0,0 +1,248 @@
+ ---
+ library_name: vllm
+ inference: false
+ extra_gated_description: >-
+   To learn more about how we process your personal data, please read our <a
+   href="https://poolside.ai/legal/privacy">Privacy Policy</a>.
+ tags:
+ - laguna-xs.2
+ license: apache-2.0
+ pipeline_tag: text-generation
+ ---
+
+ <p align="center">
+   <img alt="poolside-banner" src="https://poolside.ai/assets/laguna/laguna-xs2-banner.svg" width="800px">
+ </p>
+
+ <p align="center">
+   <a href="https://shimmer.poolside.ai"><strong>Try Laguna XS.2 in Shimmer</strong></a> ·
+   <a href="https://platform.poolside.ai"><strong>Get an API key</strong></a> ·
+   <a href="https://poolside.ai/blog/laguna-a-deeper-dive"><strong>Release blog post</strong></a>
+ </p>
+
+ <br>
+
+ # Laguna XS.2-INT4
+ Laguna XS.2-INT4 is a 33B-total-parameter Mixture-of-Experts model with 3B parameters activated per token, designed for agentic coding and long-horizon work on a local machine. It uses Sliding Window Attention with per-head gating in 30 of its 40 layers for fast inference and low KV cache requirements.
+
+ > [!NOTE]
+ > This is the INT4 variant with an FP8-quantized KV cache. The [BF16](https://huggingface.co/poolside/Laguna-XS.2), [FP8](https://huggingface.co/poolside/Laguna-XS.2-FP8) and [NVFP4](https://huggingface.co/poolside/Laguna-XS.2-NVFP4) variants are also available on Hugging Face.
+
+ ## Highlights
+ - **Mixed SWA and global attention layout**: Laguna XS.2 uses sigmoid gating with per-layer rotary scales, enabling mixed SWA (Sliding Window Attention) and global attention layers in a 3:1 ratio across the 40 total layers
+ - **KV cache in FP8**: the KV cache is quantized to FP8, reducing memory per token
+ - **Native reasoning support**: interleaved thinking between tool calls, with support for enabling and disabling thinking per request
+ - **Local-ready**: at 33B total parameters and 3B activated, Laguna XS.2 is compact enough to run on a Mac with 36 GB of RAM. [Available on Ollama](https://ollama.com/library/laguna-xs.2)
+ - **Apache 2.0 license**: use and modify freely for commercial and non-commercial purposes
+
+ ---
+
+ ## Model overview
+
+ - Training: pre-training, post-training and reinforcement learning stages
+ - Number of parameters: 33B total, 3B activated per token
+ - Optimizer: Muon
+ - Layers: 40 (10 with global attention, 30 with sliding window attention)
+ - Experts: 256 routed experts plus 1 shared expert
+ - Sliding window: 512 tokens
+ - Modality: text-to-text
+ - Context window: 131,072 tokens
+ - Reasoning support: interleaved thinking with preserved thinking
+
+ ## Benchmark results
+
+ <p align="center">
+   <img alt="benchmarks" src="https://poolside.ai/assets/laguna/laguna-xs2-chart.svg" width="800px">
+ </p>
+
+ | Model                  | Size (total params.) | SWE-bench Verified | SWE-bench Multilingual | SWE-bench Pro (Public Dataset) | Terminal-Bench 2.0 |
+ |------------------------|----------------------|--------------------|------------------------|--------------------------------|--------------------|
+ | **Laguna XS.2 (BF16)** | 33B                  | 68.2%              | 62.4%                  | 44.5%                          | 30.1%              |
+ | Devstral Small 2       | 24B dense            | 68.0%              | 55.7%                  | -                              | 22.5%              |
+ | Gemma 4 31B IT         | 31B dense            | 52.0%              | 51.7%                  | 35.7%                          | 42.9%              |
+ | Qwen3.5-35B-A3B        | 35B                  | 69.2%              | 60.3%                  | 44.6%                          | 40.5%              |
+ | Qwen3.6-35B-A3B        | 35B                  | 73.4%              | 67.2%                  | 49.5%                          | 51.5%              |
+ | Claude Haiku 4.5       | -                    | 73.3%              | -                      | 39.5%                          | 29.8%              |
+ | GPT-5.4 Nano           | -                    | -                  | -                      | 52.4%                          | 46.3%              |
+
+ *We used the highest publicly referenced scores for all comparison models on each benchmark. In almost all cases these were official scores published in release blog posts or equivalent. The exceptions are Gemma 4 31B IT, where the highest published scores were [reported by the Qwen team](https://qwen.ai/blog?id=qwen3.6-35b-a3b), and Claude Haiku 4.5, where the highest published (verified) SWE-bench Pro and Terminal-Bench 2.0 scores come from their respective official leaderboards.*
+
+ <details>
+ <summary>Expand for benchmarking methodology</summary>
+
+ All benchmarking for Laguna XS.2 was completed using the Laude Institute's Harbor Framework with our [agent harness](https://github.com/poolsideai/pool), using a maximum of 500 steps and sandboxed execution with 8 GB RAM/2 CPUs (with the exception of Terminal-Bench 2.0; see below). The same sampling parameters were used for all benchmarking: temperature=0.7 and top_k=20. Some base task images and verifiers were patched to fix infrastructure reliability issues inherent in task setup, such as rate limits on third-party dependencies in external registries used by the verifier. More details on these updates and other findings will follow in a future technical blog post.
+
+ - SWE-bench Verified: mean pass@1 averaged over 4 runs.
+ - SWE-bench Multilingual: mean pass@1 averaged over 7 runs.
+ - SWE-bench Pro: mean pass@1 averaged over 3 runs.
+ - Terminal-Bench 2.0: mean pass@1 averaged over 5 runs, with 48 GB RAM/32 CPUs.
+
+ </details>
+
+ ## Usage
+
+ Laguna XS.2-INT4 has launch-day support in vLLM and Transformers.
+
+ The fastest way to get started is with our API, either directly or via OpenRouter.
+
+ > [!NOTE]
+ > For complete usage instructions, see the main [Laguna XS.2 model card](https://huggingface.co/poolside/Laguna-XS.2).
+
+ ### Local deployment
+
+ Laguna XS.2-INT4 is supported in vLLM and Transformers. For the best experience on a local machine, use Laguna XS.2 with Ollama (which has MLX support) or with the mlx-lm framework.
+
+ #### vLLM
+
+ The full vLLM recipe is on the main [Laguna XS.2 model card](https://huggingface.co/poolside/Laguna-XS.2). Quantization is detected automatically from `quantization_config` in this checkpoint, so the same command works with `poolside/Laguna-XS.2-INT4` substituted for the model ID; no extra flags are required. A minimal sketch follows (see the main model card for the authoritative recipe).
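+
+ ```shell
+ vllm serve poolside/Laguna-XS.2-INT4 \
+     --max-model-len 131072 \
+     --reasoning-parser poolside_v1 \
+     --tool-call-parser poolside_v1
+ ```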
+
+ > [!NOTE]
+ > During testing, we found that models with FP8-quantized KV caches can produce scrambled output when deployed on non-Hopper GPUs. We are actively investigating this issue with the vLLM team; in the meantime, you can work around it by explicitly disabling the FP8 KV cache (Laguna XS.2 has 40 layers, so list every layer in `--kv-cache-dtype-skip-layers`):
+ >
+ > ```shell
+ > vllm serve poolside/Laguna-XS.2-INT4 \
+ >     --kv-cache-dtype-skip-layers $(seq 0 39) \
+ >     --max-model-len 131072 \
+ >     --reasoning-parser poolside_v1 \
+ >     --tool-call-parser poolside_v1
+ > ```
+ >
+ > The [BF16 checkpoint](https://huggingface.co/poolside/Laguna-XS.2) is unaffected, as it does not declare an FP8 KV cache.
+
+ #### Transformers
+
+ The full Transformers recipe is on the main [Laguna XS.2 model card](https://huggingface.co/poolside/Laguna-XS.2). Substitute `poolside/Laguna-XS.2-INT4` for the model ID; quantization is detected automatically from `quantization_config`.
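+
+ A minimal sketch of that substitution (the prompt is illustrative; `trust_remote_code=True` is needed because this checkpoint ships custom code via `auto_map` in its `config.json`):
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "poolside/Laguna-XS.2-INT4"
+ tok = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+ # quantization_config in config.json is picked up automatically; no extra arguments.
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     dtype=torch.bfloat16,
+     device_map="auto",
+     trust_remote_code=True,
+ )
+
+ inputs = tok.apply_chat_template(
+     [{"role": "user", "content": "Write a retry wrapper with exponential backoff."}],
+     add_generation_prompt=True,
+     return_tensors="pt",
+ ).to(model.device)
+ out = model.generate(inputs, max_new_tokens=256)
+ print(tok.decode(out[0][inputs.shape[-1]:]))
+ ```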
+
+ #### Ollama
+
+ Visit [Ollama's model library](https://ollama.com/library/laguna-xs.2) to pull the model to your local machine.
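+
+ For example (assuming the library tag matches the URL above):
+
+ ```shell
+ ollama run laguna-xs.2
+ ```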
+
+ ## Controlling reasoning
+
+ Laguna XS.2 has native reasoning support and is designed to work best with *preserved thinking*, where `reasoning` content from prior assistant messages is preserved in the message history. This model will generally reason before calling tools and between tool calls.
+
+ <details>
+ <summary>Expand for example</summary>
+
+ ```python
+ import json
+ from openai import OpenAI
+
+ client = OpenAI(
+     base_url="https://inference.poolside.ai/v1",
+     api_key="...",
+ )
+
+ model = "poolside/laguna-xs.2"
+
+ tools = [{"type": "function", "function": {
+     "name": "shell",
+     "description": "Execute a bash command and return the output.",
+     "parameters": {"type": "object", "properties": {"cmd": {"type": "string"}}, "required": ["cmd"]},
+ }}]
+
+ messages = [
+     {"role": "system", "content": "You are a coding agent with access to a shell tool."},
+     {"role": "user", "content": "Run uname -a"},
+ ]
+
+ # Thinking is enabled by default when the server sets --default-chat-template-kwargs {"enable_thinking": True}
+ # When using the Poolside API (https://inference.poolside.ai/v1), this flag is set by default
+ response = client.chat.completions.create(
+     model=model,
+     messages=messages,
+     tools=tools,
+     stream=True,
+ )
+
+ reasoning, content, tool_calls = "", "", []
+ for chunk in response:
+     delta = chunk.choices[0].delta
+     if hasattr(delta, "reasoning_content") and delta.reasoning_content:
+         reasoning += delta.reasoning_content
+     if hasattr(delta, "content") and delta.content:
+         content += delta.content
+     if hasattr(delta, "tool_calls") and delta.tool_calls:
+         for tc in delta.tool_calls:
+             if tc.index >= len(tool_calls):
+                 tool_calls.append({"id": tc.id, "function": {"name": "", "arguments": ""}})
+             if tc.function.name:
+                 tool_calls[tc.index]["function"]["name"] = tc.function.name
+             if tc.function.arguments:
+                 tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+
+ print(f"Reasoning: {reasoning}\nContent: {content}\nTool calls: {tool_calls}\n")
+
+ # Return reasoning in the next request for best performance
+ messages.append({
+     "role": "assistant",
+     "content": content,
+     "reasoning_content": reasoning,
+     "tool_calls": [{"id": tc["id"], "type": "function", "function": tc["function"]} for tc in tool_calls]
+ })
+
+ messages.append({
+     "role": "tool",
+     "tool_call_id": tool_calls[0]["id"],
+     "content": json.dumps({"stdout": "Darwin arm64", "exit_code": "0"})
+ })
+
+ response = client.chat.completions.create(
+     model=model,
+     messages=messages,
+     tools=tools,
+     stream=True,
+ )
+
+ reasoning, content = "", ""
+ for chunk in response:
+     delta = chunk.choices[0].delta
+     if hasattr(delta, "reasoning_content") and delta.reasoning_content:
+         reasoning += delta.reasoning_content
+     if hasattr(delta, "content") and delta.content:
+         content += delta.content
+
+ print(f"Reasoning: {reasoning}\nContent: {content}")
+ ```
+
+ </details>
+
+ ### Disabling reasoning
+
+ You can disable thinking by setting `enable_thinking` to `False` in a request, or by not providing `--default-chat-template-kwargs {"enable_thinking": True}` (or equivalent) when starting the server.
+
+ <details>
+ <summary>Expand for example</summary>
+
+ ```python
+ from openai import OpenAI
+ client = OpenAI()
+
+ completion = client.chat.completions.create(
+     model="poolside/laguna-xs.2",
+     messages=[
+         {"role": "user", "content": "Write a retry wrapper with exponential backoff."}
+     ],
+     extra_body={
+         "chat_template_kwargs": { "enable_thinking": False },
+     },
+     stream=True
+ )
+
+ for chunk in completion:
+     print(chunk.choices[0].delta)
+ ```
+
+ </details>
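+
+ You can also inspect what the chat template renders in each mode offline; this is a minimal sketch (extra keyword arguments such as `enable_thinking` are forwarded into the template, and `trust_remote_code=True` is a precaution for this repository's custom code):
+
+ ```python
+ from transformers import AutoTokenizer
+
+ tok = AutoTokenizer.from_pretrained("poolside/Laguna-XS.2-INT4", trust_remote_code=True)
+ messages = [{"role": "user", "content": "Run uname -a"}]
+
+ # With thinking enabled the generation prompt ends in <think>; without it, in </think>.
+ for thinking in (True, False):
+     prompt = tok.apply_chat_template(
+         messages,
+         add_generation_prompt=True,
+         tokenize=False,
+         enable_thinking=thinking,
+     )
+     print(repr(prompt[-40:]))
+ ```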
+
+ For agentic coding use cases, we recommend enabling thinking and preserving reasoning in the message history, as outlined in the [Controlling reasoning](#controlling-reasoning) section.
+
+ ## License
+
+ This model is licensed under the [Apache 2.0 License](https://huggingface.co/poolside/Laguna-XS.2-INT4/blob/main/LICENSE.md).
+
+ ## Intended and Responsible Use
+
+ Laguna XS.2-INT4 is designed for software engineering and agentic coding use cases, and you are responsible for confirming that it is appropriate for your intended application. Laguna XS.2-INT4 is subject to the [Apache 2.0 License](https://huggingface.co/poolside/Laguna-XS.2-INT4/blob/main/LICENSE.md) and should be used consistently with Poolside's [Acceptable Use Policy](https://poolside.ai/legal/acceptable-use-policy). We advise against circumventing Laguna XS.2-INT4's safety guardrails without implementing substantially equivalent mitigations appropriate for your use case.
+
+ Please report security vulnerabilities or safety concerns to [security@poolside.ai](mailto:security@poolside.ai).
chat_template.jinja ADDED
@@ -0,0 +1,132 @@
+ {#- Iteration on laguna_glm_thinking_v5/chat_template.jinja -#}
+ {#- Adds a default system message (used when no system message is provided in `messages`). -#}
+ {{- "〈|EOS|〉" -}}
+ {%- set enable_thinking = enable_thinking | default(false) -%}
+ {%- set render_assistant_messages_raw = render_assistant_messages_raw | default(false) -%}
+ {%- set add_generation_prompt = add_generation_prompt | default(false) -%}
+
+ {#- ───── header (system message) ───── -#}
+ {%- set system_message = "You are a helpful, conversationally-fluent assistant made by Poolside. You are here to be helpful to users through natural language conversations." -%}
+ {%- if messages and messages[0].role == "system" -%}
+ {%- set system_message = messages[0].content -%}
+ {%- endif -%}
+
+ {%- if (system_message and system_message.strip()) or tools -%}
+ {{- "<system>\n" -}}
+
+ {%- if system_message and system_message.strip() -%}
+ {{- "\n" -}}
+ {{- system_message.rstrip() -}}
+ {%- endif -%}
+
+ {%- if tools -%}
+ {{- "\n\n### Tools\n\n" -}}
+ {%- set ns = namespace(tool_string="You may call functions to assist with the user query.\n"
+ ~ "All available function signatures are listed below:\n"
+ ~ "<available_tools>\n") -%}
+ {%- for tool in tools -%}
+ {%- set ns.tool_string = ns.tool_string ~ (tool | tojson) ~ "\n" -%}
+ {%- endfor -%}
+ {%- if enable_thinking -%}
+ {%- set tool_string = ns.tool_string + "</available_tools>\n\n" ~
+ "Wrap your thinking in '<think>', '</think>' tags, followed by a function call. For each function call, return an unescaped XML-like object with function name and arguments within '<tool_call>' and '</tool_call>' tags, like here:\n" ~
+ "<think> your thoughts here </think>\n" ~
+ "<tool_call>function-name\n<arg_key>argument-key</arg_key>\n<arg_value>value-of-argument-key</arg_value>\n" ~
+ "</tool_call>" -%}
+ {%- else -%}
+ {%- set tool_string = ns.tool_string + "</available_tools>\n\n" ~
+ "For each function call, return an unescaped XML-like object " ~
+ "with function name and arguments within '<tool_call>' and '</tool_call>' tags, like here:\n" ~
+ "<tool_call>function-name\n<arg_key>argument-key</arg_key>\n<arg_value>value-of-argument-key</arg_value>\n" ~
+ "</tool_call>" -%}
+ {%- endif -%}
+ {{- tool_string -}}
+ {%- endif -%}
+
+ {{- "\n</system>\n" -}}
+ {%- endif -%}
+
+ {#- ───── main loop ───── -#}
+ {%- for message in messages -%}
+ {%- set content = message.content if message.content is string else "" -%}
+ {%- if message.role == "user" -%}
+ {{- "<user>\n" + content + "\n</user>\n" -}}
+ {%- elif message.role == "assistant" -%}
+ {%- generation -%}
+ {{- "<assistant>\n" -}}
+ {%- if render_assistant_messages_raw -%}
+ {#- Raw mode: prepend the generation prompt token, then dump content verbatim. -#}
+ {#- The generation prompt is <think> when enable_thinking, </think> otherwise. -#}
+ {#- Only prepend if content doesn't already start with it. -#}
+ {%- if enable_thinking -%}
+ {%- if not content.startswith('<think>') -%}
+ {{- '<think>' -}}
+ {%- endif -%}
+ {%- else -%}
+ {%- if not content.startswith('</think>') -%}
+ {{- '</think>' -}}
+ {%- endif -%}
+ {%- endif -%}
+ {{- content -}}
+ {#- Append closing tag if content doesn't already end with it. -#}
+ {%- if not content.endswith('</assistant>\n') and not content.endswith('</assistant>') -%}
+ {{- '\n</assistant>' -}}
+ {%- endif -%}
+ {{- "\n" -}}
+ {%- else -%}
+ {#- Extract reasoning content from message.reasoning (vLLM field name) or message.reasoning_content, or from <think> tags -#}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning is string %}
+ {%- set reasoning_content = message.reasoning %}
+ {%- elif message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- endif %}
+ {#- Always strip <think> tags from content if present to avoid duplication -#}
+ {%- if '</think>' in content %}
+ {%- if not reasoning_content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {#- Display reasoning content for all messages -#}
+ {%- if reasoning_content -%}
+ {{- '<think>\n' + reasoning_content.strip() + '\n</think>\n' -}}
+ {%- else -%}
+ {{- '</think>\n' -}}
+ {%- endif -%}
+ {#- Display main content -#}
+ {%- if content.strip() -%}
+ {{- content.strip() ~ "\n" -}}
+ {%- endif -%}
+ {%- if message.tool_calls -%}
+ {%- for tool_call in message.tool_calls -%}
+ {%- set function_data = tool_call.function -%}
+ {{- '<tool_call>' + function_data.name }}
+ {% set _args = function_data.arguments %}
+ {%- for k, v in _args.items() -%}
+ {{- "<arg_key>" ~ k ~ "</arg_key>\n" -}}
+ {{- "<arg_value>"}}{{ v | tojson(ensure_ascii=False) if v is not string else v }}{{ "</arg_value>\n" -}}
+ {%- endfor -%}
+ {{- "</tool_call>\n" -}}
+ {%- endfor -%}
+ {%- endif -%}
+ {{- "</assistant>\n" -}}
+ {%- endif -%}
+ {%- endgeneration -%}
+ {%- elif message.role == "tool" -%}
+ {{- "<tool_response>\n" + content + "\n</tool_response>\n" -}}
+ {%- elif message.role == "system" and loop.index0 != 0 -%}
+ {#- Render additional system messages (skip the first one which is handled separately in the header) -#}
+ {{- "<system>\n" + content + "\n</system>\n" -}}
+ {%- endif -%}
+ {%- endfor -%}
+ {#- ───── generation prompt ───── -#}
+ {%- if add_generation_prompt -%}
+ {{- "<assistant>\n" -}}
+ {#- ───── Include reasoning mode directive ───── -#}
+ {%- if not enable_thinking %}
+ {{- '</think>' -}}
+ {%- else %}
+ {{- '<think>' -}}
+ {%- endif %}
+ {%- endif -%}
config.json ADDED
@@ -0,0 +1,306 @@
+ {
+   "architectures": [
+     "LagunaForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_laguna.LagunaConfig",
+     "AutoModelForCausalLM": "modeling_laguna.LagunaForCausalLM"
+   },
+   "model_type": "laguna",
+   "vocab_size": 100352,
+   "hidden_size": 2048,
+   "intermediate_size": 8192,
+   "num_hidden_layers": 40,
+   "num_attention_heads": 48,
+   "num_key_value_heads": 8,
+   "head_dim": 128,
+   "max_position_embeddings": 131072,
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "rms_norm_eps": 1e-06,
+   "num_experts": 256,
+   "num_experts_per_tok": 8,
+   "moe_intermediate_size": 512,
+   "shared_expert_intermediate_size": 512,
+   "router_aux_loss_coef": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": [
+     2,
+     24
+   ],
+   "pad_token_id": 9,
+   "tie_word_embeddings": false,
+   "use_cache": true,
+   "torch_dtype": "bfloat16",
+   "gating": true,
+   "sliding_window": 512,
+   "rope_parameters": {
+     "full_attention": {
+       "rope_theta": 500000.0,
+       "rope_type": "yarn",
+       "factor": 32.0,
+       "original_max_position_embeddings": 4096,
+       "beta_slow": 1.0,
+       "beta_fast": 64.0,
+       "attention_factor": 1.0,
+       "partial_rotary_factor": 0.5
+     },
+     "sliding_attention": {
+       "rope_type": "default",
+       "rope_theta": 10000.0,
+       "partial_rotary_factor": 1.0
+     },
+     "original_max_position_embeddings": 4096
+   },
+   "layer_types": [
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention"
+   ],
+   "moe_apply_router_weight_on_input": false,
+   "partial_rotary_factor": 0.5,
+   "mlp_layer_types": [
+     "dense",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse",
+     "sparse"
+   ],
+   "use_bidirectional_attention": false,
+   "moe_routed_scaling_factor": 2.5,
+   "num_attention_heads_per_layer": [
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64
+   ],
+   "quantization_config": {
+     "config_groups": {
+       "group_0": {
+         "format": "pack-quantized",
+         "input_activations": null,
+         "output_activations": null,
+         "targets": [
+           "re:.*layers\\.([1-9]|[12]\\d|30)\\..*(w[1-3]|gate_proj|up_proj|down_proj)$"
+         ],
+         "weights": {
+           "actorder": null,
+           "block_structure": null,
+           "dynamic": false,
+           "group_size": 128,
+           "num_bits": 4,
+           "observer": "memoryless_minmax",
+           "observer_kwargs": {},
+           "scale_dtype": null,
+           "strategy": "group",
+           "symmetric": true,
+           "type": "int",
+           "zp_dtype": null
+         }
+       },
+       "group_1": {
+         "format": "pack-quantized",
+         "input_activations": null,
+         "output_activations": null,
+         "targets": [
+           "re:.*layers\\.3[1-9]\\..*(w[1-3]|gate_proj|up_proj|down_proj)$"
+         ],
+         "weights": {
+           "actorder": null,
+           "block_structure": null,
+           "dynamic": false,
+           "group_size": 128,
+           "num_bits": 8,
+           "observer": "memoryless_minmax",
+           "observer_kwargs": {},
+           "scale_dtype": null,
+           "strategy": "group",
+           "symmetric": true,
+           "type": "int",
+           "zp_dtype": null
+         }
+       }
+     },
+     "format": "pack-quantized",
+     "global_compression_ratio": null,
+     "ignore": [
+       "lm_head",
+       "model.layers.0.mlp.gate_proj",
+       "model.layers.0.mlp.up_proj",
+       "model.layers.0.mlp.down_proj",
+       "re:.*\\.self_attn\\.q_proj$",
+       "re:.*\\.self_attn\\.k_proj$",
+       "re:.*\\.self_attn\\.v_proj$",
+       "re:.*\\.self_attn\\.o_proj$",
+       "re:.*\\.self_attn\\.g_proj$",
+       "re:.*\\.mlp\\.gate$",
+       "re:.*\\.mlp\\.shared_expert\\.gate_proj$",
+       "re:.*\\.mlp\\.shared_expert\\.up_proj$",
+       "re:.*\\.mlp\\.shared_expert\\.down_proj$"
+     ],
+     "kv_cache_scheme": {
+       "actorder": null,
+       "block_structure": null,
+       "dynamic": false,
+       "group_size": null,
+       "num_bits": 8,
+       "observer": "minmax",
+       "observer_kwargs": {},
+       "scale_dtype": null,
+       "strategy": "tensor",
+       "symmetric": true,
+       "type": "float",
+       "zp_dtype": null
+     },
+     "quant_method": "compressed-tensors",
+     "quantization_status": "compressed",
+     "sparsity_config": {},
+     "transform_config": {
+       "config_groups": {
+         "R1": {
+           "apply": [
+             {
+               "ignore": [],
+               "inverse": false,
+               "location": "weight_output",
+               "targets": [
+                 "re:.*embed_tokens$",
+                 "re:.*o_proj$",
+                 "re:.*down_proj$"
+               ]
+             },
+             {
+               "ignore": [],
+               "inverse": true,
+               "location": "weight_input",
+               "targets": [
+                 "re:.*q_proj$",
+                 "re:.*k_proj$",
+                 "re:.*v_proj$",
+                 "re:.*gate_proj$",
+                 "re:.*up_proj$",
+                 "re:.*mlp.gate$",
+                 "re:.*g_proj$",
+                 "re:.*lm_head$"
+               ]
+             }
+           ],
+           "head_dim": 128,
+           "precision": "torch.float64",
+           "randomize": false,
+           "requires_grad": false,
+           "type": "hadamard"
+         }
+       }
+     },
+     "version": "0.14.1.dev11+gf2ee47b"
+   }
+ }
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ # Copyright 2026 Poolside and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import Any, Literal
+
+ from huggingface_hub.dataclasses import strict
+
+ from transformers.configuration_utils import PreTrainedConfig
+ from transformers.modeling_rope_utils import RopeParameters
+ from transformers.utils import auto_docstring
+
+
+ @auto_docstring(checkpoint="poolside/laguna-XS.2")
+ @strict
+ class LagunaConfig(PreTrainedConfig):
+     r"""
+     partial_rotary_factor (`float`, *optional*):
+         Fraction of ``head_dim`` to rotate. Folded into each ``rope_parameters[layer_type]``
+         entry by ``__post_init__``.
+     num_attention_heads_per_layer (`list[int]`, *optional*):
+         Per-layer override for ``num_attention_heads``. Length must equal ``num_hidden_layers``.
+     mlp_layer_types (`list[str]`, *optional*):
+         Per-layer MLP type — ``"dense"`` or ``"sparse"``. Length must equal
+         ``num_hidden_layers``. Defaults to first layer dense, rest sparse.
+     moe_routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+         Scalar applied to the routed-expert output before combining with the shared-expert output.
+     moe_apply_router_weight_on_input (`bool`, *optional*, defaults to `False`):
+         Whether to apply router weights to the MoE input rather than the output. Not supported
+         in transformers yet; ``True`` will raise a ``NotImplementedError`` for now.
+     moe_router_logit_softcapping (`float`, *optional*, defaults to 0.0):
+         Scaling factor used when applying tanh softcapping to the MoE router logits.
+
+     Example:
+
+     ```python
+     >>> from transformers import LagunaModel, LagunaConfig
+
+     >>> configuration = LagunaConfig()
+     >>> model = LagunaModel(configuration)
+     >>> configuration = model.config
+     ```
+     """
+
+     model_type = "laguna"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",
+         "layers.*.self_attn.k_proj": "colwise",
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.g_proj": "colwise",
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.self_attn.q_norm": "replicated_with_grad_allreduce",
+         "layers.*.self_attn.k_norm": "replicated_with_grad_allreduce",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+         "layers.*.mlp.experts.gate_up_proj": "packed_colwise",
+         "layers.*.mlp.experts.down_proj": "rowwise",
+         "layers.*.mlp.experts": "moe_tp_experts",
+         "layers.*.mlp.shared_experts.gate_proj": "colwise",
+         "layers.*.mlp.shared_experts.up_proj": "colwise",
+         "layers.*.mlp.shared_experts.down_proj": "rowwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+
+     # Qwen2Moe-inherited defaults we want to override for Laguna's typical shape.
+     vocab_size: int = 100352
+     hidden_size: int = 2048
+     intermediate_size: int = 8192
+     num_hidden_layers: int = 40
+     num_attention_heads: int = 48
+     num_key_value_heads: int = 8
+     hidden_act: str = "silu"
+     max_position_embeddings: int = 131072
+     initializer_range: float = 0.02
+     rms_norm_eps: float = 1e-6
+     use_cache: bool = True
+     tie_word_embeddings: bool = False
+     rope_parameters: RopeParameters | dict | None = None
+     sliding_window: int | None = None
+     attention_dropout: float | int = 0.0
+     moe_intermediate_size: int = 512
+     shared_expert_intermediate_size: int = 512
+     num_experts_per_tok: int = 8
+     num_experts: int = 256
+     output_router_logits: bool = False
+     router_aux_loss_coef: float = 0.001
+     layer_types: list[str] | None = None
+     pad_token_id: int | None = None
+     bos_token_id: int | None = None
+     eos_token_id: int | list[int] | None = None
+
+     # Laguna-specific attention
+     head_dim: int = 128
+     attention_bias: bool = False
+     partial_rotary_factor: float | None = None
+     num_attention_heads_per_layer: list[int] | None = None
+     # Laguna-specific MoE
+     mlp_layer_types: list[str] | None = None
+     moe_routed_scaling_factor: float = 1.0
+     moe_apply_router_weight_on_input: bool = False
+     moe_router_logit_softcapping: float = 0.0
+
+     def __post_init__(self, **kwargs):
+         if self.layer_types is None:
+             self.layer_types = ["full_attention"] * self.num_hidden_layers
+         if self.mlp_layer_types is None:
+             self.mlp_layer_types = ["dense"] + ["sparse"] * (self.num_hidden_layers - 1)
+         if self.num_attention_heads_per_layer is None:
+             self.num_attention_heads_per_layer = [self.num_attention_heads] * self.num_hidden_layers
+
+         default_rope_params: dict[Literal["full_attention", "sliding_attention"], dict[str, Any]] = {
+             "full_attention": {"rope_type": "default", "rope_theta": 500000.0},
+             "sliding_attention": {"rope_type": "default", "rope_theta": 10000.0},
+         }
+         if self.rope_parameters is None:
+             self.rope_parameters = default_rope_params
+
+         self._normalize_rope_parameters()
+         # Skip ``Qwen2MoeConfig.__post_init__`` — it references ``mlp_only_layers`` /
+         # ``use_sliding_window`` / ``max_window_layers`` which Laguna drops above.
+         super().__post_init__(**kwargs)
+
+     def _normalize_rope_parameters(self):
+         """Coerce ``rope_parameters`` to the nested ``{layer_type: {...}}`` shape.
+
+         Accepts an already-nested dict as-is, or a flat dict that gets broadcast to every
+         layer type. A top-level ``partial_rotary_factor`` is folded into each sub-dict as
+         a default.
+         """
+         layer_types = set(self.layer_types)
+         rope_params = self.rope_parameters or {}
+         is_nested = isinstance(rope_params, dict) and any(k in layer_types for k in rope_params)
+         if is_nested:
+             nested = {lt: dict(rope_params.get(lt, {})) for lt in layer_types}
+         else:
+             nested = {lt: dict(rope_params) for lt in layer_types}
+
+         if self.partial_rotary_factor is not None:
+             for params in nested.values():
+                 params.setdefault("partial_rotary_factor", self.partial_rotary_factor)
+
+         for params in nested.values():
+             params.setdefault("rope_type", "default")
+
+         self.rope_parameters = nested
+         # Null the top-level field now that its value lives in each sub-dict — otherwise
+         # ``standardize_rope_params`` would overwrite per-type values with the global one.
+         self.partial_rotary_factor = None
+
+     def convert_rope_params_to_dict(self, **kwargs):
+         # No need to handle BC for new models, because they have no old-format `rope_scaling`
+         return kwargs
+
+     def _validate_yarn_rope_parameters(self, rope_parameters: dict, ignore_keys=None):
+         """Override: parent reads ``self.rope_parameters["original_max_position_embeddings"]``
+         for its post-hoc factor sanity-check, which works for flat rope configs but raises
+         ``KeyError`` when ``self.rope_parameters`` is the Laguna/Gemma3-style per-layer-type
+         map (its keys are layer types like ``"full_attention"``). Fix locally by reading
+         from the per-call ``rope_parameters`` dict that ``validate_rope`` already passes in.
+         """
+         # Delegate to parent for the shared checks by temporarily swapping in a flat
+         # ``self.rope_parameters`` that has the key the parent expects. Cheapest way to
+         # share the parent's logic without reimplementing it here.
+         flat = getattr(self, "rope_parameters", None)
+         self.rope_parameters = rope_parameters
+         try:
+             super()._validate_yarn_rope_parameters(rope_parameters, ignore_keys=ignore_keys)
+         finally:
+             self.rope_parameters = flat
+
+     def validate_architecture(self):
+         """Part of ``@strict``-powered validation."""
+         if self.moe_apply_router_weight_on_input:
+             raise NotImplementedError(
+                 "moe_apply_router_weight_on_input=True is not yet supported in the "
+                 "transformers implementation of Laguna."
+             )
+         if (
+             self.num_attention_heads_per_layer is not None
+             and len(self.num_attention_heads_per_layer) != self.num_hidden_layers
+         ):
+             raise ValueError(
+                 f"num_attention_heads_per_layer length ({len(self.num_attention_heads_per_layer)}) "
+                 f"must equal num_hidden_layers ({self.num_hidden_layers})."
+             )
+         if len(self.layer_types) != self.num_hidden_layers:
+             raise ValueError(
+                 f"layer_types length ({len(self.layer_types)}) "
+                 f"must equal num_hidden_layers ({self.num_hidden_layers})."
+             )
+         if len(self.mlp_layer_types) != self.num_hidden_layers:
+             raise ValueError(
+                 f"mlp_layer_types length ({len(self.mlp_layer_types)}) "
+                 f"must equal num_hidden_layers ({self.num_hidden_layers})."
+             )
+
+
+ __all__ = ["LagunaConfig"]
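
As a quick illustration of `_normalize_rope_parameters` above: a flat rope dict is broadcast to every layer type, and a top-level `partial_rotary_factor` is folded into each entry as a default. A hedged sketch (it assumes the `@strict` config accepts these fields as keyword overrides, as the dataclass-style defaults above suggest):

```python
from configuration_laguna import LagunaConfig

cfg = LagunaConfig(
    num_hidden_layers=2,
    layer_types=["full_attention", "sliding_attention"],
    rope_parameters={"rope_type": "default", "rope_theta": 10000.0},  # flat form
    partial_rotary_factor=0.5,
)
# The flat dict is broadcast to {"full_attention": {...}, "sliding_attention": {...}},
# each entry carrying partial_rotary_factor=0.5; the top-level field is then nulled.
print(cfg.rope_parameters)
print(cfg.partial_rotary_factor)  # None
```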
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 2,
+   "do_sample": true,
+   "eos_token_id": [
+     2,
+     24
+   ],
+   "max_new_tokens": 2048,
+   "pad_token_id": 9,
+   "temperature": 0.7,
+   "top_p": 0.9
+ }
model-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43045bf9fa3f727e4775b787f94584e04b0bdd217eee4125f1b6c1a07a8556cd
+ size 5120288184
model-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e7402da8b04a6399e4a7311ae1a691e7a1f0a695fbba94efe1857e4ea5eb358
+ size 5122059880
model-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92304f2f9d0a27e4e0d201f2dec60ce569e497e0b52095a3a1c976cf2dad1536
+ size 5122069488
model-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8093a961d1574ce79d29bc650238b53669199edaff5885a8e4e688ff3297502
+ size 5120927040
model-00005-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:773d8bfc566c4c10f449c7dc97d81924b9504586a7f538f2791345339ad65a24
+ size 3411894936
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_laguna.py ADDED
@@ -0,0 +1,755 @@
1
+ # Copyright 2026 Poolside and the HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from collections.abc import Callable
16
+ from typing import Optional
17
+
18
+ import torch
19
+ import torch.nn.functional as F
20
+ from torch import nn
21
+
22
+ from transformers import initialization as init
23
+ from transformers.activations import ACT2FN
24
+ from transformers.cache_utils import Cache, DynamicCache
25
+ from transformers.generation import GenerationMixin
26
+ from transformers.integrations import use_experts_implementation, use_kernel_forward_from_hub, use_kernelized_func
27
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
28
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
29
+ from transformers.modeling_layers import GradientCheckpointingLayer
30
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
31
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
32
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
33
+ from transformers.processing_utils import Unpack
34
+ from transformers.utils import auto_docstring, can_return_tuple
35
+ from transformers.utils.generic import TransformersKwargs, maybe_autocast
36
+ from transformers.utils.output_capturing import OutputRecorder, capture_outputs
37
+ from .configuration_laguna import LagunaConfig
38
+
39
+
40
+ @use_kernel_forward_from_hub("RMSNorm")
41
+ class LagunaRMSNorm(nn.Module):
42
+ def __init__(self, hidden_size, eps: float = 1e-6) -> None:
43
+ """
44
+ LagunaRMSNorm is equivalent to T5LayerNorm
45
+ """
46
+ super().__init__()
47
+ self.weight = nn.Parameter(torch.ones(hidden_size))
48
+ self.variance_epsilon = eps
49
+
50
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
51
+ input_dtype = hidden_states.dtype
52
+ hidden_states = hidden_states.to(torch.float32)
53
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
54
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
55
+ return self.weight * hidden_states.to(input_dtype)
56
+
57
+ def extra_repr(self):
58
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
59
+
60
+
61
+ class LagunaRotaryEmbedding(nn.Module):
62
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
63
+
64
+ def __init__(self, config: LagunaConfig, device=None, layer_type=None):
65
+ super().__init__()
66
+ self.max_seq_len_cached = config.max_position_embeddings
67
+ self.original_max_seq_len = config.max_position_embeddings
68
+
69
+ self.config = config
70
+
71
+ self.layer_types = list(set(config.layer_types))
72
+ self.rope_type = {}
73
+ for layer_type in self.layer_types:
74
+ rope_params = self.config.rope_parameters[layer_type]
75
+ if rope_params is None:
76
+ continue
77
+
78
+ self.rope_type[layer_type] = rope_params["rope_type"]
79
+ rope_init_fn: Callable = self.compute_default_rope_parameters
80
+ if self.rope_type[layer_type] != "default":
81
+ rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type[layer_type]]
82
+ curr_inv_freq, curr_attention_scaling = rope_init_fn(self.config, device, layer_type=layer_type)
83
+ self.register_buffer(f"{layer_type}_inv_freq", curr_inv_freq, persistent=False)
84
+ self.register_buffer(f"{layer_type}_original_inv_freq", curr_inv_freq.clone(), persistent=False)
85
+ setattr(self, f"{layer_type}_attention_scaling", curr_attention_scaling)
86
+
87
+ @staticmethod
88
+ def compute_default_rope_parameters(
89
+ config: LagunaConfig | None = None,
90
+ device: Optional["torch.device"] = None,
91
+ seq_len: int | None = None,
92
+ layer_type: str | None = None,
93
+ ) -> tuple["torch.Tensor", float]:
94
+ """
95
+ Computes the inverse frequencies according to the original RoPE implementation.
96
+ Args:
97
+ config ([`LagunaConfig`]):
98
+ The model configuration.
99
+ device (`torch.device`):
100
+ The device to use for initialization of the inverse frequencies.
101
+ seq_len (`int`, *optional*):
102
+ The current sequence length. Unused for this type of RoPE.
103
+ layer_type (`str`, *optional*):
104
+ The current layer type if the model has different RoPE parameters per type.
105
+ Should not be used unless `config.layer_types` is not `None`.
106
+ Returns:
107
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
108
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
109
+ """
110
+ base = config.rope_parameters[layer_type]["rope_theta"]
111
+ # Key difference from Gemma3: partial RoPE (only a fraction of each head dim is rotated)
112
+ partial_rotary_factor = config.rope_parameters[layer_type].get("partial_rotary_factor", 1.0)
113
+ head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
114
+ dim = int(head_dim * partial_rotary_factor)
115
+
116
+ attention_factor = 1.0 # Unused in this type of RoPE
117
+
118
+ # Compute the inverse frequencies
119
+ inv_freq = 1.0 / (
120
+ base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
121
+ )
122
+ return inv_freq, attention_factor
123
+
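+ # A minimal sketch of the computation above, assuming a hypothetical config with
+ # head_dim=8, rope_theta=10000.0 and partial_rotary_factor=0.5 (so only dim=4 of
+ # each head is rotated):
+ # >>> dim = int(8 * 0.5)
+ # >>> inv_freq = 1.0 / (10000.0 ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
+ # >>> inv_freq  # two frequencies; the remaining head dims pass through un-rotated
+ # tensor([1.0000, 0.0100])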
124
+ @torch.no_grad()
125
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
126
+ def forward(self, x, position_ids, layer_type=None):
127
+ inv_freq = getattr(self, f"{layer_type}_inv_freq")
128
+ attention_scaling = getattr(self, f"{layer_type}_attention_scaling")
129
+
130
+ inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
131
+ position_ids_expanded = position_ids[:, None, :].float()
132
+
133
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
134
+ with maybe_autocast(device_type=device_type, enabled=False): # Force float32
135
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
136
+ emb = torch.cat((freqs, freqs), dim=-1)
137
+ cos = emb.cos() * attention_scaling
138
+ sin = emb.sin() * attention_scaling
139
+
140
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
141
+
142
+
143
+ class LagunaMLP(nn.Module):
144
+ def __init__(self, config, intermediate_size=None):
145
+ super().__init__()
146
+ self.config = config
147
+ self.hidden_size = config.hidden_size
148
+ self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
149
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
150
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
151
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
152
+ self.act_fn = ACT2FN[config.hidden_act]
153
+
154
+ def forward(self, x):
155
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
156
+ return down_proj
157
+
158
+
159
+ class LagunaTopKRouter(nn.Module):
160
+ def __init__(self, config):
161
+ super().__init__()
162
+ self.top_k = config.num_experts_per_tok
163
+ self.num_experts = config.num_experts
164
+ self.hidden_dim = config.hidden_size
165
+ self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))
166
+ self.e_score_correction_bias = nn.Parameter(torch.zeros(config.num_experts), requires_grad=False)
167
+ self.router_logit_softcapping = config.moe_router_logit_softcapping
168
+
169
+ def forward(
170
+ self,
171
+ hidden_states: torch.Tensor,
172
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
173
+ hidden_states = hidden_states.reshape(-1, self.hidden_dim)
174
+ router_logits = F.linear(hidden_states, self.weight).float()
175
+ # Optional logits softcapping
176
+ if self.router_logit_softcapping > 0.0:
177
+ router_logits = torch.tanh(router_logits / self.router_logit_softcapping) * self.router_logit_softcapping
178
+ # Sigmoid instead of softmax normalization
179
+ routing_scores = torch.sigmoid(router_logits)
180
+
181
+ scores_for_selection = routing_scores + self.e_score_correction_bias.to(routing_scores.dtype)
182
+ _, selected_experts = torch.topk(scores_for_selection, self.top_k, dim=-1)
183
+ routing_weights = routing_scores.gather(-1, selected_experts)
184
+ routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
185
+ routing_weights = routing_weights.to(hidden_states.dtype)
186
+
187
+ return router_logits, routing_weights, selected_experts
188
+
189
+
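+ # A minimal sketch of the routing above (hypothetical toy tensors). Selection uses the
+ # bias-corrected scores, but the mixing weights come from the uncorrected sigmoid
+ # scores, renormalized over the selected experts:
+ # >>> scores = torch.sigmoid(router_logits)                # (num_tokens, num_experts)
+ # >>> _, idx = torch.topk(scores + bias, k=2, dim=-1)      # bias only affects which experts win
+ # >>> w = scores.gather(-1, idx)
+ # >>> w = w / w.sum(-1, keepdim=True)                      # rows now sum to 1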
190
+ @use_experts_implementation
191
+ class LagunaExperts(nn.Module):
192
+ """Collection of expert weights stored as 3D tensors."""
193
+
194
+ def __init__(self, config):
195
+ super().__init__()
196
+ self.num_experts = config.num_experts
197
+ self.hidden_dim = config.hidden_size
198
+ self.intermediate_dim = config.moe_intermediate_size
199
+ self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
200
+ self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
201
+ self.act_fn = ACT2FN[config.hidden_act]
202
+
203
+ def forward(
204
+ self,
205
+ hidden_states: torch.Tensor,
206
+ top_k_index: torch.Tensor,
207
+ top_k_weights: torch.Tensor,
208
+ ) -> torch.Tensor:
209
+ final_hidden_states = torch.zeros_like(hidden_states)
210
+ with torch.no_grad():
211
+ expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
212
+ expert_mask = expert_mask.permute(2, 1, 0)
213
+ expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
214
+
215
+ for expert_idx in expert_hit:
216
+ expert_idx = expert_idx[0]
217
+ if expert_idx == self.num_experts:
218
+ continue
219
+ top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
220
+ current_state = hidden_states[token_idx]
221
+ gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
222
+ current_hidden_states = self.act_fn(gate) * up
223
+ current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
224
+ current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
225
+ final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
226
+
227
+ return final_hidden_states
228
+
229
+
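+ # Shape sketch for the dispatch above, assuming num_experts=4, top_k=2 and 3 tokens:
+ # one_hot(top_k_index) has shape (3, 2, 4); after permute(2, 1, 0), expert_mask is
+ # (4, 2, 3), so expert_mask[e] marks, per (top-k slot, token) pair, whether expert e
+ # was selected. torch.where recovers the token rows each hit expert must process, and
+ # index_add_ scatters the weighted expert outputs back to their token positions.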
230
+ class LagunaSparseMoeBlock(nn.Module):
231
+ def __init__(self, config: LagunaConfig):
232
+ super().__init__()
233
+ self.experts = LagunaExperts(config)
234
+ self.gate = LagunaTopKRouter(config)
235
+ self.shared_experts = LagunaMLP(config, intermediate_size=config.shared_expert_intermediate_size)
236
+ self.routed_scaling_factor = config.moe_routed_scaling_factor
237
+
238
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
239
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
240
+ hidden_states = hidden_states.view(-1, hidden_dim)
241
+ shared_output = self.shared_experts(hidden_states)
242
+
243
+ _, routing_weights, selected_experts = self.gate(hidden_states)
244
+ hidden_states = self.experts(hidden_states, selected_experts, routing_weights)
245
+ # Additional scaling
246
+ hidden_states = hidden_states * self.routed_scaling_factor
247
+ hidden_states = hidden_states + shared_output
248
+
249
+ hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
250
+ return hidden_states
251
+
252
+
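+ # Per token, the block above computes
+ #   y = moe_routed_scaling_factor * sum_{e in top-k} w_e * expert_e(x) + shared_experts(x)
+ # i.e. the always-on shared MLP is added outside the routed scaling, so the scaling
+ # factor rebalances routed capacity against the shared path.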
253
+ def rotate_half(x):
254
+ """Rotates half the hidden dims of the input."""
255
+ x1 = x[..., : x.shape[-1] // 2]
256
+ x2 = x[..., x.shape[-1] // 2 :]
257
+ return torch.cat((-x2, x1), dim=-1)
258
+
259
+
260
+ # Adapted from transformers.models.glm.modular_glm.apply_rotary_pos_emb
261
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
262
+ """Applies Rotary Position Embedding to the query and key tensors.
263
+
264
+ Removes the interleaving of cos and sin from GLM
265
+
266
+ Args:
267
+ q (`torch.Tensor`): The query tensor.
268
+ k (`torch.Tensor`): The key tensor.
269
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
270
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
271
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
272
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
273
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
274
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
275
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
276
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
277
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
278
+ Returns:
279
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
280
+ """
281
+ cos = cos.unsqueeze(unsqueeze_dim)
282
+ sin = sin.unsqueeze(unsqueeze_dim)
283
+
284
+ # Keep half or full tensor for later concatenation
285
+ rotary_dim = cos.shape[-1]
286
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
287
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
288
+
289
+ # Apply rotary embeddings on the first half or full tensor
290
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
291
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
292
+
293
+ # Concatenate back to full shape
294
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
295
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
296
+ return q_embed, k_embed
297
+
298
+
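+ # Shape sketch for the partial rotation above: if head_dim=8 and cos/sin have a last
+ # dim of 4 (partial_rotary_factor=0.5), only q[..., :4] and k[..., :4] are rotated;
+ # the trailing halves pass through unchanged and are concatenated back, so output
+ # shapes always match the inputs.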
299
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
300
+ """
301
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
302
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
303
+ """
304
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
305
+ if n_rep == 1:
306
+ return hidden_states
307
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
308
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
309
+
310
+
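+ # Shape sketch for the GQA expansion above: with num_key_value_heads=2, n_rep=4 and
+ # (batch=1, seq=3, head_dim=8), a (1, 2, 3, 8) tensor expands to (1, 2, 4, 3, 8) and
+ # reshapes to (1, 8, 3, 8), duplicating each KV head 4 times to face 8 query heads.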
311
+ def eager_attention_forward(
312
+ module: nn.Module,
313
+ query: torch.Tensor,
314
+ key: torch.Tensor,
315
+ value: torch.Tensor,
316
+ attention_mask: torch.Tensor | None,
317
+ scaling: float,
318
+ dropout: float = 0.0,
319
+ **kwargs: Unpack[TransformersKwargs],
320
+ ):
321
+ key_states = repeat_kv(key, module.num_key_value_groups)
322
+ value_states = repeat_kv(value, module.num_key_value_groups)
323
+
324
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
325
+ if attention_mask is not None:
326
+ attn_weights = attn_weights + attention_mask
327
+
328
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
329
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
330
+ attn_output = torch.matmul(attn_weights, value_states)
331
+ attn_output = attn_output.transpose(1, 2).contiguous()
332
+
333
+ return attn_output, attn_weights
334
+
335
+
336
+ @use_kernelized_func(apply_rotary_pos_emb)
337
+ class LagunaAttention(nn.Module):
338
+ """Afmoe-style SWA/GQA attention with Laguna-specific gating and per-layer head count."""
339
+
340
+ def __init__(self, config: LagunaConfig, layer_idx: int, num_heads: int):
341
+ super().__init__()
342
+ # The number of heads is controlled via `config.num_attention_heads_per_layer`; the decoder layer passes in the value for this layer
343
+ self.num_heads = num_heads
344
+ self.config = config
345
+ self.layer_idx = layer_idx
346
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
347
+ self.num_key_value_groups = self.num_heads // config.num_key_value_heads
348
+ self.scaling = self.head_dim**-0.5
349
+ self.attention_dropout = config.attention_dropout
350
+ self.is_causal = True
351
+
352
+ # Per-layer head count: q_proj and o_proj are sized from self.num_heads rather than config.num_attention_heads.
353
+ self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
354
+ self.k_proj = nn.Linear(
355
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
356
+ )
357
+ self.v_proj = nn.Linear(
358
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
359
+ )
360
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
361
+ # layer_idx, num_heads, num_key_value_heads, num_key_value_groups and head_dim are set above;
362
+ # the attributes below are Laguna-specific (sliding-window flag, QK-norm, attention gate)
363
+ self.is_local_attention = config.layer_types[layer_idx] == "sliding_attention"
364
+ self.sliding_window = config.sliding_window if self.is_local_attention else None
365
+
366
+ self.q_norm = LagunaRMSNorm(self.head_dim, eps=config.rms_norm_eps)
367
+ self.k_norm = LagunaRMSNorm(self.head_dim, eps=config.rms_norm_eps)
368
+ self.g_proj = nn.Linear(config.hidden_size, self.num_heads, bias=False)
369
+
370
+ def forward(
371
+ self,
372
+ hidden_states: torch.Tensor,
373
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
374
+ attention_mask: torch.Tensor | None,
375
+ past_key_values: Cache | None = None,
376
+ **kwargs: Unpack[FlashAttentionKwargs],
377
+ ) -> tuple[torch.Tensor, torch.Tensor | None]:
378
+ input_shape = hidden_states.shape[:-1]
379
+ hidden_shape = (*input_shape, -1, self.head_dim)
380
+
381
+ query_states = self.q_proj(hidden_states).view(hidden_shape)
382
+ key_states = self.k_proj(hidden_states).view(hidden_shape)
383
+ value_states = self.v_proj(hidden_states).view(hidden_shape)
384
+
385
+ query_states = self.q_norm(query_states).transpose(1, 2)
386
+ key_states = self.k_norm(key_states).transpose(1, 2)
387
+ value_states = value_states.transpose(1, 2)
388
+
389
+ cos, sin = position_embeddings
390
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
391
+
392
+ if past_key_values is not None:
393
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx)
394
+
395
+ attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
396
+ self.config._attn_implementation, eager_attention_forward
397
+ )
398
+ attn_output, attn_weights = attention_interface(
399
+ self,
400
+ query_states,
401
+ key_states,
402
+ value_states,
403
+ attention_mask,
404
+ dropout=0.0 if not self.training else self.attention_dropout,
405
+ scaling=self.scaling,
406
+ sliding_window=self.sliding_window,
407
+ **kwargs,
408
+ )
409
+
410
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
411
+
412
+ gate = F.softplus(self.g_proj(hidden_states).float()).to(attn_output.dtype)
413
+ attn_output = (attn_output.view(*input_shape, -1, self.head_dim) * gate.unsqueeze(-1)).view(*input_shape, -1)
414
+
415
+ attn_output = self.o_proj(attn_output)
416
+ return attn_output, attn_weights
417
+
418
+
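+ # A note on the gating above: g_proj yields one scalar per head and token, and softplus
+ # keeps it positive and unbounded, so each head's output is rescaled per token before
+ # o_proj. A toy shape check (hypothetical sizes, batch=2, seq=5, num_heads=4):
+ # >>> gate = F.softplus(torch.randn(2, 5, 4))
+ # >>> gate.unsqueeze(-1).shape  # broadcasts over head_dim
+ # torch.Size([2, 5, 4, 1])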
419
+ class LagunaDecoderLayer(GradientCheckpointingLayer):
420
+ def __init__(self, config: LagunaConfig, layer_idx: int):
421
+ super().__init__()
422
+ self.hidden_size = config.hidden_size
423
+ self.self_attn = LagunaAttention(config, layer_idx, config.num_attention_heads_per_layer[layer_idx])
424
+ if config.mlp_layer_types[layer_idx] == "sparse":
425
+ self.mlp = LagunaSparseMoeBlock(config)
426
+ else:
427
+ self.mlp = LagunaMLP(config, intermediate_size=config.intermediate_size)
428
+ self.input_layernorm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
429
+ self.post_attention_layernorm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
430
+
431
+ def forward(
432
+ self,
433
+ hidden_states: torch.Tensor,
434
+ attention_mask: torch.Tensor | None = None,
435
+ position_ids: torch.LongTensor | None = None,
436
+ past_key_values: Cache | None = None,
437
+ use_cache: bool | None = False,
438
+ position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
439
+ **kwargs: Unpack[TransformersKwargs],
440
+ ) -> torch.Tensor:
441
+ residual = hidden_states
442
+ hidden_states = self.input_layernorm(hidden_states)
443
+ # Self Attention
444
+ hidden_states, _ = self.self_attn(
445
+ hidden_states=hidden_states,
446
+ attention_mask=attention_mask,
447
+ position_ids=position_ids,
448
+ past_key_values=past_key_values,
449
+ use_cache=use_cache,
450
+ position_embeddings=position_embeddings,
451
+ **kwargs,
452
+ )
453
+ hidden_states = residual + hidden_states
454
+
455
+ # Fully Connected
456
+ residual = hidden_states
457
+ hidden_states = self.post_attention_layernorm(hidden_states)
458
+ hidden_states = self.mlp(hidden_states)
459
+ hidden_states = residual + hidden_states
460
+ return hidden_states
461
+
462
+
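+ # The layer above is a standard pre-norm residual block:
+ #   h = x + Attn(RMSNorm(x));  y = h + MLP(RMSNorm(h))
+ # where MLP is either the dense LagunaMLP or the sparse MoE block, selected per layer
+ # via config.mlp_layer_types.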
463
+ @auto_docstring
464
+ class LagunaPreTrainedModel(PreTrainedModel):
465
+ config: LagunaConfig
466
+ base_model_prefix = "model"
467
+ supports_gradient_checkpointing = True
468
+ _no_split_modules = ["LagunaDecoderLayer"]
469
+ _skip_keys_device_placement = ["past_key_values"]
470
+ _supports_flash_attn = True
471
+ _supports_sdpa = True
472
+ _supports_flex_attn = True
473
+
474
+ _can_compile_fullgraph = True
475
+ _supports_attention_backend = True
476
+ _can_record_outputs = {
477
+ "router_logits": OutputRecorder(LagunaTopKRouter, index=0),
478
+ "hidden_states": LagunaDecoderLayer,
479
+ "attentions": LagunaAttention,
480
+ }
481
+
482
+ @torch.no_grad()
483
+ def _init_weights(self, module):
484
+ super()._init_weights(module)
485
+ std = self.config.initializer_range
486
+ if isinstance(module, LagunaExperts):
487
+ init.normal_(module.gate_up_proj, mean=0.0, std=std)
488
+ init.normal_(module.down_proj, mean=0.0, std=std)
489
+ elif isinstance(module, LagunaTopKRouter):
490
+ init.normal_(module.weight, mean=0.0, std=std)
491
+ if isinstance(module, LagunaTopKRouter):
492
+ torch.nn.init.zeros_(module.e_score_correction_bias)
493
+ elif isinstance(module, LagunaRotaryEmbedding):
494
+ for layer_type in module.layer_types:
495
+ rope_init_fn = module.compute_default_rope_parameters
496
+ if module.rope_type[layer_type] != "default":
497
+ rope_init_fn = ROPE_INIT_FUNCTIONS[module.rope_type[layer_type]]
498
+ curr_inv_freq, _ = rope_init_fn(module.config, layer_type=layer_type)
499
+ init.copy_(getattr(module, f"{layer_type}_inv_freq"), curr_inv_freq)
500
+ init.copy_(getattr(module, f"{layer_type}_original_inv_freq"), curr_inv_freq)
501
+
502
+
503
+ @auto_docstring
504
+ class LagunaModel(LagunaPreTrainedModel):
505
+ def __init__(self, config: LagunaConfig):
506
+ super().__init__(config)
507
+ self.padding_idx = config.pad_token_id
508
+ self.vocab_size = config.vocab_size
509
+
510
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
511
+ self.layers = nn.ModuleList(
512
+ [LagunaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
513
+ )
514
+ self.norm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
515
+ self.rotary_emb = LagunaRotaryEmbedding(config=config)
516
+ self.gradient_checkpointing = False
517
+
518
+ # Initialize weights and apply final processing
519
+ self.post_init()
520
+
521
+ @capture_outputs
522
+ @auto_docstring
523
+ def forward(
524
+ self,
525
+ input_ids: torch.LongTensor | None = None,
526
+ attention_mask: torch.Tensor | None = None,
527
+ position_ids: torch.LongTensor | None = None,
528
+ past_key_values: Cache | None = None,
529
+ inputs_embeds: torch.FloatTensor | None = None,
530
+ use_cache: bool | None = None,
531
+ **kwargs: Unpack[TransformersKwargs],
532
+ ) -> MoeModelOutputWithPast:
533
+ if (input_ids is None) ^ (inputs_embeds is not None):
534
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
535
+
536
+ if inputs_embeds is None:
537
+ inputs_embeds = self.embed_tokens(input_ids)
538
+
539
+ if use_cache and past_key_values is None:
540
+ past_key_values = DynamicCache(config=self.config)
541
+
542
+ if position_ids is None:
543
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
544
+ position_ids = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
545
+ position_ids = position_ids.unsqueeze(0)
546
+
547
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
548
+ mask_kwargs = {
549
+ "config": self.config,
550
+ "inputs_embeds": inputs_embeds,
551
+ "attention_mask": attention_mask,
552
+ "past_key_values": past_key_values,
553
+ "position_ids": position_ids,
554
+ }
555
+ mask_creation_functions = {
556
+ "full_attention": lambda: create_causal_mask(**mask_kwargs),
557
+ "sliding_attention": lambda: create_sliding_window_causal_mask(**mask_kwargs),
558
+ }
559
+ causal_mask_mapping = {}
560
+ for layer_type in set(self.config.layer_types):
561
+ causal_mask_mapping[layer_type] = mask_creation_functions[layer_type]()
562
+
563
+ hidden_states = inputs_embeds
564
+ position_embeddings = {}
565
+ for layer_type in set(self.config.layer_types):
566
+ position_embeddings[layer_type] = self.rotary_emb(hidden_states, position_ids, layer_type)
567
+
568
+ for i, decoder_layer in enumerate(self.layers[: self.config.num_hidden_layers]):
569
+ hidden_states = decoder_layer(
570
+ hidden_states,
571
+ attention_mask=causal_mask_mapping[self.config.layer_types[i]],
572
+ position_embeddings=position_embeddings[self.config.layer_types[i]],
573
+ position_ids=position_ids,
574
+ past_key_values=past_key_values,
575
+ **kwargs,
576
+ )
577
+
578
+ hidden_states = self.norm(hidden_states)
579
+
580
+ return MoeModelOutputWithPast(
581
+ last_hidden_state=hidden_states,
582
+ past_key_values=past_key_values if use_cache else None,
583
+ )
584
+
585
+
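+ # Note on the forward above: causal masks and rotary cos/sin tables are built once per
+ # distinct layer type ("full_attention" vs. "sliding_attention") and shared by every
+ # layer of that type, rather than recomputed per layer.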
586
+ def load_balancing_loss_func(
587
+ gate_logits: torch.Tensor | tuple[torch.Tensor] | None,
588
+ num_experts: int | None = None,
589
+ top_k=2,
590
+ attention_mask: torch.Tensor | None = None,
591
+ ) -> torch.Tensor | int:
592
+ r"""
593
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
594
+
595
+ See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
596
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
597
+ experts is too unbalanced.
598
+
599
+ Args:
600
+ gate_logits:
601
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
602
+ shape [batch_size X sequence_length, num_experts].
603
+ num_experts:
604
+ Number of experts
605
+ top_k:
606
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
607
+ parameter.
608
+ attention_mask (`torch.Tensor`, *optional*):
609
+ The attention_mask used in forward function
610
+ shape [batch_size X sequence_length] if not None.
611
+
612
+ Returns:
613
+ The auxiliary loss.
614
+ """
615
+ if gate_logits is None or not isinstance(gate_logits, tuple):
616
+ return 0
617
+
618
+ if isinstance(gate_logits, tuple):
619
+ compute_device = gate_logits[0].device
620
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
621
+
622
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
623
+
624
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
625
+
626
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
627
+
628
+ if attention_mask is None:
629
+ # Compute the percentage of tokens routed to each expert
630
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
631
+
632
+ # Compute the average probability of routing to these experts
633
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
634
+ else:
635
+ batch_size, sequence_length = attention_mask.shape
636
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
637
+
638
+ # Compute the mask that masks all padding tokens as 0, with the same shape as expert_mask
639
+ expert_attention_mask = (
640
+ attention_mask[None, :, :, None, None]
641
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
642
+ .reshape(-1, top_k, num_experts)
643
+ .to(compute_device)
644
+ )
645
+
646
+ # Compute the percentage of tokens routed to each expert
647
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
648
+ expert_attention_mask, dim=0
649
+ )
650
+
651
+ # Compute the mask that masks all padding tokens as 0, with the same shape as tokens_per_expert
652
+ router_per_expert_attention_mask = (
653
+ attention_mask[None, :, :, None]
654
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
655
+ .reshape(-1, num_experts)
656
+ .to(compute_device)
657
+ )
658
+
659
+ # Compute the average probability of routing to these experts
660
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
661
+ router_per_expert_attention_mask, dim=0
662
+ )
663
+
664
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
665
+ return overall_loss * num_experts
666
+
667
+
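+ # A toy sanity check for the loss above (hypothetical numbers): with num_experts=2 and
+ # top_k=1, perfectly balanced routing gives tokens_per_expert == router_prob_per_expert
+ # == [0.5, 0.5], so the loss is (0.5 * 0.5 + 0.5 * 0.5) * 2 = 1.0; collapsing all tokens
+ # and probability mass onto one expert gives (1.0 * 1.0 + 0.0) * 2 = 2.0.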
668
+ @auto_docstring
669
+ class LagunaForCausalLM(LagunaPreTrainedModel, GenerationMixin):
670
+ _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
671
+ _tp_plan = {"lm_head": "colwise_gather_output"}
672
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
673
+
674
+ def __init__(self, config):
675
+ super().__init__(config)
676
+ self.model = LagunaModel(config)
677
+ self.vocab_size = config.vocab_size
678
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
679
+ self.router_aux_loss_coef = config.router_aux_loss_coef
680
+ self.num_experts = config.num_experts
681
+ self.num_experts_per_tok = config.num_experts_per_tok
682
+
683
+ # Initialize weights and apply final processing
684
+ self.post_init()
685
+
686
+ @can_return_tuple
687
+ @auto_docstring
688
+ def forward(
689
+ self,
690
+ input_ids: torch.LongTensor | None = None,
691
+ attention_mask: torch.Tensor | None = None,
692
+ position_ids: torch.LongTensor | None = None,
693
+ past_key_values: Cache | None = None,
694
+ inputs_embeds: torch.FloatTensor | None = None,
695
+ labels: torch.LongTensor | None = None,
696
+ use_cache: bool | None = None,
697
+ output_router_logits: bool | None = None,
698
+ logits_to_keep: int | torch.Tensor = 0,
699
+ **kwargs: Unpack[TransformersKwargs],
700
+ ) -> MoeCausalLMOutputWithPast:
701
+ r"""
702
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
703
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
704
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
705
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
706
+ """
707
+
708
+ output_router_logits = (
709
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
710
+ )
711
+
712
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
713
+ outputs: MoeModelOutputWithPast = self.model(
714
+ input_ids=input_ids,
715
+ attention_mask=attention_mask,
716
+ position_ids=position_ids,
717
+ past_key_values=past_key_values,
718
+ inputs_embeds=inputs_embeds,
719
+ use_cache=use_cache,
720
+ output_router_logits=output_router_logits,
721
+ **kwargs,
722
+ )
723
+
724
+ hidden_states = outputs.last_hidden_state
725
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
726
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
727
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
728
+
729
+ loss = None
730
+ if labels is not None:
731
+ loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
732
+
733
+ aux_loss = None
734
+ if output_router_logits:
735
+ aux_loss = load_balancing_loss_func(
736
+ outputs.router_logits,
737
+ self.num_experts,
738
+ self.num_experts_per_tok,
739
+ attention_mask,
740
+ )
741
+ if labels is not None:
742
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure aux_loss is on the same device as loss
743
+
744
+ return MoeCausalLMOutputWithPast(
745
+ loss=loss,
746
+ aux_loss=aux_loss,
747
+ logits=logits,
748
+ past_key_values=outputs.past_key_values,
749
+ hidden_states=outputs.hidden_states,
750
+ attentions=outputs.attentions,
751
+ router_logits=outputs.router_logits,
752
+ )
753
+
754
+
755
+ __all__ = ["LagunaForCausalLM", "LagunaModel", "LagunaPreTrainedModel"]
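+ # A minimal usage sketch (the repo id and prompt are illustrative, not taken from this
+ # upload; loading requires trust_remote_code since the model class ships with the repo):
+ # >>> from transformers import AutoModelForCausalLM, AutoTokenizer
+ # >>> tok = AutoTokenizer.from_pretrained("poolside/laguna-xs-2", trust_remote_code=True)
+ # >>> model = AutoModelForCausalLM.from_pretrained("poolside/laguna-xs-2", trust_remote_code=True)
+ # >>> out = model.generate(**tok("def fib(n):", return_tensors="pt"), max_new_tokens=32)
+ # >>> print(tok.decode(out[0]))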
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "bos_token": "〈|EOS|〉",
3
+ "cls_token": "〈|CLS|〉",
4
+ "eos_token": "〈|EOS|〉",
5
+ "mask_token": "〈|MASK|〉",
6
+ "pad_token": "〈|PAD|〉",
7
+ "sep_token": "〈|SEP|〉",
8
+ "unk_token": "〈|UNK|〉"
9
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,576 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "〈|UNK|〉",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "〈|CODE_START|〉",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "〈|EOS|〉",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "〈|CODE_END|〉",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "〈|META_START|〉",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "5": {
44
+ "content": "〈|META_END|〉",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "6": {
52
+ "content": "〈|FIM_MIDDLE|〉",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "7": {
60
+ "content": "〈|FIM_SUFFIX|〉",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "8": {
68
+ "content": "〈|SEP|〉",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "9": {
76
+ "content": "〈|PAD|〉",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "10": {
84
+ "content": "〈|CLS|〉",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "11": {
92
+ "content": "〈|FIM_START|〉",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "12": {
100
+ "content": "〈|MASK|〉",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "13": {
108
+ "content": "|◊|",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "14": {
116
+ "content": "〈|",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "15": {
124
+ "content": "|〉",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "16": {
132
+ "content": "〈|/",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "17": {
140
+ "content": "/|〉",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "20": {
148
+ "content": "〈|SPECIAL_1|〉",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "21": {
156
+ "content": "〈|SPECIAL_2|〉",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "22": {
164
+ "content": "〈|SPECIAL_3|〉",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "27": {
172
+ "content": "〈|SPECIAL_8|〉",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "28": {
180
+ "content": "〈|SPECIAL_9|〉",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "29": {
188
+ "content": "〈|SPECIAL_10|〉",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "30": {
196
+ "content": "〈|SPECIAL_11|〉",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "31": {
204
+ "content": "〈|SPECIAL_12|〉",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "32": {
212
+ "content": "〈|SPECIAL_13|〉",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "33": {
220
+ "content": "〈|SPECIAL_14|〉",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "34": {
228
+ "content": "〈|SPECIAL_15|〉",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "35": {
236
+ "content": "〈|SPECIAL_16|〉",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "36": {
244
+ "content": "〈|SPECIAL_17|〉",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "37": {
252
+ "content": "〈|SPECIAL_18|〉",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "38": {
260
+ "content": "〈|SPECIAL_19|〉",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "39": {
268
+ "content": "〈|SPECIAL_20|〉",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "40": {
276
+ "content": "〈|SPECIAL_21|〉",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "41": {
284
+ "content": "〈|SPECIAL_22|〉",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "42": {
292
+ "content": "〈|SPECIAL_23|〉",
293
+ "lstrip": false,
294
+ "normalized": false,
295
+ "rstrip": false,
296
+ "single_word": false,
297
+ "special": true
298
+ },
299
+ "43": {
300
+ "content": "〈|SPECIAL_24|〉",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "44": {
308
+ "content": "〈|SPECIAL_25|〉",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "45": {
316
+ "content": "〈|SPECIAL_26|〉",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "46": {
324
+ "content": "〈|SPECIAL_27|〉",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "47": {
332
+ "content": "〈|SPECIAL_28|〉",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "48": {
340
+ "content": "〈|SPECIAL_29|〉",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "49": {
348
+ "content": "〈|SPECIAL_30|〉",
349
+ "lstrip": false,
350
+ "normalized": false,
351
+ "rstrip": false,
352
+ "single_word": false,
353
+ "special": true
354
+ },
355
+ "50": {
356
+ "content": "〈|SPECIAL_31|〉",
357
+ "lstrip": false,
358
+ "normalized": false,
359
+ "rstrip": false,
360
+ "single_word": false,
361
+ "special": true
362
+ },
363
+ "51": {
364
+ "content": "〈|SPECIAL_32|〉",
365
+ "lstrip": false,
366
+ "normalized": false,
367
+ "rstrip": false,
368
+ "single_word": false,
369
+ "special": true
370
+ },
371
+ "52": {
372
+ "content": "〈|SPECIAL_33|〉",
373
+ "lstrip": false,
374
+ "normalized": false,
375
+ "rstrip": false,
376
+ "single_word": false,
377
+ "special": true
378
+ },
379
+ "53": {
380
+ "content": "〈|SPECIAL_34|〉",
381
+ "lstrip": false,
382
+ "normalized": false,
383
+ "rstrip": false,
384
+ "single_word": false,
385
+ "special": true
386
+ },
387
+ "54": {
388
+ "content": "〈|SPECIAL_35|〉",
389
+ "lstrip": false,
390
+ "normalized": false,
391
+ "rstrip": false,
392
+ "single_word": false,
393
+ "special": true
394
+ },
395
+ "55": {
396
+ "content": "〈|SPECIAL_36|〉",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false,
401
+ "special": true
402
+ },
403
+ "56": {
404
+ "content": "〈|SPECIAL_37|〉",
405
+ "lstrip": false,
406
+ "normalized": false,
407
+ "rstrip": false,
408
+ "single_word": false,
409
+ "special": true
410
+ },
411
+ "57": {
412
+ "content": "〈|SPECIAL_38|〉",
413
+ "lstrip": false,
414
+ "normalized": false,
415
+ "rstrip": false,
416
+ "single_word": false,
417
+ "special": true
418
+ },
419
+ "58": {
420
+ "content": "〈|SPECIAL_39|〉",
421
+ "lstrip": false,
422
+ "normalized": false,
423
+ "rstrip": false,
424
+ "single_word": false,
425
+ "special": true
426
+ },
427
+ "59": {
428
+ "content": "〈|SPECIAL_40|〉",
429
+ "lstrip": false,
430
+ "normalized": false,
431
+ "rstrip": false,
432
+ "single_word": false,
433
+ "special": true
434
+ },
435
+ "60": {
436
+ "content": "〈|SPECIAL_41|〉",
437
+ "lstrip": false,
438
+ "normalized": false,
439
+ "rstrip": false,
440
+ "single_word": false,
441
+ "special": true
442
+ },
443
+ "61": {
444
+ "content": "〈|SPECIAL_42|〉",
445
+ "lstrip": false,
446
+ "normalized": false,
447
+ "rstrip": false,
448
+ "single_word": false,
449
+ "special": true
450
+ },
451
+ "62": {
452
+ "content": "〈|SPECIAL_43|〉",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false,
457
+ "special": true
458
+ },
459
+ "63": {
460
+ "content": "〈|SPECIAL_44|〉",
461
+ "lstrip": false,
462
+ "normalized": false,
463
+ "rstrip": false,
464
+ "single_word": false,
465
+ "special": true
466
+ },
467
+ "64": {
468
+ "content": "〈|SPECIAL_45|〉",
469
+ "lstrip": false,
470
+ "normalized": false,
471
+ "rstrip": false,
472
+ "single_word": false,
473
+ "special": true
474
+ },
475
+ "65": {
476
+ "content": "〈|SPECIAL_46|〉",
477
+ "lstrip": false,
478
+ "normalized": false,
479
+ "rstrip": false,
480
+ "single_word": false,
481
+ "special": true
482
+ },
483
+ "66": {
484
+ "content": "〈|SPECIAL_47|〉",
485
+ "lstrip": false,
486
+ "normalized": false,
487
+ "rstrip": false,
488
+ "single_word": false,
489
+ "special": true
490
+ },
491
+ "67": {
492
+ "content": "〈|SPECIAL_48|〉",
493
+ "lstrip": false,
494
+ "normalized": false,
495
+ "rstrip": false,
496
+ "single_word": false,
497
+ "special": true
498
+ },
499
+ "68": {
500
+ "content": "〈|SPECIAL_49|〉",
501
+ "lstrip": false,
502
+ "normalized": false,
503
+ "rstrip": false,
504
+ "single_word": false,
505
+ "special": true
506
+ },
507
+ "69": {
508
+ "content": "〈|SPECIAL_50|〉",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false,
513
+ "special": true
514
+ },
515
+ "18": {
516
+ "content": "<think>",
517
+ "single_word": false,
518
+ "lstrip": false,
519
+ "rstrip": false,
520
+ "normalized": false,
521
+ "special": false
522
+ },
523
+ "19": {
524
+ "content": "</think>",
525
+ "single_word": false,
526
+ "lstrip": false,
527
+ "rstrip": false,
528
+ "normalized": false,
529
+ "special": false
530
+ },
531
+ "23": {
532
+ "content": "<assistant>",
533
+ "single_word": false,
534
+ "lstrip": false,
535
+ "rstrip": false,
536
+ "normalized": false,
537
+ "special": false
538
+ },
539
+ "24": {
540
+ "content": "</assistant>",
541
+ "single_word": false,
542
+ "lstrip": false,
543
+ "rstrip": false,
544
+ "normalized": false,
545
+ "special": false
546
+ },
547
+ "25": {
548
+ "content": "<tool_call>",
549
+ "single_word": false,
550
+ "lstrip": false,
551
+ "rstrip": false,
552
+ "normalized": false,
553
+ "special": false
554
+ },
555
+ "26": {
556
+ "content": "</tool_call>",
557
+ "single_word": false,
558
+ "lstrip": false,
559
+ "rstrip": false,
560
+ "normalized": false,
561
+ "special": false
562
+ }
563
+ },
564
+ "bos_token": "〈|EOS|〉",
565
+ "clean_up_tokenization_spaces": false,
566
+ "cls_token": "〈|CLS|〉",
567
+ "eos_token": "〈|EOS|〉",
568
+ "extra_special_tokens": {},
569
+ "mask_token": "〈|MASK|〉",
570
+ "model_max_length": 1000000000000000019884624838656,
571
+ "pad_token": "〈|PAD|〉",
572
+ "sep_token": "〈|SEP|〉",
573
+ "tokenizer_class": "PreTrainedTokenizerFast",
574
+ "unk_token": "〈|UNK|〉",
575
+ "chat_template": "{% include 'chat_template.jinja' %}"
576
+ }