joerowell committed
Commit 2d42f96 · verified · 1 Parent(s): 8048f76

Laguna-XS v1.4 base (step 1207000) bf16

config.json ADDED
@@ -0,0 +1,151 @@
+ {
+   "architectures": [
+     "LagunaForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_laguna.LagunaConfig",
+     "AutoModelForCausalLM": "modeling_laguna.LagunaForCausalLM"
+   },
+   "model_type": "laguna",
+   "vocab_size": 100352,
+   "hidden_size": 2048,
+   "intermediate_size": 8192,
+   "num_hidden_layers": 40,
+   "num_attention_heads": 48,
+   "num_key_value_heads": 8,
+   "head_dim": 128,
+   "max_position_embeddings": 131072,
+   "qkv_bias": false,
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "rms_norm_eps": 1e-06,
+   "num_experts": 256,
+   "num_experts_per_tok": 8,
+   "moe_intermediate_size": 512,
+   "shared_expert_intermediate_size": 512,
+   "norm_topk_prob": true,
+   "router_aux_loss_coef": 0.001,
+   "decoder_sparse_step": 1,
+   "mlp_only_layers": [
+     0
+   ],
+   "bos_token_id": 2,
+   "eos_token_id": [
+     2,
+     24
+   ],
+   "pad_token_id": 9,
+   "tie_word_embeddings": false,
+   "use_cache": true,
+   "torch_dtype": "bfloat16",
+   "gating": "per-head",
+   "sliding_window": 512,
+   "rope_parameters": {
+     "rope_theta": 500000.0,
+     "rope_type": "yarn",
+     "factor": 32.0,
+     "original_max_position_embeddings": 4096,
+     "beta_slow": 1.0,
+     "beta_fast": 64.0,
+     "attention_factor": 1.0
+   },
+   "layer_types": [
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention"
+   ],
+   "num_attention_heads_per_layer": [
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64,
+     48,
+     64,
+     64,
+     64
+   ],
+   "swa_rope_parameters": {
+     "rope_theta": 10000.0,
+     "rope_type": "linear",
+     "factor": 1.0,
+     "partial_rotary_factor": 1.0
+   },
+   "moe_router_use_sigmoid": true,
+   "moe_apply_router_weight_on_input": false,
+   "moe_shared_gate": false,
+   "moe_routed_scaling_factor": 2.5,
+   "qk_norm_type": "rmsnorm",
+   "norm_type": "rmsnorm",
+   "rope_style": "rotate-half",
+   "partial_rotary_factor": 0.5,
+   "swa_attention_sink_enabled": false
+ }
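
Because `auto_map` points `AutoConfig` and `AutoModelForCausalLM` at the bundled configuration_laguna.py and modeling_laguna.py, the checkpoint has to be loaded with remote code trusted. A minimal loading sketch; the repo id below is a placeholder for wherever this commit lives, not something stated in the commit itself:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "poolside/Laguna-XS-v1.4-base"  # placeholder repo id, adjust to the actual Hub path

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,       # required: the model class ships with the repo
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16" above
    device_map="auto",            # the 14 bf16 shards below total roughly 67 GB
)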
configuration_laguna.py ADDED
@@ -0,0 +1,194 @@
+ # Copyright 2025 Poolside and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from transformers.configuration_utils import PreTrainedConfig
+ from transformers.modeling_rope_utils import RopeParameters
+
+
+ class LagunaConfig(PreTrainedConfig):
+     r"""
+     Configuration class for the Laguna model.
+
+     Laguna is Poolside's MoE architecture with:
+     - Attention output gating (softplus gate)
+     - Sigmoid routing instead of softmax
+     - No QKV bias
+     - Explicit head_dim parameter
+
+     Args:
+         head_dim (`int`, *optional*, defaults to 128):
+             Dimension of attention heads. Laguna uses an explicit head_dim rather than
+             computing it from hidden_size // num_attention_heads.
+         qkv_bias (`bool`, *optional*, defaults to `False`):
+             Whether to add bias to QKV projections. Laguna uses no QKV bias.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to add bias to the attention output projection. Laguna uses no attention bias.
+         gating (`bool` or `str`, *optional*, defaults to `True`):
+             Whether to use softplus output gating on attention. When truthy, a g_proj linear
+             layer is added and attn_output = attn_output * softplus(g_proj(x)). Set to
+             `"per-head"` to gate with one scalar per attention head.
+         sliding_window (`int`, *optional*):
+             Sliding window attention size. Used by layers whose type in ``layer_types``
+             is ``"sliding_attention"``. When ``None``, all layers use full attention.
+         layer_types (`list[str]`, *optional*):
+             Per-layer attention type. Each element should be ``"sliding_attention"`` or
+             ``"full_attention"``. Length must equal ``num_hidden_layers``. When ``None``,
+             all layers default to global attention.
+         swa_attention_sink_enabled (`bool`, *optional*, defaults to `False`):
+             Whether to enable learnable attention sinks on sliding-window attention layers.
+             When enabled, a per-head bias parameter is added that allows the model to attend
+             to position 0 even when it falls outside the sliding window.
+         swa_rope_parameters (`RopeParameters`, *optional*):
+             Separate RoPE configuration for sliding-window attention layers. When ``None``,
+             SWA layers use the same RoPE as global attention layers.
+         vocab_size (`int`, *optional*, defaults to 100352):
+             Vocabulary size of the Laguna model.
+         hidden_size (`int`, *optional*, defaults to 2048):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 8192):
+             Dimension of the MLP representations for dense layers.
+         num_hidden_layers (`int`, *optional*, defaults to 48):
+             Number of hidden layers in the Transformer.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads.
+         num_key_value_heads (`int`, *optional*, defaults to 8):
+             Number of key-value heads for GQA.
+         max_position_embeddings (`int`, *optional*, defaults to 4096):
+             Maximum sequence length.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+             Epsilon for RMSNorm layers.
+         num_experts (`int`, *optional*, defaults to 256):
+             Number of routed experts.
+         num_experts_per_tok (`int`, *optional*, defaults to 16):
+             Number of experts selected per token (top-k).
+         moe_intermediate_size (`int`, *optional*, defaults to 1024):
+             Intermediate size of routed experts.
+         shared_expert_intermediate_size (`int`, *optional*, defaults to 1024):
+             Intermediate size of the shared expert.
+         norm_topk_prob (`bool`, *optional*, defaults to `True`):
+             Whether to normalize top-k routing probabilities.
+         decoder_sparse_step (`int`, *optional*, defaults to 1):
+             Frequency of MoE layers (1 = every layer is MoE apart from mlp_only_layers).
+         mlp_only_layers (`list[int]`, *optional*, defaults to `[0]`):
+             Layer indices that use a dense MLP instead of MoE.
+         router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+             Auxiliary loss coefficient for load balancing.
+         rope_parameters (`RopeParameters`, *optional*):
+             RoPE configuration. Defaults to rope_theta=500000.0.
+     """
+
+     model_type = "laguna"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",
+         "layers.*.self_attn.k_proj": "colwise",
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.g_proj": "colwise",  # Laguna-specific gating projection
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+
+     def __init__(
+         self,
+         vocab_size: int = 100352,
+         hidden_size: int = 2048,
+         intermediate_size: int = 8192,
+         num_hidden_layers: int = 48,
+         num_attention_heads: int = 32,
+         num_key_value_heads: int = 8,
+         head_dim: int = 128,
+         qkv_bias: bool = False,
+         attention_bias: bool = False,
+         gating: bool | str = True,
+         hidden_act: str = "silu",
+         max_position_embeddings: int = 4096,
+         initializer_range: float = 0.02,
+         rms_norm_eps: float = 1e-6,
+         use_cache: bool = True,
+         tie_word_embeddings: bool = False,
+         rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
+         partial_rotary_factor: float = 1.0,
+         attention_dropout: float = 0.0,
+         sliding_window: int | None = None,
+         layer_types: list[str] | None = None,
+         swa_attention_sink_enabled: bool = False,
+         swa_rope_parameters: RopeParameters | None = None,
+         num_attention_heads_per_layer: list[int] | None = None,
+         num_experts: int = 256,
+         num_experts_per_tok: int = 16,
+         moe_intermediate_size: int = 1024,
+         shared_expert_intermediate_size: int = 1024,
+         norm_topk_prob: bool = True,
+         decoder_sparse_step: int = 1,
+         mlp_only_layers: list[int] | None = None,
+         router_aux_loss_coef: float = 0.001,
+         output_router_logits: bool = False,
+         moe_routed_scaling_factor: float = 1.0,
+         moe_apply_router_weight_on_input: bool = False,
+         **kwargs,
+     ):
+         # Default mlp_only_layers: first layer is dense (moe_first_k_dense_replace=1)
+         if mlp_only_layers is None:
+             mlp_only_layers = [0]
+
+         # Default rope_parameters with Laguna's theta
+         if rope_parameters is None:
+             rope_parameters = {"rope_type": "default", "rope_theta": 500000.0}
+
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+         self.head_dim = head_dim
+         self.qkv_bias = qkv_bias
+         self.attention_bias = attention_bias
+         self.gating = gating
+         self.hidden_act = hidden_act
+         self.max_position_embeddings = max_position_embeddings
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_parameters = rope_parameters
+         self.partial_rotary_factor = partial_rotary_factor
+         self.attention_dropout = attention_dropout
+         # Sliding window attention arguments
+         self.sliding_window = sliding_window
+         self.layer_types = layer_types
+         self.swa_attention_sink_enabled = swa_attention_sink_enabled
+         self.swa_rope_parameters = swa_rope_parameters
+         self.num_attention_heads_per_layer = num_attention_heads_per_layer
+         # MoE arguments
+         self.num_experts = num_experts
+         self.num_experts_per_tok = num_experts_per_tok
+         self.moe_intermediate_size = moe_intermediate_size
+         self.shared_expert_intermediate_size = shared_expert_intermediate_size
+         self.norm_topk_prob = norm_topk_prob
+         self.decoder_sparse_step = decoder_sparse_step
+         self.mlp_only_layers = mlp_only_layers
+         self.router_aux_loss_coef = router_aux_loss_coef
+         self.output_router_logits = output_router_logits
+         self.moe_routed_scaling_factor = moe_routed_scaling_factor
+         self.moe_apply_router_weight_on_input = moe_apply_router_weight_on_input
+
+         super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
+
+
+ __all__ = ["LagunaConfig"]
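
For reference, the shipped config.json corresponds to instantiating this class with the checkpoint's overrides. A sketch that passes a subset of the non-default fields; extra keys such as bos_token_id flow through **kwargs to PreTrainedConfig:

from configuration_laguna import LagunaConfig

config = LagunaConfig(
    num_hidden_layers=40,
    num_attention_heads=48,
    num_experts_per_tok=8,
    moe_intermediate_size=512,
    shared_expert_intermediate_size=512,
    gating="per-head",              # per-head softplus attention gate
    sliding_window=512,
    max_position_embeddings=131072,
    partial_rotary_factor=0.5,
    moe_routed_scaling_factor=2.5,
    bos_token_id=2,
    eos_token_id=[2, 24],
    pad_token_id=9,
)
print(config.num_experts, config.num_experts_per_tok)  # 256 8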
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 2,
+   "do_sample": true,
+   "eos_token_id": [
+     2,
+     24
+   ],
+   "max_new_tokens": 2048,
+   "pad_token_id": 9,
+   "temperature": 0.7,
+   "top_p": 0.9
+ }
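
These are sampling defaults (nucleus sampling at temperature 0.7) that `generate` picks up automatically unless overridden per call. A short sketch continuing the placeholder `model`/`tokenizer` from the loading example above:

prompt = "def quicksort(arr):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# do_sample=True, temperature=0.7, top_p=0.9 and eos_token_id=[2, 24]
# come from generation_config.json; max_new_tokens is overridden here.
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))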
model-00001-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:096bec47fccb4e593cda439e96c441b4df24da603f6996ad4cc2f42b07b62979
+ size 5120041576
model-00002-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b033cde77d0dfc467217228ac1fe56955da6f6f0539d217c0e87bc9c6141a02
3
+ size 5119449520
model-00003-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4322f9a3659ac1b3f1aa6445d23e00294b876d76c2dcb940b103a94afb68290
3
+ size 5119449504
model-00004-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc9a1c934aa3e438031f7272ab103fc42d8dbbaad5b35a6a9041fe8b2615c03b
3
+ size 5119450272
model-00005-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52aac8a7fb885688771a7c74a9d06e62b57cdbbecb5282347e7d9c9ad0ebf59c
3
+ size 5119451824
model-00006-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05f9030c4d16a4b858e31cd470511784d3917a2a6f023ed8a5362bb239b7997c
3
+ size 5119451944
model-00007-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c5fb7baabed09175615fe9d9fd93544bfb8c70b24d81a139719eeaae0b105ab
3
+ size 5119451960
model-00008-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:452ccc8d15c66187c90845a504b8eb66105ed185da996d180ff2a93aea19889b
3
+ size 5119451960
model-00009-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91343e6489e08e6b8c1f94ad333dade5c1dd34ff10b9bcd7600aff346337c7e5
3
+ size 5119451872
model-00010-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fd9ba3702aff6e57e11362b8347382279ceef6a9ef0896571771ea5c3d3da08
3
+ size 5119451824
model-00011-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b131998e1f04900a4c809675ccbbb33ee2d3fd8237ab364d809331d59c0f09bb
3
+ size 5119451856
model-00012-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07749587fc5f27ce84ca6889afd840b68dd5878019d37e22881ec727cdbf59aa
3
+ size 5119451960
model-00013-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:933f10a0e0b31fb9f9904f21a2bbd0beaf5ec211ad1ebb7ff91b6086a304d243
3
+ size 5119451960
model-00014-of-00014.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52bf6dae97176c8476d198fb820912f9f6a6b51b682b10560befd88f2969c384
3
+ size 335563984
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_laguna.py ADDED
@@ -0,0 +1,785 @@
1
+ # Copyright 2025 Poolside and the HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import copy
16
+ from collections.abc import Callable
17
+ from typing import Optional
18
+
19
+ import torch
20
+ import torch.nn.functional as F
21
+ from torch import nn
22
+
23
+ from transformers import initialization as init
24
+ from transformers.activations import ACT2FN
25
+ from transformers.cache_utils import Cache, DynamicCache
26
+ from transformers.generation import GenerationMixin
27
+ from transformers.integrations import (
28
+ use_kernel_forward_from_hub,
29
+ use_kernel_func_from_hub,
30
+ use_kernelized_func,
31
+ )
32
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
33
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
34
+ from transformers.modeling_layers import GradientCheckpointingLayer
35
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
36
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
37
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
38
+ from transformers.processing_utils import Unpack
39
+ from transformers.utils import auto_docstring, can_return_tuple, is_grouped_mm_available
40
+ from transformers.utils.generic import TransformersKwargs, check_model_inputs, maybe_autocast
41
+
42
+ try:
43
+ # transformers >= 5.5 relocated OutputRecorder to a dedicated module.
44
+ from transformers.utils.output_capturing import OutputRecorder
45
+ except ImportError:
46
+ from transformers.utils.generic import OutputRecorder # type: ignore[no-redef]
47
+ from .configuration_laguna import LagunaConfig
48
+
49
+
50
+ def _build_rope_config(base_config, rope_params, partial_rotary_factor):
51
+ """Shallow-copy the config with rope_parameters / partial_rotary_factor overridden."""
52
+ cfg = copy.copy(base_config)
53
+ if rope_params is not None:
54
+ cfg.rope_parameters = dict(rope_params)
55
+ if partial_rotary_factor is not None:
56
+ cfg.partial_rotary_factor = float(partial_rotary_factor)
57
+ return cfg
58
+
59
+
60
+ @use_kernel_forward_from_hub("RMSNorm")
61
+ class LagunaRMSNorm(nn.Module):
62
+ def __init__(self, hidden_size, eps=1e-6):
63
+ """
64
+ LagunaRMSNorm is equivalent to T5LayerNorm
65
+ """
66
+ super().__init__()
67
+ self.weight = nn.Parameter(torch.ones(hidden_size))
68
+ self.variance_epsilon = eps
69
+
70
+ def forward(self, hidden_states):
71
+ input_dtype = hidden_states.dtype
72
+ hidden_states = hidden_states.to(torch.float32)
73
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
74
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
75
+ return self.weight * hidden_states.to(input_dtype)
76
+
77
+ def extra_repr(self):
78
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
79
+
80
+
81
+ class LagunaRotaryEmbedding(nn.Module):
82
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
83
+
84
+ def __init__(self, config: LagunaConfig, device=None):
85
+ super().__init__()
86
+ self.max_seq_len_cached = config.max_position_embeddings
87
+ self.original_max_seq_len = config.max_position_embeddings
88
+
89
+ self.config = config
90
+
91
+ self.rope_type = self.config.rope_parameters["rope_type"]
92
+ rope_init_fn: Callable = self.compute_default_rope_parameters
93
+ if self.rope_type != "default":
94
+ rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
95
+ inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
96
+
97
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
98
+ self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
99
+
100
+ @staticmethod
101
+ def compute_default_rope_parameters(
102
+ config: LagunaConfig | None = None,
103
+ device: Optional["torch.device"] = None,
104
+ seq_len: int | None = None,
105
+ ) -> tuple["torch.Tensor", float]:
106
+ """
107
+ Computes the inverse frequencies according to the original RoPE implementation
108
+ Args:
109
+ config ([`~transformers.PreTrainedConfig`]):
110
+ The model configuration.
111
+ device (`torch.device`):
112
+ The device to use for initialization of the inverse frequencies.
113
+ seq_len (`int`, *optional*):
114
+ The current sequence length. Unused for this type of RoPE.
115
+ Returns:
116
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
117
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
118
+ """
119
+ base = config.rope_parameters["rope_theta"]
120
+ head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
121
+ partial = getattr(config, "partial_rotary_factor", 1.0)
122
+ dim = int(head_dim * partial)
123
+
124
+ attention_factor = 1.0 # Unused in this type of RoPE
125
+
126
+ # Compute the inverse frequencies
127
+ inv_freq = 1.0 / (
128
+ base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
129
+ )
130
+ return inv_freq, attention_factor
131
+
132
+ @torch.no_grad()
133
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
134
+ def forward(self, x, position_ids):
135
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
136
+ position_ids_expanded = position_ids[:, None, :].float()
137
+
138
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
139
+ with maybe_autocast(device_type=device_type, enabled=False): # Force float32
140
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
141
+ emb = torch.cat((freqs, freqs), dim=-1)
142
+ cos = emb.cos() * self.attention_scaling
143
+ sin = emb.sin() * self.attention_scaling
144
+
145
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
146
+
147
+
148
+ class LagunaMLP(nn.Module):
149
+ def __init__(self, config, intermediate_size=None):
150
+ super().__init__()
151
+ self.config = config
152
+ self.hidden_size = config.hidden_size
153
+ self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
154
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
155
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
156
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
157
+ self.act_fn = ACT2FN[config.hidden_act]
158
+
159
+ def forward(self, x):
160
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
161
+ return down_proj
162
+
163
+
164
+ class LagunaTopKRouter(nn.Module):
165
+ """Laguna MoE router using sigmoid scoring (not softmax)."""
166
+
167
+ def __init__(self, config):
168
+ super().__init__()
169
+ self.top_k = config.num_experts_per_tok
170
+ self.num_experts = config.num_experts
171
+ self.norm_topk_prob = config.norm_topk_prob
172
+ self.hidden_dim = config.hidden_size
173
+ self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))
174
+
175
+ def forward(
176
+ self,
177
+ hidden_states: torch.Tensor,
178
+ e_score_correction_bias: torch.Tensor | None = None,
179
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
180
+ hidden_states = hidden_states.reshape(-1, self.hidden_dim)
181
+ router_logits = F.linear(hidden_states, self.weight)
182
+ # Laguna-specific: sigmoid routing in float32 for precision
183
+ routing_weights = torch.sigmoid(router_logits.float())
184
+ if e_score_correction_bias is not None:
185
+ routing_weights = routing_weights + e_score_correction_bias.float()
186
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
187
+ if self.norm_topk_prob:
188
+ routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
189
+ routing_weights = routing_weights.to(hidden_states.dtype)
190
+ return router_logits, routing_weights, selected_experts
191
+
192
+
193
+ class LagunaSparseMoeBlock(nn.Module):
194
+ """Laguna MoE block using sigmoid router, per-expert MLPs, and a shared expert."""
195
+
196
+ def __init__(self, config):
197
+ super().__init__()
198
+ self.num_experts = config.num_experts
199
+ self.top_k = config.num_experts_per_tok
200
+ self.routed_scaling_factor = float(getattr(config, "moe_routed_scaling_factor", 1.0))
201
+ self.apply_router_weight_on_input = bool(getattr(config, "moe_apply_router_weight_on_input", False))
202
+ self.gate = LagunaTopKRouter(config)
203
+ self.experts = nn.ModuleList(
204
+ [LagunaMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)]
205
+ )
206
+ self.experts.e_score_correction_bias = nn.Parameter(torch.zeros(self.num_experts))
207
+ self.shared_expert = LagunaMLP(config, intermediate_size=config.shared_expert_intermediate_size)
208
+
209
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
210
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
211
+ hidden_states = hidden_states.view(-1, hidden_dim)
212
+
213
+ shared_expert_output = self.shared_expert(hidden_states)
214
+
215
+ _, routing_weights, selected_experts = self.gate(
216
+ hidden_states, e_score_correction_bias=self.experts.e_score_correction_bias
217
+ )
218
+ routed_output = torch.zeros_like(hidden_states)
219
+
220
+ expert_mask = F.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
221
+
222
+ for expert_idx in range(self.num_experts):
223
+ top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
224
+ if token_idx.shape[0] == 0:
225
+ continue
226
+ w = routing_weights[token_idx, top_k_pos, None]
227
+ if self.apply_router_weight_on_input:
228
+ current = self.experts[expert_idx](hidden_states[token_idx] * w)
229
+ else:
230
+ current = self.experts[expert_idx](hidden_states[token_idx]) * w
231
+ routed_output.index_add_(0, token_idx, current.to(routed_output.dtype))
232
+
233
+ routed_output = routed_output * self.routed_scaling_factor
234
+ final_hidden_states = routed_output + shared_expert_output
235
+ return final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
236
+
237
+
238
+ def rotate_half(x):
239
+ """Rotates half the hidden dims of the input."""
240
+ x1 = x[..., : x.shape[-1] // 2]
241
+ x2 = x[..., x.shape[-1] // 2 :]
242
+ return torch.cat((-x2, x1), dim=-1)
243
+
244
+
245
+ @use_kernel_func_from_hub("rotary_pos_emb")
246
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
247
+ """Applies Rotary Position Embedding to the query and key tensors.
248
+
249
+ Args:
250
+ q (`torch.Tensor`): The query tensor.
251
+ k (`torch.Tensor`): The key tensor.
252
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
253
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
254
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
255
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
256
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
257
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
258
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
259
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
260
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
261
+ Returns:
262
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
263
+ """
264
+ cos = cos.unsqueeze(unsqueeze_dim)
265
+ sin = sin.unsqueeze(unsqueeze_dim)
266
+ rot_dim = cos.shape[-1]
267
+ if rot_dim == q.shape[-1]:
268
+ q_embed = (q * cos) + (rotate_half(q) * sin)
269
+ k_embed = (k * cos) + (rotate_half(k) * sin)
270
+ return q_embed, k_embed
271
+ q_rot, q_pass = q[..., :rot_dim], q[..., rot_dim:]
272
+ k_rot, k_pass = k[..., :rot_dim], k[..., rot_dim:]
273
+ q_rot = (q_rot * cos) + (rotate_half(q_rot) * sin)
274
+ k_rot = (k_rot * cos) + (rotate_half(k_rot) * sin)
275
+ return torch.cat([q_rot, q_pass], dim=-1), torch.cat([k_rot, k_pass], dim=-1)
276
+
277
+
278
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
279
+ """
280
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
281
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
282
+ """
283
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
284
+ if n_rep == 1:
285
+ return hidden_states
286
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
287
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
288
+
289
+
290
+ def eager_attention_forward(
291
+ module: nn.Module,
292
+ query: torch.Tensor,
293
+ key: torch.Tensor,
294
+ value: torch.Tensor,
295
+ attention_mask: torch.Tensor | None,
296
+ scaling: float,
297
+ dropout: float = 0.0,
298
+ **kwargs: Unpack[TransformersKwargs],
299
+ ):
300
+ key_states = repeat_kv(key, module.num_key_value_groups)
301
+ value_states = repeat_kv(value, module.num_key_value_groups)
302
+
303
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
304
+ if attention_mask is not None:
305
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
306
+ attn_weights = attn_weights + causal_mask
307
+
308
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
309
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
310
+ attn_output = torch.matmul(attn_weights, value_states)
311
+ attn_output = attn_output.transpose(1, 2).contiguous()
312
+
313
+ return attn_output, attn_weights
314
+
315
+
316
+ # Laguna attention is identical to Qwen2MoE attention except:
317
+ # - No QKV bias
318
+ # - Explicit head_dim from config
319
+ # - Output gating: attn_output = attn_output * softplus(g_proj(hidden_states))
320
+ # - No sliding window (full attention only)
321
+ @use_kernelized_func(apply_rotary_pos_emb)
322
+ class LagunaAttention(nn.Module):
323
+ def __init__(self, config: LagunaConfig, layer_idx: int):
324
+ super().__init__()
325
+ self.config = config
326
+ self.layer_idx = layer_idx
327
+ self.head_dim = config.head_dim
328
+
329
+ per_layer_heads = getattr(config, "num_attention_heads_per_layer", None)
330
+ num_heads = per_layer_heads[layer_idx] if per_layer_heads is not None else config.num_attention_heads
331
+ self.num_heads = num_heads
332
+ self.num_key_value_heads = config.num_key_value_heads
333
+ self.num_key_value_groups = num_heads // config.num_key_value_heads
334
+ self.scaling = self.head_dim**-0.5
335
+ self.attention_dropout = config.attention_dropout
336
+ self.is_causal = True
337
+
338
+ self.q_proj = nn.Linear(config.hidden_size, num_heads * self.head_dim, bias=False)
339
+ self.k_proj = nn.Linear(config.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
340
+ self.v_proj = nn.Linear(config.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
341
+ self.o_proj = nn.Linear(num_heads * self.head_dim, config.hidden_size, bias=False)
342
+
343
+ gating = getattr(config, "gating", True)
344
+ self.gating = bool(gating)
345
+ self.gate_per_head = gating == "per-head"
346
+ if self.gating:
347
+ g_out = num_heads if self.gate_per_head else num_heads * self.head_dim
348
+ self.g_proj = nn.Linear(config.hidden_size, g_out, bias=False)
349
+
350
+ self.q_norm = LagunaRMSNorm(config.head_dim, eps=config.rms_norm_eps)
351
+ self.k_norm = LagunaRMSNorm(config.head_dim, eps=config.rms_norm_eps)
352
+
353
+ def forward(
354
+ self,
355
+ hidden_states: torch.Tensor,
356
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
357
+ attention_mask: torch.Tensor | None,
358
+ past_key_values: Cache | None = None,
359
+ cache_position: torch.LongTensor | None = None,
360
+ **kwargs: Unpack[FlashAttentionKwargs],
361
+ ) -> tuple[torch.Tensor, torch.Tensor | None]:
362
+ input_shape = hidden_states.shape[:-1]
363
+ hidden_shape = (*input_shape, -1, self.head_dim)
364
+
365
+ query_states = self.q_proj(hidden_states)
366
+ key_states = self.k_proj(hidden_states)
367
+ value_states = self.v_proj(hidden_states)
368
+
369
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
370
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
371
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
372
+
373
+ # QK normalization (applied per-head before RoPE)
374
+ query_states = self.q_norm(query_states)
375
+ key_states = self.k_norm(key_states)
376
+
377
+ cos, sin = position_embeddings
378
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
379
+
380
+ if past_key_values is not None:
381
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
382
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
383
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
384
+
385
+ attention_interface: Callable = eager_attention_forward
386
+ if self.config._attn_implementation != "eager":
387
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
388
+
389
+ attn_output, attn_weights = attention_interface(
390
+ self,
391
+ query_states,
392
+ key_states,
393
+ value_states,
394
+ attention_mask,
395
+ dropout=0.0 if not self.training else self.attention_dropout,
396
+ scaling=self.scaling,
397
+ **kwargs,
398
+ )
399
+
400
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
401
+
402
+ if self.gating:
403
+ gate = F.softplus(self.g_proj(hidden_states).float()).to(attn_output.dtype)
404
+ if self.gate_per_head:
405
+ shape = attn_output.shape
406
+ attn_output = (
407
+ attn_output.view(*shape[:-1], self.num_heads, self.head_dim) * gate.unsqueeze(-1)
408
+ ).view(shape)
409
+ else:
410
+ attn_output = attn_output * gate
411
+
412
+ attn_output = self.o_proj(attn_output)
413
+
414
+ return attn_output, attn_weights
415
+
416
+
417
+ class LagunaDecoderLayer(GradientCheckpointingLayer):
418
+ """Laguna decoder layer with gated attention and sigmoid-routed MoE."""
419
+
420
+ def __init__(self, config: LagunaConfig, layer_idx: int):
421
+ super().__init__()
422
+ self.layer_idx = layer_idx
423
+ layer_types = getattr(config, "layer_types", None)
424
+ self.attention_type = (
425
+ layer_types[layer_idx] if layer_types is not None else "full_attention"
426
+ )
427
+ self.self_attn = LagunaAttention(config, layer_idx)
428
+ if (layer_idx not in config.mlp_only_layers) and (
429
+ config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
430
+ ):
431
+ self.mlp = LagunaSparseMoeBlock(config)
432
+ else:
433
+ self.mlp = LagunaMLP(config, intermediate_size=config.intermediate_size)
434
+ self.input_layernorm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
435
+ self.post_attention_layernorm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
436
+ self.hidden_size = config.hidden_size
437
+
438
+ def _pick(self, obj):
439
+ if isinstance(obj, dict):
440
+ return obj.get(self.attention_type, obj.get("full_attention"))
441
+ return obj
442
+
443
+ def forward(
444
+ self,
445
+ hidden_states: torch.Tensor,
446
+ attention_mask: torch.Tensor | None = None,
447
+ position_ids: torch.LongTensor | None = None,
448
+ past_key_values: Cache | None = None,
449
+ use_cache: bool | None = False,
450
+ cache_position: torch.LongTensor | None = None,
451
+ position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
452
+ **kwargs: Unpack[TransformersKwargs],
453
+ ) -> torch.Tensor:
454
+ residual = hidden_states
455
+ hidden_states = self.input_layernorm(hidden_states)
456
+ # Self Attention
457
+ hidden_states, _ = self.self_attn(
458
+ hidden_states=hidden_states,
459
+ attention_mask=self._pick(attention_mask),
460
+ position_ids=position_ids,
461
+ past_key_values=past_key_values,
462
+ use_cache=use_cache,
463
+ cache_position=cache_position,
464
+ position_embeddings=self._pick(position_embeddings),
465
+ **kwargs,
466
+ )
467
+ hidden_states = residual + hidden_states
468
+
469
+ # Fully Connected
470
+ residual = hidden_states
471
+ hidden_states = self.post_attention_layernorm(hidden_states)
472
+ hidden_states = self.mlp(hidden_states)
473
+ hidden_states = residual + hidden_states
474
+ return hidden_states
475
+
476
+
477
+ @auto_docstring
478
+ class LagunaPreTrainedModel(PreTrainedModel):
479
+ config: LagunaConfig
480
+ base_model_prefix = "model"
481
+ supports_gradient_checkpointing = True
482
+ _no_split_modules = ["LagunaDecoderLayer"]
483
+ _skip_keys_device_placement = ["past_key_values"]
484
+ _supports_flash_attn = True
485
+ _supports_sdpa = True
486
+ _supports_flex_attn = True
487
+ _can_compile_fullgraph = (
488
+ is_grouped_mm_available()
489
+ ) # https://huggingface.co/docs/transformers/experts_interface#torchcompile
490
+ _supports_attention_backend = True
491
+ _can_record_outputs = {
492
+ "router_logits": OutputRecorder(LagunaTopKRouter, index=0),
493
+ "hidden_states": LagunaDecoderLayer,
494
+ "attentions": LagunaAttention,
495
+ }
496
+
497
+ @torch.no_grad()
498
+ def _init_weights(self, module):
499
+ super()._init_weights(module)
500
+ std = self.config.initializer_range
501
+ if isinstance(module, LagunaTopKRouter):
502
+ init.normal_(module.weight, mean=0.0, std=std)
503
+
504
+
505
+ class LagunaModel(LagunaPreTrainedModel):
506
+ def __init__(self, config: LagunaConfig):
507
+ super().__init__(config)
508
+ self.padding_idx = config.pad_token_id
509
+ self.vocab_size = config.vocab_size
510
+
511
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
512
+ self.layers = nn.ModuleList(
513
+ [LagunaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
514
+ )
515
+ self.norm = LagunaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
516
+ self.rotary_emb = LagunaRotaryEmbedding(config=config)
517
+
518
+ self._has_swa = (
519
+ config.layer_types is not None and "sliding_attention" in config.layer_types
520
+ )
521
+ swa_rp = getattr(config, "swa_rope_parameters", None)
522
+ if self._has_swa and swa_rp is not None:
523
+ swa_partial = swa_rp.get("partial_rotary_factor", None)
524
+ swa_cfg = _build_rope_config(config, swa_rp, swa_partial)
525
+ self.swa_rotary_emb = LagunaRotaryEmbedding(config=swa_cfg)
526
+ else:
527
+ self.swa_rotary_emb = None
528
+
529
+ self.gradient_checkpointing = False
530
+
531
+ # Initialize weights and apply final processing
532
+ self.post_init()
533
+
534
+ @check_model_inputs
535
+ def forward(
536
+ self,
537
+ input_ids: torch.LongTensor | None = None,
538
+ attention_mask: torch.Tensor | None = None,
539
+ position_ids: torch.LongTensor | None = None,
540
+ past_key_values: Cache | None = None,
541
+ inputs_embeds: torch.FloatTensor | None = None,
542
+ use_cache: bool | None = None,
543
+ cache_position: torch.LongTensor | None = None,
544
+ **kwargs: Unpack[TransformersKwargs],
545
+ ):
546
+
547
+ if (input_ids is None) ^ (inputs_embeds is not None):
548
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
549
+
550
+ if use_cache and past_key_values is None:
551
+ past_key_values = DynamicCache(config=self.config)
552
+
553
+ if inputs_embeds is None:
554
+ inputs_embeds = self.embed_tokens(input_ids)
555
+
556
+ if cache_position is None:
557
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
558
+ cache_position = torch.arange(
559
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
560
+ )
561
+
562
+ if position_ids is None:
563
+ position_ids = cache_position.unsqueeze(0)
564
+
565
+ global_mask = create_causal_mask(
566
+ config=self.config,
567
+ input_embeds=inputs_embeds,
568
+ attention_mask=attention_mask,
569
+ cache_position=cache_position,
570
+ past_key_values=past_key_values,
571
+ position_ids=position_ids,
572
+ )
573
+
574
+ hidden_states = inputs_embeds
575
+ global_pe = self.rotary_emb(hidden_states, position_ids)
576
+
577
+ if self._has_swa:
578
+ swa_mask = create_sliding_window_causal_mask(
579
+ config=self.config,
580
+ input_embeds=inputs_embeds,
581
+ attention_mask=attention_mask,
582
+ cache_position=cache_position,
583
+ past_key_values=past_key_values,
584
+ position_ids=position_ids,
585
+ )
586
+ causal_mask = {"full_attention": global_mask, "sliding_attention": swa_mask}
587
+ swa_pe = self.swa_rotary_emb(hidden_states, position_ids) if self.swa_rotary_emb is not None else global_pe
588
+ position_embeddings = {"full_attention": global_pe, "sliding_attention": swa_pe}
589
+ else:
590
+ causal_mask = global_mask
591
+ position_embeddings = global_pe
592
+
593
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
594
+ hidden_states = decoder_layer(
595
+ hidden_states,
596
+ attention_mask=causal_mask,
597
+ position_ids=position_ids,
598
+ past_key_values=past_key_values,
599
+ use_cache=use_cache,
600
+ cache_position=cache_position,
601
+ position_embeddings=position_embeddings,
602
+ **kwargs,
603
+ )
604
+
605
+ hidden_states = self.norm(hidden_states)
606
+
607
+ return MoeModelOutputWithPast(
608
+ last_hidden_state=hidden_states,
609
+ past_key_values=past_key_values,
610
+ )
611
+
612
+
613
+ def load_balancing_loss_func(
614
+ gate_logits: torch.Tensor | tuple[torch.Tensor] | None,
615
+ num_experts: int | None = None,
616
+ top_k=2,
617
+ attention_mask: torch.Tensor | None = None,
618
+ ) -> torch.Tensor | int:
619
+ r"""
620
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
621
+
622
+ See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
623
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
624
+ experts is too unbalanced.
625
+
626
+ Args:
627
+ gate_logits:
628
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
629
+ shape [batch_size X sequence_length, num_experts].
630
+ num_experts:
631
+ Number of experts
632
+ top_k:
633
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
634
+ parameter.
635
+ attention_mask (`torch.Tensor`, *optional*):
636
+ The attention_mask used in forward function
637
+ shape [batch_size X sequence_length] if not None.
638
+
639
+ Returns:
640
+ The auxiliary loss.
641
+ """
642
+ if gate_logits is None or not isinstance(gate_logits, tuple):
643
+ return 0
644
+
645
+ if isinstance(gate_logits, tuple):
646
+ compute_device = gate_logits[0].device
647
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
648
+
649
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
650
+
651
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
652
+
653
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
654
+
655
+ if attention_mask is None:
656
+ # Compute the percentage of tokens routed to each experts
657
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
658
+
659
+ # Compute the average probability of routing to these experts
660
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
661
+ else:
662
+ batch_size, sequence_length = attention_mask.shape
663
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
664
+
665
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
666
+ expert_attention_mask = (
667
+ attention_mask[None, :, :, None, None]
668
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
669
+ .reshape(-1, top_k, num_experts)
670
+ .to(compute_device)
671
+ )
672
+
673
+ # Compute the percentage of tokens routed to each experts
674
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
675
+ expert_attention_mask, dim=0
676
+ )
677
+
678
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
679
+ router_per_expert_attention_mask = (
680
+ attention_mask[None, :, :, None]
681
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
682
+ .reshape(-1, num_experts)
683
+ .to(compute_device)
684
+ )
685
+
686
+ # Compute the average probability of routing to these experts
687
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
688
+ router_per_expert_attention_mask, dim=0
689
+ )
690
+
691
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
692
+ return overall_loss * num_experts
693
+
694
+
695
+ @auto_docstring
696
+ class LagunaForCausalLM(LagunaPreTrainedModel, GenerationMixin):
697
+ _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
698
+ _tp_plan = {"lm_head": "colwise_rep"}
699
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
700
+
701
+ def __init__(self, config):
702
+ super().__init__(config)
703
+ self.model = LagunaModel(config)
704
+ self.vocab_size = config.vocab_size
705
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
706
+ self.router_aux_loss_coef = config.router_aux_loss_coef
707
+ self.num_experts = config.num_experts
708
+ self.num_experts_per_tok = config.num_experts_per_tok
709
+
710
+ # Initialize weights and apply final processing
711
+ self.post_init()
712
+
713
+ @can_return_tuple
714
+ @auto_docstring
715
+ def forward(
716
+ self,
717
+ input_ids: torch.LongTensor | None = None,
718
+ attention_mask: torch.Tensor | None = None,
719
+ position_ids: torch.LongTensor | None = None,
720
+ past_key_values: Cache | None = None,
721
+ inputs_embeds: torch.FloatTensor | None = None,
722
+ labels: torch.LongTensor | None = None,
723
+ use_cache: bool | None = None,
724
+ output_router_logits: bool | None = None,
725
+ cache_position: torch.LongTensor | None = None,
726
+ logits_to_keep: int | torch.Tensor = 0,
727
+ **kwargs: Unpack[TransformersKwargs],
728
+ ) -> MoeCausalLMOutputWithPast:
729
+ r"""
730
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
731
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
732
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
733
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
734
+ """
735
+ # TODO (Joe) add example here after we got rid of the stale mistral example
736
+
737
+ output_router_logits = (
738
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
739
+ )
740
+
741
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
742
+ outputs: MoeModelOutputWithPast = self.model(
743
+ input_ids=input_ids,
744
+ attention_mask=attention_mask,
745
+ position_ids=position_ids,
746
+ past_key_values=past_key_values,
747
+ inputs_embeds=inputs_embeds,
748
+ use_cache=use_cache,
749
+ output_router_logits=output_router_logits,
750
+ cache_position=cache_position,
751
+ **kwargs,
752
+ )
753
+
754
+ hidden_states = outputs.last_hidden_state
755
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
756
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
757
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
758
+
759
+ loss = None
760
+ if labels is not None:
761
+ loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
762
+
763
+ aux_loss = None
764
+ if output_router_logits:
765
+ aux_loss = load_balancing_loss_func(
766
+ outputs.router_logits,
767
+ self.num_experts,
768
+ self.num_experts_per_tok,
769
+ attention_mask,
770
+ )
771
+ if labels is not None:
772
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
773
+
774
+ return MoeCausalLMOutputWithPast(
775
+ loss=loss,
776
+ aux_loss=aux_loss,
777
+ logits=logits,
778
+ past_key_values=outputs.past_key_values,
779
+ hidden_states=outputs.hidden_states,
780
+ attentions=outputs.attentions,
781
+ router_logits=outputs.router_logits,
782
+ )
783
+
784
+
785
+ __all__ = ["LagunaForCausalLM", "LagunaModel", "LagunaPreTrainedModel"]
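
Two Laguna-specific pieces of modeling_laguna.py are easy to sanity-check in isolation: LagunaTopKRouter's sigmoid routing (sigmoid scores in float32, optional bias correction, top-k selection, renormalization over the selected experts, then scaling of the routed output by moe_routed_scaling_factor) and LagunaAttention's per-head softplus output gate. A standalone numerical sketch that uses this checkpoint's shapes rather than the module classes themselves:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
hidden = torch.randn(4, 2048)                   # 4 tokens, hidden_size=2048
router_weight = torch.randn(256, 2048) * 0.02   # num_experts=256

# Sigmoid routing as in LagunaTopKRouter (top_k=8, norm_topk_prob=True).
scores = torch.sigmoid(F.linear(hidden, router_weight).float())
weights, experts = torch.topk(scores, k=8, dim=-1)
weights = weights / weights.sum(dim=-1, keepdim=True)
print(weights.sum(dim=-1))  # each row sums to 1 before the 2.5x routed scaling

# Per-head softplus gate as in LagunaAttention with gating="per-head":
# one positive scalar per head multiplies that head's attention output.
num_heads, head_dim = 48, 128
attn_output = torch.randn(4, num_heads * head_dim)
gate = F.softplus(torch.randn(4, num_heads))
gated = (attn_output.view(4, num_heads, head_dim) * gate.unsqueeze(-1)).view(4, -1)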
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "〈|EOS|〉",
+   "cls_token": "〈|CLS|〉",
+   "eos_token": "〈|EOS|〉",
+   "mask_token": "〈|MASK|〉",
+   "pad_token": "〈|PAD|〉",
+   "sep_token": "〈|SEP|〉",
+   "unk_token": "〈|UNK|〉"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,575 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "〈|UNK|〉",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "〈|CODE_START|〉",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "〈|EOS|〉",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "〈|CODE_END|〉",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "〈|META_START|〉",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "5": {
44
+ "content": "〈|META_END|〉",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "6": {
52
+ "content": "〈|FIM_MIDDLE|〉",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "7": {
60
+ "content": "〈|FIM_SUFFIX|〉",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "8": {
68
+ "content": "〈|SEP|〉",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "9": {
76
+ "content": "〈|PAD|〉",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "10": {
84
+ "content": "〈|CLS|〉",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "11": {
92
+ "content": "〈|FIM_START|〉",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "12": {
100
+ "content": "〈|MASK|〉",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "13": {
108
+ "content": "|◊|",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "14": {
116
+ "content": "〈|",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "15": {
124
+ "content": "|〉",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "16": {
132
+ "content": "〈|/",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "17": {
140
+ "content": "/|〉",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "18": {
148
+ "content": "〈|THINK_START|〉",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "19": {
156
+ "content": "〈|THINK_END|〉",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "20": {
164
+ "content": "〈|SPECIAL_1|〉",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "21": {
172
+ "content": "〈|SPECIAL_2|〉",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "22": {
180
+ "content": "〈|SPECIAL_3|〉",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "23": {
188
+ "content": "〈|SPECIAL_4|〉",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "24": {
196
+ "content": "〈|SPECIAL_5|〉",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "25": {
204
+ "content": "〈|SPECIAL_6|〉",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "26": {
212
+ "content": "〈|SPECIAL_7|〉",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "27": {
220
+ "content": "〈|SPECIAL_8|〉",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "28": {
228
+ "content": "〈|SPECIAL_9|〉",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "29": {
236
+ "content": "〈|SPECIAL_10|〉",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "30": {
244
+ "content": "〈|SPECIAL_11|〉",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "31": {
252
+ "content": "〈|SPECIAL_12|〉",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "32": {
260
+ "content": "〈|SPECIAL_13|〉",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "33": {
268
+ "content": "〈|SPECIAL_14|〉",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "34": {
276
+ "content": "〈|SPECIAL_15|〉",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "35": {
284
+ "content": "〈|SPECIAL_16|〉",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "36": {
292
+ "content": "〈|SPECIAL_17|〉",
293
+ "lstrip": false,
294
+ "normalized": false,
295
+ "rstrip": false,
296
+ "single_word": false,
297
+ "special": true
298
+ },
299
+ "37": {
300
+ "content": "〈|SPECIAL_18|〉",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "38": {
308
+ "content": "〈|SPECIAL_19|〉",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "39": {
316
+ "content": "〈|SPECIAL_20|〉",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "40": {
324
+ "content": "〈|SPECIAL_21|〉",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "41": {
332
+ "content": "〈|SPECIAL_22|〉",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "42": {
340
+ "content": "〈|SPECIAL_23|〉",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "43": {
348
+ "content": "〈|SPECIAL_24|〉",
349
+ "lstrip": false,
350
+ "normalized": false,
351
+ "rstrip": false,
352
+ "single_word": false,
353
+ "special": true
354
+ },
355
+ "44": {
356
+ "content": "〈|SPECIAL_25|〉",
357
+ "lstrip": false,
358
+ "normalized": false,
359
+ "rstrip": false,
360
+ "single_word": false,
361
+ "special": true
362
+ },
363
+ "45": {
364
+ "content": "〈|SPECIAL_26|〉",
365
+ "lstrip": false,
366
+ "normalized": false,
367
+ "rstrip": false,
368
+ "single_word": false,
369
+ "special": true
370
+ },
371
+ "46": {
372
+ "content": "〈|SPECIAL_27|〉",
373
+ "lstrip": false,
374
+ "normalized": false,
375
+ "rstrip": false,
376
+ "single_word": false,
377
+ "special": true
378
+ },
379
+ "47": {
380
+ "content": "〈|SPECIAL_28|〉",
381
+ "lstrip": false,
382
+ "normalized": false,
383
+ "rstrip": false,
384
+ "single_word": false,
385
+ "special": true
386
+ },
387
+ "48": {
388
+ "content": "〈|SPECIAL_29|〉",
389
+ "lstrip": false,
390
+ "normalized": false,
391
+ "rstrip": false,
392
+ "single_word": false,
393
+ "special": true
394
+ },
395
+ "49": {
396
+ "content": "〈|SPECIAL_30|〉",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false,
401
+ "special": true
402
+ },
403
+ "50": {
404
+ "content": "〈|SPECIAL_31|〉",
405
+ "lstrip": false,
406
+ "normalized": false,
407
+ "rstrip": false,
408
+ "single_word": false,
409
+ "special": true
410
+ },
411
+ "51": {
412
+ "content": "〈|SPECIAL_32|〉",
413
+ "lstrip": false,
414
+ "normalized": false,
415
+ "rstrip": false,
416
+ "single_word": false,
417
+ "special": true
418
+ },
419
+ "52": {
420
+ "content": "〈|SPECIAL_33|〉",
421
+ "lstrip": false,
422
+ "normalized": false,
423
+ "rstrip": false,
424
+ "single_word": false,
425
+ "special": true
426
+ },
427
+ "53": {
428
+ "content": "〈|SPECIAL_34|〉",
429
+ "lstrip": false,
430
+ "normalized": false,
431
+ "rstrip": false,
432
+ "single_word": false,
433
+ "special": true
434
+ },
435
+ "54": {
436
+ "content": "〈|SPECIAL_35|〉",
437
+ "lstrip": false,
438
+ "normalized": false,
439
+ "rstrip": false,
440
+ "single_word": false,
441
+ "special": true
442
+ },
443
+ "55": {
444
+ "content": "〈|SPECIAL_36|〉",
445
+ "lstrip": false,
446
+ "normalized": false,
447
+ "rstrip": false,
448
+ "single_word": false,
449
+ "special": true
450
+ },
451
+ "56": {
452
+ "content": "〈|SPECIAL_37|〉",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false,
457
+ "special": true
458
+ },
459
+ "57": {
460
+ "content": "〈|SPECIAL_38|〉",
461
+ "lstrip": false,
462
+ "normalized": false,
463
+ "rstrip": false,
464
+ "single_word": false,
465
+ "special": true
466
+ },
467
+ "58": {
468
+ "content": "〈|SPECIAL_39|〉",
469
+ "lstrip": false,
470
+ "normalized": false,
471
+ "rstrip": false,
472
+ "single_word": false,
473
+ "special": true
474
+ },
475
+ "59": {
476
+ "content": "〈|SPECIAL_40|〉",
477
+ "lstrip": false,
478
+ "normalized": false,
479
+ "rstrip": false,
480
+ "single_word": false,
481
+ "special": true
482
+ },
483
+ "60": {
484
+ "content": "〈|SPECIAL_41|〉",
485
+ "lstrip": false,
486
+ "normalized": false,
487
+ "rstrip": false,
488
+ "single_word": false,
489
+ "special": true
490
+ },
491
+ "61": {
492
+ "content": "〈|SPECIAL_42|〉",
493
+ "lstrip": false,
494
+ "normalized": false,
495
+ "rstrip": false,
496
+ "single_word": false,
497
+ "special": true
498
+ },
499
+ "62": {
500
+ "content": "〈|SPECIAL_43|〉",
501
+ "lstrip": false,
502
+ "normalized": false,
503
+ "rstrip": false,
504
+ "single_word": false,
505
+ "special": true
506
+ },
507
+ "63": {
508
+ "content": "〈|SPECIAL_44|〉",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false,
513
+ "special": true
514
+ },
515
+ "64": {
516
+ "content": "〈|SPECIAL_45|〉",
517
+ "lstrip": false,
518
+ "normalized": false,
519
+ "rstrip": false,
520
+ "single_word": false,
521
+ "special": true
522
+ },
523
+ "65": {
524
+ "content": "〈|SPECIAL_46|〉",
525
+ "lstrip": false,
526
+ "normalized": false,
527
+ "rstrip": false,
528
+ "single_word": false,
529
+ "special": true
530
+ },
531
+ "66": {
532
+ "content": "〈|SPECIAL_47|〉",
533
+ "lstrip": false,
534
+ "normalized": false,
535
+ "rstrip": false,
536
+ "single_word": false,
537
+ "special": true
538
+ },
539
+ "67": {
540
+ "content": "〈|SPECIAL_48|〉",
541
+ "lstrip": false,
542
+ "normalized": false,
543
+ "rstrip": false,
544
+ "single_word": false,
545
+ "special": true
546
+ },
547
+ "68": {
548
+ "content": "〈|SPECIAL_49|〉",
549
+ "lstrip": false,
550
+ "normalized": false,
551
+ "rstrip": false,
552
+ "single_word": false,
553
+ "special": true
554
+ },
555
+ "69": {
556
+ "content": "〈|SPECIAL_50|〉",
557
+ "lstrip": false,
558
+ "normalized": false,
559
+ "rstrip": false,
560
+ "single_word": false,
561
+ "special": true
562
+ }
563
+ },
564
+ "bos_token": "〈|EOS|〉",
565
+ "clean_up_tokenization_spaces": false,
566
+ "cls_token": "〈|CLS|〉",
567
+ "eos_token": "〈|EOS|〉",
568
+ "extra_special_tokens": {},
569
+ "mask_token": "〈|MASK|〉",
570
+ "model_max_length": 1000000000000000019884624838656,
571
+ "pad_token": "〈|PAD|〉",
572
+ "sep_token": "〈|SEP|〉",
573
+ "tokenizer_class": "PreTrainedTokenizerFast",
574
+ "unk_token": "〈|UNK|〉"
575
+ }
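For quick verification of the tokenizer added above, a minimal sketch in Python follows. The repo id is a placeholder (the actual model id is not stated in this commit), and trust_remote_code is only assumed because config.json registers the custom LagunaConfig / LagunaForCausalLM classes via auto_map; the expected outputs are taken directly from the added_tokens_decoder and special-token fields in this file.

# Minimal sketch; "your-org/laguna-xs-v1.4" is a hypothetical repo id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/laguna-xs-v1.4", trust_remote_code=True)

# bos and eos share the same token per this file; pad maps to id 9 in added_tokens_decoder.
print(tok.bos_token, tok.eos_token, tok.pad_token)   # 〈|EOS|〉 〈|EOS|〉 〈|PAD|〉
print(tok.convert_tokens_to_ids("〈|PAD|〉"))          # 9

# Special tokens are atomic for the fast tokenizer, so code/FIM markers round-trip intact.
ids = tok("〈|CODE_START|〉print('hello')〈|CODE_END|〉").input_ids
print(tok.decode(ids, skip_special_tokens=False))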