akshitab committed (verified)
Commit ef87dbb · 1 parent: 37b534c

Add model and trust_remote_code files.
chat_template.jinja ADDED
@@ -0,0 +1,4 @@
+ {% for message in messages %}{{'<|im_start|>' + message['role'] + '
+ ' + message['content'] + '<|im_end|>' + '
+ '}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+ ' }}{% endif %}
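
The template above is plain ChatML (`<|im_start|>role`, content, `<|im_end|>`). As a quick illustration, here is a minimal sketch of how the tokenizer would render it; the repo id is a placeholder, not the actual repository name:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("allenai/<this-repo>")  # placeholder repo id
messages = [{"role": "user", "content": "What is a mixture-of-experts model?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>user
# What is a mixture-of-experts model?<|im_end|>
# <|im_start|>assistant
```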
config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "always_active_experts": null,
+   "always_active_experts_per_layer": null,
+   "architectures": [
+     "FlexOlmoNoQKNormPrenormForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "dense_intermediate_size": null,
+   "dense_mlp_bias": false,
+   "dtype": "float32",
+   "eos_token_id": 100257,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 1024,
+   "max_position_embeddings": 4096,
+   "model_type": "flex_olmo_noqknorm_prenorm",
+   "norm_topk_prob": false,
+   "num_attention_heads": 16,
+   "num_experts": 128,
+   "num_experts_per_layer": null,
+   "num_experts_per_tok": 8,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 16,
+   "num_shared_experts": 1,
+   "num_shared_experts_per_layer": null,
+   "output_router_logits": true,
+   "pad_token_id": 100277,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 500000,
+   "router_aux_loss_coef": 0.01,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.57.1",
+   "use_cache": true,
+   "vocab_size": 100352,
+   "auto_map": {
+     "AutoConfig": "configuration_flex_olmo_noqknorm_prenorm.FlexOlmoNoQKNormPrenormConfig",
+     "AutoModelForCausalLM": "modeling_flex_olmo_noqknorm_prenorm.FlexOlmoNoQKNormPrenormForCausalLM"
+   }
+ }
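
Because `auto_map` routes `AutoConfig` and `AutoModelForCausalLM` to the Python files shipped in this repository, loading the model requires `trust_remote_code=True`. A minimal sketch (placeholder repo id; dtype handling illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "allenai/<this-repo>"  # placeholder, not the actual repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True, torch_dtype="auto")

inputs = tokenizer("Mixture-of-experts models", return_tensors="pt")
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```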
configuration_flex_olmo_noqknorm_prenorm.py ADDED
@@ -0,0 +1,217 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/flex_olmo_noqknorm_prenorm/modular_flex_olmo_noqknorm_prenorm.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_flex_olmo_noqknorm_prenorm.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ from typing import Optional
23
+
24
+ from transformers.configuration_utils import PretrainedConfig
25
+ from transformers.modeling_rope_utils import rope_config_validation
26
+
27
+
28
+ class FlexOlmoNoQKNormPrenormConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`FlexOlmoNoQKNormPrenormModel`]. It is used to instantiate a FlexOlmoNoQKNormPrenorm
31
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
32
+ defaults will yield a similar configuration to that of the [allenai/FlexOlmoNoQKNormPrenorm-7x7B-1T](https://huggingface.co/allenai/FlexOlmoNoQKNormPrenorm-7x7B-1T).
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 100352):
40
+ Vocabulary size of the FlexOlmoNoQKNormPrenorm model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`FlexOlmoNoQKNormPrenormModel`]
42
+ hidden_size (`int`, *optional*, defaults to 4096):
43
+ Dimension of the hidden representations.
44
+ intermediate_size (`int`, *optional*, defaults to 11008):
45
+ Dimension of the MLP representations.
46
+ num_hidden_layers (`int`, *optional*, defaults to 32):
47
+ Number of hidden layers in the Transformer decoder.
48
+ num_attention_heads (`int`, *optional*, defaults to 32):
49
+ Number of attention heads for each attention layer in the Transformer decoder.
50
+ num_key_value_heads (`int`, *optional*):
51
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
52
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
53
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
54
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
55
+ by meanpooling all the original heads within that group. For more details, check out [this
56
+ paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
57
+ `num_attention_heads`.
58
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
59
+ The non-linear activation function (function or string) in the decoder.
60
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
61
+ The maximum sequence length that this model might ever be used with.
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
65
+ The epsilon used by the rms normalization layers.
66
+ use_cache (`bool`, *optional*, defaults to `True`):
67
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
68
+ relevant if `config.is_decoder=True`.
69
+ pad_token_id (`int`, *optional*, defaults to 100277):
70
+ Padding token id.
71
+ bos_token_id (`int`, *optional*):
72
+ Beginning of stream token id.
73
+ eos_token_id (`int`, *optional*, defaults to 100257):
74
+ End of stream token id.
75
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
76
+ Whether to tie the input and output word embeddings.
77
+ rope_theta (`float`, *optional*, defaults to 500000.0):
78
+ The base period of the RoPE embeddings.
79
+ rope_scaling (`Dict`, *optional*):
80
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
81
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
82
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
83
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
84
+ these scaling strategies behave:
85
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
86
+ experimental feature, subject to breaking API changes in future versions.
87
+ attention_bias (`bool`, *optional*, defaults to `False`):
88
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
89
+ attention_dropout (`float`, *optional*, defaults to 0.0):
90
+ The dropout ratio for the attention probabilities.
91
+ num_experts_per_tok (`int`, *optional*, defaults to 5):
92
+ Number of selected experts.
93
+ num_experts (`int`, *optional*, defaults to 7):
94
+ Number of routed experts.
95
+ output_router_logits (`bool`, *optional*, defaults to `False`):
96
+ Whether or not the router logits should be returned by the model. Enabling this will also
97
+ allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
98
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.01):
99
+ The aux loss factor for the total loss.
100
+ norm_topk_prob (`bool`, *optional*, defaults to `False`):
101
+ Whether to normalize the topk probabilities.
102
+
103
+ ```python
104
+ >>> from transformers import FlexOlmoNoQKNormPrenormModel, FlexOlmoNoQKNormPrenormConfig
105
+
106
+ >>> # Initializing a FlexOlmoNoQKNormPrenorm style configuration
107
+ >>> configuration = FlexOlmoNoQKNormPrenormConfig()
108
+
109
+ >>> # Initializing a model from the FlexOlmoNoQKNormPrenorm style configuration
110
+ >>> model = FlexOlmoNoQKNormPrenormModel(configuration)
111
+
112
+ >>> # Accessing the model configuration
113
+ >>> configuration = model.config
114
+ ```"""
115
+
116
+ model_type = "flex_olmo_noqknorm_prenorm"
117
+ keys_to_ignore_at_inference = ["past_key_values"]
118
+ # Update base_model_tp_plan to remove the "rep" suffixes since no qk-norms
119
+ base_model_tp_plan = {
120
+ "layers.*.self_attn.q_proj": "colwise", # No longer need rep
121
+ "layers.*.self_attn.k_proj": "colwise", # No longer need rep
122
+ "layers.*.self_attn.v_proj": "colwise",
123
+ "layers.*.self_attn.o_proj": "rowwise", # No longer need rep
124
+ "layers.*.mlp.gate_proj": "colwise",
125
+ "layers.*.mlp.up_proj": "colwise",
126
+ "layers.*.mlp.down_proj": "rowwise",
127
+ }
128
+ base_model_pp_plan = {
129
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
130
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
131
+ "norm": (["hidden_states"], ["hidden_states"]),
132
+ }
133
+
134
+ def __init__(
135
+ self,
136
+ vocab_size=100352,
137
+ hidden_size=4096,
138
+ intermediate_size=11008,
139
+ num_hidden_layers=32,
140
+ num_attention_heads=32,
141
+ num_key_value_heads=None,
142
+ hidden_act="silu",
143
+ max_position_embeddings=4096,
144
+ initializer_range=0.02,
145
+ rms_norm_eps=1e-06,
146
+ use_cache=True,
147
+ pad_token_id=100277,
148
+ bos_token_id=None,
149
+ eos_token_id=100257,
150
+ tie_word_embeddings=False,
151
+ rope_theta=500000.0,
152
+ rope_scaling=None,
153
+ attention_bias=False,
154
+ attention_dropout=0.0,
155
+ num_experts_per_tok=5,
156
+ num_experts=7,
157
+ output_router_logits=False,
158
+ router_aux_loss_coef=0.01,
159
+ norm_topk_prob=False,
160
+ num_shared_experts=0,
161
+ num_experts_per_layer: Optional[list[int]] = None,
162
+ num_shared_experts_per_layer: Optional[list[int]] = None,
163
+ always_active_experts: Optional[list[int]] = None,
164
+ always_active_experts_per_layer: Optional[list[list[int]]] = None,
165
+ dense_intermediate_size: Optional[int] = None,
166
+ dense_mlp_bias: bool = False, # Some densefirst models were accidentally trained with bias=True on dense MLPs due to OLMo Core's FeedForwardConfig defaulting bias to True when not explicitly set
167
+ **kwargs,
168
+ ):
169
+ super().__init__(
170
+ pad_token_id=pad_token_id,
171
+ bos_token_id=bos_token_id,
172
+ eos_token_id=eos_token_id,
173
+ tie_word_embeddings=tie_word_embeddings,
174
+ **kwargs,
175
+ )
176
+ self.vocab_size = vocab_size
177
+ self.max_position_embeddings = max_position_embeddings
178
+ self.hidden_size = hidden_size
179
+ self.intermediate_size = intermediate_size
180
+ self.num_hidden_layers = num_hidden_layers
181
+ self.num_attention_heads = num_attention_heads
182
+
183
+ # for backward compatibility
184
+ if num_key_value_heads is None:
185
+ num_key_value_heads = num_attention_heads
186
+
187
+ self.num_key_value_heads = num_key_value_heads
188
+ self.hidden_act = hidden_act
189
+ self.initializer_range = initializer_range
190
+ self.rms_norm_eps = rms_norm_eps
191
+ self.use_cache = use_cache
192
+ self.rope_theta = rope_theta
193
+ self.rope_scaling = rope_scaling
194
+ self.attention_bias = attention_bias
195
+ self.attention_dropout = attention_dropout
196
+ self.num_experts_per_tok = num_experts_per_tok
197
+ self.num_experts = num_experts
198
+ self.output_router_logits = output_router_logits
199
+ self.router_aux_loss_coef = router_aux_loss_coef
200
+ self.norm_topk_prob = norm_topk_prob
201
+ # Validate the correctness of rotary position embeddings parameters
202
+ # BC: if there is a 'type' field, move it to 'rope_type'.
203
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
204
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
205
+ rope_config_validation(self)
206
+ assert num_shared_experts <= num_experts, "num_shared_experts cannot be greater than num_experts"
207
+
208
+ self.num_shared_experts = num_shared_experts # note: we don't care about pruning here - pruning should be handled by the pruning script - the model should just assume that it will use all the experts available
209
+ self.num_experts_per_layer = num_experts_per_layer
210
+ self.num_shared_experts_per_layer = num_shared_experts_per_layer
211
+ self.always_active_experts = always_active_experts
212
+ self.always_active_experts_per_layer = always_active_experts_per_layer
213
+ self.dense_intermediate_size = dense_intermediate_size
214
+ self.dense_mlp_bias = dense_mlp_bias
215
+
216
+
217
+ __all__ = ["FlexOlmoNoQKNormPrenormConfig"]
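
The per-layer options introduced by this config class (`num_experts_per_layer`, `dense_intermediate_size`, `dense_mlp_bias`, `always_active_experts`) compose as in the hypothetical sketch below; the values are illustrative and are not the settings used in this repository's `config.json`:

```python
# Hypothetical "dense-first" layout: layer 0 is a plain dense FFN (0 experts) and the
# remaining layers are MoE, with expert 0 always active (it bypasses top-k routing but
# still counts toward num_experts_per_tok).
from configuration_flex_olmo_noqknorm_prenorm import FlexOlmoNoQKNormPrenormConfig

config = FlexOlmoNoQKNormPrenormConfig(
    num_hidden_layers=4,
    num_experts=8,
    num_experts_per_tok=2,
    num_experts_per_layer=[0, 8, 8, 8],  # 0 experts => dense layer
    dense_intermediate_size=4096,        # required whenever a layer has 0 experts
    always_active_experts=[0],
)
print(config.num_experts_per_layer, config.always_active_experts)
```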
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.57.1"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7e717ca28e080b583eb2dd6ce0e813714df613d33fe87f75b56eb060c4ed6c1
3
+ size 4993404400
model-00002-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c533f6d918782c976fa4d113fb2938e50de4460208cdeaab80eb69d859616fd
3
+ size 4992359752
model-00003-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4808949fd1f1f7231866ee3f7710d9d1c73b478e7f27b128d539152613be57a
3
+ size 4993424584
model-00004-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa24c856963ae94f4bc979404e2512426ccce866000966dd88fa41a2c9addc27
3
+ size 4992359768
model-00005-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fa388f0022d06daedfabbdd7ba8e425a3c4523a07ccba90f47a32db36551c83
3
+ size 4993424568
model-00006-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4febe5341e9a1e54cd9be18391cb86fd156daefa005b5e872df1802c2cead783
3
+ size 4992359784
model-00007-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ef254c8ec77ff66e5cb63fb4082f6195ed926c9ffd42bcdd7094664e33b3088
3
+ size 4993424704
model-00008-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a407e22d64255c2abe3fce86d42c999ef7ad3cfd86fbc4bcc53d9ff2e228b71b
3
+ size 4992360384
model-00009-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:739b5a1ccd6adf8de11365a704bbdded893a3314463ea024750b60503aed1c99
3
+ size 4993425136
model-00010-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd3a715cde8e4cdfd5bd3265e5954df375934297e3dc73ec1a67abe81ccf33b3
3
+ size 4992360400
model-00011-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37f5cc7e2959c27f27d91023ea041f4008a16d2e0f4cca5bbe83deb71916ec8b
3
+ size 4346432736
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
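
As a rough sanity check, the shard sizes above sum to roughly 54.3 GB of float32 weights, which lines up with the parameter count implied by `config.json`. A back-of-the-envelope sketch (approximate; norm weights ignored):

```python
# Approximate parameter count from config.json: hidden_size=2048, intermediate_size=1024,
# 16 layers, 128 experts per layer, vocab 100352, untied embeddings. Norm weights ignored.
hidden, inter, layers, experts, vocab = 2048, 1024, 16, 128, 100352
attn = 4 * hidden * hidden                             # q/k/v/o projections (16 heads == 16 KV heads)
moe = experts * 3 * hidden * inter + hidden * experts  # expert MLPs + router gate
embed = 2 * vocab * hidden                             # input embeddings + lm_head (not tied)
params = layers * (attn + moe) + embed
print(f"~{params / 1e9:.2f}B parameters, ~{params * 4 / 1e9:.1f} GB in float32")  # ~13.57B, ~54.3 GB
```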
 
modeling_flex_olmo_noqknorm_prenorm.py ADDED
@@ -0,0 +1,1095 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/flex_olmo_noqknorm_prenorm/modular_flex_olmo_noqknorm_prenorm.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_flex_olmo_noqknorm_prenorm.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ from dataclasses import dataclass
23
+ from typing import Callable, Optional, Union
24
+
25
+ import torch
26
+ import torch.nn as nn
27
+ import torch.nn.functional as F
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache
31
+ from transformers.generation import GenerationMixin
32
+ from transformers.integrations import use_kernel_forward_from_hub
33
+ from transformers.masking_utils import create_causal_mask
34
+ from transformers.modeling_layers import GradientCheckpointingLayer
35
+ from transformers.modeling_outputs import MoeModelOutputWithPast
36
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
37
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
38
+ from transformers.processing_utils import Unpack
39
+ from transformers.utils import ModelOutput, TransformersKwargs, auto_docstring
40
+ from transformers.utils.deprecation import deprecate_kwarg
41
+ from transformers.utils.generic import OutputRecorder, check_model_inputs
42
+ from .configuration_flex_olmo_noqknorm_prenorm import FlexOlmoNoQKNormPrenormConfig
43
+
44
+
45
+ @use_kernel_forward_from_hub("RMSNorm")
46
+ class FlexOlmoNoQKNormPrenormRMSNorm(nn.Module):
47
+ def __init__(self, hidden_size, eps=1e-6):
48
+ """
49
+ FlexOlmoNoQKNormPrenormRMSNorm is equivalent to T5LayerNorm
50
+ """
51
+ super().__init__()
52
+ self.weight = nn.Parameter(torch.ones(hidden_size))
53
+ self.variance_epsilon = eps
54
+
55
+ def forward(self, hidden_states):
56
+ input_dtype = hidden_states.dtype
57
+ hidden_states = hidden_states.to(torch.float32)
58
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
59
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
60
+ return (self.weight * hidden_states).to(input_dtype)
61
+
62
+ def extra_repr(self):
63
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
64
+
65
+
66
+ class FlexOlmoNoQKNormPrenormMLP(nn.Module):
67
+ def __init__(self, config):
68
+ super().__init__()
69
+ self.config = config
70
+ self.hidden_size = config.hidden_size
71
+ self.intermediate_size = config.intermediate_size
72
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
73
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
74
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
75
+ self.act_fn = ACT2FN[config.hidden_act]
76
+ # Some densefirst models were accidentally trained with bias=True on dense MLPs
77
+ # (OLMo Core's FeedForwardConfig defaults bias to True when not explicitly set).
78
+ # We support loading those weights here.
79
+ dense_mlp_bias = getattr(config, "dense_mlp_bias", False)
80
+ if dense_mlp_bias:
81
+ del self.gate_proj
82
+ del self.up_proj
83
+ del self.down_proj
84
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
85
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
86
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
87
+
88
+ def forward(self, x):
89
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
90
+ return down_proj
91
+
92
+
93
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
94
+ """
95
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
96
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
97
+ """
98
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
99
+ if n_rep == 1:
100
+ return hidden_states
101
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
102
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
103
+
104
+
105
+ def eager_attention_forward(
106
+ module: nn.Module,
107
+ query: torch.Tensor,
108
+ key: torch.Tensor,
109
+ value: torch.Tensor,
110
+ attention_mask: Optional[torch.Tensor],
111
+ scaling: float,
112
+ dropout: float = 0.0,
113
+ **kwargs: Unpack[TransformersKwargs],
114
+ ):
115
+ key_states = repeat_kv(key, module.num_key_value_groups)
116
+ value_states = repeat_kv(value, module.num_key_value_groups)
117
+
118
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
119
+ if attention_mask is not None:
120
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
121
+ attn_weights = attn_weights + causal_mask
122
+
123
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
124
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
125
+ attn_output = torch.matmul(attn_weights, value_states)
126
+ attn_output = attn_output.transpose(1, 2).contiguous()
127
+
128
+ return attn_output, attn_weights
129
+
130
+
131
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
132
+ """Applies Rotary Position Embedding to the query and key tensors.
133
+
134
+ Args:
135
+ q (`torch.Tensor`): The query tensor.
136
+ k (`torch.Tensor`): The key tensor.
137
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
138
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
139
+ position_ids (`torch.Tensor`, *optional*):
140
+ Deprecated and unused.
141
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
142
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
143
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
144
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
145
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
146
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
147
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
148
+ Returns:
149
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
150
+ """
151
+ q_type, k_type = q.dtype, k.dtype
152
+ cos = cos.unsqueeze(unsqueeze_dim)
153
+ sin = sin.unsqueeze(unsqueeze_dim)
154
+ q_embed = (q * cos) + (rotate_half(q) * sin)
155
+ k_embed = (k * cos) + (rotate_half(k) * sin)
156
+ return q_embed.to(q_type), k_embed.to(k_type)
157
+
158
+
159
+ def rotate_half(x):
160
+ """Rotates half the hidden dims of the input."""
161
+ x1 = x[..., : x.shape[-1] // 2]
162
+ x2 = x[..., x.shape[-1] // 2 :]
163
+ return torch.cat((-x2, x1), dim=-1)
164
+
165
+
166
+ class FlexOlmoNoQKNormPrenormAttention(nn.Module):
167
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
168
+
169
+ def __init__(self, config: FlexOlmoNoQKNormPrenormConfig, layer_idx: Optional[int] = None):
170
+ super().__init__()
171
+ self.config = config
172
+ self.layer_idx = layer_idx
173
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
174
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
175
+ self.scaling = self.head_dim**-0.5
176
+ self.attention_dropout = config.attention_dropout
177
+ self.is_causal = True
178
+
179
+ self.q_proj = nn.Linear(
180
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
181
+ )
182
+ self.k_proj = nn.Linear(
183
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
184
+ )
185
+ self.v_proj = nn.Linear(
186
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
187
+ )
188
+ self.o_proj = nn.Linear(
189
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
190
+ )
191
+
192
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
193
+ def forward(
194
+ self,
195
+ hidden_states: torch.Tensor,
196
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
197
+ attention_mask: Optional[torch.Tensor],
198
+ past_key_values: Optional[Cache] = None,
199
+ cache_position: Optional[torch.LongTensor] = None,
200
+ **kwargs: Unpack[TransformersKwargs],
201
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
202
+ input_shape = hidden_states.shape[:-1]
203
+ hidden_shape = (*input_shape, -1, self.head_dim)
204
+
205
+ query_states = self.q_proj(hidden_states)
206
+ key_states = self.k_proj(hidden_states)
207
+ value_states = self.v_proj(hidden_states)
208
+
209
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
210
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
211
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
212
+
213
+ cos, sin = position_embeddings
214
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
215
+
216
+ if past_key_values is not None:
217
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
218
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
219
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
220
+
221
+ attention_interface: Callable = eager_attention_forward
222
+ if self.config._attn_implementation != "eager":
223
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
224
+
225
+ attn_output, attn_weights = attention_interface(
226
+ self,
227
+ query_states,
228
+ key_states,
229
+ value_states,
230
+ attention_mask,
231
+ dropout=0.0 if not self.training else self.attention_dropout,
232
+ scaling=self.scaling,
233
+ **kwargs,
234
+ )
235
+
236
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
237
+ attn_output = self.o_proj(attn_output)
238
+ return attn_output, attn_weights
239
+
240
+
241
+ class FlexOlmoNoQKNormPrenormSparseMoeBlock(nn.Module):
242
+ def __init__(self, config, num_experts: int, num_shared_experts: int, always_active_experts: Optional[list[int]] = None):
243
+ super().__init__()
244
+ self.top_k = config.num_experts_per_tok
245
+ self.norm_topk_prob = config.norm_topk_prob
246
+
247
+ self.num_shared_experts = num_shared_experts
248
+ self.always_active_experts = always_active_experts
249
+ self.num_experts = num_experts
250
+ self.gate = nn.Linear(config.hidden_size, self.num_experts, bias=False)
251
+ # Expert MLPs should never use dense_mlp_bias (that's only for dense FFN layers)
252
+ import copy
253
+
254
+ expert_config = copy.copy(config)
255
+ expert_config.dense_mlp_bias = False
256
+ self.experts = nn.ModuleList([FlexOlmoNoQKNormPrenormMLP(expert_config) for _ in range(self.num_experts)])
257
+
258
+ def _get_top_k_with_always_active(self, scores: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
259
+ """
260
+ Select top-k experts where always_active_experts are always included.
261
+ Softmax is computed over all experts, then always-active are masked out for topk selection.
262
+ """
263
+ always_active = self.always_active_experts
264
+ num_always_active = len(always_active)
265
+ routed_top_k = self.top_k - num_always_active
266
+
267
+ # Mask out always-active experts so they aren't selected by topk.
268
+ masked_scores = scores.clone()
269
+ masked_scores[:, always_active] = float("-inf")
270
+
271
+ # Select top-(top_k - num_always_active) from the remaining experts.
272
+ if routed_top_k == 1:
273
+ _, routed_indices = masked_scores.max(dim=-1, keepdim=True)
274
+ else:
275
+ _, routed_indices = torch.topk(masked_scores, routed_top_k, dim=-1)
276
+
277
+ # Gather actual weights from original (unmasked) scores.
278
+ routed_weights = scores.gather(-1, routed_indices)
279
+
280
+ # Build always-active indices and weights.
281
+ always_active_tensor = torch.tensor(always_active, device=scores.device, dtype=routed_indices.dtype)
282
+ always_active_indices = always_active_tensor.unsqueeze(0).expand(scores.shape[0], num_always_active)
283
+ always_active_weights = scores.gather(-1, always_active_indices)
284
+
285
+ # Concatenate: always-active first, then routed.
286
+ selected_experts = torch.cat([always_active_indices, routed_indices], dim=-1)
287
+ routing_weights = torch.cat([always_active_weights, routed_weights], dim=-1)
288
+
289
+ return routing_weights, selected_experts
290
+
291
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
292
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
293
+ hidden_states = hidden_states.view(-1, hidden_dim)
294
+ # router_logits: (batch * sequence_length, n_experts)
295
+ router_logits = self.gate(hidden_states)
296
+
297
+ if self.always_active_experts is not None and len(self.always_active_experts) > 0:
298
+ # Use masking approach: softmax over all experts, mask always-active for topk
299
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
300
+ routing_weights, selected_experts = self._get_top_k_with_always_active(routing_weights)
301
+ elif self.num_shared_experts > 0:
302
+ # Legacy path: shared experts are the last N experts
303
+ # split the router logits into shared and unshared experts
304
+ router_logits_standard = router_logits[
305
+ :, : -self.num_shared_experts
306
+ ] # (batch * sequence_length, n_experts - num_shared_experts)
307
+ router_logits_shared = router_logits[
308
+ :, -self.num_shared_experts :
309
+ ] # (batch * sequence_length, num_shared_experts)
310
+
311
+ # compute the routing weights for the standard experts and shared experts separately
312
+ routing_weights_standard = F.softmax(router_logits_standard, dim=1, dtype=torch.float)
313
+ routing_weights_shared = F.softmax(router_logits_shared, dim=1, dtype=torch.float)
314
+
315
+ # select the routing weights and experts for the standard experts and shared experts separately
316
+ routing_weights_standard, selected_experts_standard = torch.topk(
317
+ routing_weights_standard, self.top_k - self.num_shared_experts, dim=-1
318
+ )
319
+ routing_weights_shared, selected_experts_shared = torch.topk(
320
+ routing_weights_shared, self.num_shared_experts, dim=-1
321
+ )
322
+
323
+ # concatenate the routing weights and selected experts for the standard experts and shared experts
324
+ routing_weights = torch.cat([routing_weights_standard, routing_weights_shared], dim=1)
325
+ selected_experts = torch.cat(
326
+ [selected_experts_standard, selected_experts_shared + (self.num_experts - self.num_shared_experts)],
327
+ dim=1,
328
+ ) # we need to add the offset to the selected experts for the shared experts since they are at the end of the router logits
329
+
330
+ # make sure there are self.top_k experts selected in total
331
+ assert routing_weights.shape == selected_experts.shape == (batch_size * sequence_length, self.top_k), (
332
+ f"routing_weights and selected_experts should have the same shape of (batch_size * sequence_length, self.top_k), but got {routing_weights.shape} and {selected_experts.shape}"
333
+ )
334
+ else:
335
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
336
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
337
+
338
+ if self.norm_topk_prob:
339
+ if self.num_shared_experts > 0 or (self.always_active_experts is not None and len(self.always_active_experts) > 0):
340
+ raise NotImplementedError(
341
+ "norm_topk_prob is not implemented for the case where num_shared_experts > 0 or always_active_experts is set"
342
+ )
343
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
344
+
345
+ # we cast back to the input dtype
346
+ routing_weights = routing_weights.to(hidden_states.dtype)
347
+
348
+ final_hidden_states = torch.zeros(
349
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
350
+ )
351
+
352
+ # One hot encode the selected experts to create an expert mask
353
+ # this will be used to easily index which expert is going to be selected
354
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
355
+
356
+ # Loop over all available experts in the model and perform the computation on each expert
357
+ for expert_idx in range(self.num_experts):
358
+ expert_layer = self.experts[expert_idx]
359
+ idx, top_x = torch.where(expert_mask[expert_idx])
360
+
361
+ # Index the correct hidden states and compute the expert hidden state for
362
+ # the current expert. We need to make sure to multiply the output hidden
363
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
364
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
365
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
366
+
367
+ # However `index_add_` only support torch tensors for indexing so we'll use
368
+ # the `top_x` tensor here.
369
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
370
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
371
+ return final_hidden_states, router_logits
372
+
373
+
374
+ class FlexOlmoNoQKNormPrenormDecoderLayer(GradientCheckpointingLayer):
375
+ def __init__(
376
+ self, config: FlexOlmoNoQKNormPrenormConfig, layer_idx: int, num_experts: int, num_shared_experts: int,
377
+ always_active_experts: Optional[list[int]] = None,
378
+ ):
379
+ super().__init__()
380
+ self.hidden_size = config.hidden_size
381
+ self.self_attn = FlexOlmoNoQKNormPrenormAttention(config=config, layer_idx=layer_idx)
382
+
383
+ self.num_experts = num_experts
384
+
385
+ if num_experts == 0:
386
+ # Dense layer: use MLP with dense_intermediate_size
387
+ dense_intermediate_size = getattr(config, "dense_intermediate_size", None)
388
+ if dense_intermediate_size is None:
389
+ raise ValueError(
390
+ "num_experts=0 (dense layer) but config.dense_intermediate_size is not set. "
391
+ "Please set dense_intermediate_size in the config."
392
+ )
393
+ import copy
394
+
395
+ dense_config = copy.copy(config)
396
+ dense_config.intermediate_size = dense_intermediate_size
397
+ dense_config.dense_mlp_bias = getattr(config, "dense_mlp_bias", False)
398
+ self.mlp = FlexOlmoNoQKNormPrenormMLP(dense_config)
399
+ else:
400
+ self.mlp = FlexOlmoNoQKNormPrenormSparseMoeBlock(config, num_experts, num_shared_experts, always_active_experts)
401
+
402
+ self.pre_attention_layernorm = FlexOlmoNoQKNormPrenormRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
403
+ self.pre_feedforward_layernorm = FlexOlmoNoQKNormPrenormRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
404
+
405
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
406
+ def forward(
407
+ self,
408
+ hidden_states: torch.Tensor,
409
+ attention_mask: Optional[torch.Tensor] = None,
410
+ position_ids: Optional[torch.LongTensor] = None,
411
+ past_key_values: Optional[Cache] = None,
412
+ cache_position: Optional[torch.LongTensor] = None,
413
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
414
+ **kwargs,
415
+ ) -> torch.FloatTensor:
416
+ """
417
+ Args:
418
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
419
+ attention_mask (`torch.FloatTensor`, *optional*):
420
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
421
+ query_sequence_length, key_sequence_length)` if default attention is used.
422
+ output_attentions (`bool`, *optional*):
423
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
424
+ returned tensors for more detail.
425
+ output_router_logits (`bool`, *optional*):
426
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
427
+ and should not be returned during inference.
428
+ use_cache (`bool`, *optional*):
429
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
430
+ (see `past_key_values`).
431
+ past_key_values (`Cache`, *optional*): cached past key and value projection states
432
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
433
+ Indices depicting the position of the input sequence tokens in the sequence
434
+ position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
435
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
436
+ with `head_dim` being the embedding dimension of each attention head.
437
+ kwargs (`dict`, *optional*):
438
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
439
+ into the model
440
+ """
441
+ residual = hidden_states
442
+ # apply norm before attention
443
+ hidden_states = self.pre_attention_layernorm(hidden_states)
444
+ # Self Attention
445
+ hidden_states, _ = self.self_attn(
446
+ hidden_states=hidden_states,
447
+ attention_mask=attention_mask,
448
+ position_ids=position_ids,
449
+ past_key_values=past_key_values,
450
+ cache_position=cache_position,
451
+ position_embeddings=position_embeddings,
452
+ **kwargs,
453
+ )
454
+ hidden_states = residual + hidden_states
455
+
456
+ # Fully Connected
457
+ residual = hidden_states
458
+ # apply norm before feedforward
459
+ hidden_states = self.pre_feedforward_layernorm(hidden_states)
460
+ mlp_output = self.mlp(hidden_states)
461
+ if isinstance(mlp_output, tuple):
462
+ hidden_states, _ = mlp_output
463
+ else:
464
+ hidden_states = mlp_output
465
+ hidden_states = residual + hidden_states
466
+ return hidden_states
467
+
468
+
469
+ @auto_docstring
470
+ class FlexOlmoNoQKNormPrenormPreTrainedModel(PreTrainedModel):
471
+ config: FlexOlmoNoQKNormPrenormConfig
472
+ base_model_prefix = "model"
473
+ supports_gradient_checkpointing = True
474
+ _no_split_modules = ["FlexOlmoNoQKNormPrenormDecoderLayer"]
475
+ _skip_keys_device_placement = ["past_key_values"]
476
+ _supports_flash_attn = True
477
+ _supports_sdpa = True
478
+ _supports_flex_attn = True
479
+ _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
480
+ _supports_attention_backend = True
481
+ _can_record_outputs = {
482
+ "router_logits": OutputRecorder(FlexOlmoNoQKNormPrenormSparseMoeBlock, index=1),
483
+ "hidden_states": FlexOlmoNoQKNormPrenormDecoderLayer,
484
+ "attentions": FlexOlmoNoQKNormPrenormAttention,
485
+ }
486
+ config_class = FlexOlmoNoQKNormPrenormConfig
487
+
488
+
489
+ class FlexOlmoNoQKNormPrenormRotaryEmbedding(nn.Module):
490
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
491
+
492
+ def __init__(self, config: FlexOlmoNoQKNormPrenormConfig, device=None):
493
+ super().__init__()
494
+ # BC: "rope_type" was originally "type"
495
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
496
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
497
+ else:
498
+ self.rope_type = "default"
499
+ self.max_seq_len_cached = config.max_position_embeddings
500
+ self.original_max_seq_len = config.max_position_embeddings
501
+
502
+ self.config = config
503
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
504
+
505
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
506
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
507
+ self.original_inv_freq = self.inv_freq
508
+
509
+ @torch.no_grad()
510
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
511
+ def forward(self, x, position_ids):
512
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
513
+ position_ids_expanded = position_ids[:, None, :].float()
514
+
515
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
516
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
517
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
518
+ emb = torch.cat((freqs, freqs), dim=-1)
519
+ cos = emb.cos() * self.attention_scaling
520
+ sin = emb.sin() * self.attention_scaling
521
+ return cos, sin
522
+
523
+
524
+ @auto_docstring
525
+ class FlexOlmoNoQKNormPrenormModel(FlexOlmoNoQKNormPrenormPreTrainedModel):
526
+ def __init__(self, config):
527
+ super().__init__(config)
528
+ self.padding_idx = config.pad_token_id
529
+ self.vocab_size = config.vocab_size
530
+
531
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
532
+ self.norm = FlexOlmoNoQKNormPrenormRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
533
+ self.rotary_emb = FlexOlmoNoQKNormPrenormRotaryEmbedding(config=config)
534
+ self.gradient_checkpointing = False
535
+
536
+ # Check if per-layer expert counts are specified
537
+ num_experts_per_layer = getattr(config, "num_experts_per_layer", None)
538
+ num_shared_experts_per_layer = getattr(config, "num_shared_experts_per_layer", None)
539
+ always_active_experts_per_layer = getattr(config, "always_active_experts_per_layer", None)
540
+ always_active_experts = getattr(config, "always_active_experts", None)
541
+
542
+ # Resolve always_active_experts to a per-layer list
543
+ if always_active_experts_per_layer is None and always_active_experts is not None:
544
+ always_active_experts_per_layer = [always_active_experts] * config.num_hidden_layers
545
+
546
+ if num_experts_per_layer is not None:
547
+ # Use per-layer expert counts
548
+ assert len(num_experts_per_layer) == config.num_hidden_layers, (
549
+ f"num_experts_per_layer has length {len(num_experts_per_layer)} but model has {config.num_hidden_layers} layers"
550
+ )
551
+ if num_shared_experts_per_layer is None:
552
+ # Default: use config.num_shared_experts for all layers, but cap at layer's num_experts
553
+ num_shared_experts_per_layer = [
554
+ min(config.num_shared_experts, num_experts_per_layer[i]) for i in range(config.num_hidden_layers)
555
+ ]
556
+ self.layers = nn.ModuleList(
557
+ [
558
+ FlexOlmoNoQKNormPrenormDecoderLayer(
559
+ config, layer_idx, num_experts_per_layer[layer_idx], num_shared_experts_per_layer[layer_idx],
560
+ always_active_experts=always_active_experts_per_layer[layer_idx] if always_active_experts_per_layer is not None else None,
561
+ )
562
+ for layer_idx in range(config.num_hidden_layers)
563
+ ]
564
+ )
565
+ else:
566
+ # Fall back to original behavior: all layers use config.num_experts
567
+ self.layers = nn.ModuleList(
568
+ [
569
+ FlexOlmoNoQKNormPrenormDecoderLayer(
570
+ config, layer_idx, config.num_experts, config.num_shared_experts,
571
+ always_active_experts=always_active_experts_per_layer[layer_idx] if always_active_experts_per_layer is not None else None,
572
+ )
573
+ for layer_idx in range(config.num_hidden_layers)
574
+ ]
575
+ )
576
+
577
+ # Initialize weights and apply final processing
578
+ self.post_init()
579
+
580
+ @check_model_inputs
581
+ @auto_docstring
582
+ def forward(
583
+ self,
584
+ input_ids: Optional[torch.LongTensor] = None,
585
+ attention_mask: Optional[torch.Tensor] = None,
586
+ position_ids: Optional[torch.LongTensor] = None,
587
+ past_key_values: Optional[Cache] = None,
588
+ inputs_embeds: Optional[torch.FloatTensor] = None,
589
+ use_cache: Optional[bool] = None,
590
+ cache_position: Optional[torch.LongTensor] = None,
591
+ **kwargs: Unpack[TransformersKwargs],
592
+ ) -> MoeModelOutputWithPast:
593
+ if (input_ids is None) ^ (inputs_embeds is not None):
594
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
595
+
596
+ if use_cache and past_key_values is None:
597
+ past_key_values = DynamicCache(config=self.config)
598
+
599
+ if inputs_embeds is None:
600
+ inputs_embeds = self.embed_tokens(input_ids)
601
+
602
+ if cache_position is None:
603
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
604
+ cache_position = torch.arange(
605
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
606
+ )
607
+ if position_ids is None:
608
+ position_ids = cache_position.unsqueeze(0)
609
+
610
+ causal_mask = create_causal_mask(
611
+ config=self.config,
612
+ input_embeds=inputs_embeds,
613
+ attention_mask=attention_mask,
614
+ cache_position=cache_position,
615
+ past_key_values=past_key_values,
616
+ position_ids=position_ids,
617
+ )
618
+
619
+ hidden_states = inputs_embeds
620
+
621
+ # create position embeddings to be shared across the decoder layers
622
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
623
+
624
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
625
+ hidden_states = decoder_layer(
626
+ hidden_states,
627
+ position_embeddings=position_embeddings,
628
+ attention_mask=causal_mask,
629
+ position_ids=position_ids,
630
+ past_key_values=past_key_values,
631
+ use_cache=use_cache,
632
+ cache_position=cache_position,
633
+ **kwargs,
634
+ )
635
+
636
+ hidden_states = self.norm(hidden_states)
637
+
638
+ return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
639
+ last_hidden_state=hidden_states,
640
+ past_key_values=past_key_values,
641
+ )
642
+
643
+
644
+ @dataclass
645
+ class MoeCausalLMOutputWithPast(ModelOutput):
646
+ """
647
+ Base class for causal language model (or autoregressive) with mixture of experts outputs.
648
+
649
+ Args:
650
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
651
+ Language modeling loss (for next-token prediction).
652
+
653
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
654
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
655
+
656
+ aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
657
+ aux_loss for the sparse modules.
658
+
659
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):
660
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
661
+
662
+ Raw router logits (pre-softmax) computed by the MoE routers; these terms are used to compute the auxiliary
663
+ loss for Mixture of Experts models.
664
+
665
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
666
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
667
+
668
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
669
+ `past_key_values` input) to speed up sequential decoding.
670
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
671
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
672
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
673
+
674
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
675
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
676
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
677
+ sequence_length)`.
678
+
679
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
680
+ heads.
681
+ """
682
+
683
+ loss: Optional[torch.FloatTensor] = None
684
+ aux_loss: Optional[torch.FloatTensor] = None
685
+ lb_loss: Optional[torch.FloatTensor] = None
686
+ ce_loss: Optional[torch.FloatTensor] = None
687
+ logits: Optional[torch.FloatTensor] = None
688
+ past_key_values: Optional[Cache] = None
689
+ hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
690
+ attentions: Optional[tuple[torch.FloatTensor, ...]] = None
691
+ router_logits: Optional[tuple[torch.FloatTensor]] = None
692
+
693
+
694
+ def load_balancing_loss_func_olmoe(
695
+ gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
696
+ num_experts: Optional[int] = None,
697
+ top_k=2,
698
+ attention_mask: Optional[torch.Tensor] = None,
699
+ labels: Optional[torch.Tensor] = None,
700
+ num_items_in_batch: Optional[
701
+ torch.Tensor
702
+ ] = None, # the number of tokens within a global batch (including across dp ranks)
703
+ ignore_index=-100,
704
+ num_shared_experts=0,
705
+ num_experts_per_layer: Optional[list[int]] = None,
706
+ num_shared_experts_per_layer: Optional[list[int]] = None,
707
+ always_active_experts: Optional[list[int]] = None,
708
+ always_active_experts_per_layer: Optional[list[list[int]]] = None,
709
+ ) -> Union[torch.Tensor, int]:
710
+ r"""
711
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
712
+
713
+ This version supports variable per-layer expert counts by computing the loss
714
+ per-layer individually and averaging across layers.
715
+
716
+ See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
717
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
718
+ experts is too unbalanced.
719
+
720
+ Args:
721
+ gate_logits:
722
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
723
+ shape [batch_size X sequence_length, num_experts]. This has not been softmaxed yet.
724
+ Note: each layer may have a different num_experts if num_experts_per_layer is set.
725
+ num_experts:
726
+ Number of experts (used as fallback if num_experts_per_layer is None)
727
+ top_k:
728
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
729
+ parameter.
730
+ attention_mask (`torch.Tensor`, *optional*):
731
+ The attention_mask used in forward function
732
+ shape [batch_size X sequence_length] if not None.
733
+ num_experts_per_layer:
734
+ List of expert counts per layer. If None, uses num_experts for all layers.
735
+ num_shared_experts_per_layer:
736
+ List of shared expert counts per layer. If None, uses num_shared_experts for all layers.
737
+
738
+ Returns:
739
+ The auxiliary loss.
740
+ """
741
+ if gate_logits is None or not isinstance(gate_logits, tuple):
742
+ return 0
743
+
744
+ compute_device = gate_logits[0].device
745
+ num_hidden_layers = len(gate_logits)
746
+
747
+ # Resolve always_active_experts for the uniform path
748
+ if always_active_experts_per_layer is None and always_active_experts is not None:
749
+ always_active_experts_per_layer = [always_active_experts] * num_hidden_layers
750
+
751
+ # Check if we have variable expert counts
752
+ has_variable_experts = num_experts_per_layer is not None and len(set(num_experts_per_layer)) > 1
753
+
754
+ if not has_variable_experts:
755
+ # All layers have the same expert count - use the original stacking approach
756
+ concatenated_gate_logits = torch.stack(
757
+ [layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0
758
+ ) # shape: (num_hidden_layers, batch_size * sequence_length, num_experts)
759
+
760
+ # remove the shared experts from the gate logits since they are not used for routing in the loss function
761
+ if num_shared_experts > 0:
762
+ concatenated_gate_logits = concatenated_gate_logits[:, :, :-num_shared_experts]
763
+ # adjust the num_experts and top_k accordingly for the loss computation
764
+ num_experts = num_experts - num_shared_experts
765
+ top_k = top_k - num_shared_experts
766
+
767
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
768
+
769
+ # Exclude always-active experts from the LB loss by removing their
770
+ # columns entirely so that num_experts matches the last dimension.
771
+ if always_active_experts_per_layer is not None and len(always_active_experts_per_layer[0]) > 0:
772
+ aa_experts = always_active_experts_per_layer[0] # uniform across layers in this path
773
+ routed_mask = torch.ones(num_experts, dtype=torch.bool, device=compute_device)
774
+ routed_mask[aa_experts] = False
775
+ routing_weights = routing_weights[:, :, routed_mask]
776
+ num_experts = num_experts - len(aa_experts)
777
+ top_k = top_k - len(aa_experts)
778
+
779
+ _, selected_experts = torch.topk(
780
+ routing_weights, top_k, dim=-1
781
+ ) # shape: (num_hidden_layers, batch_size * sequence_length, top_k)
782
+
783
+ expert_counts_onehot = torch.nn.functional.one_hot(
784
+ selected_experts, num_experts
785
+ ) # shape: (num_hidden_layers, batch_size * sequence_length, top_k, num_experts)
786
+
787
+ if attention_mask is None and labels is None:
788
+ # Compute the percentage of tokens routed to each expert
789
+ counts_per_expert = torch.mean(
790
+ expert_counts_onehot.float(), dim=(1, 2)
791
+ ) # shape: (num_hidden_layers, num_experts)
792
+
793
+ # Compute the average probability of routing to these experts
794
+ prob_per_expert = torch.mean(routing_weights, dim=1) # shape: (num_hidden_layers, num_experts)
795
+ else:
796
+ # if there are labels, then we also want to ignore the indices that are in the prompt (if there are any)
797
+ if labels is not None:
798
+ attention_mask = labels != ignore_index
799
+ batch_size, sequence_length = attention_mask.shape
800
+
801
+ # Compute the mask that masks all padding tokens as 0, with the same shape as expert_counts_onehot
802
+ expert_attention_mask = (
803
+ attention_mask[None, :, :, None, None]
804
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
805
+ .reshape(num_hidden_layers, -1, top_k, num_experts)
806
+ .to(compute_device)
807
+ )
808
+
809
+ # Compute the percentage of tokens routed to each expert
810
+ counts_per_expert = torch.sum(expert_counts_onehot.float() * expert_attention_mask, dim=(1, 2))
811
+
812
+ # Compute the mask that masks all padding tokens as 0, with the same shape as routing_weights
813
+ router_per_expert_attention_mask = (
814
+ attention_mask[None, :, :, None]
815
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
816
+ .reshape(num_hidden_layers, -1, num_experts)
817
+ .to(compute_device)
818
+ )
819
+
820
+ # average the probability across valid tokens
821
+ prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=1) / torch.sum(
822
+ attention_mask
823
+ ) # shape: (num_hidden_layers, num_experts)
824
+
825
+ overall_loss = torch.sum(counts_per_expert * prob_per_expert)
826
+
827
+ # Fallback when num_items_in_batch isn't provided (e.g., manual forward calls)
828
+ if num_items_in_batch is None:
829
+ if labels is not None:
830
+ num_items_in_batch = (labels != ignore_index).sum()
831
+ elif attention_mask is not None:
832
+ num_items_in_batch = attention_mask.sum()
833
+ else:
834
+ # fall back to total tokens in batch/seq from gate logits
835
+ num_items_in_batch = gate_logits[0].shape[0]
836
+
837
+ if torch.is_tensor(num_items_in_batch):
838
+ num_items_in_batch = num_items_in_batch.to(compute_device)
839
+
840
+ # we follow olmo-core and use counts for the dot product instead of frequencies, and divide by the total number of tokens across gradient accumulation steps
841
+ overall_loss = overall_loss / (num_items_in_batch * top_k)
842
+
843
+ overall_loss = (
844
+ overall_loss * num_experts / num_hidden_layers
845
+ ) # multiply by num_experts per the load-balancing equation; divide by num_hidden_layers to average over layers
846
+
847
+ return overall_loss
848
+
849
+ else:
850
+ # Variable expert counts - compute loss per layer and average
851
+ if num_shared_experts_per_layer is None:
852
+ num_shared_experts_per_layer = [num_shared_experts] * num_hidden_layers
853
+
854
+ # Compute attention mask once
855
+ if labels is not None:
856
+ attention_mask = labels != ignore_index
857
+
858
+ if attention_mask is not None:
859
+ batch_size, sequence_length = attention_mask.shape
860
+
861
+ # Fallback when num_items_in_batch isn't provided
862
+ if num_items_in_batch is None:
863
+ if labels is not None:
864
+ num_items_in_batch = (labels != ignore_index).sum()
865
+ elif attention_mask is not None:
866
+ num_items_in_batch = attention_mask.sum()
867
+ else:
868
+ num_items_in_batch = gate_logits[0].shape[0]
869
+
870
+ if torch.is_tensor(num_items_in_batch):
871
+ num_items_in_batch = num_items_in_batch.to(compute_device)
872
+
873
+ layer_losses = []
874
+
875
+ for layer_idx, layer_gate in enumerate(gate_logits):
876
+ layer_gate = layer_gate.to(compute_device)
877
+ layer_num_experts = num_experts_per_layer[layer_idx]
878
+ layer_num_shared = num_shared_experts_per_layer[layer_idx]
879
+
880
+ # Remove shared experts from logits
881
+ if layer_num_shared > 0:
882
+ layer_gate = layer_gate[:, :-layer_num_shared]
883
+ effective_num_experts = layer_num_experts - layer_num_shared
884
+ effective_top_k = top_k - layer_num_shared
885
+ else:
886
+ effective_num_experts = layer_num_experts
887
+ effective_top_k = top_k
888
+
889
+ # Compute routing weights
890
+ routing_weights = torch.nn.functional.softmax(layer_gate, dim=-1)
891
+
892
+ # Exclude always-active experts from the LB loss by removing their columns
893
+ layer_aa = always_active_experts_per_layer[layer_idx] if always_active_experts_per_layer is not None else None
894
+ if layer_aa is not None and len(layer_aa) > 0:
895
+ routed_mask = torch.ones(effective_num_experts, dtype=torch.bool, device=compute_device)
896
+ routed_mask[layer_aa] = False
897
+ routing_weights = routing_weights[:, routed_mask]
898
+ effective_num_experts = effective_num_experts - len(layer_aa)
899
+ effective_top_k = effective_top_k - len(layer_aa)
900
+
901
+ _, selected_experts = torch.topk(
902
+ routing_weights, effective_top_k, dim=-1
903
+ ) # shape: (batch_size * sequence_length, top_k)
904
+
905
+ expert_counts_onehot = torch.nn.functional.one_hot(
906
+ selected_experts, effective_num_experts
907
+ ) # shape: (batch_size * sequence_length, top_k, num_experts)
908
+
909
+ if attention_mask is None:
910
+ counts_per_expert = torch.mean(expert_counts_onehot.float(), dim=(0, 1)) # shape: (num_experts,)
911
+ prob_per_expert = torch.mean(routing_weights, dim=0) # shape: (num_experts,)
912
+ else:
913
+ # Reshape for masking
914
+ expert_attention_mask = (
915
+ attention_mask[:, :, None, None]
916
+ .expand((batch_size, sequence_length, effective_top_k, effective_num_experts))
917
+ .reshape(-1, effective_top_k, effective_num_experts)
918
+ .to(compute_device)
919
+ )
920
+
921
+ counts_per_expert = torch.sum(expert_counts_onehot.float() * expert_attention_mask, dim=(0, 1))
922
+
923
+ router_attention_mask = (
924
+ attention_mask[:, :, None]
925
+ .expand((batch_size, sequence_length, effective_num_experts))
926
+ .reshape(-1, effective_num_experts)
927
+ .to(compute_device)
928
+ )
929
+
930
+ prob_per_expert = torch.sum(routing_weights * router_attention_mask, dim=0) / torch.sum(attention_mask)
931
+
932
+ layer_loss = torch.sum(counts_per_expert * prob_per_expert)
933
+ layer_loss = layer_loss / (num_items_in_batch * effective_top_k)
934
+ layer_loss = layer_loss * effective_num_experts
935
+
936
+ layer_losses.append(layer_loss)
937
+
938
+ # Average across layers
939
+ overall_loss = torch.stack(layer_losses).mean()
940
+
941
+ return overall_loss
942
+
943
+
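# --- Editor's illustrative sketch (not part of the committed file) ---
# Toy check of the balancing term computed by the function above: per layer,
# loss = num_experts * sum_i(f_i * P_i), where f_i is the fraction of
# (token, top-k slot) assignments routed to expert i and P_i is the mean router
# probability of expert i. With a uniform router the term is ~1.0; skewed routing
# pushes it above 1.0. Names such as `toy_balancing_loss` are hypothetical and
# exist only for this illustration; shared and always-active experts, which the
# full function strips out first, are omitted here.
import torch

def toy_balancing_loss(gate_logits: torch.Tensor, top_k: int) -> torch.Tensor:
    # gate_logits: (num_tokens, num_experts), pre-softmax router scores
    num_experts = gate_logits.shape[-1]
    probs = torch.softmax(gate_logits, dim=-1)
    _, picked = torch.topk(probs, top_k, dim=-1)                       # (num_tokens, top_k)
    onehot = torch.nn.functional.one_hot(picked, num_experts).float()  # (num_tokens, top_k, num_experts)
    f = onehot.mean(dim=(0, 1))   # fraction of routing assignments per expert
    p = probs.mean(dim=0)         # mean router probability per expert
    return num_experts * torch.sum(f * p)

uniform = torch.zeros(1024, 8)   # identical logits -> uniform router probabilities
skewed = torch.zeros(1024, 8)
skewed[:, 0] = 5.0               # every token strongly prefers expert 0
print(toy_balancing_loss(uniform, top_k=2))  # ~1.0 (balanced)
print(toy_balancing_loss(skewed, top_k=2))   # > 1.0 (unbalanced)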
944
+ class FlexOlmoNoQKNormPrenormForCausalLM(FlexOlmoNoQKNormPrenormPreTrainedModel, GenerationMixin):
945
+ _tied_weights_keys = ["lm_head.weight"]
946
+
947
+ def __init__(self, config):
948
+ super().__init__(config)
949
+ self.model = FlexOlmoNoQKNormPrenormModel(config)
950
+ self.vocab_size = config.vocab_size
951
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
952
+
953
+ self.router_aux_loss_coef = config.router_aux_loss_coef
954
+ self.num_experts = config.num_experts
955
+ self.num_experts_per_tok = config.num_experts_per_tok
956
+ # Initialize weights and apply final processing
957
+ self.post_init()
958
+
959
+ @auto_docstring
960
+ def forward(
961
+ self,
962
+ input_ids: Optional[torch.LongTensor] = None,
963
+ attention_mask: Optional[torch.Tensor] = None,
964
+ position_ids: Optional[torch.LongTensor] = None,
965
+ past_key_values: Optional[Cache] = None,
966
+ inputs_embeds: Optional[torch.FloatTensor] = None,
967
+ labels: Optional[torch.LongTensor] = None,
968
+ use_cache: Optional[bool] = None,
969
+ output_attentions: Optional[bool] = None,
970
+ output_hidden_states: Optional[bool] = None,
971
+ output_router_logits: Optional[bool] = None,
972
+ return_dict: Optional[bool] = None,
973
+ cache_position: Optional[torch.LongTensor] = None,
974
+ logits_to_keep: Union[int, torch.Tensor] = 0,
975
+ **kwargs,
976
+ ) -> Union[tuple, MoeCausalLMOutputWithPast]:
977
+ r"""
978
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
979
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
980
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
981
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
982
+
983
+ Example:
984
+
985
+ ```python
986
+ >>> from transformers import AutoTokenizer, FlexOlmoNoQKNormPrenormForCausalLM
987
+
988
+ >>> model = FlexOlmoNoQKNormPrenormForCausalLM.from_pretrained("allenai/FlexOlmoNoQKNormPrenorm-1B-7B-0924")
989
+ >>> tokenizer = AutoTokenizer.from_pretrained("allenai/FlexOlmoNoQKNormPrenorm-1B-7B-0924")
990
+
991
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
992
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
993
+
994
+ >>> # Generate
995
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
996
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
997
+ 'Hey, are you conscious? Can you talk to me?\nI’m not sure if you’re conscious of this, but I’m'
998
+ ```
999
+ """
1000
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1001
+ output_router_logits = (
1002
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1003
+ )
1004
+ output_hidden_states = (
1005
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1006
+ )
1007
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1008
+
1009
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1010
+ outputs = self.model(
1011
+ input_ids=input_ids,
1012
+ attention_mask=attention_mask,
1013
+ position_ids=position_ids,
1014
+ past_key_values=past_key_values,
1015
+ inputs_embeds=inputs_embeds,
1016
+ use_cache=use_cache,
1017
+ output_attentions=output_attentions,
1018
+ output_hidden_states=output_hidden_states,
1019
+ output_router_logits=output_router_logits,
1020
+ return_dict=return_dict,
1021
+ cache_position=cache_position,
1022
+ )
1023
+
1024
+ hidden_states = outputs[0]
1025
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1026
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1027
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1028
+
1029
+ loss = None
1030
+ ce_loss = None
1031
+ if labels is not None:
1032
+ ce_loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
1033
+ loss = ce_loss
1034
+
1035
+ lb_loss = None
1036
+
1037
+ if output_router_logits:
1038
+ # Get per-layer expert counts if available
1039
+ num_experts_per_layer = getattr(self.config, "num_experts_per_layer", None)
1040
+ num_shared_experts_per_layer = getattr(self.config, "num_shared_experts_per_layer", None)
1041
+
1042
+ # Filter out dense layers (num_experts == 0) since they produce no router_logits
1043
+ if num_experts_per_layer is not None:
1044
+ moe_mask = [i for i, n in enumerate(num_experts_per_layer) if n > 0]
1045
+ num_experts_per_layer = [num_experts_per_layer[i] for i in moe_mask]
1046
+ if num_shared_experts_per_layer is not None:
1047
+ num_shared_experts_per_layer = [num_shared_experts_per_layer[i] for i in moe_mask]
1048
+
1049
+ # Resolve always_active_experts for LB loss
1050
+ always_active_experts_per_layer_for_loss = getattr(self.config, "always_active_experts_per_layer", None)
1051
+ always_active_experts_for_loss = getattr(self.config, "always_active_experts", None)
1052
+ # Filter out dense layers if needed
1053
+ if num_experts_per_layer is not None and always_active_experts_per_layer_for_loss is not None:
1054
+ always_active_experts_per_layer_for_loss = [always_active_experts_per_layer_for_loss[i] for i in moe_mask]
1055
+
1056
+ lb_loss = load_balancing_loss_func_olmoe(
1057
+ outputs.router_logits if return_dict else outputs[-1],
1058
+ self.num_experts,
1059
+ self.num_experts_per_tok,
1060
+ attention_mask,
1061
+ labels,
1062
+ num_shared_experts=self.config.num_shared_experts,
1063
+ num_experts_per_layer=num_experts_per_layer,
1064
+ num_shared_experts_per_layer=num_shared_experts_per_layer,
1065
+ always_active_experts=always_active_experts_for_loss,
1066
+ always_active_experts_per_layer=always_active_experts_per_layer_for_loss,
1067
+ **kwargs,
1068
+ )
1069
+ if labels is not None:
1070
+ loss += self.router_aux_loss_coef * lb_loss.to(loss.device) # make sure lb_loss resides on the same device as loss
1071
+
1072
+ if not return_dict:
1073
+ output = (logits,) + outputs[1:]
1074
+ if output_router_logits:
1075
+ output = (lb_loss,) + output
1076
+ return (loss,) + output if loss is not None else output
1077
+
1078
+ return MoeCausalLMOutputWithPast(
1079
+ loss=loss,
1080
+ aux_loss=lb_loss,
1081
+ lb_loss=lb_loss.detach().clone() if lb_loss is not None else None, # for logging callback
1082
+ ce_loss=ce_loss.detach().clone() if ce_loss is not None else None, # for logging callback
1083
+ logits=logits,
1084
+ past_key_values=outputs.past_key_values,
1085
+ hidden_states=outputs.hidden_states,
1086
+ attentions=outputs.attentions,
1087
+ router_logits=outputs.router_logits,
1088
+ )
1089
+
1090
+
1091
+ __all__ = [
1092
+ "FlexOlmoNoQKNormPrenormForCausalLM",
1093
+ "FlexOlmoNoQKNormPrenormModel",
1094
+ "FlexOlmoNoQKNormPrenormPreTrainedModel",
1095
+ ]
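# --- Editor's illustrative sketch (not part of the committed files) ---
# Minimal usage of the remote-code model class defined above. The repo path is a
# placeholder; point it at a local checkout of this repository (or this model's
# hub id). `output_router_logits=True` makes the forward pass compute the
# load-balancing auxiliary loss alongside the cross-entropy loss, as implemented
# in FlexOlmoNoQKNormPrenormForCausalLM.forward above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "path/to/this/repo"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

inputs = tokenizer("Mixture-of-experts models route each token to", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs, labels=inputs.input_ids, output_router_logits=True)

# out.loss = ce_loss + router_aux_loss_coef * aux_loss; the lb_loss / ce_loss
# fields are the detached copies exposed for logging callbacks.
print(out.loss, out.aux_loss)

generated = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(generated[0], skip_special_tokens=True))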
special_tokens_map.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "eos_token": "<|endoftext|>",
3
+ "pad_token": "<|pad|>",
4
+ "unk_token": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false
10
+ }
11
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,189 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "100256": {
5
+ "content": "<|extra_id_0|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": false
11
+ },
12
+ "100257": {
13
+ "content": "<|endoftext|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "100258": {
21
+ "content": "<|fim_prefix|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "100259": {
29
+ "content": "<|fim_middle|>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "100260": {
37
+ "content": "<|fim_suffix|>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "100261": {
45
+ "content": "|||PHONE_NUMBER|||",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": false
51
+ },
52
+ "100262": {
53
+ "content": "|||EMAIL_ADDRESS|||",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": false
59
+ },
60
+ "100263": {
61
+ "content": "|||IP_ADDRESS|||",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": false
67
+ },
68
+ "100264": {
69
+ "content": "<|im_start|>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "100265": {
77
+ "content": "<|im_end|>",
78
+ "lstrip": false,
79
+ "normalized": false,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "100266": {
85
+ "content": "<|extra_id_1|>",
86
+ "lstrip": false,
87
+ "normalized": false,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": false
91
+ },
92
+ "100267": {
93
+ "content": "<|extra_id_2|>",
94
+ "lstrip": false,
95
+ "normalized": false,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": false
99
+ },
100
+ "100268": {
101
+ "content": "<|extra_id_3|>",
102
+ "lstrip": false,
103
+ "normalized": false,
104
+ "rstrip": false,
105
+ "single_word": false,
106
+ "special": false
107
+ },
108
+ "100269": {
109
+ "content": "<|extra_id_4|>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false,
114
+ "special": false
115
+ },
116
+ "100270": {
117
+ "content": "<|extra_id_5|>",
118
+ "lstrip": false,
119
+ "normalized": false,
120
+ "rstrip": false,
121
+ "single_word": false,
122
+ "special": false
123
+ },
124
+ "100271": {
125
+ "content": "<|extra_id_6|>",
126
+ "lstrip": false,
127
+ "normalized": false,
128
+ "rstrip": false,
129
+ "single_word": false,
130
+ "special": false
131
+ },
132
+ "100272": {
133
+ "content": "<|extra_id_7|>",
134
+ "lstrip": false,
135
+ "normalized": false,
136
+ "rstrip": false,
137
+ "single_word": false,
138
+ "special": false
139
+ },
140
+ "100273": {
141
+ "content": "<|extra_id_8|>",
142
+ "lstrip": false,
143
+ "normalized": false,
144
+ "rstrip": false,
145
+ "single_word": false,
146
+ "special": false
147
+ },
148
+ "100274": {
149
+ "content": "<|extra_id_9|>",
150
+ "lstrip": false,
151
+ "normalized": false,
152
+ "rstrip": false,
153
+ "single_word": false,
154
+ "special": false
155
+ },
156
+ "100275": {
157
+ "content": "<|extra_id_10|>",
158
+ "lstrip": false,
159
+ "normalized": false,
160
+ "rstrip": false,
161
+ "single_word": false,
162
+ "special": false
163
+ },
164
+ "100276": {
165
+ "content": "<|endofprompt|>",
166
+ "lstrip": false,
167
+ "normalized": false,
168
+ "rstrip": false,
169
+ "single_word": false,
170
+ "special": true
171
+ },
172
+ "100277": {
173
+ "content": "<|pad|>",
174
+ "lstrip": false,
175
+ "normalized": false,
176
+ "rstrip": false,
177
+ "single_word": false,
178
+ "special": true
179
+ }
180
+ },
181
+ "bos_token": null,
182
+ "clean_up_tokenization_spaces": false,
183
+ "eos_token": "<|endoftext|>",
184
+ "extra_special_tokens": {},
185
+ "model_max_length": 4096,
186
+ "pad_token": "<|pad|>",
187
+ "tokenizer_class": "GPT2Tokenizer",
188
+ "unk_token": "<|endoftext|>"
189
+ }
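# --- Editor's illustrative sketch (not part of the committed files) ---
# Quick sanity check of the tokenizer files above: tokenizer_config.json maps
# id 100257 to <|endoftext|> (eos/unk) and id 100277 to <|pad|> (pad), with a
# 4096-token model_max_length. The repo path is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path

assert tokenizer.eos_token == "<|endoftext|>"
assert tokenizer.pad_token == "<|pad|>"
assert tokenizer.convert_tokens_to_ids("<|endoftext|>") == 100257
assert tokenizer.convert_tokens_to_ids("<|pad|>") == 100277
assert tokenizer.model_max_length == 4096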
vocab.json ADDED
The diff for this file is too large to render. See raw diff