akshitab committed
Commit dfff18b · verified · 1 Parent(s): f129d66

Emo rename: upload configuration_emo.py

Files changed (1)
  1. configuration_emo.py +219 -0
configuration_emo.py ADDED
@@ -0,0 +1,219 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/emo/modular_emo.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_emo.py file directly. One of our CI enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Optional
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.modeling_rope_utils import rope_config_validation
+
+
+ class EmoConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`EmoModel`]. It is used to instantiate an Emo
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a similar configuration to that of the [allenai/Emo-7x7B-1T](https://huggingface.co/allenai/Emo-7x7B-1T).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 100352):
+             Vocabulary size of the Emo model. Defines the number of different tokens that can be represented by the
+             `input_ids` passed when calling [`EmoModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 4096):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*, defaults to 100277):
+             Padding token id.
+         bos_token_id (`int`, *optional*):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 100257):
+             End of stream token id.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings
+         rope_theta (`float`, *optional*, defaults to 500000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+             these scaling strategies behave:
+             https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+             experimental feature, subject to breaking API changes in future versions.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         num_experts_per_tok (`int`, *optional*, defaults to 5):
+             Number of selected experts.
+         num_experts (`int`, *optional*, defaults to 7):
+             Number of routed experts.
+         output_router_logits (`bool`, *optional*, defaults to `False`):
+             Whether or not the router logits should be returned by the model. Enabling this will also
+             allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
+         router_aux_loss_coef (`float`, *optional*, defaults to 0.01):
+             The aux loss factor for the total loss.
+         norm_topk_prob (`bool`, *optional*, defaults to `False`):
+             Whether to normalize the topk probabilities.
+
+     ```python
+     >>> from transformers import EmoModel, EmoConfig
+
+     >>> # Initializing an Emo style configuration
+     >>> configuration = EmoConfig()
+
+     >>> # Initializing a model from the Emo style configuration
+     >>> model = EmoModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "emo"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     # Update base_model_tp_plan to remove the "rep" suffixes since no qk-norms
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",  # No longer need rep
+         "layers.*.self_attn.k_proj": "colwise",  # No longer need rep
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.o_proj": "rowwise",  # No longer need rep
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+
+     def __init__(
+         self,
+         vocab_size=100352,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-06,
+         use_cache=True,
+         pad_token_id=100277,
+         bos_token_id=None,
+         eos_token_id=100257,
+         tie_word_embeddings=False,
+         rope_theta=500000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         num_experts_per_tok=5,
+         num_experts=7,
+         output_router_logits=False,
+         router_aux_loss_coef=0.01,
+         norm_topk_prob=False,
+         num_shared_experts=0,
+         num_experts_per_layer: Optional[list[int]] = None,
+         num_shared_experts_per_layer: Optional[list[int]] = None,
+         always_active_experts: Optional[list[int]] = None,
+         always_active_experts_per_layer: Optional[list[list[int]]] = None,
+         dense_intermediate_size: Optional[int] = None,
+         dense_mlp_bias: bool = False,  # Some densefirst models were accidentally trained with bias=True on dense MLPs due to OLMo Core's FeedForwardConfig defaulting bias to True when not explicitly set
+         **kwargs,
+     ):
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         self.num_experts_per_tok = num_experts_per_tok
+         self.num_experts = num_experts
+         self.output_router_logits = output_router_logits
+         self.router_aux_loss_coef = router_aux_loss_coef
+         self.norm_topk_prob = norm_topk_prob
+         # Validate the correctness of rotary position embeddings parameters
+         # BC: if there is a 'type' field, move it to 'rope_type'.
+         if self.rope_scaling is not None and "type" in self.rope_scaling:
+             self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+         rope_config_validation(self)
+         assert (
+             num_shared_experts <= num_experts
+         ), "num_shared_experts cannot be greater than num_experts"
+
+         self.num_shared_experts = num_shared_experts  # note: we don't care about pruning here - pruning should be handled by the pruning script - the model should just assume that it will use all the experts available
+         self.num_experts_per_layer = num_experts_per_layer
+         self.num_shared_experts_per_layer = num_shared_experts_per_layer
+         self.always_active_experts = always_active_experts
+         self.always_active_experts_per_layer = always_active_experts_per_layer
+         self.dense_intermediate_size = dense_intermediate_size
+         self.dense_mlp_bias = dense_mlp_bias
+
+
+ __all__ = ["EmoConfig"]