| """MiniMind Max2 Configuration""" |
| from transformers import PretrainedConfig |
|
|
| class MiniMindConfig(PretrainedConfig): |
| model_type = "minimind" |
| |
| def __init__( |
| self, |
| vocab_size=102400, |
| hidden_size=1024, |
| intermediate_size=2816, |
| num_hidden_layers=12, |
| num_attention_heads=16, |
| num_key_value_heads=4, |
| max_position_embeddings=32768, |
| rms_norm_eps=1e-6, |
| rope_theta=10000.0, |
| num_experts=8, |
| num_experts_per_token=2, |
| pad_token_id=0, |
| bos_token_id=1, |
| eos_token_id=2, |
| tie_word_embeddings=True, |
| **kwargs, |
| ): |
| self.vocab_size = vocab_size |
| self.hidden_size = hidden_size |
| self.intermediate_size = intermediate_size |
| self.num_hidden_layers = num_hidden_layers |
| self.num_attention_heads = num_attention_heads |
| self.num_key_value_heads = num_key_value_heads |
| self.max_position_embeddings = max_position_embeddings |
| self.rms_norm_eps = rms_norm_eps |
| self.rope_theta = rope_theta |
| self.num_experts = num_experts |
| self.num_experts_per_token = num_experts_per_token |
| super().__init__( |
| pad_token_id=pad_token_id, |
| bos_token_id=bos_token_id, |
| eos_token_id=eos_token_id, |
| tie_word_embeddings=tie_word_embeddings, |
| **kwargs, |
| ) |
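

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the model definition): shows the
# attention/MoE quantities implied by the defaults above, then the standard
# PretrainedConfig save/load round trip. The directory name is arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    cfg = MiniMindConfig()

    # Per-head width: hidden_size is split evenly across the query heads.
    head_dim = cfg.hidden_size // cfg.num_attention_heads  # 1024 // 16 = 64

    # Grouped-query attention: query heads sharing each key/value head.
    gqa_groups = cfg.num_attention_heads // cfg.num_key_value_heads  # 16 // 4 = 4

    print(f"head_dim={head_dim}, gqa_groups={gqa_groups}, "
          f"routing top-{cfg.num_experts_per_token} of {cfg.num_experts} experts")

    # PretrainedConfig provides serialization for free: save_pretrained writes
    # config.json (including model_type "minimind"); from_pretrained reads it
    # back into a MiniMindConfig.
    cfg.save_pretrained("minimind_config_demo")
    reloaded = MiniMindConfig.from_pretrained("minimind_config_demo")
    assert reloaded.num_key_value_heads == cfg.num_key_value_heads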