| """MixtureOfTokens configuration""" |
|
|
| from transformers import PretrainedConfig |
| from transformers.utils import logging |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
|
|
class MoTConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`MoTModel`]. It is used to instantiate a
    MixtureOfTokens model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MixtureOfTokens
    [mot](https://huggingface.co/mot) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50257):
            Vocabulary size of the MixtureOfTokens model. Defines the number of different tokens that can be
            represented by the `input_ids` passed when calling [`MoTModel`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        n_expert (`int`, *optional*, defaults to 32):
            The number of experts.
        group_size (`int`, *optional*, defaults to 32):
            The number of tokens per expert.
        expert_size (`int`, *optional*):
            The dimensionality of an expert. `None` will set it to `n_inner / n_head` (see the sketch below the
            argument list).
        init_scale (`float`, *optional*, defaults to 1.0):
            The scaling factor for the initialization of MoTMLP weights. Has no effect when the model is created
            through `from_pretrained`.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by `sqrt(hidden_size)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Id of the beginning-of-sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 50256):
            Id of the end-of-sentence token in the vocabulary.
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
            dot-product/softmax to float() when training with mixed precision.
        emit_softmax_over_experts (`bool`, *optional*, defaults to `False`):
            Determines how aggregated tokens are redistributed in the MoT MLP. By default the model uses the merge
            weights; this flag switches it to taking a softmax over the experts.
        use_discrete_routing (`bool`, *optional*, defaults to `False`):
            Whether to discretize the mixing, sending tokens only to the expert with the highest weight. Intended for
            inference only.
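
    When `n_inner` or `expert_size` is left as `None`, the fallback sizes follow the descriptions above: `n_inner`
    becomes `4 * n_embd` and `expert_size` becomes `n_inner / n_head` (3072 and 256 with the defaults). A minimal
    sketch of that resolution, using a hypothetical helper that is illustrative only and not part of this class
    (integer division is assumed here):

    ```python
    >>> def resolve_mot_sizes(n_embd=768, n_head=12, n_inner=None, expert_size=None):
    ...     n_inner = 4 * n_embd if n_inner is None else n_inner
    ...     expert_size = n_inner // n_head if expert_size is None else expert_size
    ...     return n_inner, expert_size

    >>> resolve_mot_sizes()
    (3072, 256)
    ```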

    Example:

    ```python
    >>> from transformers import MoTConfig, MoTModel

    >>> # Initializing a MoT configuration
    >>> configuration = MoTConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MoTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
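
    >>> # Hypothetical override of the MoT-specific arguments (values are illustrative only)
    >>> configuration = MoTConfig(n_expert=64, group_size=64, use_discrete_routing=True)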
| ```""" |

    model_type = "mot"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        n_expert=32,
        group_size=32,
        expert_size=None,
        init_scale=1.0,
        activation_function="gelu_new",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        emit_softmax_over_experts=False,
        use_discrete_routing=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
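        # Mixture-of-Tokens parameters: number of experts, tokens per expert, expert width, and MoTMLP init scale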
        self.n_expert = n_expert
        self.group_size = group_size
        self.expert_size = expert_size
        self.init_scale = init_scale
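        # Backbone hyperparameters: activation, dropout, layer norm, initialization, and attention scaling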
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
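        # Routing behaviour of the MoT MLP (see `emit_softmax_over_experts` and `use_discrete_routing` above)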
        self.emit_softmax_over_experts = emit_softmax_over_experts
        self.use_discrete_routing = use_discrete_routing

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)