| """ Evf model configuration""" |
|
|
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
|
|
logger = logging.get_logger(__name__)
|
|
EVF_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
|
|
|
|
class EvfConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`EvfSam`] model.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the hidden representations.
        out_dim (`int`, *optional*, defaults to 256):
            Dimension of the output embeddings.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on
            how these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
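            For example, `rope_scaling={"type": "linear", "factor": 2.0}` selects linear scaling with a factor of 2.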

    Example:

    ```python
    >>> # Initializing an EVF configuration
    >>> configuration = EvfConfig()

    >>> # Initializing a model from the configuration
    >>> model = EvfSam(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "evf"
    keys_to_ignore_at_inference = ["past_key_values"]
|
|
    def __init__(
        self,
        hidden_size=768,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        out_dim=256,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.out_dim = out_dim
        self.pretraining_tp = pretraining_tp
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
|
|
    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
|
|