| """ T5 model configuration """ |
| from transformers import PretrainedConfig |
|
|
# Shortcut model names mapped to the URLs of their pretrained configuration files.
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-small-config.json",
    "t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-base-config.json",
    "t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-large-config.json",
    "t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-3b-config.json",
    "t5-11b": "https://s3.amazonaws.com/models.huggingco.co/bert/t5-11b-config.json".replace("huggingco.co", "huggingface.co"),
}
|
|
|
|
class T5Config(PretrainedConfig):
| r""" |
| This is the configuration class to store the configuration of a :class:`~transformers.T5Model` or a |
| :class:`~transformers.TFT5Model`. It is used to instantiate a T5 model according to the specified |
| arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar |
| configuration to that of the T5 `t5-small <https://huggingface.co/t5-small>`__ architecture. |
| |
| Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used |
| to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` |
| for more information. |
| |
| Arguments: |
| vocab_size (:obj:`int`, `optional`, defaults to 32128): |
| Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the |
| :obj:`inputs_ids` passed when calling :class:`~transformers.T5Model` or |
| :class:`~transformers.TFT5Model`. |
| n_positions (:obj:`int`, `optional`, defaults to 512): |
| The maximum sequence length that this model might ever be used with. Typically set this to something large |
| just in case (e.g., 512 or 1024 or 2048). |
        d_model (:obj:`int`, `optional`, defaults to 512):
            Size of the encoder and decoder layers and of the embedding vectors (the model's hidden size).
        d_kv (:obj:`int`, `optional`, defaults to 64):
            Size of the key, query, value projections per attention head. :obj:`d_kv` has to be equal to
            :obj:`d_model // num_heads`.
        d_ff (:obj:`int`, `optional`, defaults to 2048):
            Size of the intermediate feed forward layer in each :obj:`T5Block`.
        num_layers (:obj:`int`, `optional`, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (:obj:`int`, `optional`):
            Number of hidden layers in the Transformer decoder. Will use the same value as :obj:`num_layers` if not
            set.
        num_heads (:obj:`int`, `optional`, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (:obj:`int`, `optional`, defaults to 32):
            The number of buckets to use for the relative position embeddings in each attention layer.
        dropout_rate (:obj:`float`, `optional`, defaults to 0.1):
            The ratio for all dropout layers.
        layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (:obj:`float`, `optional`, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
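
    Example (an illustrative sketch; it assumes :class:`~transformers.T5Model` is available in the installed
    ``transformers`` package)::

        >>> from transformers import T5Model, T5Config

        >>> # Initializing a t5-small style configuration (all defaults)
        >>> configuration = T5Config()

        >>> # Initializing a model from that configuration
        >>> model = T5Model(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config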
| """ |
    model_type = "t5"
|
|
    def __init__(
        self,
        vocab_size=32128,
        n_positions=512,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        is_encoder_decoder=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Fall back to a symmetric architecture when the decoder depth is not given.
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
|
|
    # Property aliases so that the generic attribute names used elsewhere in the
    # library resolve to the corresponding T5-specific configuration attributes.
    @property
    def max_position_embeddings(self):
        return self.n_positions
|
|
    @property
    def hidden_size(self):
        return self.d_model
|
|
    @property
    def num_attention_heads(self):
        return self.num_heads
|
|
    @property
    def num_hidden_layers(self):
        return self.num_layers
|
|
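# Minimal usage sketch (illustrative, not part of the original module): it exercises the
# documented defaults, the generic property aliases and the ``num_decoder_layers`` fallback.
if __name__ == "__main__":
    config = T5Config()

    # The generic property names resolve to the T5-specific attributes.
    assert config.hidden_size == config.d_model == 512
    assert config.num_attention_heads == config.num_heads == 8
    assert config.num_hidden_layers == config.num_layers == 6
    assert config.max_position_embeddings == config.n_positions == 512

    # When ``num_decoder_layers`` is omitted it falls back to ``num_layers``.
    assert T5Config(num_layers=12).num_decoder_layers == 12

    # An asymmetric encoder/decoder stack can still be configured explicitly.
    assert T5Config(num_layers=12, num_decoder_layers=6).num_decoder_layers == 6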