from transformers.configuration_utils import PretrainedConfig


class TAASConfig(PretrainedConfig):
    """Configuration for the TAAS model.

    Follows the standard BERT-style ``PretrainedConfig`` layout, with
    additional task-embedding fields (``use_task_id``,
    ``task_type_vocab_size``).
    """

    model_type = "TAAS"

    def __init__(
        self,
        hidd_dropout=0.1,
        intermediate_size=3072,
        initialize_range=0.02,
        max_pos_embeddings=2048,
        hidd_act="gelu",
        attention_dropout=0.1,
        using_task_id=True,
        vocabulary_size=40000,  # token vocabulary; stored as ``vocab_size``
        hidd_size=768,
        num_hidd_layers=12,
        layer_norm_rate=1e-05,
        num_atten_heads=12,
        pad_token_id=0,
        task_vocab_size=3,
        classifier_drop=None,
        pos_embedding="absolute",
        use_cache=True,
        vocab_size=4,  # token-type (segment) vocabulary; stored as ``type_vocab_size``
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # Map the abbreviated constructor arguments onto the canonical
        # attribute names expected by the rest of the transformers codebase.
        self.vocab_size = vocabulary_size
        self.type_vocab_size = vocab_size
        self.hidden_size = hidd_size
        self.num_hidden_layers = num_hidd_layers
        self.num_attention_heads = num_atten_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidd_act
        self.hidden_dropout_prob = hidd_dropout
        self.attention_probs_dropout_prob = attention_dropout
        self.max_position_embeddings = max_pos_embeddings
        self.initializer_range = initialize_range
        self.layer_norm_eps = layer_norm_rate
        self.position_embedding_type = pos_embedding
        self.use_cache = use_cache
        self.classifier_dropout = classifier_drop
        # Task-type embeddings: an extra embedding table that distinguishes
        # between tasks (as in ERNIE-style configurations).
        self.use_task_id = using_task_id
        self.task_type_vocab_size = task_vocab_size


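# --- Usage sketch (illustrative; the directory name below is hypothetical) ---
# A minimal example showing how the abbreviated constructor arguments are
# remapped onto the standard attribute names, then round-tripping the config
# through the stock ``PretrainedConfig`` save/load API.
if __name__ == "__main__":
    config = TAASConfig(hidd_size=512, num_hidd_layers=6, vocabulary_size=30522)

    # Attributes carry the canonical transformers names, not the argument names.
    assert config.hidden_size == 512
    assert config.num_hidden_layers == 6
    assert config.vocab_size == 30522

    config.save_pretrained("./taas-config")  # writes config.json
    reloaded = TAASConfig.from_pretrained("./taas-config")
    assert reloaded.hidden_size == config.hidden_size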