{
  "architectures": [
    "DecoderOnlyT5Model"
  ],
  "auto_map": {
    "AutoConfig": "decoderonlyt5_config.DecoderOnlyT5Config",
    "AutoModelForCausalLM": "decoderonlyt5_modeling.DecoderOnlyT5Model"
  },
  "classifier_dropout": 0.0,
  "d_ff": 16384,
  "d_kv": 256,
  "d_model": 4096,
  "decoder_start_token_id": 0,
  "dense_act_fn": "swish",
  "dropout_rate": 0.0,
  "dtype": "bfloat16",
  "eos_token_id": 2,
  "feed_forward_proj": "gated-swish",
  "has_relative_attention_bias": false,
  "initializer_factor": 1.0,
  "is_decoder": false,
  "is_decoder_only": true,
  "is_encoder_decoder": false,
  "is_gated_act": true,
  "layer_norm_epsilon": 1e-06,
  "model_type": "t5",
  "multi_query_attention": true,
  "n_positions": 512,
  "num_decoder_layers": 16,
  "num_heads": 16,
  "num_layers": 0,
  "output_past": true,
  "pad_token_id": 1,
  "parallel_layers": true,
  "relative_attention_max_distance": 128,
  "relative_attention_num_buckets": 32,
  "rotary_embedding_max_timescale": 1000,
  "scale_decoder_outputs": true,
  "task_specific_params": {},
  "tie_word_embeddings": true,
  "transformers_version": "5.0.0",
  "use_cache": false,
  "use_rotary_embedding": true,
  "vocab_size": 256512
}
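
Because "auto_map" routes AutoConfig and AutoModelForCausalLM to the bundled decoderonlyt5_config / decoderonlyt5_modeling modules, the checkpoint has to be loaded with trust_remote_code=True. The following is a minimal loading sketch, assuming the config and the custom modules sit together in a directory or Hub repo; the path "decoder-only-t5" is a placeholder, not the actual repository name.

    # Minimal loading sketch; "decoder-only-t5" is a hypothetical path/repo id.
    import torch
    from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

    model_path = "decoder-only-t5"  # placeholder: local dir or Hub repo holding this config.json

    # auto_map points at custom classes shipped with the checkpoint,
    # so trust_remote_code=True is required to load them.
    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)

    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,  # matches "dtype": "bfloat16" in the config
    )

    tokenizer = AutoTokenizer.from_pretrained(model_path)

    inputs = tokenizer("Hello, world.", return_tensors="pt")
    # Note: the config sets "use_cache": false, so generation runs without a KV cache
    # unless the custom modeling code supports overriding it.
    outputs = model.generate(**inputs, max_new_tokens=32)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))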
|
|