{
"name": "eqlm-large-770m",
"hf_config": {
"name": "eqlm-large-770m"
},
"block_size": 2048,
"n_embd": 1280,
"intermediate_size": 5120,
"num_attention_heads": 10,
"num_key_value_heads": 10,
"vocab_size": 32768,
"padding_multiple": 64,
"padded_vocab_size": 32768,
"rope_settings": {
"use_rope": true,
"rope_condense_ratio": 1,
"rope_base": 50000
},
"use_abacus": false,
"randomize_positions_from": null,
"block_class_name": "TransformerPreNormBlock",
"norm_class_name": "RMSNorm",
"attn_impl": "flash",
"norm_eps": 1e-05,
"mlp_class_name": "BaseMLP",
"nonlin_name": "ReLU2",
"bias": false,
"qk_bias": false,
"init_strategy": "scaled-zero",
"init_orthogonal": true,
"skip_initialization": false,
"mup_model_scaling_factor": 1,
"use_fused_head": "pytorch",
"debias_attention": false,
"center_attention": false,
"clip_qkv": null,
"qk_norm": true,
"logit_softcap": null,
"activation_checkpoint_impl": "per-block",
"simple_ops": false,
"strategy": "ddp",
"n_backbone_layers": 35,
"n_fp_blocks": 2,
"tie_embeddings": true,
"solver": "anderson",
"max_iter": 32,
"min_iter": 6,
"tol": 0.00015,
"anderson_m": 5,
"anderson_beta": 0.96,
"backward_type": "onestep",
"backward_max_iter": 32,
"backward_min_iter": 6,
"backward_tol": 0.00015,
"adjoint_grad_clip": 2.0,
"layer_scale_init": 0.73,
"gamma_max": 0.9,
"fp_lr_scale": 0.4,
"fp_wd": 0.1,
"recurrent_embedding_dimension": 1280,
"model_class_name": "EQLM",
"_class_name": "EQLMConfig"
}
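
The solver fields above ("solver": "anderson" with "max_iter", "min_iter", "tol", "anderson_m", "anderson_beta") indicate that the model's fixed-point blocks are resolved with Anderson acceleration. As a minimal sketch of what those parameters control, here is a type-II Anderson-accelerated fixed-point iteration in NumPy, with the config values as defaults. The function name `anderson_solve` and its interface are hypothetical illustrations, not this repository's implementation:

```python
import numpy as np

def anderson_solve(f, x0, max_iter=32, min_iter=6, tol=1.5e-4, m=5, beta=0.96):
    """Type-II Anderson acceleration for the fixed point x* = f(x*).

    Defaults mirror the config: "max_iter"/"min_iter" bound the iteration
    count, "tol" is the relative-residual threshold, "anderson_m" is the
    history depth, and "anderson_beta" is the mixing (damping) weight.
    """
    x = np.asarray(x0, dtype=float).copy()
    X, G = [], []                          # iterate and residual histories
    for k in range(max_iter):
        g = f(x) - x                       # fixed-point residual
        X.append(x.copy())
        G.append(g.copy())
        if len(X) > m + 1:                 # keep at most m difference columns
            X.pop(0)
            G.pop(0)
        if k + 1 >= min_iter and np.linalg.norm(g) < tol * (np.linalg.norm(x) + 1e-8):
            break
        if len(X) == 1:
            x = x + beta * g               # damped Picard step to seed the history
        else:
            dX = np.stack([X[i + 1] - X[i] for i in range(len(X) - 1)], axis=1)
            dG = np.stack([G[i + 1] - G[i] for i in range(len(G) - 1)], axis=1)
            # Least-squares mixing weights over the residual differences.
            gamma, *_ = np.linalg.lstsq(dG, g, rcond=None)
            x = x + beta * g - (dX + beta * dG) @ gamma   # Anderson update
    return x, k + 1

# Toy usage: solve x = cos(x) elementwise; converges to ~0.739085.
x_star, iters = anderson_solve(np.cos, np.zeros(4))
```

The "backward_*" fields presumably configure an analogous solve (with its own iteration bounds and tolerance) for the gradient pass, and "adjoint_grad_clip" caps the adjoint's gradient norm; the details of "backward_type": "onestep" are specific to this codebase and not reproduced here.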