{
  "attention_projection_layout": "fused_qkv",
  "vocab_size": 152576,
  "max_position_embeddings": 1048576,
  "hidden_size": 6144,
  "intermediate_size": 16384,
  "num_hidden_layers": 70,
  "num_attention_heads": 128,
  "num_key_value_heads": 8,
  "hidden_act": "silu",
  "initializer_range": 0.02,
  "layernorm_epsilon": 1e-05,
  "use_cache": true,
  "rope_theta": 10000000,
  "rope_parameters": {
    "rope_type": "default",
    "rope_theta": 10000000,
    "partial_rotary_factor": 0.334
  },
  "attention_dropout": 0.0,
  "attention_bias": false,
  "attention_value_scale": 0.612,
  "head_dim": 192,
  "v_head_dim": 128,
  "swa_num_attention_heads": 128,
  "swa_num_key_value_heads": 8,
  "swa_head_dim": 192,
  "swa_v_head_dim": 128,
  "swa_rope_theta": 10000,
  "sliding_window": 128,
  "sliding_window_size": 128,
  "add_full_attention_sink_bias": false,
  "add_swa_attention_sink_bias": true,
  "hybrid_block_size": null,
  "hybrid_layer_pattern": [
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    0
  ],
  "partial_rotary_factor": 0.334,
  "n_routed_experts": 384,
  "moe_intermediate_size": 2048,
  "num_experts_per_tok": 8,
  "routed_scaling_factor": null,
  "scoring_func": "sigmoid",
  "topk_method": "noaux_tc",
  "n_group": 1,
  "topk_group": 1,
  "norm_topk_prob": true,
  "moe_layer_freq": [
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1
  ],
  "transformers_version": "5.5.3",
  "architectures": [
    "MiMoV2FlashForCausalLM"
  ],
  "output_hidden_states": false,
  "return_dict": true,
  "dtype": "bfloat16",
  "chunk_size_feed_forward": 0,
  "is_encoder_decoder": false,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1"
  },
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1
  },
  "problem_type": null,
  "_name_or_path": "/models/XiaomiMiMo/MiMo-V2.5-Pro",
  "tie_word_embeddings": false,
  "attention_chunk_size": 128,
  "model_type": "mimo_v2",
  "n_shared_experts": null,
  "layer_types": [
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention"
  ],
  "output_attentions": false,
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "input_activations": {
          "dynamic": false,
          "num_bits": 4,
          "type": "float",
          "group_size": 16
        },
        "weights": {
          "dynamic": false,
          "num_bits": 4,
          "type": "float",
          "group_size": 16
        },
        "targets": [
          "Linear"
        ]
      }
    },
    "ignore": [
      "lm_head",
      "model.layers.0.self_attn*",
      "model.layers.1.self_attn*",
      "model.layers.10.self_attn*",
      "model.layers.11.self_attn*",
      "model.layers.12.self_attn*",
      "model.layers.13.self_attn*",
      "model.layers.14.self_attn*",
      "model.layers.15.self_attn*",
      "model.layers.16.self_attn*",
      "model.layers.17.self_attn*",
      "model.layers.18.self_attn*",
      "model.layers.19.self_attn*",
      "model.layers.2.self_attn*",
      "model.layers.20.self_attn*",
      "model.layers.21.self_attn*",
      "model.layers.22.self_attn*",
      "model.layers.23.self_attn*",
      "model.layers.24.self_attn*",
      "model.layers.25.self_attn*",
      "model.layers.26.self_attn*",
      "model.layers.27.self_attn*",
      "model.layers.28.self_attn*",
      "model.layers.29.self_attn*",
      "model.layers.3.self_attn*",
      "model.layers.30.self_attn*",
      "model.layers.31.self_attn*",
      "model.layers.32.self_attn*",
      "model.layers.33.self_attn*",
      "model.layers.34.self_attn*",
      "model.layers.35.self_attn*",
      "model.layers.36.self_attn*",
      "model.layers.37.self_attn*",
      "model.layers.38.self_attn*",
      "model.layers.39.self_attn*",
      "model.layers.4.self_attn*",
      "model.layers.40.self_attn*",
      "model.layers.41.self_attn*",
      "model.layers.42.self_attn*",
      "model.layers.43.self_attn*",
      "model.layers.44.self_attn*",
      "model.layers.45.self_attn*",
      "model.layers.46.self_attn*",
      "model.layers.47.self_attn*",
      "model.layers.48.self_attn*",
      "model.layers.49.self_attn*",
      "model.layers.5.self_attn*",
      "model.layers.50.self_attn*",
      "model.layers.51.self_attn*",
      "model.layers.52.self_attn*",
      "model.layers.53.self_attn*",
      "model.layers.54.self_attn*",
      "model.layers.55.self_attn*",
      "model.layers.56.self_attn*",
      "model.layers.57.self_attn*",
      "model.layers.58.self_attn*",
      "model.layers.59.self_attn*",
      "model.layers.6.self_attn*",
      "model.layers.60.self_attn*",
      "model.layers.61.self_attn*",
      "model.layers.62.self_attn*",
      "model.layers.63.self_attn*",
      "model.layers.64.self_attn*",
      "model.layers.65.self_attn*",
      "model.layers.66.self_attn*",
      "model.layers.67.self_attn*",
      "model.layers.68.self_attn*",
      "model.layers.69.self_attn*",
      "model.layers.7.self_attn*",
      "model.layers.8.self_attn*",
      "model.layers.9.self_attn*"
    ],
    "quant_algo": "NVFP4",
    "kv_cache_scheme": {
      "dynamic": false,
      "num_bits": 8,
      "type": "float"
    },
    "producer": {
      "name": "modelopt",
      "version": "0.43.0"
    },
    "quant_method": "modelopt"
  },
  "auto_map": {
    "AutoConfig": "configuration_mimo_v2.MiMoV2Config",
    "AutoModel": "modeling_mimo_v2.MiMoV2Model",
    "AutoModelForCausalLM": "modeling_mimo_v2.MiMoV2ForCausalLM"
  }
}
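
For reference, a minimal sketch of loading this configuration with Hugging Face transformers. It assumes the checkpoint directory ships the `configuration_mimo_v2.py` and `modeling_mimo_v2.py` modules referenced in `auto_map`, so `trust_remote_code=True` is required (mimo_v2 is not a built-in transformers model type); the local path is taken from `_name_or_path`.

```python
# Minimal sketch: loading this config via the auto_map custom-code hooks.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "/models/XiaomiMiMo/MiMo-V2.5-Pro",  # path from _name_or_path
    trust_remote_code=True,              # needed for custom model_type "mimo_v2"
)
print(config.model_type)           # "mimo_v2"
print(config.num_hidden_layers)    # 70
print(config.layer_types.count("full_attention"))  # 10 global-attention layers
```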
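The hybrid attention schedule is encoded twice: `hybrid_layer_pattern` as 0/1 flags and `layer_types` as strings. The sketch below (assuming the file is saved locally as `config.json`) decodes the flags, with 0 mapping to `full_attention` and 1 to `sliding_attention`, and checks that the two fields agree: full attention falls on layers 0, 7, 15, 23, 31, 39, 47, 55, 62, and 69, while the remaining 60 layers attend within the 128-token sliding window.

```python
# Sketch: verify the two encodings of the 70-layer hybrid attention schedule.
import json

with open("config.json") as f:  # assumed local filename
    cfg = json.load(f)

decoded = [
    "full_attention" if bit == 0 else "sliding_attention"
    for bit in cfg["hybrid_layer_pattern"]
]
assert decoded == cfg["layer_types"]  # the two fields encode the same schedule
print([i for i, t in enumerate(decoded) if t == "full_attention"])
# [0, 7, 15, 23, 31, 39, 47, 55, 62, 69]
```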
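The MoE fields describe a DeepSeek-V3-style router: 384 routed experts, top-8 selection per token, sigmoid scoring, and renormalized gate weights (`norm_topk_prob`). The snippet below is an illustrative sketch of that scoring path only, not the model's actual code; in particular, `topk_method` `"noaux_tc"` typically adds a learned per-expert bias to the scores at selection time for auxiliary-loss-free load balancing, which is omitted here and lives in `modeling_mimo_v2.py`.

```python
# Illustrative sketch of the routing math the MoE fields parameterize.
import torch

n_routed_experts, num_experts_per_tok = 384, 8   # from the config above
hidden_size = 6144

router = torch.nn.Linear(hidden_size, n_routed_experts, bias=False)
hidden = torch.randn(4, hidden_size)             # 4 example tokens

scores = torch.sigmoid(router(hidden))           # scoring_func: "sigmoid"
topk_scores, topk_idx = scores.topk(num_experts_per_tok, dim=-1)
gates = topk_scores / topk_scores.sum(-1, keepdim=True)  # norm_topk_prob: true

print(topk_idx.shape, gates.sum(-1))             # torch.Size([4, 8]), all ones
```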
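Per `quantization_config`, only `Linear` weights and input activations are quantized to 4-bit float (NVFP4, group size 16); every `self_attn` block and `lm_head` are excluded via glob-style `ignore` patterns (note the trailing `*`), and the KV cache uses 8-bit float per `kv_cache_scheme`. A sketch of how such patterns are conventionally matched follows; treating them as fnmatch-style globs is an assumption about modelopt's semantics, not a documented contract.

```python
# Sketch: deciding whether a module is NVFP4-quantized under the ignore list,
# assuming fnmatch-style glob semantics for the patterns.
import fnmatch
import json

with open("config.json") as f:  # assumed local filename
    ignore = json.load(f)["quantization_config"]["ignore"]

def is_quantized(module_name: str) -> bool:
    return not any(fnmatch.fnmatch(module_name, pat) for pat in ignore)

print(is_quantized("model.layers.3.self_attn.q_proj"))        # False (ignored)
print(is_quantized("model.layers.3.mlp.experts.0.gate_proj")) # True
```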