{ "_external_rope_config_kwargs": {}, "architectures": [ "Glm4MoeForCausalLM" ], "attention_bias": true, "attention_dropout": 0.0, "attn_mechanism": "vanilla", "backend": null, "bits": null, "blocksize_b": 1, "blocksize_k": 128, "blocksize_q": 128, "decode_attn_mechanism": null, "dtype": "bfloat16", "easy_method": "train", "eos_token_id": [ 151329, 151336, 151338 ], "fcm_max_ratio": 0.0, "fcm_min_ratio": 0.0, "first_k_dense_replace": 3, "flash_attention_backward_pass_impl": "triton", "fsdp_is_ep_bound": true, "gradient_checkpointing": "", "gradient_checkpointing_targets": null, "hardware_abstraction": false, "head_dim": 128, "hidden_act": "silu", "hidden_size": 5120, "initializer_range": 0.02, "intermediate_size": 12288, "kv_cache_quantization_config": null, "kv_cache_sharding_sequence_axis_name": "sp", "layer_types": [ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention" ], "max_position_embeddings": 202752, "model_type": "glm4_moe", "moe_force_xla_gmm": false, "moe_intermediate_size": 1536, "moe_method": "fused_moe", "moe_tiling_size_batch": 4, "moe_tiling_size_dim": 128, "moe_tiling_size_seqlen": 128, "n_group": 1, "n_routed_experts": 160, "n_shared_experts": 1, "norm_topk_prob": true, "num_attention_heads": 96, "num_experts_per_tok": 8, "num_hidden_layers": 92, "num_key_value_heads": 8, "num_nextn_predict_layers": 1, "operation_configs": null, "pad_token_id": 151329, "pallas_k_block_size": 128, "pallas_m_block_size": 128, "pallas_n_block_size": 128, "partial_rotary_factor": 0.5, "partition_axis": { "attention_dim_axis": null, "attention_kv_dim_axis": null, "batch_axis": [ "fsdp", "dp" ], "bias_head_sequence_axis": null, "bias_key_sequence_axis": null, "data_parallel_axis": "dp", "decode_attention_dim_axis": null, "decode_attention_kv_dim_axis": null, "decode_batch_axis": [ "fsdp", "dp" ], "decode_head_axis": "tp", "decode_key_sequence_axis": "sp", "decode_kv_head_axis": "tp", "decode_query_sequence_axis": null, 
"expert_axis": "ep", "expert_gate_axis": null, "expert_parallel_axis": "ep", "fully_sharded_data_parallel_axis": "fsdp", "head_axis": "tp", "hidden_state_axis": "tp", "key_sequence_axis": "sp", "kv_head_axis": "tp", "mlp_intermediate_axis": "tp", "query_sequence_axis": "sp", "sequence_axis": "sp", "sequence_parallel_axis": "sp", "tensor_parallel_axis": "tp", "vocab_axis": "tp" }, "platform": null, "precompute_masks": true, "pretraining_tp": 1, "quantization_config": null, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 1000000, "routed_scaling_factor": 2.5, "scan_attention_layers": false, "scan_mlp_chunk_size": 1024, "scan_ring_attention": true, "sequence_axis_name": "sp", "sharding_axis_dims": [ 1, -1, 1, 1, 1 ], "sharding_axis_names": [ "dp", "fsdp", "ep", "tp", "sp" ], "sharding_dcn_axis_dims": null, "sp_is_ep_bound": true, "tie_word_embeddings": false, "topk_group": 1, "transformers_version": "4.57.3", "use_cache": true, "use_expert_tensor_mode": false, "use_qk_norm": true, "use_ring_of_experts": false, "use_scan_mlp": false, "use_sharded_kv_caching": false, "use_sharding_constraint": false, "vocab_size": 151552 }