{
  "version": 2,
  "weight_format": "mxtq",
  "profile": "JANGTQ2",
  "source_model": {
    "name": "Ling-2.6-flash",
    "org": "inclusionAI",
    "architecture": "bailing_hybrid"
  },
  "mxtq_seed": 42,
  "mxtq_bits": {
    "routed_expert": 2,
    "attention": 8,
    "shared_expert": 8,
    "dense_mlp": 8,
    "embed_tokens": 8,
    "lm_head": 8,
    "mtp_eh_proj": 8,
    "norms_router_biases": 16
  },
  "quantization": {
    "method": "affine+mxtq",
    "group_size": 64,
    "bits_default": 2
  },
  "capabilities": {
    "reasoning_parser": "deepseek_r1",
    "tool_parser": "deepseek",
    "think_in_template": false,
    "supports_tools": true,
    "supports_thinking": true,
    "family": "bailing_hybrid",
    "modality": "text",
    "cache_type": "hybrid"
  },
  "routed_expert_layout": "prestacked"
}