Helion-OSC/inference/model.safetensors.index.json
{
"metadata": {
"total_size": 374982361088,
"total_shards": 116,
"shard_size_gb": 3.01,
"format": "safetensors",
"model_type": "helion-osc",
"architecture": "MoE-Transformer",
"precision": "bfloat16",
"created_by": "DeepXR",
"version": "1.0.0"
},
"weight_map": {
"model.embed_tokens.weight": "model-00001-of-00116.safetensors",
"model.norm.weight": "model-00116-of-00116.safetensors",
"lm_head.weight": "model-00116-of-00116.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00116.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00116.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00116.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00116.safetensors",
"model.layers.0.input_layernorm.weight": "model-00001-of-00116.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00116.safetensors",
"model.layers.0.mlp.gate.weight": "model-00002-of-00116.safetensors",
"model.layers.0.mlp.experts.0.gate_proj.weight": "model-00002-of-00116.safetensors",
"model.layers.0.mlp.experts.0.up_proj.weight": "model-00002-of-00116.safetensors",
"model.layers.0.mlp.experts.0.down_proj.weight": "model-00002-of-00116.safetensors",
"model.layers.0.mlp.shared_experts.gate_proj.weight": "model-00002-of-00116.safetensors",
"model.layers.0.mlp.shared_experts.up_proj.weight": "model-00002-of-00116.safetensors",
"model.layers.0.mlp.shared_experts.down_proj.weight": "model-00002-of-00116.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00003-of-00116.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00003-of-00116.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00003-of-00116.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00003-of-00116.safetensors",
"model.layers.1.input_layernorm.weight": "model-00003-of-00116.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00003-of-00116.safetensors",
"model.layers.1.mlp.gate.weight": "model-00004-of-00116.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00020-of-00116.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00020-of-00116.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00020-of-00116.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00020-of-00116.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00040-of-00116.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00040-of-00116.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00040-of-00116.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00040-of-00116.safetensors",
"model.layers.30.self_attn.q_proj.weight": "model-00060-of-00116.safetensors",
"model.layers.30.self_attn.k_proj.weight": "model-00060-of-00116.safetensors",
"model.layers.30.self_attn.v_proj.weight": "model-00060-of-00116.safetensors",
"model.layers.30.self_attn.o_proj.weight": "model-00060-of-00116.safetensors",
"model.layers.40.self_attn.q_proj.weight": "model-00080-of-00116.safetensors",
"model.layers.40.self_attn.k_proj.weight": "model-00080-of-00116.safetensors",
"model.layers.40.self_attn.v_proj.weight": "model-00080-of-00116.safetensors",
"model.layers.40.self_attn.o_proj.weight": "model-00080-of-00116.safetensors",
"model.layers.50.self_attn.q_proj.weight": "model-00095-of-00116.safetensors",
"model.layers.60.self_attn.q_proj.weight": "model-00100-of-00116.safetensors",
"model.layers.70.self_attn.q_proj.weight": "model-00110-of-00116.safetensors",
"model.layers.79.self_attn.q_proj.weight": "model-00115-of-00116.safetensors",
"model.layers.79.self_attn.k_proj.weight": "model-00115-of-00116.safetensors",
"model.layers.79.self_attn.v_proj.weight": "model-00115-of-00116.safetensors",
"model.layers.79.self_attn.o_proj.weight": "model-00115-of-00116.safetensors",
"model.layers.79.input_layernorm.weight": "model-00115-of-00116.safetensors",
"model.layers.79.post_attention_layernorm.weight": "model-00115-of-00116.safetensors",
"model.layers.79.mlp.gate.weight": "model-00116-of-00116.safetensors"
}
}
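The metadata block is internally consistent: 374,982,361,088 bytes is roughly 349 GiB, which spread over 116 shards comes to about 3.01 GiB per file, matching `shard_size_gb`. The `weight_map` is the part loaders actually consume: it maps each parameter name to the shard file that stores it, so a loader can resolve and fetch only the shards it needs. Below is a minimal sketch of that lookup using `huggingface_hub` and `safetensors`; the repo id `DeepXR/Helion-OSC` and the `inference/` subfolder are assumptions inferred from the names in this file, not confirmed locations.

```python
# Sketch: resolve one tensor name to its shard via the index, then load only
# that tensor. Repo id and subfolder below are assumptions, not confirmed.
import json

from huggingface_hub import hf_hub_download
from safetensors import safe_open

REPO_ID = "DeepXR/Helion-OSC"   # assumed repo id (from "created_by" + model name)
SUBFOLDER = "inference"         # path of this index file within the repo

# 1. Fetch the index and parse the weight map.
index_path = hf_hub_download(REPO_ID, "model.safetensors.index.json", subfolder=SUBFOLDER)
with open(index_path) as f:
    index = json.load(f)
weight_map = index["weight_map"]

# 2. Look up which of the 116 shards holds a given parameter.
name = "model.layers.0.self_attn.q_proj.weight"
shard_file = weight_map[name]   # -> "model-00001-of-00116.safetensors"

# 3. Download only that shard and read the single tensor from it.
shard_path = hf_hub_download(REPO_ID, shard_file, subfolder=SUBFOLDER)
with safe_open(shard_path, framework="pt") as f:
    tensor = f.get_tensor(name)  # bfloat16 tensor, read lazily from the shard

print(name, tuple(tensor.shape), tensor.dtype)
```

In practice, `transformers.AutoModelForCausalLM.from_pretrained` performs the same resolution automatically when it finds a `model.safetensors.index.json` next to the shards; the manual lookup above is mainly useful for inspecting individual weights of a ~349 GiB checkpoint without downloading all 116 files.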