{
  "architectures": [
    "NEOChatModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_neo_chat.NEOChatConfig",
    "AutoModel": "modeling_neo_chat.NEOChatModel",
    "AutoModelForCausalLM": "modeling_neo_chat.NEOChatModel"
  },
  "downsample_ratio": 0.5,
  "eos_token_id": 151645,
  "llm_config": {
    "_name_or_path": null,
    "architectures": [
      "Qwen3ForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "eos_token_id": 151645,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 4096,
    "intermediate_size": 12288,
    "max_position_embeddings": 262144,
    "max_position_embeddings_hw": 10000,
    "max_window_layers": 42,
    "model_type": "qwen3",
    "num_attention_heads": 32,
    "num_hidden_layers": 42,
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-06,
    "rope_scaling": null,
    "rope_theta": 5000000.0,
    "rope_theta_hw": 10000.0,
    "sliding_window": null,
    "torch_dtype": "bfloat16",
    "use_cache": false,
    "use_deepep": false,
    "use_sliding_window": false,
    "vocab_size": 151936,
    "pure_llm": false
  },
  "model_type": "neo_chat",
  "pad_token_id": 151643,
  "template": "neo1_0",
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.37.2",
  "use_backbone_lora": 0,
  "use_llm_lora": 0,
  "min_pixels": 65536,
  "max_pixels": 16777216,
  "patch_size": 16,
  "timestep_shift": 1.0,
  "time_schedule": "standard",
  "time_shift_type": "exponential",
  "base_shift": 0.5,
  "max_shift": 1.15,
  "base_image_seq_len": 64,
  "max_image_seq_len": 4096,
  "noise_scale_mode": "resolution",
  "noise_scale_base_image_seq_len": 64,
  "add_noise_scale_embedding": true,
  "noise_scale_max_value": 8.0,
  "noise_scale": 1.0,
  "P_mean": -0.8,
  "P_std": 0.8,
  "t_eps": 0.05,
  "fm_head_dim": 1536,
  "fm_head_layers": 2,
  "fm_head_mlp_ratio": 1,
  "extra_num_layers_post": 0,
  "concat_time_token_num": 0,
  "use_pixel_head": false,
  "use_adaLN": false,
  "vision_config": {
    "architectures": [
      "NEOVisionModel"
    ],
    "attention_dropout": 0.0,
    "auto_map": {
      "AutoConfig": "configuration_neo_vit.NEOVisionConfig",
      "AutoModel": "modeling_neo_vit.NEOVisionModel"
    },
    "llm_hidden_size": 4096,
    "downsample_ratio": 0.5,
    "hidden_size": 1024,
    "model_type": "neo_vision",
    "rope_theta_vision": 10000.0,
    "max_position_embeddings_vision": 10000,
    "num_channels": 3,
    "patch_size": 16,
    "torch_dtype": "bfloat16",
    "transformers_version": "4.37.2",
    "min_pixels": 65536,
    "max_pixels": 16777216
  }
}