yujingfeng committed
Commit 8ccb0bf (verified)
Parent: 3ed2d00

Upload config.json

Files changed (1)
  config.json  +4 -6
config.json CHANGED
@@ -3,10 +3,11 @@
   "architectures": [
     "QWenVLChatModel"
   ],
+  "model_type": "qwen2_5_vl",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.39.3",
+
   "attn_dropout_prob": 0.0,
-  "auto_map": {
-    "AutoModelForCausalLM": "qwen2_5_vl.modeling_qwen2_5_vl.QWenVLChatModel"
-  },
   "bf16": false,
   "emb_dropout_prob": 0.0,
   "fp16": false,
@@ -17,7 +18,6 @@
   "kv_channels": 128,
   "layer_norm_epsilon": 1e-06,
   "max_position_embeddings": 8192,
-  "model_type": "qwen2_5_vl",
   "no_bias": true,
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
@@ -28,8 +28,6 @@
   "seq_length": 2048,
   "tie_word_embeddings": false,
   "tokenizer_type": "QWenTokenizer",
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.31.0",
   "use_cache": true,
   "use_dynamic_ntk": true,
   "use_flash_attn": false,
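For context, a minimal sketch of how the edited config is typically consumed with the Hugging Face transformers library. The repo id below is a placeholder (the commit does not name the hosting repository), and the sketch assumes a transformers release that registers the qwen2_5_vl model type: with the repo-local "auto_map" entry removed, the Auto classes fall back to resolving the architecture through the top-level "model_type" key.

    from transformers import AutoConfig

    # Placeholder repo id -- the commit page does not name the repository.
    repo_id = "yujingfeng/placeholder-model"

    # After this commit the config carries "model_type": "qwen2_5_vl" at the
    # top level, so AutoConfig can resolve it without the removed "auto_map"
    # entry (assuming the installed transformers registers qwen2_5_vl).
    config = AutoConfig.from_pretrained(repo_id)

    print(config.model_type)     # "qwen2_5_vl"
    print(config.torch_dtype)    # torch.bfloat16 (the "bfloat16" string is
                                 # parsed to a torch.dtype when torch is installed)
    print(config.architectures)  # ["QWenVLChatModel"]

Before this commit, loading instead went through the "auto_map" entry, which requires trust_remote_code=True so that AutoModelForCausalLM can import QWenVLChatModel from the repo's qwen2_5_vl.modeling_qwen2_5_vl module.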