Alan Joshua committed · Commit 9f22d5e · verified · 1 Parent(s): ec5a958

Upload model trained with Unsloth

Upload model trained with Unsloth 2x faster

Files changed (4)
  1. README.md +2 -0
  2. config.json +97 -0
  3. generation_config.json +14 -0
  4. model.safetensors +3 -0
README.md CHANGED
@@ -1,3 +1,5 @@
 ---
 license: mit
+tags:
+- unsloth
 ---
config.json ADDED
@@ -0,0 +1,97 @@
+{
+  "architectures": [
+    "Qwen2VLForConditionalGeneration"
+  ],
+  "dtype": "float16",
+  "image_token_id": 151655,
+  "model_name": "unsloth/Qwen2-VL-2B-Instruct",
+  "model_type": "qwen2_vl",
+  "pad_token_id": 151654,
+  "text_config": {
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "dtype": "float16",
+    "eos_token_id": 151645,
+    "hidden_act": "silu",
+    "hidden_size": 1536,
+    "initializer_range": 0.02,
+    "intermediate_size": 8960,
+    "layer_types": [
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention"
+    ],
+    "max_position_embeddings": 32768,
+    "max_window_layers": 28,
+    "model_type": "qwen2_vl_text",
+    "num_attention_heads": 12,
+    "num_hidden_layers": 28,
+    "num_key_value_heads": 2,
+    "pad_token_id": 151654,
+    "rms_norm_eps": 1e-06,
+    "rope_parameters": {
+      "mrope_section": [
+        16,
+        24,
+        24
+      ],
+      "rope_theta": 1000000.0,
+      "rope_type": "default",
+      "type": "default"
+    },
+    "sliding_window": null,
+    "use_cache": true,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "tie_word_embeddings": true,
+  "transformers_version": "5.2.0",
+  "unsloth_fixed": true,
+  "unsloth_version": "2026.3.3",
+  "video_token_id": 151656,
+  "vision_config": {
+    "depth": 32,
+    "dtype": "float16",
+    "embed_dim": 1280,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 1536,
+    "in_channels": 3,
+    "in_chans": 3,
+    "initializer_range": 0.02,
+    "mlp_ratio": 4,
+    "model_type": "qwen2_vl",
+    "num_heads": 16,
+    "patch_size": 14,
+    "spatial_merge_size": 2,
+    "spatial_patch_size": 14,
+    "temporal_patch_size": 2
+  },
+  "vision_end_token_id": 151653,
+  "vision_start_token_id": 151652,
+  "vision_token_id": 151654
+}
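Taken together, config.json describes a float16 Qwen2-VL stack: a 28-layer, full-attention text decoder (hidden size 1536, GQA with 2 KV heads, mRoPE sections 16/24/24) paired with a 32-block vision encoder (patch size 14, spatial merge 2). A minimal loading sketch, assuming this commit lives in a standard Hugging Face model repo; the repo id below is a placeholder, not the actual upload path:

```python
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

# Placeholder repo id (assumption): substitute the repository this commit was pushed to.
REPO_ID = "alan-joshua/qwen2-vl-2b-unsloth-finetune"

# float16 matches the "dtype": "float16" fields in config.json.
# Recent transformers releases also accept dtype= in place of torch_dtype=.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    REPO_ID,
    torch_dtype=torch.float16,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(REPO_ID)
```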
generation_config.json ADDED
@@ -0,0 +1,14 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "max_length": 32768,
+  "pad_token_id": 151654,
+  "temperature": 0.01,
+  "top_k": 1,
+  "top_p": 0.001,
+  "transformers_version": "5.2.0"
+}
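Although do_sample is true, top_k: 1 restricts sampling to the single highest-probability token, so decoding is effectively greedy; the near-zero temperature and top_p merely reinforce that. Generation stops at either eos id (151645 or 151643). A short usage sketch, reusing the model and processor from the previous snippet; the prompt is purely illustrative:

```python
# generation_config.json is picked up automatically by from_pretrained,
# so generate() already defaults to the effectively-greedy settings above.
inputs = processor(
    text=["Describe this model in one sentence."],
    return_tensors="pt",
).to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])
```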
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:292f3a08aeb0bfb4de7059dd8ac593181b3cb417738e1ad5250ca61fdc106a61
+size 4418050120
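These three lines are a Git LFS pointer, not the weights themselves: the real file is addressed by its sha256 oid and weighs 4,418,050,120 bytes (~4.1 GiB), which at 2 bytes per float16 parameter works out to ~2.2 B parameters, consistent with a Qwen2-VL-2B checkpoint. A sketch for fetching the resolved file with huggingface_hub; the repo id is again a placeholder:

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id (assumption): substitute the repository this commit was pushed to.
weights_path = hf_hub_download(
    repo_id="alan-joshua/qwen2-vl-2b-unsloth-finetune",
    filename="model.safetensors",
)
print(weights_path)  # local cache path to the ~4.1 GiB safetensors file
```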