Jashan887 committed
Commit ae75c2c · verified · 1 parent: a06f967

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ gemma-4-E4B-it.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-4-E4B-it.BF16-mmproj.gguf filter=lfs diff=lfs merge=lfs -text
Modelfile ADDED
@@ -0,0 +1,10 @@
+
+ FROM .
+ TEMPLATE """{{- range $i, $_ := .Messages }}
+ {{- $last := eq (len (slice $.Messages $i)) 1 }}
+ <|turn>{{ .Role }}
+ {{ .Content }}{{ if not $last }}<turn|>
+ {{ end }}
+ {{- end }}<turn|>
+ <|turn>model
+ """
README.md ADDED
@@ -0,0 +1,31 @@
+ ---
+ tags:
+ - gguf
+ - llama.cpp
+ - unsloth
+ - vision-language-model
+ ---
+
+ # hackinglix: GGUF
+
+ This model was finetuned and converted to GGUF format using [Unsloth](https://github.com/unslothai/unsloth).
+
+ **Example usage**:
+ - For text-only LLMs: `llama-cli -hf deathafteryou/hackinglix --jinja`
+ - For multimodal models: `llama-mtmd-cli -hf deathafteryou/hackinglix --jinja`
+
+ ## Available model files
+ - `gemma-4-E4B-it.Q4_K_M.gguf`
+ - `gemma-4-E4B-it.BF16-mmproj.gguf`
+
+ ## ⚠️ Ollama Note for Vision Models
+ **Important:** Ollama currently does not support separate mmproj files for vision models.
+
+ To create an Ollama model from this vision model:
+ 1. Place the `Modelfile` in the same directory as the finetuned bf16 merged model.
+ 2. Run: `ollama create model_name -f ./Modelfile`
+    (Replace `model_name` with your desired name.)
+
+ This will create a unified bf16 model that Ollama can use.
+ This model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth).
+ [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
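As a hedged sketch of the Ollama workflow described in the README above: the directory path and the model name `hackinglix` are placeholders, and the merged bf16 model is not part of this upload, so it has to be produced separately (e.g. by saving a merged 16-bit export from the finetuning run).

```sh
# Hypothetical paths: the Modelfile goes into the directory that holds the
# merged bf16 model, since the Modelfile's "FROM ." points at that directory.
cp Modelfile /path/to/merged-bf16-model/

cd /path/to/merged-bf16-model
ollama create hackinglix -f ./Modelfile   # "hackinglix" is an example name
ollama run hackinglix                     # interactive smoke test
```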
config.json ADDED
@@ -0,0 +1,198 @@
+ {
+   "architectures": [
+     "Gemma4ForConditionalGeneration"
+   ],
+   "audio_config": {
+     "_name_or_path": "",
+     "architectures": null,
+     "attention_chunk_size": 12,
+     "attention_context_left": 13,
+     "attention_context_right": 0,
+     "attention_invalid_logits_value": -1000000000.0,
+     "attention_logit_cap": 50.0,
+     "chunk_size_feed_forward": 0,
+     "conv_kernel_size": 5,
+     "torch_dtype": "bfloat16",
+     "gradient_clipping": 10000000000.0,
+     "hidden_act": "silu",
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "model_type": "gemma4_audio",
+     "num_attention_heads": 8,
+     "num_hidden_layers": 12,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_proj_dims": 1536,
+     "problem_type": null,
+     "residual_weight": 0.5,
+     "return_dict": true,
+     "rms_norm_eps": 1e-06,
+     "subsampling_conv_channels": [
+       128,
+       32
+     ],
+     "use_clipped_linears": true
+   },
+   "audio_token_id": 258881,
+   "boa_token_id": 256000,
+   "boi_token_id": 255999,
+   "torch_dtype": "bfloat16",
+   "eoa_token_id": 258883,
+   "eoa_token_index": 258883,
+   "eoi_token_id": 258882,
+   "eos_token_id": 106,
+   "image_token_id": 258880,
+   "initializer_range": 0.02,
+   "model_name": "unsloth/gemma-4-E4B-it",
+   "model_type": "gemma4",
+   "pad_token_id": 0,
+   "text_config": {
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "attention_k_eq_v": false,
+     "bos_token_id": 2,
+     "torch_dtype": "bfloat16",
+     "enable_moe_block": false,
+     "eos_token_id": 1,
+     "expert_intermediate_size": null,
+     "final_logit_softcapping": 30.0,
+     "global_head_dim": 512,
+     "head_dim": 256,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "hidden_size": 2560,
+     "hidden_size_per_layer_input": 256,
+     "initializer_range": 0.02,
+     "intermediate_size": 10240,
+     "layer_types": [
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention"
+     ],
+     "max_position_embeddings": 131072,
+     "model_type": "gemma4_text",
+     "moe_intermediate_size": null,
+     "num_attention_heads": 8,
+     "num_experts": null,
+     "num_global_key_value_heads": null,
+     "num_hidden_layers": 42,
+     "num_key_value_heads": 2,
+     "num_kv_shared_layers": 18,
+     "pad_token_id": 0,
+     "rms_norm_eps": 1e-06,
+     "rope_parameters": {
+       "full_attention": {
+         "partial_rotary_factor": 0.25,
+         "rope_theta": 1000000.0,
+         "rope_type": "proportional"
+       },
+       "sliding_attention": {
+         "rope_theta": 10000.0,
+         "rope_type": "default"
+       }
+     },
+     "sliding_window": 512,
+     "tie_word_embeddings": true,
+     "top_k_experts": null,
+     "use_bidirectional_attention": null,
+     "use_cache": true,
+     "use_double_wide_mlp": false,
+     "vocab_size": 262144,
+     "vocab_size_per_layer_input": 262144
+   },
+   "tie_word_embeddings": true,
+   "unsloth_fixed": true,
+   "unsloth_version": "2026.4.4",
+   "video_token_id": 258884,
+   "vision_config": {
+     "_name_or_path": "",
+     "architectures": null,
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "chunk_size_feed_forward": 0,
+     "default_output_length": 280,
+     "torch_dtype": "bfloat16",
+     "global_head_dim": 64,
+     "head_dim": 64,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "max_position_embeddings": 131072,
+     "model_type": "gemma4_vision",
+     "num_attention_heads": 12,
+     "num_hidden_layers": 16,
+     "num_key_value_heads": 12,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "patch_size": 16,
+     "pooling_kernel_size": 3,
+     "position_embedding_size": 10240,
+     "problem_type": null,
+     "return_dict": true,
+     "rms_norm_eps": 1e-06,
+     "rope_parameters": {
+       "rope_theta": 100.0,
+       "rope_type": "default"
+     },
+     "standardize": false,
+     "use_clipped_linears": true
+   },
+   "vision_soft_tokens_per_image": 280
+ }
gemma-4-E4B-it.BF16-mmproj.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c734fd386b47bd15de76b49feba9ee16f351478d314cf3b9bfdcfc20bd70e9d5
+ size 991551904
gemma-4-E4B-it.Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49d4ee68704c8dc237c041b1f974e284a274cda977a05e73d000eb205f034b96
+ size 5335285376