Commit d3822d6 (verified) committed by gsaltintas · 1 parent: 11f3e34

Upload model files

config.json CHANGED
@@ -1,35 +1,35 @@
 {
   "architectures": [
-    "LlamaForCausalLM"
+    "LlamaAlbertForCausalLM"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "modeling_llama_albert.LlamaAlbertConfig",
+    "AutoModelForCausalLM": "modeling_llama_albert.LlamaAlbertForCausalLM",
+    "modeling_llama_albert.LlamaAlbertForCausalLM": "modeling_llama_albert.LlamaAlbertConfig"
+  },
   "bos_token_id": 128000,
+  "dtype": "bfloat16",
+  "embedding_dim": 512,
   "eos_token_id": 128001,
-  "head_dim": 64,
+  "head_dim": 128,
   "hidden_act": "silu",
   "hidden_size": 2048,
   "initializer_range": 0.02,
-  "intermediate_size": 8192,
-  "max_position_embeddings": 131072,
+  "intermediate_size": 5632,
+  "max_position_embeddings": 8192,
   "mlp_bias": false,
-  "model_type": "llama",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 16,
-  "num_key_value_heads": 8,
+  "model_type": "llama_albert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 25,
+  "num_key_value_heads": 16,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
-  "rope_scaling": {
-    "factor": 32.0,
-    "high_freq_factor": 4.0,
-    "low_freq_factor": 1.0,
-    "original_max_position_embeddings": 8192,
-    "rope_type": "llama3"
-  },
-  "rope_theta": 500000.0,
-  "tie_word_embeddings": true,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.45.0.dev0",
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "transformers_version": "4.57.3",
   "use_cache": true,
   "vocab_size": 128256
 }
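
Because the new auto_map routes AutoConfig and AutoModelForCausalLM to the custom modeling_llama_albert.py, the checkpoint only loads with trust_remote_code=True. A minimal loading sketch; the repository id below is a placeholder, since the commit view does not show the actual repo name:

```python
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "gsaltintas/llama-albert"  # placeholder: actual repo id not shown in this commit

# auto_map in config.json resolves these Auto classes to LlamaAlbertConfig /
# LlamaAlbertForCausalLM in modeling_llama_albert.py, which requires
# explicitly trusting the repository's code.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

print(type(model).__name__)  # expected: LlamaAlbertForCausalLM
```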
generation_config.json CHANGED
@@ -1,9 +1,8 @@
 {
-  "_from_model_config": true,
   "bos_token_id": 128000,
-  "eos_token_id": 128001,
-  "transformers_version": "4.45.0.dev0",
   "do_sample": true,
+  "eos_token_id": 128001,
   "temperature": 0.6,
-  "top_p": 0.9
+  "top_p": 0.9,
+  "transformers_version": "4.57.3"
 }
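
With these defaults, a plain model.generate() call already samples at temperature 0.6 with top_p 0.9. A short sketch, again with a placeholder repository id and assuming the repo also ships a tokenizer:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "gsaltintas/llama-albert"  # placeholder repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer("The quick brown fox", return_tensors="pt")
# generation_config.json already supplies do_sample / temperature / top_p,
# so passing them here only makes the defaults explicit.
out = model.generate(**inputs, do_sample=True, temperature=0.6, top_p=0.9, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```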
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfa935fda8189ddc9a89706027e11cbca8a6d4237301cc23a723546d6b2b754c
+size 2836109184
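
The LFS pointer reports a 2,836,109,184-byte checkpoint. A back-of-the-envelope count from the new config.json, assuming bfloat16 weights (2 bytes per parameter), lands within roughly 30 kB of that figure; this is an estimate, not an exact accounting:

```python
# Rough parameter count implied by the new config.json (assumes bf16 = 2 bytes/param).
vocab, hidden, embed_dim = 128256, 2048, 512
layers, inter, heads, kv_heads, head_dim = 25, 5632, 16, 16, 128

embed = vocab * embed_dim + embed_dim * hidden                            # factorized input embeddings
attn = 2 * hidden * heads * head_dim + 2 * hidden * kv_heads * head_dim  # q, o and k, v projections
mlp = 3 * hidden * inter                                                  # gate, up, down
norms = 2 * hidden                                                        # per-layer RMSNorms
lm_head = hidden * embed_dim + embed_dim * vocab                          # factorized output head

total = embed + layers * (attn + mlp + norms) + hidden + lm_head
print(f"{total:,} params, ~{2 * total:,} bytes in bf16")
# ~1.42B params, ~2,836,082,688 bytes -- close to the stated size;
# the small remainder is roughly the safetensors header.
```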
modeling_llama_albert.py ADDED
@@ -0,0 +1,65 @@
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from transformers import LlamaConfig, LlamaForCausalLM
+from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+class LlamaAlbertConfig(LlamaConfig):
+    model_type = "llama_albert"
+    architectures = ["LlamaAlbertForCausalLM"]
+
+    def __init__(self, embedding_dim=128, **kwargs):
+        super().__init__(
+            **kwargs,
+        )
+        self.embedding_dim = embedding_dim
+        self.auto_map = {
+            "AutoConfig": "modeling_llama_albert.LlamaAlbertConfig",
+            "AutoModelForCausalLM": "modeling_llama_albert.LlamaAlbertForCausalLM",
+        }
+        self._auto_class = "modeling_llama_albert.LlamaAlbertForCausalLM"
+
+
+class LlamaAlbertForCausalLM(LlamaForCausalLM):
+    config_class = LlamaAlbertConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+
+        # 1. Factorized embeddings (ALBERT style):
+        #    replace self.model.embed_tokens with a Sequential layer.
+        self.model.embed_tokens = nn.Sequential(
+            nn.Embedding(config.vocab_size, config.embedding_dim),
+            nn.Linear(config.embedding_dim, config.hidden_size, bias=False),
+        )
+
+        # 2. Factorized LM head:
+        #    Sequential: hidden -> embedding dim -> vocab.
+        self.lm_head = nn.Sequential(
+            nn.Linear(config.hidden_size, config.embedding_dim, bias=False),
+            nn.Linear(config.embedding_dim, config.vocab_size, bias=False),
+        )
+
+        # Re-initialize weights for the new layers.
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens[0]
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens[0] = value
+
+    def get_output_embeddings(self):
+        return self.lm_head[1]
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head[1] = new_embeddings
+
+    def forward(self, input_ids=None, **kwargs):
+        # The base LlamaForCausalLM forward calls self.model(...).
+        # Since we replaced self.model.embed_tokens with a Sequential,
+        # LlamaModel's internal call to embed_tokens(input_ids) will
+        # automatically run through both the Embedding and the Linear layer.
+        return super().forward(input_ids=input_ids, **kwargs)
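
The factorization trades a single vocab_size × hidden_size matrix for two much smaller factors on each side of the model. With the values from the new config.json (vocab 128256, hidden 2048, embedding_dim 512), a quick sketch of the savings; the arithmetic is illustrative, not measured from the checkpoint:

```python
# Parameter savings from the ALBERT-style factorization, using values from config.json.
vocab, hidden, embed_dim = 128256, 2048, 512

full = vocab * hidden                               # dense embedding or lm_head: ~262.7M params
factored = vocab * embed_dim + embed_dim * hidden   # Embedding -> Linear (or Linear -> Linear): ~66.7M

print(f"full: {full:,}  factored: {factored:,}  saved per matrix: {full - factored:,}")
# With tie_word_embeddings = false, both the input embeddings and the LM head are
# factorized, so the total saving is roughly 2 * 195,952,640 ≈ 392M parameters.
```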