HenrySentinel committed on
Commit a42373f · verified · 1 Parent(s): 3623324

Add modeling_tinymind.py

Files changed (1)
  1. modeling_tinymind.py +180 -0
modeling_tinymind.py ADDED
@@ -0,0 +1,180 @@
+ """TinyMind model - HuggingFace compatible wrapper.
+
+ Matches the original pytorch_model.bin parameter names exactly:
+ model.token_embedding.weight, model.position_embedding.weight,
+ model.blocks.{i}.ln1.weight/bias, model.blocks.{i}.attn.qkv.weight,
+ model.blocks.{i}.attn.proj.weight/bias, model.blocks.{i}.ln2.weight/bias,
+ model.blocks.{i}.ff.net.0.weight/bias, model.blocks.{i}.ff.net.3.weight/bias,
+ model.ln_f.weight/bias, model.head.weight
+ """
+ import math
+ import torch
+ import torch.nn as nn
+ from transformers import PreTrainedModel, GenerationMixin
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+ from configuration_tinymind import TinyMindConfig
+
+
+ class TinyMindAttention(nn.Module):
+     def __init__(self, config: TinyMindConfig):
+         super().__init__()
+         self.n_heads = config.n_heads
+         self.head_dim = config.n_embd // config.n_heads
+         # Original: qkv is bias=False (768, 256), proj has bias (256, 256)
+         self.qkv = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
+         self.proj = nn.Linear(config.n_embd, config.n_embd)
+         self.attn_drop = nn.Dropout(config.dropout)
+
+     def forward(self, x, attention_mask=None):
+         B, T, C = x.shape
+         q, k, v = self.qkv(x).split(C, dim=2)
+
+         q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
+         k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
+         v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
+
+         scale = math.sqrt(self.head_dim)
+         scores = torch.matmul(q, k.transpose(-2, -1)) / scale
+
+         # Causal mask
+         causal = torch.tril(torch.ones(T, T, device=x.device, dtype=torch.bool))
+         scores = scores.masked_fill(~causal.view(1, 1, T, T), float('-inf'))
+
+         if attention_mask is not None:
+             # HF convention: 1 = attend, 0 = masked
+             # Convert to additive mask: attend positions (1) → 0, masked positions (0) → finfo min
+             attn_mask = (1.0 - attention_mask[:, None, None, :].float()) * torch.finfo(scores.dtype).min
+             scores = scores + attn_mask
+
+         weights = self.attn_drop(torch.softmax(scores, dim=-1))
+         out = torch.matmul(weights, v)
+         out = out.transpose(1, 2).contiguous().view(B, T, C)
+         return self.proj(out)
+
+
+ class TinyMindFF(nn.Module):
+     """Matches original: ff.net.0 = Linear, ff.net.3 = Linear (with GELU + Dropout in between)"""
+     def __init__(self, config: TinyMindConfig):
+         super().__init__()
+         # Original uses nn.Sequential with indices 0, 1(GELU), 2(Dropout), 3
+         self.net = nn.Sequential(
+             nn.Linear(config.n_embd, 4 * config.n_embd),  # net.0
+             nn.GELU(),  # net.1
+             nn.Dropout(config.dropout),  # net.2
+             nn.Linear(4 * config.n_embd, config.n_embd),  # net.3
+             nn.Dropout(config.dropout),  # net.4
+         )
+
+     def forward(self, x):
+         return self.net(x)
+
+
+ class TinyMindBlock(nn.Module):
+     def __init__(self, config: TinyMindConfig):
+         super().__init__()
+         self.ln1 = nn.LayerNorm(config.n_embd)
+         self.attn = TinyMindAttention(config)
+         self.ln2 = nn.LayerNorm(config.n_embd)
+         self.ff = TinyMindFF(config)
+
+     def forward(self, x, attention_mask=None):
+         x = x + self.attn(self.ln1(x), attention_mask=attention_mask)
+         x = x + self.ff(self.ln2(x))
+         return x
+
+
+ class TinyMindModel(nn.Module):
+     """Inner model matching original 'model.*' weight prefix."""
+     def __init__(self, config: TinyMindConfig):
+         super().__init__()
+         self.token_embedding = nn.Embedding(config.vocab_size, config.n_embd)
+         self.position_embedding = nn.Embedding(config.max_seq_len, config.n_embd)
+         self.drop = nn.Dropout(config.dropout)
+         self.blocks = nn.ModuleList([TinyMindBlock(config) for _ in range(config.n_layers)])
+         self.ln_f = nn.LayerNorm(config.n_embd)
+         self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)  # placeholder, replaced and tied in the wrapper
+
+     def forward(self, input_ids, attention_mask=None):
+         B, T = input_ids.shape
+         pos = torch.arange(T, device=input_ids.device).unsqueeze(0)
+
+         x = self.drop(self.token_embedding(input_ids) + self.position_embedding(pos))
+         for block in self.blocks:
+             x = block(x, attention_mask=attention_mask)
+         x = self.ln_f(x)
+         return x
+
+
+ class TinyMindForCausalLM(PreTrainedModel, GenerationMixin):
+     config_class = TinyMindConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _tied_weights_keys = {"model.head.weight": "model.token_embedding.weight"}
+
+     def __init__(self, config: TinyMindConfig):
+         super().__init__(config)
+         # Architecture matches original weight names under 'model.*'
+         self.model = TinyMindModel(config)
+         # LM head - will be weight-tied with token embedding
+         self.model.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+         # Weight tying
+         self.model.head.weight = self.model.token_embedding.weight
+
+         self.post_init()
+
+     def _tie_weights(self):
+         self.model.head.weight = self.model.token_embedding.weight
+
+     def get_input_embeddings(self):
+         return self.model.token_embedding
+
+     def set_input_embeddings(self, value):
+         self.model.token_embedding = value
+
+     def get_output_embeddings(self):
+         return self.model.head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.model.head = new_embeddings
+
+     def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
+         return {
+             "input_ids": input_ids,
+             "attention_mask": attention_mask,
+         }
+
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         labels=None,
+         **kwargs,
+     ):
+         B, T = input_ids.shape
+         pos = torch.arange(T, device=input_ids.device).unsqueeze(0)
+
+         x = self.model.drop(
+             self.model.token_embedding(input_ids) + self.model.position_embedding(pos)
+         )
+         for block in self.model.blocks:
+             x = block(x, attention_mask=attention_mask)
+         x = self.model.ln_f(x)
+         logits = self.model.head(x)
+
+         loss = None
+         if labels is not None:
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             loss = nn.functional.cross_entropy(
+                 shift_logits.view(-1, shift_logits.size(-1)),
+                 shift_labels.view(-1),
+                 ignore_index=-100,
+             )
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=None,
+             hidden_states=None,
+             attentions=None,
+         )
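
For reference, a minimal loading check (not part of the committed file). It assumes configuration_tinymind.py provides TinyMindConfig defaults that match the original training run, that this script runs from the repo directory, and that the original pytorch_model.bin is present; adjust config fields if they differ:

import torch

from configuration_tinymind import TinyMindConfig
from modeling_tinymind import TinyMindForCausalLM

# Assumption: TinyMindConfig() defaults mirror the original run; override fields otherwise.
config = TinyMindConfig()
model = TinyMindForCausalLM(config)

# Load the original checkpoint and compare key sets. Both lists should be empty
# if the module names defined above really match the checkpoint's parameter names.
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
missing, unexpected = model.load_state_dict(state_dict, strict=False)
print("missing keys:", missing)
print("unexpected keys:", unexpected)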