HenrySentinel committed on
Commit 2f2a785 · verified · 1 Parent(s): 590734c

Add modeling class with HF generate() support

Files changed (1)
  1. modeling_tinymind.py +122 -0
modeling_tinymind.py ADDED
@@ -0,0 +1,122 @@
"""TinyMind model - HuggingFace compatible wrapper."""
import math

import torch
import torch.nn as nn
from transformers import PreTrainedModel, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast

from configuration_tinymind import TinyMindConfig

class TinyMindAttention(nn.Module):
    """Multi-head causal self-attention."""

    def __init__(self, config):
        super().__init__()
        self.n_heads = config.n_heads
        self.head_dim = config.n_embd // config.n_heads
        self.qkv = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
        self.proj = nn.Linear(config.n_embd, config.n_embd)
        self.attn_drop = nn.Dropout(config.dropout)

    def forward(self, x, attention_mask=None):
        B, T, C = x.shape
        # One fused projection, then split into query/key/value.
        q, k, v = self.qkv(x).split(C, dim=2)
        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        scale = math.sqrt(self.head_dim)
        scores = torch.matmul(q, k.transpose(-2, -1)) / scale
        # Causal mask: each position attends only to itself and earlier positions.
        causal = torch.tril(torch.ones(T, T, device=x.device, dtype=torch.bool))
        scores = scores.masked_fill(~causal.view(1, 1, T, T), float('-inf'))
        if attention_mask is not None:
            # Padding mask: zero entries become a large negative bias before softmax.
            attn_mask = (1.0 - attention_mask[:, None, None, :].float()) * torch.finfo(scores.dtype).min
            scores = scores + attn_mask
        weights = self.attn_drop(torch.softmax(scores, dim=-1))
        out = torch.matmul(weights, v)
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.proj(out)
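# Alternative sketch (assumes PyTorch >= 2.0 and no padding mask): the explicit
# mask/softmax/dropout in TinyMindAttention.forward could be collapsed into the
# fused kernel, which applies the same 1/sqrt(head_dim) scaling and causal mask:
#   out = torch.nn.functional.scaled_dot_product_attention(
#       q, k, v, dropout_p=self.attn_drop.p if self.training else 0.0, is_causal=True)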

class TinyMindFF(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(config.n_embd, 4 * config.n_embd),
            nn.GELU(),
            nn.Dropout(config.dropout),
            nn.Linear(4 * config.n_embd, config.n_embd),
            nn.Dropout(config.dropout),
        )

    def forward(self, x):
        return self.net(x)

class TinyMindBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.attn = TinyMindAttention(config)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.ff = TinyMindFF(config)

    def forward(self, x, attention_mask=None):
        # Pre-norm residual layout: LayerNorm feeds each sublayer, the residual wraps it.
        x = x + self.attn(self.ln1(x), attention_mask=attention_mask)
        x = x + self.ff(self.ln2(x))
        return x

class TinyMindModel(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.token_embedding = nn.Embedding(config.vocab_size, config.n_embd)
        self.position_embedding = nn.Embedding(config.max_seq_len, config.n_embd)
        self.drop = nn.Dropout(config.dropout)
        self.blocks = nn.ModuleList([TinyMindBlock(config) for _ in range(config.n_layers)])
        self.ln_f = nn.LayerNorm(config.n_embd)
        # LM head: projects hidden states (n_embd) to vocabulary logits (vocab_size).
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

class TinyMindForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = TinyMindConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # transformers expects a list of the checkpoint keys that are tied duplicates.
    _tied_weights_keys = ["model.head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = TinyMindModel(config)
        # Share one weight matrix between the input embedding and the LM head.
        self.model.head.weight = self.model.token_embedding.weight
        self.post_init()
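    # Tying leaves a single shared Parameter; a quick sanity check (sketch):
    #   m = TinyMindForCausalLM(TinyMindConfig())
    #   assert m.model.head.weight.data_ptr() == m.model.token_embedding.weight.data_ptr()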
    def _tie_weights(self):
        self.model.head.weight = self.model.token_embedding.weight

    def get_input_embeddings(self):
        return self.model.token_embedding

    def set_input_embeddings(self, value):
        self.model.token_embedding = value

    def get_output_embeddings(self):
        return self.model.head

    def set_output_embeddings(self, new_embeddings):
        self.model.head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
        # No KV cache yet, so generate() re-runs the full sequence at every step.
        return {"input_ids": input_ids, "attention_mask": attention_mask}
    def forward(self, input_ids=None, attention_mask=None, labels=None, **kwargs):
        B, T = input_ids.shape
        pos = torch.arange(T, device=input_ids.device).unsqueeze(0)
        # Sum of token embeddings and learned absolute position embeddings.
        x = self.model.drop(self.model.token_embedding(input_ids) + self.model.position_embedding(pos))
        for block in self.model.blocks:
            x = block(x, attention_mask=attention_mask)
        x = self.model.ln_f(x)
        logits = self.model.head(x)
        loss = None
        if labels is not None:
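            # Next-token objective: logits at position t are scored against the label
            # at position t + 1. For labels [5, 7, 2] the scored pairs are
            # (logits[0], 7) and (logits[1], 2); ignore_index=-100 skips masked labels.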
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = nn.functional.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
                ignore_index=-100,
            )
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=None, hidden_states=None, attentions=None)
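
A quick smoke test for the generate() support this commit adds (a sketch; it assumes TinyMindConfig() constructs with usable defaults for vocab_size and the other fields):

import torch
from configuration_tinymind import TinyMindConfig
from modeling_tinymind import TinyMindForCausalLM

config = TinyMindConfig()                              # assumed: field defaults exist
model = TinyMindForCausalLM(config).eval()
prompt = torch.randint(0, config.vocab_size, (1, 8))   # batch of one, 8 random token ids
with torch.no_grad():
    out = model.generate(prompt, max_new_tokens=16, do_sample=False)
print(out.shape)  # (1, <= 24): the prompt plus up to 16 greedy tokens (may stop early at EOS)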