ASTERIZER committed
Commit 8e239f6 · verified · 1 Parent(s): 7c7c7ac

Upload generate.py with huggingface_hub
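
For context, uploads like this are typically made with the huggingface_hub client. A minimal sketch of such an upload; the repo_id below is a placeholder for illustration, not taken from this commit:

    from huggingface_hub import HfApi

    # Uses the token cached by `huggingface-cli login` by default.
    api = HfApi()
    api.upload_file(
        path_or_fileobj="generate.py",
        path_in_repo="generate.py",
        repo_id="ASTERIZER/luna-100m",  # placeholder repo id, assumed for illustration
        commit_message="Upload generate.py with huggingface_hub",
    )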

Files changed (1): generate.py (+268 -0)
generate.py ADDED
"""
LUNA 100M - Text Generation / Interactive Chat

Usage:
    python generate.py                                        # interactive REPL
    python generate.py --prompt "The future of AI is"         # single prompt
    python generate.py --ckpt Base/out/luna_100m/latest.pt    # custom checkpoint
    python generate.py --max_new 200 --temp 0.8 --top_p 0.9   # tune generation
"""

import sys
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F


# ─── Model (must match train.py exactly) ──────────────────────────────────────

class RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_seq_len=1024):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)
        t = torch.arange(max_seq_len).float()
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        emb = torch.cat([freqs, freqs], dim=-1)
        self.register_buffer("cos_cached", emb.cos())
        self.register_buffer("sin_cached", emb.sin())

    def forward(self, seq_len):
        return self.cos_cached[:seq_len], self.sin_cached[:seq_len]


def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat([-x2, x1], dim=-1)


def apply_rotary(x, cos, sin):
    c = cos.unsqueeze(0).unsqueeze(0)  # (1, 1, T, rot_dim)
    s = sin.unsqueeze(0).unsqueeze(0)
    return x * c + rotate_half(x) * s


class CausalSelfAttention(nn.Module):
    def __init__(self, n_embd, n_head, block_size, rotary_pct=0.25):
        super().__init__()
        self.n_head = n_head
        self.head_dim = n_embd // n_head
        self.rot_dim = int(self.head_dim * rotary_pct)
        self.c_attn = nn.Linear(n_embd, 3 * n_embd, bias=True)
        self.c_proj = nn.Linear(n_embd, n_embd, bias=True)
        self.rotary = RotaryEmbedding(self.rot_dim, block_size)

    def forward(self, x):
        B, T, C = x.size()
        qkv = self.c_attn(x).reshape(B, T, 3, self.n_head, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # each (B, n_head, T, head_dim)
        cos, sin = self.rotary(T)
        # Partial rotary: only the first rot_dim dims of each head are rotated.
        q = torch.cat([apply_rotary(q[..., :self.rot_dim], cos, sin), q[..., self.rot_dim:]], dim=-1)
        k = torch.cat([apply_rotary(k[..., :self.rot_dim], cos, sin), k[..., self.rot_dim:]], dim=-1)
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        return self.c_proj(y.transpose(1, 2).contiguous().view(B, T, C))


class MLP(nn.Module):
    def __init__(self, n_embd):
        super().__init__()
        self.fc = nn.Linear(n_embd, 4 * n_embd, bias=True)
        self.gelu = nn.GELU()
        self.proj = nn.Linear(4 * n_embd, n_embd, bias=True)

    def forward(self, x):
        return self.proj(self.gelu(self.fc(x)))


class Block(nn.Module):
    def __init__(self, n_embd, n_head, block_size):
        super().__init__()
        self.ln1 = nn.LayerNorm(n_embd)
        self.attn = CausalSelfAttention(n_embd, n_head, block_size)
        self.ln2 = nn.LayerNorm(n_embd)
        self.mlp = MLP(n_embd)

    def forward(self, x):
        x = x + self.attn(self.ln1(x))
        x = x + self.mlp(self.ln2(x))
        return x


class LUNAModel(nn.Module):
    def __init__(self, vocab_size=50304, block_size=1024,
                 n_layer=10, n_embd=768, n_head=12):
        super().__init__()
        self.block_size = block_size
        self.wte = nn.Embedding(vocab_size, n_embd)
        self.blocks = nn.ModuleList([Block(n_embd, n_head, block_size) for _ in range(n_layer)])
        self.ln_f = nn.LayerNorm(n_embd)
        self.lm_head = nn.Linear(n_embd, vocab_size, bias=False)
        self.lm_head.weight = self.wte.weight  # tied input/output embeddings

    def forward(self, idx):
        x = self.wte(idx)
        for block in self.blocks:
            x = block(x)
        return self.lm_head(self.ln_f(x))


# ─── Generation ───────────────────────────────────────────────────────────────

@torch.no_grad()
def generate(model, input_ids, max_new=200, temperature=0.8,
             top_p=0.9, top_k=50, repetition_penalty=1.1, device="cpu"):
    model.eval()
    ids = input_ids.clone().to(device)
    generated = []

    for _ in range(max_new):
        # Crop context to block_size
        ctx = ids[:, -model.block_size:]
        logits = model(ctx)        # (1, T, V)
        logits = logits[:, -1, :]  # last position only

        # Repetition penalty (CTRL-style). Dividing a negative logit would
        # *raise* its probability, so scale according to sign.
        if repetition_penalty != 1.0:
            for token_id in set(ids[0].tolist()):
                if logits[0, token_id] > 0:
                    logits[0, token_id] /= repetition_penalty
                else:
                    logits[0, token_id] *= repetition_penalty

        logits = logits / max(temperature, 1e-8)

        # Top-k filtering
        if top_k > 0:
            vals, _ = torch.topk(logits, min(top_k, logits.size(-1)))
            logits[logits < vals[:, -1:]] = -float("inf")

        # Top-p (nucleus) sampling
        probs = torch.softmax(logits, dim=-1)
        if top_p < 1.0:
            sorted_probs, sorted_idx = torch.sort(probs, descending=True)
            cum = torch.cumsum(sorted_probs, dim=-1)
            mask = cum - sorted_probs > top_p  # drop tokens outside the nucleus
            sorted_probs[mask] = 0.0
            sorted_probs /= sorted_probs.sum()
            next_token = sorted_idx[0, torch.multinomial(sorted_probs[0], 1)]
        else:
            next_token = torch.multinomial(probs[0], 1)

        ids = torch.cat([ids, next_token.view(1, 1)], dim=1)
        generated.append(next_token.item())

        # Stop at EOS
        if next_token.item() == 50276:
            break

    return generated


# ─── Load ─────────────────────────────────────────────────────────────────────

def load_model(ckpt_path: str, device: str):
    print(f"Loading checkpoint: {ckpt_path}")
    ckpt = torch.load(ckpt_path, map_location="cpu", weights_only=True)

    # Handle both raw state_dicts and {'model': ...} wrappers
    state = ckpt["model"] if "model" in ckpt else ckpt
    step = ckpt.get("step", "?")
    tokens = ckpt.get("tokens_seen", 0)
    print(f" Step: {step} | Tokens seen: {tokens:,}")

    model = LUNAModel()
    model.load_state_dict(state, strict=True)
    model = model.to(device)
    model.eval()
    print(f" Parameters: {sum(p.numel() for p in model.parameters()):,}")
    return model


def load_tokenizer(tok_dir: str):
    try:
        from transformers import AutoTokenizer
        tok = AutoTokenizer.from_pretrained(tok_dir)
        print(f" Tokenizer: {tok_dir} (vocab {tok.vocab_size})")
        return tok
    except Exception as e:
        print(f" ERROR loading tokenizer: {e}")
        print(" Install: pip install transformers")
        sys.exit(1)


# ─── Entry ────────────────────────────────────────────────────────────────────

def parse_args():
    p = argparse.ArgumentParser(description="LUNA 100M - Text Generation")
    p.add_argument("--ckpt", default="Base/out/luna_100m/latest.pt")
    p.add_argument("--tok_dir", default="Base/checkpoints/EleutherAI/pythia-160m")
    p.add_argument("--prompt", default=None, help="Single prompt (else interactive)")
    p.add_argument("--max_new", type=int, default=200)
    p.add_argument("--temp", type=float, default=0.8)
    p.add_argument("--top_p", type=float, default=0.9)
    p.add_argument("--top_k", type=int, default=50)
    p.add_argument("--rep_pen", type=float, default=1.1, help="Repetition penalty")
    p.add_argument("--device", default="auto")
    return p.parse_args()


def run_prompt(model, tokenizer, prompt, args, device):
    ids = tokenizer.encode(prompt, return_tensors="pt")
    print(f"\n{'='*60}")
    print(f"PROMPT: {prompt}")
    print(f"{'='*60}")
    print(prompt, end="", flush=True)

    new_ids = generate(
        model, ids,
        max_new=args.max_new,
        temperature=args.temp,
        top_p=args.top_p,
        top_k=args.top_k,
        repetition_penalty=args.rep_pen,
        device=device,
    )
    output = tokenizer.decode(new_ids, skip_special_tokens=True)
    print(output)
    print(f"{'='*60}")
    print(f"Generated {len(new_ids)} tokens")


def main():
    args = parse_args()

    if args.device == "auto":
        device = "cuda" if torch.cuda.is_available() else "cpu"
    else:
        device = args.device
    print(f"\nDevice: {device}")

    model = load_model(args.ckpt, device)
    tokenizer = load_tokenizer(args.tok_dir)

    if args.prompt:
        run_prompt(model, tokenizer, args.prompt, args, device)
        return

    # Interactive REPL
    print(f"\n{'='*60}")
    print(" LUNA 100M - Interactive Generation")
    print(f" Checkpoint: {args.ckpt}")
    print(f" max_new={args.max_new} temp={args.temp} top_p={args.top_p} top_k={args.top_k}")
    print(" Type your prompt and press Enter. Ctrl+C to exit.")
    print(f"{'='*60}\n")

    while True:
        try:
            prompt = input(">>> ").strip()
            if not prompt:
                continue
            run_prompt(model, tokenizer, prompt, args, device)
        except KeyboardInterrupt:
            print("\nBye!")
            break
        except EOFError:
            break


if __name__ == "__main__":
    main()
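
Beyond the CLI entry point, the same functions can be driven programmatically. A minimal sketch, assuming this file is importable as generate.py from the repo root and that the default checkpoint and tokenizer paths from parse_args() exist locally:

    import torch
    from generate import load_model, load_tokenizer, generate

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = load_model("Base/out/luna_100m/latest.pt", device)       # default --ckpt
    tok = load_tokenizer("Base/checkpoints/EleutherAI/pythia-160m")  # default --tok_dir

    # Encode a prompt, sample a continuation, and decode only the new tokens.
    ids = tok.encode("The future of AI is", return_tensors="pt")
    new_ids = generate(model, ids, max_new=50, temperature=0.8,
                       top_p=0.9, top_k=50, repetition_penalty=1.1, device=device)
    print(tok.decode(new_ids, skip_special_tokens=True))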