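"""A minimal GPT-style decoder-only transformer in PyTorch.

Implements learned positional embeddings, pre-norm transformer blocks, a
per-layer KV cache for incremental decoding, and top-p sampling with a
repetition penalty. Widths follow GPT-2 small (768 hidden, 12 heads) with
6 layers and an 8192-token context."""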
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional


# Hyperparameters: GPT-2 vocabulary and hidden width, 6 transformer layers,
# an 8192-token maximum context, and the standard 4x FFN expansion.
VOCAB_SIZE = 50257
MODEL_DIM = 768
NUM_HEADS = 12
NUM_LAYERS = 6
MAX_SEQ_LEN = 8192
FFN_HIDDEN_DIM = 4 * MODEL_DIM
HEAD_DIM = MODEL_DIM // NUM_HEADS  # 64 dims per head


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class LearnedPositionalEmbedding(nn.Module):
    """Learned absolute positional embeddings added to the token embeddings."""

    def __init__(self, max_seq_len: int, embed_dim: int):
        super().__init__()
        # Zero-initialized; GPT-2 instead draws these from a small normal distribution.
        self.pos_emb = nn.Parameter(torch.zeros(max_seq_len, embed_dim))

    def forward(self, x: torch.Tensor, pos_offset: int = 0) -> torch.Tensor:
        # pos_offset shifts the position indices when decoding with a KV cache,
        # so cached tokens and new tokens receive consecutive positions.
        seq_len = x.size(1)
        pos = self.pos_emb[pos_offset : pos_offset + seq_len]
        return x + pos.unsqueeze(0)


class MultiHeadAttention(nn.Module):
    """Causal multi-head self-attention with an optional (k, v) cache.

    Cached keys/values have shape (B, NUM_HEADS, T_past, HEAD_DIM).
    """

    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.k_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.v_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.out_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.scale = HEAD_DIM ** -0.5

    def forward(self, x: torch.Tensor, past_kv=None):
        B, T, _ = x.shape
        q = self.q_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)
        k = self.k_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)
        v = self.v_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)

        # Prepend cached keys/values along the time dimension.
        if past_kv is not None and past_kv[0] is not None:
            past_k, past_v = past_kv
            k = torch.cat([past_k, k], dim=2)
            v = torch.cat([past_v, v], dim=2)

        seqlen = k.size(2)
        attn = torch.matmul(q, k.transpose(-2, -1)) * self.scale

        # Causal mask. A single decoded token (T == 1) attends to the whole cache,
        # so no mask is needed; otherwise query i sits at absolute position
        # past_len + i and may attend to keys 0..past_len + i, which the
        # diagonal offset encodes (this also covers chunked prefill with a cache).
        if T > 1:
            past_len = seqlen - T
            mask = torch.tril(
                torch.ones(T, seqlen, device=x.device, dtype=torch.bool),
                diagonal=past_len,
            )
            attn = attn.masked_fill(~mask.view(1, 1, T, seqlen), float('-inf'))

        attn = F.softmax(attn, dim=-1)
        out = torch.matmul(attn, v)
        out = out.transpose(1, 2).contiguous().view(B, T, MODEL_DIM)
        out = self.out_proj(out)

        return out, (k, v)

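
# Note: on PyTorch >= 2.0, the explicit matmul/softmax in MultiHeadAttention could
# be replaced with F.scaled_dot_product_attention (using is_causal=True for the
# prefill pass) to pick up fused attention kernels; it is kept explicit here for
# readability.
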

class FeedForward(nn.Module):
    """Position-wise MLP with 4x expansion and tanh-approximate GELU."""

    def __init__(self):
        super().__init__()
        self.c_fc = nn.Linear(MODEL_DIM, FFN_HIDDEN_DIM, bias=False)
        self.c_proj = nn.Linear(FFN_HIDDEN_DIM, MODEL_DIM, bias=False)

    def forward(self, x):
        return self.c_proj(F.gelu(self.c_fc(x), approximate='tanh'))

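
# The tanh-approximate GELU above matches the activation used in GPT-2; exact
# (erf-based) GELU would be F.gelu(..., approximate='none'), with a negligible
# difference in practice.
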

class TransformerBlock(nn.Module):
    """Pre-norm transformer block: LayerNorm -> attention -> residual,
    then LayerNorm -> feed-forward -> residual."""

    def __init__(self):
        super().__init__()
        self.attn = MultiHeadAttention()
        self.ffn = FeedForward()
        self.norm1 = nn.LayerNorm(MODEL_DIM)
        self.norm2 = nn.LayerNorm(MODEL_DIM)

    def forward(self, x, past_kv=None):
        attn_out, new_kv = self.attn(self.norm1(x), past_kv)
        x = x + attn_out
        x = x + self.ffn(self.norm2(x))
        return x, new_kv


class GPTPyTorch(nn.Module):
    """Decoder-only transformer: token + position embeddings, NUM_LAYERS blocks,
    a final LayerNorm, and a weight-tied language-model head."""

    def __init__(self):
        super().__init__()
        self.token_emb = nn.Embedding(VOCAB_SIZE, MODEL_DIM)
        self.pos_emb = LearnedPositionalEmbedding(MAX_SEQ_LEN, MODEL_DIM)
        self.blocks = nn.ModuleList([TransformerBlock() for _ in range(NUM_LAYERS)])
        self.ln_f = nn.LayerNorm(MODEL_DIM)
        self.lm_head = nn.Linear(MODEL_DIM, VOCAB_SIZE, bias=False)
        # Initialize weights first, then tie the output projection to the token
        # embedding; tying before init would let the Linear branch below
        # re-initialize the shared matrix with the wrong scheme.
        self.apply(self._init_weights)
        self.lm_head.weight = self.token_emb.weight

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.xavier_uniform_(module.weight)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            nn.init.zeros_(module.bias)
            nn.init.ones_(module.weight)

    def forward(self, input_ids, past_kv: Optional[list] = None):
        B, T = input_ids.shape
        x = self.token_emb(input_ids)

        # Offset position indices by the cache length so newly decoded tokens
        # receive the correct absolute positions.
        if past_kv is not None and past_kv[0] is not None:
            pos_offset = past_kv[0][0].size(2)
        else:
            pos_offset = 0
        x = self.pos_emb(x, pos_offset=pos_offset)

        new_kv_cache = [] if past_kv is not None else None

        for i, block in enumerate(self.blocks):
            layer_past = past_kv[i] if past_kv is not None else None
            x, layer_kv = block(x, layer_past)
            if new_kv_cache is not None:
                new_kv_cache.append(layer_kv)

        x = self.ln_f(x)
        logits = self.lm_head(x)
        return logits, new_kv_cache

    @torch.no_grad()
    def generate(
        self,
        input_ids: torch.Tensor,
        max_new_tokens: int = 100,
        temperature: float = 0.8,
        top_p: float = 0.95,
        repetition_penalty: float = 1.0,
        do_sample: bool = True,
        eos_token_id: int = 50256,
    ) -> torch.Tensor:
        """Autoregressive decoding with a KV cache. Assumes batch size 1
        (the EOS check below calls .item())."""
        kv_cache = [None] * NUM_LAYERS
        current_ids = input_ids.clone()

        for step in range(max_new_tokens):
            # Prefill on the full prompt once, then feed only the newest token.
            if step == 0:
                input_for_model = current_ids
            else:
                input_for_model = current_ids[:, -1].unsqueeze(-1)

            logits, kv_cache = self(input_for_model, kv_cache)
            next_token_logits = logits[:, -1, :]

            if temperature > 0:
                next_token_logits = next_token_logits / temperature

            # Repetition penalty (CTRL-style): push down the logits of tokens
            # that already appear in the sequence.
            if repetition_penalty != 1.0:
                for i in range(current_ids.shape[0]):
                    unique_tokens = torch.unique(current_ids[i]).tolist()
                    for token_id in unique_tokens:
                        score = next_token_logits[i, token_id]
                        if score < 0:
                            next_token_logits[i, token_id] = score * repetition_penalty
                        else:
                            next_token_logits[i, token_id] = score / repetition_penalty

            # Nucleus (top-p) filtering: keep the smallest set of tokens whose
            # cumulative probability exceeds top_p, always retaining the top token.
            if do_sample and top_p < 1.0:
                sorted_logits, sorted_indices = torch.sort(next_token_logits, descending=True)
                cumulative_probs = torch.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
                sorted_indices_to_remove = cumulative_probs > top_p
                sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
                sorted_indices_to_remove[:, 0] = False
                indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                next_token_logits = next_token_logits.masked_fill(indices_to_remove, float('-inf'))

            # Sample, or fall back to greedy decoding when do_sample is False
            # or temperature is zero.
            if do_sample and temperature > 0:
                probs = torch.softmax(next_token_logits, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                next_token = torch.argmax(next_token_logits, dim=-1, keepdim=True)

            # Stop before appending EOS; the returned sequence excludes it.
            if next_token.item() == eos_token_id:
                break

            current_ids = torch.cat([current_ids, next_token], dim=1)

        return current_ids


if __name__ == "__main__":
    os.makedirs("models", exist_ok=True)

    model = GPTPyTorch().to(device)
    model.eval()

    print(f"Device: {device}")
    print(f"Total parameters: {sum(p.numel() for p in model.parameters()) / 1e6:.2f}M")

    # Smoke test: run a random prompt through the untrained model.
    input_ids = torch.randint(0, VOCAB_SIZE, (1, 50), device=device)
    logits, _ = model(input_ids)
    print("logits shape:", logits.shape)

    generated = model.generate(input_ids, max_new_tokens=100, temperature=0.8, top_p=0.9)
    print("Generated sequence length:", generated.shape[1])

    ckpt_path = "models/JiRack_H12_L6_V50257_D768_MSL8192_FF768x4.pt"
    torch.save(model.state_dict(), ckpt_path)
    print(f"Model successfully saved to {ckpt_path}")
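
    # Round-trip check (sketch): reload the checkpoint just written and confirm
    # the restored model reproduces the original logits exactly.
    reloaded = GPTPyTorch().to(device)
    reloaded.load_state_dict(torch.load(ckpt_path, map_location=device))
    reloaded.eval()
    with torch.no_grad():
        relogits, _ = reloaded(input_ids)
    print("Round-trip max logit diff:", (relogits - logits).abs().max().item())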