import os
import math
from pathlib import Path
from typing import Optional, Tuple, List

import torch
import torch.nn as nn
import torch.nn.functional as F

# Model hyperparameters.
VOCAB_SIZE = 50257
MODEL_DIM = 768
NUM_HEADS = 12
NUM_LAYERS = 18
MAX_SEQ_LEN = 8192
FFN_HIDDEN_DIM = 4 * MODEL_DIM
HEAD_DIM = MODEL_DIM // NUM_HEADS

# ROCm builds expose the GPU through the "cuda" device and set torch.version.hip,
# so both CUDA and ROCm end up on torch.device("cuda").
if torch.cuda.is_available():
    device = torch.device("cuda")
elif getattr(torch.version, "hip", None) is not None:
    device = torch.device("cuda")
else:
    device = torch.device("cpu")


class LearnedPositionalEmbedding(nn.Module):
    def __init__(self, max_seq_len: int, embed_dim: int):
        super().__init__()
        self.pos_emb = nn.Parameter(torch.zeros(max_seq_len, embed_dim))

    def forward(self, x: torch.Tensor, pos_offset: int = 0) -> torch.Tensor:
        # pos_offset is the number of already-cached tokens, so new tokens pick up
        # positional rows starting where the cache left off.
        seq_len = x.size(1)
        pos = self.pos_emb[pos_offset : pos_offset + seq_len]
        return x + pos.unsqueeze(0)


class MultiHeadAttention(nn.Module):
    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.k_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.v_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.out_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.scale = HEAD_DIM ** -0.5

    def forward(self, x: torch.Tensor, past_kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):
        B, T, D = x.shape

        q = self.q_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)
        k = self.k_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)
        v = self.v_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)

        # Prepend cached keys/values (if any) along the sequence dimension, and always
        # hand the concatenated tensors back so the caller can grow its cache.
        pos_offset = 0
        if past_kv is not None and past_kv[0] is not None:
            past_k, past_v = past_kv
            k = torch.cat([past_k, k], dim=2)
            v = torch.cat([past_v, v], dim=2)
            pos_offset = past_k.size(2)
        new_kv = (k, v)

        seqlen_k = k.size(2)

        attn = torch.matmul(q, k.transpose(-2, -1)) * self.scale

        # Additive mask of shape (T, seqlen_k): cached positions stay visible (0),
        # while the current block is causally masked relative to its own offset.
        mask = torch.zeros((T, seqlen_k), device=x.device, dtype=attn.dtype)
        current_causal_mask = torch.tril(torch.ones(T, T, device=x.device, dtype=torch.bool))
        mask[:, pos_offset : pos_offset + T].masked_fill_(~current_causal_mask, float("-inf"))

        attn = attn + mask[None, None, :, :]

        attn = F.softmax(attn, dim=-1)
        out = torch.matmul(attn, v)
        out = out.transpose(1, 2).contiguous().view(B, T, D)
        out = self.out_proj(out)

        return out, new_kv
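

# Mask layout, for illustration only (not used by the model): with pos_offset = 2
# cached tokens and T = 3 new queries, seqlen_k = 5 and the additive mask above
# works out to
#
#     [[0, 0, 0, -inf, -inf],
#      [0, 0, 0,    0, -inf],
#      [0, 0, 0,    0,    0]]
#
# i.e. every new query sees all cached keys plus the causal prefix of the current block.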


class FeedForward(nn.Module):
    def __init__(self):
        super().__init__()
        self.c_fc = nn.Linear(MODEL_DIM, FFN_HIDDEN_DIM, bias=False)
        self.c_proj = nn.Linear(FFN_HIDDEN_DIM, MODEL_DIM, bias=False)

    def forward(self, x):
        return self.c_proj(F.gelu(self.c_fc(x), approximate='tanh'))


class TransformerBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.attn = MultiHeadAttention()
        self.ffn = FeedForward()
        self.norm1 = nn.LayerNorm(MODEL_DIM, eps=1e-5)
        self.norm2 = nn.LayerNorm(MODEL_DIM, eps=1e-5)

    def forward(self, x, past_kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):
        # Pre-norm residual layout: normalize, transform, then add back to the stream.
        attn_out, new_kv = self.attn(self.norm1(x), past_kv)
        x = x + attn_out
        x = x + self.ffn(self.norm2(x))
        return x, new_kv


class GPTPyTorch(nn.Module):
    def __init__(self):
        super().__init__()
        self.token_emb = nn.Embedding(VOCAB_SIZE, MODEL_DIM)
        self.pos_emb = LearnedPositionalEmbedding(MAX_SEQ_LEN, MODEL_DIM)
        self.blocks = nn.ModuleList([TransformerBlock() for _ in range(NUM_LAYERS)])
        self.ln_f = nn.LayerNorm(MODEL_DIM, eps=1e-5)
        self.lm_head = nn.Linear(MODEL_DIM, VOCAB_SIZE, bias=False)

        # Authorship markers stored as non-trainable buffers.
        signature = "Konstantin V Gbabko . original author © 2025"
        bytes_tensor = torch.tensor([ord(c) for c in signature], dtype=torch.uint8)
        self.register_buffer("konstantin_gbabko_proof_of_authorship", bytes_tensor)
        self.register_buffer("konstantin_gbabko_birth_date", torch.tensor([20251126], dtype=torch.int64))

        # Tie the output projection to the token embedding, then initialize weights.
        self.lm_head.weight = self.token_emb.weight
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            # Projections that write into the residual stream get a scaled-down init.
            std = 0.02 / math.sqrt(2 * NUM_LAYERS) if module.out_features == MODEL_DIM else 0.02
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            nn.init.zeros_(module.bias)
            nn.init.ones_(module.weight)

    def forward(self, input_ids, past_kv: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None):
        B, T = input_ids.shape
        x = self.token_emb(input_ids)

        # The positional offset equals the number of tokens already in the cache.
        pos_offset = 0
        if past_kv is not None and past_kv[0] is not None:
            pos_offset = past_kv[0][0].size(2)

        x = self.pos_emb(x, pos_offset=pos_offset)

        # A new cache is collected only when the caller passed one in (even a list of Nones).
        new_kv_cache = [] if past_kv is not None else None
        current_past = past_kv

        for i, block in enumerate(self.blocks):
            layer_past = current_past[i] if (current_past and i < len(current_past)) else None
            x, layer_kv = block(x, layer_past)

            if new_kv_cache is not None:
                new_kv_cache.append(layer_kv)

        x = self.ln_f(x)
        logits = self.lm_head(x)

        if past_kv is None:
            return logits
        else:
            return logits, new_kv_cache
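

# A minimal greedy-decoding sketch showing one way to drive the per-layer KV cache.
# The function name and the idea of seeding past_kv with a list of Nones are
# assumptions about the intended calling convention, not part of the model itself;
# the None list simply asks forward() to start returning a cache.
@torch.no_grad()
def greedy_generate(model: GPTPyTorch, input_ids: torch.Tensor, max_new_tokens: int = 32) -> torch.Tensor:
    past = [None] * NUM_LAYERS  # empty cache: forward() will populate it
    logits, past = model(input_ids, past)
    for _ in range(max_new_tokens):
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)  # (B, 1)
        input_ids = torch.cat([input_ids, next_token], dim=1)
        # Only the newest token is fed back in; earlier keys/values come from the cache.
        logits, past = model(next_token, past)
    return input_ids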


class GPTPyTorchNoCache(nn.Module):
    """Wrapper with a single-argument forward() so torch.jit.trace sees a cache-free graph."""

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, input_ids):
        return self.model(input_ids, None)


if __name__ == "__main__":
    os.makedirs("models", exist_ok=True)

    TRAIN_SEQ_LEN = 256
    JIT_SAVE_PATH = Path("models/JiRack_H12_L18_V50257_D768_MSL8192_FF768x4.script.pt")

    model = GPTPyTorch().to(device)
    model.eval()

    total_params = sum(p.numel() for p in model.parameters())
    print(f"Device: {device}")
    print(f"Total parameters: {total_params / 1e6:.2f}M")

    # Smoke test: without a cache, forward() returns only the logits.
    dummy_input = torch.randint(0, VOCAB_SIZE, (1, TRAIN_SEQ_LEN), device=device)
    with torch.no_grad():
        logits_test = model(dummy_input, None)
    print(f"Test logits shape: {logits_test.shape}")

    print(f"\nTracing model for JIT export (input sequence length: {TRAIN_SEQ_LEN})...")

    model_no_cache = GPTPyTorchNoCache(model).to(device)

    try:
        traced_script_module = torch.jit.trace(model_no_cache, dummy_input, strict=False)

        traced_script_module.save(str(JIT_SAVE_PATH))
        print(f"✅ Success! Model saved as TorchScript (JIT) to: {JIT_SAVE_PATH}")
        print("Now you can run your training script.")

    except Exception as e:
        print(f"🚨 ERROR during JIT tracing: {e}")
        print("The model may contain operations incompatible with torch.jit.trace.")

    ORIGINAL_SAVE_PATH = "models/JiRack_H12_L18_V50257_D768_MSL8192_FF768x4.pt"
    torch.save(model.state_dict(), ORIGINAL_SAVE_PATH)
    print(f"\nOriginal state_dict saved to {ORIGINAL_SAVE_PATH}")
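
# Possible downstream usage, sketched under the assumption that the two files above
# were written successfully:
#
#     traced = torch.jit.load(str(JIT_SAVE_PATH), map_location=device)
#     fresh = GPTPyTorch().to(device)
#     fresh.load_state_dict(torch.load(ORIGINAL_SAVE_PATH, map_location=device))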