"""
LUNA 100M - LoRA Adapter Chat
Loads the base SFT model, injects LoRA, and applies an adapter checkpoint.
Usage:
python lora_chat.py --adapter Base/out/sft/rag_mcp_lora/final/adapter_model.pt
python lora_chat.py --adapter Base/out/sft/rag_mcp_lora/step-001554/adapter_model.pt
python lora_chat.py --adapter /path/to/adapter_model.pt --max_new 300 --temp 0.8
# Use the full bundle (has rank/alpha/targets embedded):
python lora_chat.py --adapter Base/out/sft/rag_mcp_lora/final/adapter_bundle.pt --bundle
"""
import argparse
import math
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
# ─── Model (matches sft_train.py exactly) ─────────────────────────────────────
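# Decoder-only transformer: token embeddings (positions are handled by partial
# rotary embeddings inside attention, not by a learned positional table),
# pre-norm residual blocks, a final LayerNorm, and a weight-tied LM head.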
class RotaryEmbedding(nn.Module):
def __init__(self, dim, max_seq_len=1024):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
t = torch.arange(max_seq_len).float()
freqs = torch.einsum("i,j->ij", t, inv_freq)
emb = torch.cat([freqs, freqs], dim=-1)
self.register_buffer("cos_cached", emb.cos())
self.register_buffer("sin_cached", emb.sin())
def forward(self, seq_len):
return self.cos_cached[:seq_len], self.sin_cached[:seq_len]
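# Rotary embedding helpers: rotate_half negates and swaps the two halves of the
# feature dimension; apply_rotary then rotates each feature pair by its
# position-dependent angle, i.e. x * cos(theta) + rotate_half(x) * sin(theta).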
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat([-x2, x1], dim=-1)
def apply_rotary(x, cos, sin):
c = cos.unsqueeze(0).unsqueeze(0)
s = sin.unsqueeze(0).unsqueeze(0)
return x * c + rotate_half(x) * s
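# Multi-head causal self-attention with a fused QKV projection. Only the first
# rot_dim features of each head (rotary_pct of head_dim) receive the rotary
# position encoding; the remaining features pass through unrotated.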
class CausalSelfAttention(nn.Module):
def __init__(self, n_embd, n_head, block_size, rotary_pct=0.25):
super().__init__()
self.n_head = n_head
self.head_dim = n_embd // n_head
self.rot_dim = int(self.head_dim * rotary_pct)
self.c_attn = nn.Linear(n_embd, 3 * n_embd, bias=True)
self.c_proj = nn.Linear(n_embd, n_embd, bias=True)
self.rotary = RotaryEmbedding(self.rot_dim, block_size)
def forward(self, x):
B, T, C = x.size()
qkv = self.c_attn(x).reshape(B, T, 3, self.n_head, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
cos, sin = self.rotary(T)
q = torch.cat([apply_rotary(q[..., :self.rot_dim], cos, sin), q[..., self.rot_dim:]], dim=-1)
k = torch.cat([apply_rotary(k[..., :self.rot_dim], cos, sin), k[..., self.rot_dim:]], dim=-1)
y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
return self.c_proj(y.transpose(1, 2).contiguous().view(B, T, C))
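# Position-wise feed-forward network: 4x hidden expansion with GELU.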
class MLP(nn.Module):
def __init__(self, n_embd):
super().__init__()
self.fc = nn.Linear(n_embd, 4 * n_embd, bias=True)
self.gelu = nn.GELU()
self.proj = nn.Linear(4 * n_embd, n_embd, bias=True)
def forward(self, x):
return self.proj(self.gelu(self.fc(x)))
class Block(nn.Module):
def __init__(self, n_embd, n_head, block_size):
super().__init__()
self.ln1 = nn.LayerNorm(n_embd)
self.attn = CausalSelfAttention(n_embd, n_head, block_size)
self.ln2 = nn.LayerNorm(n_embd)
self.mlp = MLP(n_embd)
def forward(self, x):
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
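# Defaults mirror the LUNA 100M configuration: 10 layers, 12 heads, 768-dim
# embeddings, a 1024-token context window, and a 50304-token vocabulary.
# The LM head shares its weight matrix with the token embedding (weight tying).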
class LUNAModel(nn.Module):
def __init__(self, vocab_size=50304, block_size=1024,
n_layer=10, n_embd=768, n_head=12):
super().__init__()
self.block_size = block_size
self.wte = nn.Embedding(vocab_size, n_embd)
self.blocks = nn.ModuleList([Block(n_embd, n_head, block_size) for _ in range(n_layer)])
self.ln_f = nn.LayerNorm(n_embd)
self.lm_head = nn.Linear(n_embd, vocab_size, bias=False)
self.lm_head.weight = self.wte.weight
def forward(self, idx):
x = self.wte(idx)
for block in self.blocks:
x = block(x)
return self.lm_head(self.ln_f(x))
# ─── LoRA ─────────────────────────────────────────────────────────────────────
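# LoRA wraps a frozen nn.Linear and adds a trainable low-rank update:
#   y = W x + (alpha / rank) * B (A x)
# A is initialised with Kaiming-uniform noise and B with zeros, so the wrapped
# layer starts out numerically identical to the base layer.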
class LoRALinear(nn.Module):
def __init__(self, base_layer, rank=16, alpha=32, dropout=0.0):
super().__init__()
self.base = base_layer
self.scale = alpha / max(rank, 1)
self.dropout = nn.Dropout(dropout)
self.lora_a = nn.Linear(base_layer.in_features, rank, bias=False)
self.lora_b = nn.Linear(rank, base_layer.out_features, bias=False)
nn.init.kaiming_uniform_(self.lora_a.weight, a=math.sqrt(5))
nn.init.zeros_(self.lora_b.weight)
for p in self.base.parameters():
p.requires_grad = False
def forward(self, x):
return self.base(x) + self.lora_b(self.lora_a(self.dropout(x))) * self.scale
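# Walk the module tree and replace every nn.Linear whose qualified name ends with
# one of the target suffixes (e.g. "attn.c_attn") with a LoRALinear wrapper,
# reattaching the wrapped layer to its parent module via setattr.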
def inject_lora(model, target_modules, rank, alpha):
for module_name, module in list(model.named_modules()):
if not isinstance(module, nn.Linear):
continue
if not any(module_name.endswith(t) for t in target_modules):
continue
parent_name, _, child_name = module_name.rpartition(".")
parent = model.get_submodule(parent_name) if parent_name else model
wrapped = LoRALinear(module, rank=rank, alpha=alpha)
wrapped = wrapped.to(device=module.weight.device, dtype=module.weight.dtype)
setattr(parent, child_name, wrapped)
# ─── Generation ───────────────────────────────────────────────────────────────
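# Sampled decoding with repetition penalty, temperature, top-k, and nucleus
# (top-p) filtering. Generation stops after max_new tokens or at token id 0,
# the end-of-text id for the Pythia tokenizer used below.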
@torch.no_grad()
def generate(model, input_ids, max_new=200, temperature=0.7,
top_p=0.9, top_k=50, rep_pen=1.1, device="cpu"):
ids = input_ids.to(device)
for _ in range(max_new):
logits = model(ids[:, -model.block_size:])[:, -1, :]
if rep_pen != 1.0:
for tid in set(ids[0].tolist()):
logits[0, tid] = logits[0, tid] / rep_pen if logits[0, tid] > 0 else logits[0, tid] * rep_pen
if temperature < 1e-6:
next_tok = logits.argmax(dim=-1, keepdim=True)
else:
logits = logits / temperature
probs = F.softmax(logits, dim=-1)
if top_k > 0:
kv, _ = torch.topk(probs, min(top_k, probs.size(-1)))
probs[probs < kv[:, [-1]]] = 0.0
probs /= probs.sum()
if top_p < 1.0:
sp, si = torch.sort(probs, descending=True)
cum = torch.cumsum(sp, dim=-1)
sp[cum - sp > top_p] = 0.0
sp /= sp.sum()
next_tok = si[0, torch.multinomial(sp[0], 1)]
else:
next_tok = torch.multinomial(probs[0], 1)
ids = torch.cat([ids, next_tok.view(1, 1)], dim=1)
if next_tok.item() == 0:
break
return ids[0, input_ids.shape[1]:].tolist()
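# Alpaca-style instruction/response template; the model's reply is generated
# after the "### Response:" marker.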
def format_prompt(instruction):
return f"### Instruction:\n{instruction.strip()}\n\n### Response:\n"
# ─── Main ─────────────────────────────────────────────────────────────────────
def main():
    parser = argparse.ArgumentParser(description="LUNA 100M - LoRA Adapter Chat")
parser.add_argument("--adapter", required=True,
help="Path to adapter_model.pt or adapter_bundle.pt")
parser.add_argument("--bundle", action="store_true",
help="Adapter file is an adapter_bundle.pt (has config embedded)")
parser.add_argument("--base_ckpt", default=None,
help="Path to base model .pth (auto-downloads from HF if not set)")
parser.add_argument("--tok_dir", default="Base/checkpoints/EleutherAI/pythia-160m")
parser.add_argument("--rank", type=int, default=16)
parser.add_argument("--alpha", type=float, default=32.0)
parser.add_argument("--targets", nargs="+",
default=["attn.c_attn", "attn.c_proj", "mlp.fc", "mlp.proj"])
parser.add_argument("--max_new", type=int, default=200)
parser.add_argument("--temp", type=float, default=0.7)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--top_k", type=int, default=50)
parser.add_argument("--rep_pen", type=float, default=1.1)
parser.add_argument("--device", default="auto")
args = parser.parse_args()
# ── device ──
if args.device == "auto":
device = "cuda" if torch.cuda.is_available() else "cpu"
else:
device = args.device
# ── load adapter ──
adapter_path = Path(args.adapter)
if not adapter_path.exists():
raise FileNotFoundError(f"Adapter not found: {adapter_path}")
bundle = torch.load(adapter_path, map_location="cpu", weights_only=True)
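    # adapter_bundle.pt stores config alongside weights ("lora_rank", "lora_alpha",
    # "target_modules", "adapter"); a bare adapter_model.pt is just the LoRA state
    # dict, so rank/alpha/targets fall back to the CLI flags.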
if args.bundle and isinstance(bundle, dict) and "lora_rank" in bundle:
rank = bundle["lora_rank"]
alpha = bundle["lora_alpha"]
targets = bundle["target_modules"]
adapter_state = bundle["adapter"]
print(f" Bundle config: rank={rank}, alpha={alpha}, targets={targets}")
else:
rank = args.rank
alpha = args.alpha
targets = args.targets
adapter_state = bundle
# ── resolve base checkpoint ──
base_ckpt = args.base_ckpt
if base_ckpt is None:
default = Path("Base/out/input_models/luna_sft_v1/sft_v1/final/model.pth")
if default.exists():
base_ckpt = str(default)
else:
print(" Base checkpoint not found locally β€” downloading from HF...")
from huggingface_hub import hf_hub_download
default.parent.mkdir(parents=True, exist_ok=True)
hf_hub_download(
repo_id="ASTERIZER/LUNA-100M",
filename="sft_v1/final/model.pth",
local_dir=str(default.parent.parent.parent),
token=os.environ.get("HF_TOKEN"),
)
base_ckpt = str(default)
# ── build and load base model ──
print(f" Loading base: {base_ckpt}")
base_state = torch.load(base_ckpt, map_location="cpu", weights_only=True)
if isinstance(base_state, dict) and "model" in base_state:
base_state = base_state["model"]
model = LUNAModel()
model.load_state_dict(base_state, strict=True)
model = model.to(device)
# ── inject LoRA and load adapter weights ──
inject_lora(model, target_modules=targets, rank=rank, alpha=alpha)
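    # strict=False: the adapter state dict contains only the lora_a / lora_b
    # weights, so every base-model key is expected to be "missing" here.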
missing, unexpected = model.load_state_dict(adapter_state, strict=False)
if unexpected:
print(f" Warning: unexpected keys in adapter: {unexpected[:5]}")
lora_keys = [k for k in adapter_state if "lora" in k]
print(f" Loaded {len(lora_keys)} LoRA weight tensors from {adapter_path.name}")
model.eval()
# ── tokenizer ──
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(args.tok_dir)
# ── info ──
print(f"\n{'='*60}")
print(f" LUNA 100M + LoRA Adapter")
print(f" Adapter : {adapter_path}")
print(f" Device : {device}")
print(f" max_new : {args.max_new} temp: {args.temp} top_p: {args.top_p}")
print(f"{'='*60}")
print(" Type your instruction and press Enter. Ctrl+C to quit.\n")
# ── REPL ──
while True:
try:
user_input = input("You: ").strip()
except (EOFError, KeyboardInterrupt):
print("\nBye.")
break
if not user_input:
continue
prompt = format_prompt(user_input)
input_ids = tokenizer.encode(prompt, return_tensors="pt")
tokens = generate(
model, input_ids,
max_new=args.max_new,
temperature=args.temp,
top_p=args.top_p,
top_k=args.top_k,
rep_pen=args.rep_pen,
device=device,
)
response = tokenizer.decode(tokens, skip_special_tokens=True)
print(f"\nLUNA: {response.strip()}\n")
if __name__ == "__main__":
main()