#!/usr/bin/env python3
"""Full Little Fig GPU Benchmark β Quant Quality + Training on t4-medium (30GB RAM)"""
import os, sys, subprocess, json, time, gc, traceback
import numpy as np
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "transformers", "accelerate", "peft", "bitsandbytes", "datasets", "sentencepiece", "protobuf", "psutil", "numpy"])
if not os.path.exists("/app/littlefig"):
    subprocess.check_call(["git", "clone", "https://github.com/ticketguy/littlefig.git", "/app/littlefig"])
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "-e", "/app/littlefig[train]"])
sys.path.insert(0, "/app/littlefig/src")
import torch
import torch.nn.functional as F
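# Benchmark configuration: base model, LoRA hyper-parameters, quantization group
# size, and the shared training schedule used by every method below.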
MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
LORA_R = 16; LORA_ALPHA = 32; GROUP_SIZE = 128
LORA_TARGETS = ["q_proj","k_proj","v_proj","o_proj"]
MAX_SEQ = 512; TRAIN_STEPS = 100; BATCH_SIZE = 4; GRAD_ACCUM = 4; LR = 2e-4
RESULTS = {}
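# Helpers: tagged logging, peak GPU allocation in MB, and a reset that clears the
# CUDA cache and peak-memory counters between runs so measurements don't carry over.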
def log(msg): print(f"[BENCH] {msg}", flush=True)
def gpu_mb(): return torch.cuda.max_memory_allocated()/1e6 if torch.cuda.is_available() else 0
def reset():
    gc.collect()
    if torch.cuda.is_available(): torch.cuda.empty_cache(); torch.cuda.reset_peak_memory_stats()
log(f"PyTorch {torch.__version__}, CUDA={torch.cuda.is_available()}")
if torch.cuda.is_available():
    log(f"GPU: {torch.cuda.get_device_name()} ({torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB)")
import psutil
log(f"System RAM: {psutil.virtual_memory().total/1e9:.1f}GB")
# ──────────────────────────────────────────────────────────────────────────────
# PART A: QUANTIZATION QUALITY (FP32 load on CPU → needs 30GB RAM)
# ──────────────────────────────────────────────────────────────────────────────
log("\n" + "="*60 + "\n QUANTIZATION QUALITY (TinyLlama 1.1B)\n" + "="*60)
from transformers import AutoModelForCausalLM
from little_fig.engine.figquant import figquant_quantize, figquant_dequantize
log("Loading TinyLlama FP32 (CPU)...")
model_fp32 = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float32, low_cpu_mem_usage=True)
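# Reference NF4 baseline: group the flattened weights into blocks of `gs`, scale by
# the per-block absmax, snap each value to the nearest entry of the 16-level NF4
# codebook, then dequantize back to float for a round-trip error comparison.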
def nf4_qd(W, gs=128):
    s, n = W.shape, W.numel(); f = W.reshape(-1).float()
    p = (gs-n%gs)%gs
    if p>0: f = torch.cat([f, torch.zeros(p)])
    g = f.reshape(-1,gs); sc = g.abs().amax(1).clamp(min=1e-10)
    cb = torch.tensor([-1.0,-0.6962,-0.5251,-0.3949,-0.2844,-0.1848,-0.0911,0.0,0.0796,0.1609,0.2461,0.3379,0.4407,0.5626,0.7230,1.0])
    idx = ((g/sc.unsqueeze(1)).reshape(-1).unsqueeze(1)-cb.unsqueeze(0)).abs().argmin(1).reshape(-1,gs)
    return (torch.gather(cb.unsqueeze(0).expand(idx.shape[0],-1),1,idx.long())*sc.unsqueeze(1)).reshape(-1)[:n].reshape(s)
methods = {"figquant":{"mse":[],"cos":[]},"nf4":{"mse":[],"cos":[]}}
n=0; fw=0
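# Per-layer comparison: for every 2-D weight with >=1024 elements, quantize and
# dequantize with FigQuant and with the NF4 reference, then record MSE and cosine
# similarity against the original FP32 tensor.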
for name, param in model_fp32.named_parameters():
    if param.ndim!=2 or param.numel()<1024: continue
    W = param.data.float()
    q = figquant_quantize(W, group_size=GROUP_SIZE, n_iters=8)
    deq = figquant_dequantize(q)
    mse_fq = F.mse_loss(deq, W).item()
    cos_fq = F.cosine_similarity(W.flatten().unsqueeze(0), deq.flatten().unsqueeze(0)).item()
    W_nf4 = nf4_qd(W, GROUP_SIZE)
    mse_nf = F.mse_loss(W_nf4, W).item()
    cos_nf = F.cosine_similarity(W.flatten().unsqueeze(0), W_nf4.flatten().unsqueeze(0)).item()
    methods["figquant"]["mse"].append(mse_fq); methods["figquant"]["cos"].append(cos_fq)
    methods["nf4"]["mse"].append(mse_nf); methods["nf4"]["cos"].append(cos_nf)
    if mse_fq < mse_nf: fw+=1
    n+=1
    if n%20==0: log(f" {n} layers done...")
avgs = {m:{k:float(np.mean(v)) for k,v in d.items()} for m,d in methods.items()}
mvn = (avgs["nf4"]["mse"]-avgs["figquant"]["mse"])/avgs["nf4"]["mse"]*100
RESULTS["quant"] = {"avgs":avgs, "n":n, "fw":fw, "mvn":mvn}
log(f"\n QUANTIZATION RESULTS ({n} layers):")
log(f" FigQuant: MSE={avgs['figquant']['mse']:.6e}, cos={avgs['figquant']['cos']:.6f}")
log(f" NF4: MSE={avgs['nf4']['mse']:.6e}, cos={avgs['nf4']['cos']:.6f}")
log(f" FigQuant vs NF4: {mvn:+.1f}% MSE (wins {fw}/{n} layers)")
del model_fp32; gc.collect()
# ──────────────────────────────────────────────────────────────────────────────
# PART B: TRAINING CONVERGENCE
# ──────────────────────────────────────────────────────────────────────────────
log("\n" + "="*60 + "\n TRAINING (100 steps, batch=4x4)\n" + "="*60)
from datasets import load_dataset
from transformers import AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
ds = load_dataset("tatsu-lab/alpaca", split="train").select(range(1000))
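# Shared fine-tuning loop for the HF baselines: tokenize Alpaca prompts to MAX_SEQ,
# run TRAIN_STEPS optimizer steps with GRAD_ACCUM gradient accumulation under bf16
# autocast, clip gradients, and record final loss, wall time, and peak GPU memory.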
def hf_loop(model, tokenizer, name):
    dev = next(model.parameters()).device
    def tok_fn(ex):
        inst=ex.get("instruction",""); inp=ex.get("input","").strip(); out=ex.get("output","")
        txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else f"### Instruction:\n{inst}\n\n### Response:\n{out}"
        e = tokenizer(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
        e["labels"] = e["input_ids"].copy(); return e
    td = ds.map(tok_fn, remove_columns=ds.column_names); td.set_format("torch")
    from torch.utils.data import DataLoader
    dl = DataLoader(td, batch_size=BATCH_SIZE, shuffle=True, collate_fn=lambda b: {k:torch.stack([x[k] for x in b]) for k in b[0] if isinstance(b[0][k], torch.Tensor)}, drop_last=True)
    opt = torch.optim.AdamW([p for p in model.parameters() if p.requires_grad], lr=LR, weight_decay=0.01)
    model.train(); losses=[]; gs=0; al=0.0; reset(); t0=time.time()
    for batch in dl:
        if gs>=TRAIN_STEPS*GRAD_ACCUM: break
        batch = {k:v.to(dev) for k,v in batch.items()}
        with torch.autocast("cuda", dtype=torch.bfloat16):
            loss = model(**batch).loss / GRAD_ACCUM
        loss.backward(); al+=loss.item(); gs+=1
        if gs%GRAD_ACCUM==0:
            torch.nn.utils.clip_grad_norm_([p for p in model.parameters() if p.requires_grad], 1.0)
            opt.step(); opt.zero_grad(); s=gs//GRAD_ACCUM; losses.append(al); al=0.0
            if s%20==0: log(f" [{name}] step={s} loss={losses[-1]:.4f}")
    tt=time.time()-t0; pm=gpu_mb()
    del model, opt; gc.collect(); torch.cuda.empty_cache()
    RESULTS[name] = {"final":float(losses[-1]),"time_s":tt,"steps":len(losses),"gpu_mb":pm}
    log(f" [{name}] loss={losses[-1]:.4f} time={tt:.0f}s gpu={pm:.0f}MB")
# FP16
log("\n--- FP16 LoRA ---"); reset()
m = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float16, device_map="auto")
t = AutoTokenizer.from_pretrained(MODEL); t.pad_token=t.eos_token
m.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant":False})
m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
hf_loop(m, t, "fp16_lora")
# NF4
log("\n--- BnB NF4 ---"); reset()
m = AutoModelForCausalLM.from_pretrained(MODEL, quantization_config=BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16), device_map="auto")
t = AutoTokenizer.from_pretrained(MODEL); t.pad_token=t.eos_token
m = prepare_model_for_kbit_training(m)
m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
hf_loop(m, t, "bnb_nf4")
# FigQuant
log("\n--- FigQuant LoRA ---"); reset()
from little_fig.engine import FigModel
from little_fig.engine.tier import TrainingTier
model = FigModel.from_pretrained(MODEL, lora_r=LORA_R, lora_alpha=LORA_ALPHA, tier=TrainingTier.STREAMING_LORA, target_modules=LORA_TARGETS)
tok = model.tokenizer
examples = [dict(r) for r in ds]
def ftok(ex):
    inst=ex.get("instruction",""); inp=ex.get("input","").strip(); out=ex.get("output","")
    txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else f"### Instruction:\n{inst}\n\n### Response:\n{out}"
    e = tok(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
    return {"input_ids":e["input_ids"],"labels":e["input_ids"].copy(),"attention_mask":e["attention_mask"]}
tokenized = [ftok(ex) for ex in examples]
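# Minimal map-style Dataset that turns the pre-tokenized dicts into long tensors.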
class DS(torch.utils.data.Dataset):
    def __init__(self, d): self.d = d
    def __len__(self): return len(self.d)
    def __getitem__(self, i): return {k: torch.tensor(v, dtype=torch.long) for k, v in self.d[i].items()}
from torch.utils.data import DataLoader
dl = DataLoader(DS(tokenized), batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
dev = torch.device("cuda"); model = model.to(dev)
params = model.get_trainable_parameters()
opt = torch.optim.AdamW(params, lr=LR, weight_decay=0.01)
model.model.train(); losses=[]; gs=0; al=0.0; reset(); t0=time.time()
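# Same schedule as hf_loop: TRAIN_STEPS optimizer steps, GRAD_ACCUM accumulation,
# bf16 autocast, and gradient clipping at 1.0.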
for batch in dl:
    if gs>=TRAIN_STEPS*GRAD_ACCUM: break
    batch = {k:v.to(dev) for k,v in batch.items()}
    with torch.autocast("cuda", dtype=torch.bfloat16):
        loss = model(input_ids=batch["input_ids"],attention_mask=batch["attention_mask"],labels=batch["labels"]).loss / GRAD_ACCUM
    loss.backward(); al+=loss.item(); gs+=1
    if gs%GRAD_ACCUM==0:
        torch.nn.utils.clip_grad_norm_(params,1.0); opt.step(); opt.zero_grad()
        s=gs//GRAD_ACCUM; losses.append(al); al=0.0
        if s%20==0: log(f" [figquant] step={s} loss={losses[-1]:.4f}")
tt=time.time()-t0; pm=gpu_mb()
RESULTS["figquant"] = {"final":float(losses[-1]),"time_s":tt,"steps":len(losses),"gpu_mb":pm}
log(f" [figquant] loss={losses[-1]:.4f} time={tt:.0f}s gpu={pm:.0f}MB")
del model, opt; gc.collect(); torch.cuda.empty_cache()
# ──────────────────────────────────────────────────────────────────────────────
log("\n" + "="*60)
log(" π FINAL RESULTS: Little Fig vs Industry")
log("="*60)
if "quant" in RESULTS:
q=RESULTS["quant"]
log(f"\n QUANTIZATION ({q['n']} layers): FigQuant vs NF4 = {q['mvn']:+.1f}% MSE (wins {q['fw']}/{q['n']})")
log(f"\n TRAINING:")
log(f" {'Method':>12} {'Loss':>8} {'Time':>7} {'GPU MB':>8}")
log(f" {'β'*40}")
for k in ["fp16_lora","bnb_nf4","figquant"]:
if k in RESULTS:
r=RESULTS[k]
log(f" {k:>12} {r['final']:.4f} {r['time_s']:.0f}s {r['gpu_mb']:.0f}")
log("="*60)
with open("/app/results.json","w") as f: json.dump(RESULTS,f,indent=2,default=str)
log("π Done.")