Full GPU benchmark including FP32 quant quality test
- bench_full.py +184 -0
bench_full.py
ADDED
@@ -0,0 +1,184 @@
#!/usr/bin/env python3
"""Full Little Fig GPU Benchmark: Quant Quality + Training on t4-medium (30GB RAM)"""
import os, sys, subprocess, json, time, gc, traceback
import numpy as np
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "transformers", "accelerate", "peft", "bitsandbytes", "datasets", "sentencepiece", "protobuf", "psutil", "numpy"])
if not os.path.exists("/app/littlefig"):
    subprocess.check_call(["git", "clone", "https://github.com/ticketguy/littlefig.git", "/app/littlefig"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "-e", "/app/littlefig[train]"])
sys.path.insert(0, "/app/littlefig/src")
import torch
import torch.nn.functional as F

MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
LORA_R = 16; LORA_ALPHA = 32; GROUP_SIZE = 128
LORA_TARGETS = ["q_proj", "k_proj", "v_proj", "o_proj"]
MAX_SEQ = 512; TRAIN_STEPS = 100; BATCH_SIZE = 4; GRAD_ACCUM = 4; LR = 2e-4
RESULTS = {}

def log(msg): print(f"[BENCH] {msg}", flush=True)

def gpu_mb():
    """Peak GPU memory allocated so far, in MB."""
    return torch.cuda.max_memory_allocated() / 1e6 if torch.cuda.is_available() else 0

def reset():
    """Free Python/CUDA caches and restart the peak-memory counter."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()

log(f"PyTorch {torch.__version__}, CUDA={torch.cuda.is_available()}")
if torch.cuda.is_available():
    log(f"GPU: {torch.cuda.get_device_name()} ({torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB)")
import psutil
log(f"System RAM: {psutil.virtual_memory().total/1e9:.1f}GB")

# ──────────────────────────────────────────────────────────────────────────────
# PART A: QUANTIZATION QUALITY (FP32 load on CPU; needs 30GB RAM)
# ──────────────────────────────────────────────────────────────────────────────
log("\n" + "="*60 + "\n QUANTIZATION QUALITY (TinyLlama 1.1B)\n" + "="*60)

from transformers import AutoModelForCausalLM
from little_fig.engine.figquant import figquant_quantize, figquant_dequantize

log("Loading TinyLlama FP32 (CPU)...")
model_fp32 = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float32, low_cpu_mem_usage=True)
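# Rough footprint check (arithmetic, not measured): 1.1B params x 4 bytes/param
# is about 4.4 GB for FP32 weights, comfortably inside the 30 GB RAM budget above.
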
def nf4_qd(W, gs=128):
    """Reference NF4 round-trip: group-wise absmax scaling onto the 16-level NF4 codebook, then dequantize."""
    s, n = W.shape, W.numel(); f = W.reshape(-1).float()
    p = (gs - n % gs) % gs                       # zero-pad to a multiple of the group size
    if p > 0: f = torch.cat([f, torch.zeros(p)])
    g = f.reshape(-1, gs); sc = g.abs().amax(1).clamp(min=1e-10)   # per-group absmax scales
    cb = torch.tensor([-1.0, -0.6962, -0.5251, -0.3949, -0.2844, -0.1848, -0.0911, 0.0,
                       0.0796, 0.1609, 0.2461, 0.3379, 0.4407, 0.5626, 0.7230, 1.0])   # NF4 codebook
    idx = ((g/sc.unsqueeze(1)).reshape(-1).unsqueeze(1) - cb.unsqueeze(0)).abs().argmin(1).reshape(-1, gs)
    return (torch.gather(cb.unsqueeze(0).expand(idx.shape[0], -1), 1, idx.long()) * sc.unsqueeze(1)).reshape(-1)[:n].reshape(s)

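# Quick self-check of the reference quantizer above (illustrative only; the
# `_demo` name is an addition, and its number is not recorded in RESULTS):
# the NF4 round-trip error on a random Gaussian matrix should be small but nonzero.
_demo = torch.randn(256, 256)
log(f"nf4_qd self-test: round-trip MSE on random 256x256 = {F.mse_loss(nf4_qd(_demo, GROUP_SIZE), _demo).item():.3e}")
del _demo
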
methods = {"figquant": {"mse": [], "cos": []}, "nf4": {"mse": [], "cos": []}}
n = 0; fw = 0   # layers scored / layers where FigQuant beats NF4 on MSE
for name, param in model_fp32.named_parameters():
    if param.ndim != 2 or param.numel() < 1024: continue   # skip biases, norms, tiny tensors
    W = param.data.float()
    q = figquant_quantize(W, group_size=GROUP_SIZE, n_iters=8)
    deq = figquant_dequantize(q)
    mse_fq = F.mse_loss(deq, W).item()
    cos_fq = F.cosine_similarity(W.flatten().unsqueeze(0), deq.flatten().unsqueeze(0)).item()
    W_nf4 = nf4_qd(W, GROUP_SIZE)
    mse_nf = F.mse_loss(W_nf4, W).item()
    cos_nf = F.cosine_similarity(W.flatten().unsqueeze(0), W_nf4.flatten().unsqueeze(0)).item()
    methods["figquant"]["mse"].append(mse_fq); methods["figquant"]["cos"].append(cos_fq)
    methods["nf4"]["mse"].append(mse_nf); methods["nf4"]["cos"].append(cos_nf)
    if mse_fq < mse_nf: fw += 1
    n += 1
    if n % 20 == 0: log(f"  {n} layers done...")

avgs = {m: {k: float(np.mean(v)) for k, v in d.items()} for m, d in methods.items()}
mvn = (avgs["nf4"]["mse"] - avgs["figquant"]["mse"]) / avgs["nf4"]["mse"] * 100   # % MSE reduction vs NF4
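# Worked example of the metric above (made-up numbers, just for reading the output):
# if NF4 averages MSE 4.0e-6 and FigQuant 3.0e-6, then
# mvn = (4.0e-6 - 3.0e-6) / 4.0e-6 * 100 = +25.0, i.e. FigQuant cuts reconstruction
# MSE by 25% relative to NF4; a negative value would mean NF4 wins on average.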
RESULTS["quant"] = {"avgs": avgs, "n": n, "fw": fw, "mvn": mvn}
log(f"\n QUANTIZATION RESULTS ({n} layers):")
log(f"  FigQuant: MSE={avgs['figquant']['mse']:.6e}, cos={avgs['figquant']['cos']:.6f}")
log(f"  NF4:      MSE={avgs['nf4']['mse']:.6e}, cos={avgs['nf4']['cos']:.6f}")
log(f"  FigQuant vs NF4: {mvn:+.1f}% MSE (wins {fw}/{n} layers)")
del model_fp32; gc.collect()   # release the FP32 weights before training starts

# ──────────────────────────────────────────────────────────────────────────────
# PART B: TRAINING CONVERGENCE
# ──────────────────────────────────────────────────────────────────────────────
log("\n" + "="*60 + "\n TRAINING (100 steps, batch=4x4)\n" + "="*60)

from datasets import load_dataset
from transformers import AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

ds = load_dataset("tatsu-lab/alpaca", split="train").select(range(1000))

def hf_loop(model, tokenizer, name):
    """Shared train loop for the HF baselines: Alpaca-formatted causal LM for TRAIN_STEPS optimizer steps."""
    dev = next(model.parameters()).device
    def tok_fn(ex):
        inst = ex.get("instruction", ""); inp = ex.get("input", "").strip(); out = ex.get("output", "")
        txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else f"### Instruction:\n{inst}\n\n### Response:\n{out}"
        e = tokenizer(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
        e["labels"] = e["input_ids"].copy(); return e
    td = ds.map(tok_fn, remove_columns=ds.column_names); td.set_format("torch")
    from torch.utils.data import DataLoader
    dl = DataLoader(td, batch_size=BATCH_SIZE, shuffle=True, collate_fn=lambda b: {k: torch.stack([x[k] for x in b]) for k in b[0] if isinstance(b[0][k], torch.Tensor)}, drop_last=True)
    opt = torch.optim.AdamW([p for p in model.parameters() if p.requires_grad], lr=LR, weight_decay=0.01)
    model.train(); losses = []; gs = 0; al = 0.0; reset(); t0 = time.time()
    # Re-iterate the loader until enough micro-batches have run: one pass over
    # 1000 examples at batch 4 gives only 250 micro-batches, but
    # TRAIN_STEPS * GRAD_ACCUM = 400 are needed to reach 100 optimizer steps.
    it = iter(dl)
    while gs < TRAIN_STEPS * GRAD_ACCUM:
        try:
            batch = next(it)
        except StopIteration:
            it = iter(dl); batch = next(it)
        batch = {k: v.to(dev) for k, v in batch.items()}
        with torch.autocast("cuda", dtype=torch.bfloat16):
            loss = model(**batch).loss / GRAD_ACCUM
        loss.backward(); al += loss.item(); gs += 1
        if gs % GRAD_ACCUM == 0:
            torch.nn.utils.clip_grad_norm_([p for p in model.parameters() if p.requires_grad], 1.0)
            opt.step(); opt.zero_grad(); s = gs // GRAD_ACCUM; losses.append(al); al = 0.0
            if s % 20 == 0: log(f"  [{name}] step={s} loss={losses[-1]:.4f}")
    tt = time.time() - t0; pm = gpu_mb()
    del model, opt; gc.collect(); torch.cuda.empty_cache()
    RESULTS[name] = {"final": float(losses[-1]), "time_s": tt, "steps": len(losses), "gpu_mb": pm}
    log(f"  [{name}] loss={losses[-1]:.4f} time={tt:.0f}s gpu={pm:.0f}MB")

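# Effective batch math (from the constants above): BATCH_SIZE * GRAD_ACCUM = 16
# sequences per optimizer step, so 100 steps consume 1,600 sequences, roughly
# 1.6 passes over the 1,000-example Alpaca subset.
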
# FP16 LoRA baseline
log("\n--- FP16 LoRA ---"); reset()
m = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float16, device_map="auto")
t = AutoTokenizer.from_pretrained(MODEL); t.pad_token = t.eos_token
m.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
hf_loop(m, t, "fp16_lora")

# bitsandbytes NF4 (QLoRA) baseline
log("\n--- BnB NF4 ---"); reset()
m = AutoModelForCausalLM.from_pretrained(MODEL, quantization_config=BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16), device_map="auto")
t = AutoTokenizer.from_pretrained(MODEL); t.pad_token = t.eos_token
m = prepare_model_for_kbit_training(m)
m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
hf_loop(m, t, "bnb_nf4")

# FigQuant LoRA (Little Fig's own path; mirrors hf_loop but drives FigModel directly)
log("\n--- FigQuant LoRA ---"); reset()
from little_fig.engine import FigModel
from little_fig.engine.tier import TrainingTier
model = FigModel.from_pretrained(MODEL, lora_r=LORA_R, lora_alpha=LORA_ALPHA, tier=TrainingTier.STREAMING_LORA, target_modules=LORA_TARGETS)
tok = model.tokenizer
examples = [dict(r) for r in ds]
def ftok(ex):
    inst = ex.get("instruction", ""); inp = ex.get("input", "").strip(); out = ex.get("output", "")
    txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else f"### Instruction:\n{inst}\n\n### Response:\n{out}"
    e = tok(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
    return {"input_ids": e["input_ids"], "labels": e["input_ids"].copy(), "attention_mask": e["attention_mask"]}
tokenized = [ftok(ex) for ex in examples]
class DS(torch.utils.data.Dataset):
    """Minimal map-style dataset over the pre-tokenized Alpaca examples."""
    def __init__(s, d): s.d = d
    def __len__(s): return len(s.d)
    def __getitem__(s, i): return {k: torch.tensor(v, dtype=torch.long) for k, v in s.d[i].items()}
from torch.utils.data import DataLoader
dl = DataLoader(DS(tokenized), batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
dev = torch.device("cuda"); model = model.to(dev)
params = list(model.get_trainable_parameters())   # materialize; reused by the optimizer and grad clipping each step
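# Sanity log (illustrative addition; assumes get_trainable_parameters() yields
# tensors, as its use with AdamW and clip_grad_norm_ below implies):
log(f"  [figquant] trainable params: {sum(p.numel() for p in params)/1e6:.2f}M")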
opt = torch.optim.AdamW(params, lr=LR, weight_decay=0.01)
model.model.train(); losses = []; gs = 0; al = 0.0; reset(); t0 = time.time()
# Same loader-cycling fix as in hf_loop: 250 micro-batches per pass < 400 needed.
it = iter(dl)
while gs < TRAIN_STEPS * GRAD_ACCUM:
    try:
        batch = next(it)
    except StopIteration:
        it = iter(dl); batch = next(it)
    batch = {k: v.to(dev) for k, v in batch.items()}
    with torch.autocast("cuda", dtype=torch.bfloat16):
        loss = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], labels=batch["labels"]).loss / GRAD_ACCUM
    loss.backward(); al += loss.item(); gs += 1
    if gs % GRAD_ACCUM == 0:
        torch.nn.utils.clip_grad_norm_(params, 1.0); opt.step(); opt.zero_grad()
        s = gs // GRAD_ACCUM; losses.append(al); al = 0.0
        if s % 20 == 0: log(f"  [figquant] step={s} loss={losses[-1]:.4f}")
tt = time.time() - t0; pm = gpu_mb()
RESULTS["figquant"] = {"final": float(losses[-1]), "time_s": tt, "steps": len(losses), "gpu_mb": pm}
log(f"  [figquant] loss={losses[-1]:.4f} time={tt:.0f}s gpu={pm:.0f}MB")
del model, opt; gc.collect(); torch.cuda.empty_cache()

# ──────────────────────────────────────────────────────────────────────────────
log("\n" + "="*60)
log(" FINAL RESULTS: Little Fig vs Industry")
log("="*60)
if "quant" in RESULTS:
    q = RESULTS["quant"]
    log(f"\n QUANTIZATION ({q['n']} layers): FigQuant vs NF4 = {q['mvn']:+.1f}% MSE (wins {q['fw']}/{q['n']})")
log("\n TRAINING:")
log(f"  {'Method':>12} {'Loss':>8} {'Time':>7} {'GPU MB':>8}")
log(f"  {'─'*40}")
for k in ["fp16_lora", "bnb_nf4", "figquant"]:
    if k in RESULTS:
        r = RESULTS[k]
        log(f"  {k:>12} {r['final']:>8.4f} {r['time_s']:>6.0f}s {r['gpu_mb']:>8.0f}")
log("="*60)
with open("/app/results.json", "w") as f: json.dump(RESULTS, f, indent=2, default=str)
log("Done.")
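# To inspect the saved metrics afterwards (illustrative usage, not run here):
#   python -c "import json; print(json.dumps(json.load(open('/app/results.json')), indent=2))"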