#!/usr/bin/env python3
"""Little Fig GPU Training Benchmark β FP16 vs NF4 vs FigQuant on TinyLlama"""
import os, sys, subprocess, json, time, gc, traceback
import numpy as np
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "transformers", "accelerate", "peft", "bitsandbytes", "datasets", "sentencepiece", "protobuf", "psutil", "numpy"])
if not os.path.exists("/app/littlefig"):
subprocess.check_call(["git", "clone", "https://github.com/ticketguy/littlefig.git", "/app/littlefig"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "-e", "/app/littlefig[train]"])
sys.path.insert(0, "/app/littlefig/src")
import torch
import torch.nn.functional as F
MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
LORA_R = 16; LORA_ALPHA = 32; LORA_TARGETS = ["q_proj","k_proj","v_proj","o_proj"]
MAX_SEQ = 512; TRAIN_STEPS = 100; BATCH_SIZE = 4; GRAD_ACCUM = 4; LR = 2e-4
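# Effective batch = BATCH_SIZE * GRAD_ACCUM = 16 sequences per optimizer step;
# TRAIN_STEPS counts optimizer steps, so each run consumes 100 * 16 = 1,600 sequences.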
RESULTS = {}
def log(msg): print(f"[BENCH] {msg}", flush=True)
def gpu_mb(): return torch.cuda.max_memory_allocated()/1e6 if torch.cuda.is_available() else 0
def reset(): gc.collect(); torch.cuda.empty_cache(); torch.cuda.reset_peak_memory_stats()
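# gpu_mb() reports peak *allocated* CUDA memory (not reserved); reset() clears the peak
# counter so each method's high-water mark is measured in isolation.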
log(f"PyTorch {torch.__version__}, CUDA={torch.cuda.is_available()}")
if torch.cuda.is_available():
log(f"GPU: {torch.cuda.get_device_name()} ({torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB)")
from datasets import load_dataset
ds = load_dataset("tatsu-lab/alpaca", split="train").select(range(1000))
log(f"Dataset: {len(ds)} examples")
def hf_loop(model, tokenizer, name):
    dev = next(model.parameters()).device
    def tok_fn(ex):
        inst=ex.get("instruction",""); inp=ex.get("input","").strip(); out=ex.get("output","")
        txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else f"### Instruction:\n{inst}\n\n### Response:\n{out}"
        e = tokenizer(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
        e["labels"] = e["input_ids"].copy(); return e
    td = ds.map(tok_fn, remove_columns=ds.column_names); td.set_format("torch")
    from torch.utils.data import DataLoader
    dl = DataLoader(td, batch_size=BATCH_SIZE, shuffle=True, collate_fn=lambda b: {k:torch.stack([x[k] for x in b]) for k in b[0] if isinstance(b[0][k], torch.Tensor)}, drop_last=True)
    opt = torch.optim.AdamW([p for p in model.parameters() if p.requires_grad], lr=LR, weight_decay=0.01)
    model.train(); losses=[]; gs=0; al=0.0; reset(); t0=time.time()
    step_budget = TRAIN_STEPS*GRAD_ACCUM
    # One pass over the 1,000-example dataset yields only 250 micro-batches (< the 400
    # the budget needs), so loop over epochs until the step budget is met.
    while gs < step_budget:
        for batch in dl:
            if gs>=step_budget: break
            batch = {k:v.to(dev) for k,v in batch.items()}
            with torch.autocast("cuda", dtype=torch.bfloat16):
                loss = model(**batch).loss / GRAD_ACCUM
            loss.backward(); al+=loss.item(); gs+=1
            if gs%GRAD_ACCUM==0:
                torch.nn.utils.clip_grad_norm_([p for p in model.parameters() if p.requires_grad], 1.0)
                opt.step(); opt.zero_grad()
                s=gs//GRAD_ACCUM; losses.append(al); al=0.0
                if s%20==0: log(f" [{name}] step={s} loss={losses[-1]:.4f}")
    tt=time.time()-t0; pm=gpu_mb()
    del model, opt; gc.collect(); torch.cuda.empty_cache()
    RESULTS[name] = {"final":float(losses[-1]) if losses else None, "time_s":tt, "steps":len(losses), "gpu_mb":pm, "losses":[float(l) for l in losses]}
    log(f" [{name}] DONE: loss={losses[-1]:.4f} time={tt:.0f}s gpu={pm:.0f}MB")
# ── FP16 LoRA ──
log("\n=== FP16 LoRA ===")
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model
reset()
m = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float16, device_map="auto")
t = AutoTokenizer.from_pretrained(MODEL); t.pad_token=t.eos_token
m.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant":False})
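# Gradient checkpointing recomputes activations during backward instead of caching them,
# trading extra compute for lower peak memory; use_reentrant=False is the non-reentrant
# implementation PyTorch now recommends.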
m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
hf_loop(m, t, "fp16_lora")
# ── BnB NF4 ──
log("\n=== BnB NF4 QLoRA ===")
from transformers import BitsAndBytesConfig
from peft import prepare_model_for_kbit_training
reset()
m = AutoModelForCausalLM.from_pretrained(MODEL, quantization_config=BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16), device_map="auto")
t = AutoTokenizer.from_pretrained(MODEL); t.pad_token=t.eos_token
m = prepare_model_for_kbit_training(m)
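# prepare_model_for_kbit_training freezes the base weights, upcasts non-quantized params
# (e.g. layer norms) to fp32 for stability, and enables input grads so gradients reach
# the LoRA adapters through the frozen 4-bit layers; it also turns on gradient
# checkpointing by default, matching the FP16 run above.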
m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
hf_loop(m, t, "bnb_nf4")
# ── FigQuant ──
log("\n=== FigQuant LoRA ===")
from little_fig.engine import FigModel
from little_fig.engine.tier import TrainingTier
reset()
model = FigModel.from_pretrained(MODEL, lora_r=LORA_R, lora_alpha=LORA_ALPHA, tier=TrainingTier.STREAMING_LORA, target_modules=LORA_TARGETS)
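# FigModel is littlefig's own wrapper (see imports above); judging from this script it
# loads the checkpoint under the STREAMING_LORA tier and exposes only the LoRA adapters
# as trainable via get_trainable_parameters(), with the base weights kept quantized.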
tok = model.tokenizer
examples = [dict(r) for r in ds]
def tok_fn(ex):
    inst=ex.get("instruction",""); inp=ex.get("input","").strip(); out=ex.get("output","")
    txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else f"### Instruction:\n{inst}\n\n### Response:\n{out}"
    e = tok(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
    return {"input_ids": e["input_ids"], "labels": e["input_ids"].copy(), "attention_mask": e["attention_mask"]}
tokenized = [tok_fn(ex) for ex in examples]
class DS(torch.utils.data.Dataset):
    def __init__(s, d): s.d=d
    def __len__(s): return len(s.d)
    def __getitem__(s, i): return {k:torch.tensor(v, dtype=torch.long) for k,v in s.d[i].items()}
from torch.utils.data import DataLoader
dl = DataLoader(DS(tokenized), batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
dev = torch.device("cuda"); model = model.to(dev)
params = model.get_trainable_parameters()
opt = torch.optim.AdamW(params, lr=LR, weight_decay=0.01)
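# Manual training loop mirroring hf_loop step-for-step so the FigQuant numbers stay
# comparable; FigModel appears to wrap the underlying HF module, hence model.model.train().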
model.model.train(); losses=[]; gs=0; al=0.0; reset(); t0=time.time()
step_budget = TRAIN_STEPS*GRAD_ACCUM
# Same epoch-looping fix as hf_loop: one dataloader pass is fewer micro-batches than the budget.
while gs < step_budget:
    for batch in dl:
        if gs>=step_budget: break
        batch = {k:v.to(dev) for k,v in batch.items()}
        with torch.autocast("cuda", dtype=torch.bfloat16):
            loss = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], labels=batch["labels"]).loss / GRAD_ACCUM
        loss.backward(); al+=loss.item(); gs+=1
        if gs%GRAD_ACCUM==0:
            torch.nn.utils.clip_grad_norm_(params, 1.0); opt.step(); opt.zero_grad()
            s=gs//GRAD_ACCUM; losses.append(al); al=0.0
            if s%20==0: log(f" [figquant] step={s} loss={losses[-1]:.4f}")
tt=time.time()-t0; pm=gpu_mb()
RESULTS["figquant"] = {"final":float(losses[-1]) if losses else None, "time_s":tt, "steps":len(losses), "gpu_mb":pm}
log(f" [figquant] DONE: loss={losses[-1]:.4f} time={tt:.0f}s gpu={pm:.0f}MB")
del model, opt; gc.collect(); torch.cuda.empty_cache()
# ── Summary ──
log("\n" + "="*60)
log(" RESULTS: TinyLlama 1.1B, 100 steps, batch=4x4, LoRA r=16")
log("="*60)
log(f" {'Method':>12} {'Loss':>8} {'Time':>7} {'GPU MB':>8}")
log(f" {'β'*40}")
for k in ["fp16_lora","bnb_nf4","figquant"]:
    if k in RESULTS:
        r=RESULTS[k]
        log(f" {k:>12} {r['final']:.4f} {r['time_s']:.0f}s {r['gpu_mb']:.0f}")
log("="*60)
with open("/app/results.json","w") as f: json.dump(RESULTS, f, indent=2, default=str)