ticketguy committed
Commit 45b9d61 · verified · 1 Parent(s): 6e900ca

Training-only GPU benchmark (skip FP32 quant quality — already proven on CPU)

Files changed (1):
  bench_train_only.py (+130, -0)
bench_train_only.py ADDED
@@ -0,0 +1,130 @@
+ #!/usr/bin/env python3
+ """Little Fig GPU Training Benchmark — FP16 vs NF4 vs FigQuant on TinyLlama"""
+ import os, sys, subprocess, json, time, gc, traceback
+ import numpy as np
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "transformers", "accelerate", "peft", "bitsandbytes", "datasets", "sentencepiece", "protobuf", "psutil", "numpy"])
+ if not os.path.exists("/app/littlefig"):
+     subprocess.check_call(["git", "clone", "https://github.com/ticketguy/littlefig.git", "/app/littlefig"])
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "-e", "/app/littlefig[train]"])
+ sys.path.insert(0, "/app/littlefig/src")
+ import torch
+ import torch.nn.functional as F
+ 
+ MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ LORA_R = 16; LORA_ALPHA = 32; LORA_TARGETS = ["q_proj", "k_proj", "v_proj", "o_proj"]
+ MAX_SEQ = 512; TRAIN_STEPS = 100; BATCH_SIZE = 4; GRAD_ACCUM = 4; LR = 2e-4
+ RESULTS = {}
+ 
+ # Memory accounting: reset() clears the CUDA peak-memory counter before each
+ # phase; gpu_mb() reads the peak afterwards via max_memory_allocated(), in MB.
+ def log(msg): print(f"[BENCH] {msg}", flush=True)
+ def gpu_mb(): return torch.cuda.max_memory_allocated() / 1e6 if torch.cuda.is_available() else 0
+ def reset(): gc.collect(); torch.cuda.empty_cache(); torch.cuda.reset_peak_memory_stats()
+ 
+ log(f"PyTorch {torch.__version__}, CUDA={torch.cuda.is_available()}")
+ if torch.cuda.is_available():
+     log(f"GPU: {torch.cuda.get_device_name()} ({torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB)")
+ 
+ from datasets import load_dataset
+ ds = load_dataset("tatsu-lab/alpaca", split="train").select(range(1000))
+ log(f"Dataset: {len(ds)} examples")
+ 
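+ # Shared HF training loop for the FP16 and NF4 runs: same Alpaca prompt
+ # format, optimizer, clipping, and step budget, so only weight storage differs.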
+ def hf_loop(model, tokenizer, name):
+     dev = next(model.parameters()).device
+     def tok_fn(ex):
+         inst = ex.get("instruction", ""); inp = ex.get("input", "").strip(); out = ex.get("output", "")
+         txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else f"### Instruction:\n{inst}\n\n### Response:\n{out}"
+         e = tokenizer(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
+         e["labels"] = e["input_ids"].copy(); return e
+     td = ds.map(tok_fn, remove_columns=ds.column_names); td.set_format("torch")
+     from torch.utils.data import DataLoader
+     dl = DataLoader(td, batch_size=BATCH_SIZE, shuffle=True, collate_fn=lambda b: {k: torch.stack([x[k] for x in b]) for k in b[0] if isinstance(b[0][k], torch.Tensor)}, drop_last=True)
+     opt = torch.optim.AdamW([p for p in model.parameters() if p.requires_grad], lr=LR, weight_decay=0.01)
+     model.train(); losses = []; gs = 0; al = 0.0; reset(); t0 = time.time()
+     # 100 steps x 4 accum needs 400 micro-batches, but one epoch over 1000
+     # examples yields only 250, so re-iterate the loader until the budget is met.
+     dl_it = iter(dl)
+     while gs < TRAIN_STEPS * GRAD_ACCUM:
+         try: batch = next(dl_it)
+         except StopIteration: dl_it = iter(dl); batch = next(dl_it)
+         batch = {k: v.to(dev) for k, v in batch.items()}
+         with torch.autocast("cuda", dtype=torch.bfloat16):
+             loss = model(**batch).loss / GRAD_ACCUM
+         loss.backward(); al += loss.item(); gs += 1
+         if gs % GRAD_ACCUM == 0:
+             torch.nn.utils.clip_grad_norm_([p for p in model.parameters() if p.requires_grad], 1.0)
+             opt.step(); opt.zero_grad()
+             s = gs // GRAD_ACCUM; losses.append(al); al = 0.0
+             if s % 20 == 0: log(f" [{name}] step={s} loss={losses[-1]:.4f}")
+     tt = time.time() - t0; pm = gpu_mb()
+     del model, opt; gc.collect(); torch.cuda.empty_cache()
+     RESULTS[name] = {"final": float(losses[-1]) if losses else None, "time_s": tt, "steps": len(losses), "gpu_mb": pm, "losses": [float(l) for l in losses]}
+     log(f" [{name}] DONE: loss={losses[-1]:.4f} time={tt:.0f}s gpu={pm:.0f}MB")
+ 
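+ # FP16 LoRA baseline: unquantized fp16 base weights; the memory/speed/quality
+ # reference point for the two 4-bit runs below.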
+ # ── FP16 LoRA ──
+ log("\n=== FP16 LoRA ===")
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import LoraConfig, get_peft_model
+ reset()
+ m = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.float16, device_map="auto")
+ t = AutoTokenizer.from_pretrained(MODEL); t.pad_token = t.eos_token
+ m.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
+ m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
+ hf_loop(m, t, "fp16_lora")
+ 
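+ # QLoRA baseline: base weights stored as 4-bit NF4 with double quantization
+ # and bf16 compute dtype; prepare_model_for_kbit_training() readies it for LoRA.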
+ # ── BnB NF4 ──
+ log("\n=== BnB NF4 QLoRA ===")
+ from transformers import BitsAndBytesConfig
+ from peft import prepare_model_for_kbit_training
+ reset()
+ m = AutoModelForCausalLM.from_pretrained(MODEL, quantization_config=BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16), device_map="auto")
+ t = AutoTokenizer.from_pretrained(MODEL); t.pad_token = t.eos_token
+ m = prepare_model_for_kbit_training(m)
+ m = get_peft_model(m, LoraConfig(r=LORA_R, lora_alpha=LORA_ALPHA, target_modules=LORA_TARGETS, bias="none", task_type="CAUSAL_LM"))
+ hf_loop(m, t, "bnb_nf4")
+ 
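+ # FigQuant run: drives littlefig's FigModel API directly (its forward takes
+ # explicit kwargs), so the hf_loop training logic is mirrored inline below.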
+ # ── FigQuant ──
+ log("\n=== FigQuant LoRA ===")
+ from little_fig.engine import FigModel
+ from little_fig.engine.tier import TrainingTier
+ reset()
+ model = FigModel.from_pretrained(MODEL, lora_r=LORA_R, lora_alpha=LORA_ALPHA, tier=TrainingTier.STREAMING_LORA, target_modules=LORA_TARGETS)
+ tok = model.tokenizer
+ examples = [dict(r) for r in ds]
+ def tok_fn(ex):
+     inst = ex.get("instruction", ""); inp = ex.get("input", "").strip(); out = ex.get("output", "")
+     txt = f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}" if inp else f"### Instruction:\n{inst}\n\n### Response:\n{out}"
+     e = tok(txt, truncation=True, max_length=MAX_SEQ, padding="max_length")
+     return {"input_ids": e["input_ids"], "labels": e["input_ids"].copy(), "attention_mask": e["attention_mask"]}
+ tokenized = [tok_fn(ex) for ex in examples]
+ class DS(torch.utils.data.Dataset):
+     def __init__(self, d): self.d = d
+     def __len__(self): return len(self.d)
+     def __getitem__(self, i): return {k: torch.tensor(v, dtype=torch.long) for k, v in self.d[i].items()}
+ from torch.utils.data import DataLoader
+ dl = DataLoader(DS(tokenized), batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
+ dev = torch.device("cuda"); model = model.to(dev)
+ params = model.get_trainable_parameters()
+ opt = torch.optim.AdamW(params, lr=LR, weight_decay=0.01)
+ model.model.train(); losses = []; gs = 0; al = 0.0; reset(); t0 = time.time()
+ # Same re-epoch handling as hf_loop: one pass gives 250 micro-batches, 400 needed.
+ dl_it = iter(dl)
+ while gs < TRAIN_STEPS * GRAD_ACCUM:
+     try: batch = next(dl_it)
+     except StopIteration: dl_it = iter(dl); batch = next(dl_it)
+     batch = {k: v.to(dev) for k, v in batch.items()}
+     with torch.autocast("cuda", dtype=torch.bfloat16):
+         loss = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], labels=batch["labels"]).loss / GRAD_ACCUM
+     loss.backward(); al += loss.item(); gs += 1
+     if gs % GRAD_ACCUM == 0:
+         torch.nn.utils.clip_grad_norm_(params, 1.0); opt.step(); opt.zero_grad()
+         s = gs // GRAD_ACCUM; losses.append(al); al = 0.0
+         if s % 20 == 0: log(f" [figquant] step={s} loss={losses[-1]:.4f}")
+ tt = time.time() - t0; pm = gpu_mb()
+ RESULTS["figquant"] = {"final": float(losses[-1]) if losses else None, "time_s": tt, "steps": len(losses), "gpu_mb": pm}
+ log(f" [figquant] DONE: loss={losses[-1]:.4f} time={tt:.0f}s gpu={pm:.0f}MB")
+ del model, opt; gc.collect(); torch.cuda.empty_cache()
+ 
+ # ── Summary ──
+ log("\n" + "=" * 60)
+ log(" RESULTS: TinyLlama 1.1B, 100 steps, batch=4x4, LoRA r=16")
+ log("=" * 60)
+ log(f" {'Method':>12} {'Loss':>8} {'Time':>7} {'GPU MB':>8}")
+ log(f" {'─' * 40}")
+ for k in ["fp16_lora", "bnb_nf4", "figquant"]:
+     if k in RESULTS:
+         r = RESULTS[k]
+         log(f" {k:>12} {r['final']:>8.4f} {r['time_s']:>6.0f}s {r['gpu_mb']:>8.0f}")
+ log("=" * 60)
+ with open("/app/results.json", "w") as f: json.dump(RESULTS, f, indent=2, default=str)
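
After a run, /app/results.json holds one entry per method. A minimal sketch for comparing the runs offline (assuming the /app paths used above; the dict keys match the RESULTS entries written by the script):

    import json

    # Load the metrics the benchmark wrote (path assumes the /app layout above).
    with open("/app/results.json") as f:
        results = json.load(f)

    # Side-by-side comparison: "final" is the last accumulated step loss,
    # "time_s" is wall-clock training time, "gpu_mb" is peak CUDA memory.
    # float() guards against any values stringified by json.dump(default=str).
    for name in ("fp16_lora", "bnb_nf4", "figquant"):
        r = results.get(name)
        if r:
            print(f"{name:>12}: loss={float(r['final']):.4f} "
                  f"time={float(r['time_s']):.0f}s gpu={float(r['gpu_mb']):.0f}MB")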