#!/usr/bin/env python3
"""FigQuant training on GPU with the dtype fix applied."""
import sys, subprocess, time, gc

# Install dependencies, then the littlefig package itself (editable, with train extras).
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
                       "transformers", "accelerate", "datasets",
                       "sentencepiece", "protobuf", "psutil", "numpy"])
subprocess.check_call(["git", "clone", "https://github.com/ticketguy/littlefig.git", "/app/littlefig"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "-e", "/app/littlefig[train]"])
sys.path.insert(0, "/app/littlefig/src")

import torch

def log(msg):
    print(f"[GPU] {msg}", flush=True)

log(f"PyTorch {torch.__version__}, CUDA={torch.cuda.is_available()}")
if torch.cuda.is_available():
    log(f"GPU: {torch.cuda.get_device_name()} ({torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB)")

from little_fig.engine import FigModel
from little_fig.engine.tier import TrainingTier
from datasets import load_dataset
from torch.utils.data import DataLoader

MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

ds = load_dataset("tatsu-lab/alpaca", split="train").select(range(1000))
log(f"Data: {len(ds)} examples")

log("Loading FigQuant (lowram mode)...")
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
model = FigModel.from_pretrained(
    MODEL,
    lora_r=16,
    lora_alpha=32,
    tier=TrainingTier.STREAMING_LORA,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    fast=False,  # lowram mode
)
tok = model.tokenizer

examples = [dict(r) for r in ds]

def tok_fn(ex):
    # Alpaca-style prompt; the Input block is omitted when the example has none.
    inst = ex.get("instruction", "")
    inp = ex.get("input", "").strip()
    out = ex.get("output", "")
    txt = (f"### Instruction:\n{inst}\n\n### Input:\n{inp}\n\n### Response:\n{out}"
           if inp else
           f"### Instruction:\n{inst}\n\n### Response:\n{out}")
    e = tok(txt, truncation=True, max_length=512, padding="max_length")
    # NOTE: labels copy input_ids verbatim, so prompt and pad tokens contribute
    # to the loss; the comparison runs below used the same setup.
    return {"input_ids": e["input_ids"],
            "labels": e["input_ids"].copy(),
            "attention_mask": e["attention_mask"]}

tokenized = [tok_fn(ex) for ex in examples]

class DS(torch.utils.data.Dataset):
    def __init__(self, d):
        self.d = d

    def __len__(self):
        return len(self.d)

    def __getitem__(self, i):
        return {k: torch.tensor(v, dtype=torch.long) for k, v in self.d[i].items()}

dl = DataLoader(DS(tokenized), batch_size=4, shuffle=True, drop_last=True)

dev = torch.device("cuda")
model = model.to(dev)
params = model.get_trainable_parameters()
opt = torch.optim.AdamW(params, lr=2e-4, weight_decay=0.01)
model.model.train()

losses = []   # mean loss per optimizer step
gs = 0        # micro-batch counter
al = 0.0      # loss accumulated within the current optimizer step
torch.cuda.reset_peak_memory_stats()
t0 = time.time()
for batch in dl:
    if gs >= 400:
        break  # 100 optimizer steps × 4 grad accum
    batch = {k: v.to(dev) for k, v in batch.items()}
    # fp16 autocast for the forward pass (no GradScaler is used here)
    with torch.autocast("cuda", dtype=torch.float16):
        loss = model(input_ids=batch["input_ids"],
                     attention_mask=batch["attention_mask"],
                     labels=batch["labels"]).loss / 4  # scale for accumulation
    loss.backward()
    al += loss.item()
    gs += 1
    if gs % 4 == 0:  # one optimizer step every 4 micro-batches
        torch.nn.utils.clip_grad_norm_(params, 1.0)
        opt.step()
        opt.zero_grad()
        s = gs // 4
        losses.append(al)
        al = 0.0
        if s % 20 == 0:
            log(f" step={s} loss={losses[-1]:.4f}")

tt = time.time() - t0
peak = torch.cuda.max_memory_allocated() / 1e6  # MB
log(f"\n{'='*50}")
log(f" FigQuant LoRA (lowram) on GPU — RESULTS")
log(f"{'='*50}")
log(f" Final loss: {losses[-1]:.4f}")
log(f" Time: {tt:.0f}s")
log(f" GPU Memory: {peak:.0f} MB")
log(f" Steps: {len(losses)}")
log(f"")
log(f" COMPARISON (same model, same data, same config):")
log(f" {'Method':>16} {'Loss':>8} {'Time':>7} {'GPU MB':>8}")
log(f" {'─'*44}")
log(f" {'FP16 LoRA':>16} {'0.2252':>8} {'1309s':>7} {'3585':>8}")
log(f" {'BnB NF4 QLoRA':>16} {'0.2399':>8} {'1423s':>7} {'2441':>8}")
log(f" {'FigQuant LoRA':>16} {losses[-1]:>8.4f} {tt:>6.0f}s {peak:>7.0f}") log(f"{'='*50}")