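# Inline script metadata (PEP 723) so `uv run` / `hf jobs uv run` can resolve the
# script's dependencies. This block is a sketch: the package list is assumed from
# the imports used below, and versions are deliberately left unpinned - pin them
# if you need reproducible runs.
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "unsloth",
#     "datasets",
#     "trl",
#     "transformers",
#     "huggingface_hub",
#     "wandb",
# ]
# ///
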
| """ |
| Train an LLM on Latin using streaming datasets. |
| |
| Demonstrates continued pretraining with streaming - no disk space needed. |
| Uses FineWeb-2's Latin subset (1.47M texts, ~1.7GB). |
| |
| Run locally (if you have a GPU): |
| uv run latin-llm-streaming.py |
| |
| Run on HF Jobs: |
| hf jobs uv run latin-llm-streaming.py --flavor a100-large --secrets HF_TOKEN |
| |
| With custom settings: |
| hf jobs uv run latin-llm-streaming.py --flavor a100-large --secrets HF_TOKEN -- \ |
| --max-steps 1000 --output-repo your-username/qwen-latin |
| """ |
|
|
import argparse
import os
import time


def parse_args():
    parser = argparse.ArgumentParser(
        description="Train an LLM on Latin using streaming datasets"
    )
    parser.add_argument(
        "--base-model",
        default="unsloth/Qwen3-0.6B-Base-unsloth-bnb-4bit",
        help="Base model to fine-tune",
    )
    parser.add_argument(
        "--output-repo",
        default=None,
        help="HF Hub repo to push model to (e.g., 'username/qwen-latin')",
    )
    parser.add_argument(
        "--max-steps",
        type=int,
        default=500,
        help="Number of training steps (default: 500)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=4,
        help="Per-device batch size (default: 4)",
    )
    parser.add_argument(
        "--gradient-accumulation",
        type=int,
        default=4,
        help="Gradient accumulation steps (default: 4)",
    )
    parser.add_argument(
        "--learning-rate",
        type=float,
        default=2e-4,
        help="Learning rate (default: 2e-4)",
    )
    parser.add_argument(
        "--max-seq-length",
        type=int,
        default=2048,
        help="Maximum sequence length (default: 2048)",
    )
    parser.add_argument(
        "--lora-r",
        type=int,
        default=16,
        help="LoRA rank (default: 16)",
    )
    parser.add_argument(
        "--save-local",
        default="latin-llm-output",
        help="Local directory to save model (default: latin-llm-output)",
    )
    parser.add_argument(
        "--wandb-project",
        default="latin-llm-streaming",
        help="Wandb project name (default: latin-llm-streaming)",
    )
    parser.add_argument(
        "--wandb-run-name",
        default=None,
        help="Wandb run name (default: auto-generated)",
    )
    return parser.parse_args()


def main():
    args = parse_args()

| print("=" * 70) |
| print("Latin LLM Training with Streaming Datasets") |
| print("=" * 70) |
| print(f"\nConfiguration:") |
| print(f" Base model: {args.base_model}") |
| print(f" Max steps: {args.max_steps}") |
| print(f" Batch size: {args.batch_size} x {args.gradient_accumulation} = {args.batch_size * args.gradient_accumulation}") |
| print(f" Learning rate: {args.learning_rate}") |
| print(f" LoRA rank: {args.lora_r}") |
| print(f" Output repo: {args.output_repo or '(local only)'}") |
| print(f" Wandb project: {args.wandb_project}") |
| print() |
|
|
    # Heavy imports are deferred until after argument parsing
    from unsloth import FastLanguageModel
    from datasets import load_dataset
    from trl import SFTTrainer, SFTConfig
    from huggingface_hub import login

    # Log in to the Hub only when a push is requested; otherwise fall back to local-only saving
    if args.output_repo:
        token = os.environ.get("HF_TOKEN")
        if token:
            login(token=token)
            print("✓ Logged in to Hugging Face Hub")
        else:
            print("⚠ HF_TOKEN not set - model will only be saved locally")
            args.output_repo = None

    # Optional Weights & Biases tracking; log in only when an API key is provided
    import wandb
    wandb_key = os.environ.get("WANDB_API_KEY")
    if wandb_key:
        wandb.login(key=wandb_key)
    wandb.init(
        project=args.wandb_project,
        name=args.wandb_run_name or f"latin-{args.max_steps}steps",
        config={
            "base_model": args.base_model,
            "max_steps": args.max_steps,
            "batch_size": args.batch_size,
            "gradient_accumulation": args.gradient_accumulation,
            "effective_batch_size": args.batch_size * args.gradient_accumulation,
            "learning_rate": args.learning_rate,
            "lora_r": args.lora_r,
            "max_seq_length": args.max_seq_length,
            "dataset": "HuggingFaceFW/fineweb-2 (lat_Latn)",
        },
    )
    print(f"✓ Wandb initialized: {wandb.run.url}")
|
|
    # [1/5] Load the 4-bit quantized base model with Unsloth
    print("\n[1/5] Loading model...")
    start = time.time()

    model, tokenizer = FastLanguageModel.from_pretrained(
        args.base_model,
        max_seq_length=args.max_seq_length,
        load_in_4bit=True,
    )

    # Attach LoRA adapters for parameter-efficient continued pretraining
    model = FastLanguageModel.get_peft_model(
        model,
        r=args.lora_r,
        lora_alpha=args.lora_r * 2,
        lora_dropout=0,
        target_modules=[
            "q_proj", "k_proj", "v_proj", "o_proj",
            "gate_proj", "up_proj", "down_proj",
        ],
        bias="none",
        use_gradient_checkpointing="unsloth",
        random_state=3407,
    )
    print(f"✓ Model loaded in {time.time() - start:.1f}s")
|
|
    # [2/5] Stream the Latin split of FineWeb-2 directly from the Hub
    print("\n[2/5] Loading streaming dataset (FineWeb-2 Latin)...")
    start = time.time()

    dataset = load_dataset(
        "HuggingFaceFW/fineweb-2",
        name="lat_Latn",
        split="train",
        streaming=True,
    )
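
    # streaming=True returns an IterableDataset: shards are fetched and decoded on
    # the fly, so the full ~1.7GB split never has to be downloaded up front.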
|
|
    # Peek at one example to confirm the stream is working
    sample = next(iter(dataset))
    print(f"✓ Dataset ready in {time.time() - start:.1f}s")
    print(f"  Sample: {sample['text'][:100]}...")

    # [3/5] Append the EOS token so the model sees document boundaries during training
    print("\n[3/5] Preparing dataset...")

    def format_text(example):
        return {"text": example["text"] + tokenizer.eos_token}

    formatted_dataset = dataset.map(format_text)
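
    # IterableDataset.map is lazy: the EOS token is appended as examples are pulled
    # by the trainer, not in a separate preprocessing pass.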
|
|
    # [4/5] Continued pretraining with TRL's SFTTrainer
    print(f"\n[4/5] Training for {args.max_steps} steps...")
    start = time.time()

    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        train_dataset=formatted_dataset,
        args=SFTConfig(
            per_device_train_batch_size=args.batch_size,
            gradient_accumulation_steps=args.gradient_accumulation,
            warmup_steps=min(10, args.max_steps // 10),
            max_steps=args.max_steps,
            learning_rate=args.learning_rate,
            logging_steps=max(1, args.max_steps // 20),
            optim="adamw_8bit",
            weight_decay=0.01,
            lr_scheduler_type="linear",
            seed=3407,
            output_dir=args.save_local,
            report_to="wandb",
            run_name=args.wandb_run_name or f"latin-{args.max_steps}steps",
            dataset_text_field="text",
            max_seq_length=args.max_seq_length,
            packing=False,
        ),
    )
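
    # A streaming dataset has no length, so the run is bounded by max_steps rather
    # than an epoch count.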
|
|
    trainer.train()
    train_time = time.time() - start

    print(f"\n✓ Training completed in {train_time / 60:.1f} minutes")
    print(f"  Speed: {args.max_steps / train_time:.2f} it/s")

    # [5/5] Persist the trained model
    print("\n[5/5] Saving model...")

    # Save the LoRA adapters and tokenizer locally
    model.save_pretrained(args.save_local)
    tokenizer.save_pretrained(args.save_local)
    print(f"✓ Saved locally to {args.save_local}/")
|
|
    # Push the adapters to the Hub if an output repo was requested
    if args.output_repo:
        print(f"\nPushing to {args.output_repo}...")
        model.push_to_hub(args.output_repo, tokenizer=tokenizer)
        print(f"✓ Model available at: https://huggingface.co/{args.output_repo}")

    # Quick sanity check: generate a short continuation from a Latin prompt
    print("\n" + "=" * 70)
    print("Quick inference test:")
    print("=" * 70)

    FastLanguageModel.for_inference(model)

    prompt = "Lingua Latina est"
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    outputs = model.generate(
        **inputs,
        max_new_tokens=64,
        temperature=0.7,
        do_sample=True,
    )
    generated = tokenizer.decode(outputs[0], skip_special_tokens=True)

    print(f"\nPrompt: {prompt}")
    print(f"Generated: {generated}")
|
|
| print("\n" + "=" * 70) |
| print("Done!") |
| print("=" * 70) |
|
|
|
|
if __name__ == "__main__":
    main()
|
|