"""
PhD Research OS — SFT Training Script
=======================================
Fine-tunes Qwen2.5-3B-Instruct using QLoRA on multi-task scientific research data.

Tasks trained:
1. Scientific Claim Extraction (structured JSON output)
2. Epistemic Classification (Fact/Interpretation/Hypothesis/Conflict_Hypothesis)
3. Confidence Scoring (evidence_strength × study_quality × journal_tier × completeness)
4. Contradiction Detection (claim pair → conflict analysis)
5. Query Decomposition (broad question → sub-queries)
6. Decision Object Generation (knowledge gaps → proposed research actions)

Base model: Qwen/Qwen2.5-3B-Instruct
Method: QLoRA (r=64, all-linear) following the "LoRA Without Regret" recipe
References: arXiv:2212.05238 (LLM-NERRE), arXiv:2401.00579 (multi-task biomedical SFT)

Usage:
    pip install torch transformers trl peft datasets bitsandbytes accelerate trackio
    python train.py
"""
import os
import torch
from datasets import load_dataset
from transformers import BitsAndBytesConfig
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer
import trackio
MODEL_NAME = "Qwen/Qwen2.5-3B-Instruct"
DATASET_NAME = "nkshirsa/phd-research-os-sft-data"
OUTPUT_DIR = "./phd-research-os-brain"
HUB_MODEL_ID = "nkshirsa/phd-research-os-brain"
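# trackio: lightweight, wandb-style experiment tracking from Hugging Face.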
trackio.init(project="phd-research-os-training", name="sft-qwen25-3b-qlora-v1")
print("Loading dataset...")
dataset = load_dataset(DATASET_NAME)
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
print(f"Train: {len(train_dataset)} examples, Eval: {len(eval_dataset)} examples")
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True, bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16)
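# LoRA adapters of rank 64 on every linear layer, per the "LoRA Without Regret" recipe;
# only these adapter weights are trained, the quantized base model stays frozen.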
peft_config = LoraConfig(
    r=64, lora_alpha=16, lora_dropout=0.05, bias="none",
    task_type="CAUSAL_LM", target_modules="all-linear")
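# Effective batch size = 2 per device x 8 gradient accumulation steps = 16 sequences per update.
# assistant_only_loss=True computes loss only on assistant-turn tokens (requires a chat
# template that marks generation spans), so prompt tokens do not contribute to the loss.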
training_args = SFTConfig(
    output_dir=OUTPUT_DIR,
    num_train_epochs=3,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=8,
    learning_rate=2e-4,
    lr_scheduler_type="cosine",
    warmup_ratio=0.05,
    weight_decay=0.01,
    max_grad_norm=1.0,
    bf16=True,
    gradient_checkpointing=True,
    max_length=2048,
    model_init_kwargs={"quantization_config": bnb_config, "torch_dtype": torch.bfloat16},
    assistant_only_loss=True,
    logging_steps=5, logging_first_step=True, disable_tqdm=True,
    report_to=["tensorboard"], logging_dir=f"{OUTPUT_DIR}/logs",
    eval_strategy="steps", eval_steps=50,
    save_strategy="steps", save_steps=100, save_total_limit=3,
    load_best_model_at_end=True, metric_for_best_model="eval_loss", greater_is_better=False,
    push_to_hub=True, hub_model_id=HUB_MODEL_ID, hub_strategy="every_save",
    seed=42, data_seed=42)
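# Passing the model name as a string lets SFTTrainer load the base model itself, applying
# quantization_config from model_init_kwargs and wrapping it with the LoRA adapters above.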
trainer = SFTTrainer(
    model=MODEL_NAME, args=training_args,
    train_dataset=train_dataset, eval_dataset=eval_dataset,
    peft_config=peft_config)
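# Report the trainable LoRA footprint relative to the full (quantized) model.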
trainable = sum(p.numel() for p in trainer.model.parameters() if p.requires_grad)
total = sum(p.numel() for p in trainer.model.parameters())
print(f"Model: {MODEL_NAME} | Total: {total:,} | Trainable: {trainable:,} ({100*trainable/total:.2f}%)")
train_result = trainer.train()
trainer.save_model()
trainer.push_to_hub(commit_message="Final model: PhD Research OS Brain v1")
print(f"\nTraining complete! Model at: https://huggingface.co/{HUB_MODEL_ID}")
for k, v in train_result.metrics.items():
    print(f"  {k}: {v}")