"""
Fine-tune Qwen2.5-Coder-3B-Instruct on Google Classroom & Drive API code.
Uses LoRA via PEFT for memory-efficient training.
"""
import os
from datasets import load_dataset
from trl import SFTTrainer, SFTConfig
from peft import LoraConfig
# ── Config ───────────────────────────────────────────────────────────
MODEL_ID = "Qwen/Qwen2.5-Coder-3B-Instruct"
DATASET_ID = "esmith5594/google-classroom-drive-api-code"
OUTPUT_DIR = "qwen25-coder-3b-google-api-lora"
HUB_MODEL_ID = "esmith5594/qwen25-coder-3b-google-api-lora"
# ── Load Dataset ─────────────────────────────────────────────────────
dataset = load_dataset(DATASET_ID, split="train")
print(f"Loaded {len(dataset)} training examples")
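# Note: assistant_only_loss (set below) requires conversational data, so
# the dataset is assumed to provide a "messages" column in the chat format
# that Qwen2.5's chat template understands.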
# ── LoRA Config (based on Octopus paper + TRL best practices) ────────
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
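# With r=16 and lora_alpha=32, the LoRA update is scaled by
# lora_alpha / r = 2.0; target_modules covers every attention and MLP
# projection in the Qwen2 architecture.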
# ── Training Config ──────────────────────────────────────────────────
training_args = SFTConfig(
    output_dir=OUTPUT_DIR,
    hub_model_id=HUB_MODEL_ID,
    push_to_hub=True,
    num_train_epochs=5,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=128,
    learning_rate=2e-5,
    lr_scheduler_type="constant",
    warmup_ratio=0.0,
    bf16=True,
    gradient_checkpointing=True,
    max_seq_length=4096,
    logging_steps=10,
    logging_first_step=True,
    disable_tqdm=True,
    save_strategy="epoch",
    save_total_limit=2,
    report_to="trackio",
    run_name="qwen25-coder-3b-google-api-lora",
    project="google-api-coder",
    assistant_only_loss=True,
    packing=False,
)
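# Effective batch size: 4 (per device) x 128 (accumulation steps) = 512
# sequences per optimizer step, multiplied by the number of devices.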
# ── Trainer ──────────────────────────────────────────────────────────
trainer = SFTTrainer(
    model=MODEL_ID,
    train_dataset=dataset,
    peft_config=peft_config,
    args=training_args,
)
# ── Train ────────────────────────────────────────────────────────────
trainer.train()
# ── Save ─────────────────────────────────────────────────────────────
trainer.save_model(os.path.join(OUTPUT_DIR, "final"))
trainer.push_to_hub()
print(f"\nTraining complete! Model saved to {HUB_MODEL_ID}")
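# ── Example usage (sketch) ───────────────────────────────────────────
# A minimal inference sketch, assuming the adapter was pushed to
# HUB_MODEL_ID; the prompt and generation settings are illustrative,
# not tuned.
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from peft import PeftModel
#
#   tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
#   base = AutoModelForCausalLM.from_pretrained(MODEL_ID)
#   model = PeftModel.from_pretrained(base, HUB_MODEL_ID)
#   messages = [{"role": "user", "content": "List files in a Drive folder."}]
#   input_ids = tokenizer.apply_chat_template(
#       messages, add_generation_prompt=True, return_tensors="pt"
#   )
#   output = model.generate(input_ids, max_new_tokens=256)
#   print(tokenizer.decode(output[0], skip_special_tokens=True))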