| """ |
| Fine-tune Qwen2.5-Coder-3B-Instruct on Google Classroom & Drive API code. |
| Uses LoRA via PEFT for memory-efficient training. |
| """ |
| import os |
| from datasets import load_dataset |
| from trl import SFTTrainer, SFTConfig |
| from peft import LoraConfig |


# Model, dataset, and Hugging Face Hub identifiers.
MODEL_ID = "Qwen/Qwen2.5-Coder-3B-Instruct"
DATASET_ID = "esmith5594/google-classroom-drive-api-code"
OUTPUT_DIR = "qwen25-coder-3b-google-api-lora"
HUB_MODEL_ID = "esmith5594/qwen25-coder-3b-google-api-lora"


# Load the training split of the instruction dataset.
dataset = load_dataset(DATASET_ID, split="train")
print(f"Loaded {len(dataset)} training examples")


# LoRA adapters (rank 16) on every attention and MLP projection layer,
# the standard full-coverage choice for Qwen2-style architectures.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
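
# To confirm these target_modules exist in the checkpoint, a sketch like the
# following works (left commented out because it downloads the base model):
#
#   from transformers import AutoModelForCausalLM
#   m = AutoModelForCausalLM.from_pretrained(MODEL_ID)
#   print(sorted({name.split(".")[-1] for name, _ in m.named_modules()
#                 if name.endswith("proj")}))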


# Training configuration. Effective batch size per optimizer step is
# per_device_train_batch_size * gradient_accumulation_steps = 4 * 128 = 512
# sequences per device.
training_args = SFTConfig(
    output_dir=OUTPUT_DIR,
    hub_model_id=HUB_MODEL_ID,
    push_to_hub=True,
    num_train_epochs=5,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=128,
    learning_rate=2e-5,
    lr_scheduler_type="constant",
    warmup_ratio=0.0,
    bf16=True,
    gradient_checkpointing=True,
    max_length=4096,  # maximum sequence length (tokens) per example
    logging_steps=10,
    logging_first_step=True,
    disable_tqdm=True,
    save_strategy="epoch",
    save_total_limit=2,
    report_to="trackio",
    run_name="qwen25-coder-3b-google-api-lora",
    project="google-api-coder",
    assistant_only_loss=True,  # mask the loss to assistant turns only
    packing=False,
)


# SFTTrainer accepts the model id as a string: it loads the checkpoint and
# tokenizer itself and applies the LoRA adapters from peft_config.
trainer = SFTTrainer(
    model=MODEL_ID,
    train_dataset=dataset,
    peft_config=peft_config,
    args=training_args,
)
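
# With peft_config supplied, trainer.model should be a PeftModel; print how
# small the trainable-parameter budget is (guarded with hasattr in case a
# different TRL version wraps the model differently).
if hasattr(trainer.model, "print_trainable_parameters"):
    trainer.model.print_trainable_parameters()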


# Run fine-tuning; a checkpoint is saved at the end of every epoch.
trainer.train()


# Save the final adapter locally, then push it to the Hub.
trainer.save_model(os.path.join(OUTPUT_DIR, "final"))
trainer.push_to_hub()

print(f"\nTraining complete! Adapter pushed to {HUB_MODEL_ID}")