"""
Submit an Unsloth VLM fine-tuning job to Hugging Face Jobs.

This script submits a training job using the Unsloth Docker image with UV
script execution, simplifying the process of running iconclass-vlm-sft.py
on cloud GPUs.
"""
|
|
import os

from dotenv import load_dotenv
from huggingface_hub import HfApi

# Load HF_TOKEN (and any other settings) from a local .env file, if present,
# so the token does not have to be exported in the shell.
load_dotenv()
|
|
|
|
| |
| |
| |
|
|
| |
# --- Model / data configuration --------------------------------------------
BASE_MODEL = "Qwen/Qwen3-VL-8B-Instruct"
DATASET = "davanstrien/iconclass-vlm-sft"
OUTPUT_MODEL = "davanstrien/Qwen3-VL-8B-iconclass-vlm"

# --- Training hyperparameters -----------------------------------------------
BATCH_SIZE = 2
GRADIENT_ACCUMULATION = 8
MAX_STEPS = None  # None -> auto-computed below from the dataset size
NUM_EPOCHS = 1.0
LEARNING_RATE = 2e-5

# --- LoRA hyperparameters ---------------------------------------------------
LORA_R = 16
LORA_ALPHA = 32
LORA_DROPOUT = 0.1

# --- Infrastructure ---------------------------------------------------------
GPU_FLAVOR = "a100-large"
TIMEOUT = "12h"

# UV script that performs the actual fine-tuning run on the job machine.
SCRIPT_URL = "https://huggingface.co/datasets/uv-scripts/training/raw/main/iconclass-vlm-sft.py"
| |
| |
|
|
| |
# Auto-derive MAX_STEPS from the dataset size when it was not set explicitly.
if MAX_STEPS is None:
    # Imported lazily: `datasets` is only needed when auto-computing steps.
    from datasets import load_dataset

    print("Calculating max_steps for full dataset...")
    dataset = load_dataset(DATASET, split="train")
    effective_batch = BATCH_SIZE * GRADIENT_ACCUMULATION
    # Guard against a dataset smaller than one effective batch, which would
    # otherwise floor to 0 steps and submit a job that trains nothing.
    steps_per_epoch = max(1, len(dataset) // effective_batch)
    MAX_STEPS = max(1, int(steps_per_epoch * NUM_EPOCHS))
    print(f"Dataset size: {len(dataset):,} samples")
    print(f"Steps per epoch: {steps_per_epoch:,}")
    print(f"Total steps ({NUM_EPOCHS} epoch(s)): {MAX_STEPS:,}")
    print()
|
|
|
|
| |
| |
| |
|
|
|
|
def submit_training_job():
    """Submit the VLM training job to HF Jobs using the Unsloth Docker image.

    Reads HF_TOKEN from the environment (optionally via .env), builds the
    CLI arguments for the training UV script from the module-level
    configuration constants, and submits the job.

    Returns:
        The submitted job object on success, or None when HF_TOKEN is missing.
    """
    # Fail fast with guidance when no token is available: the job submission
    # and the job's own Hub pushes both require authentication.
    HF_TOKEN = os.environ.get("HF_TOKEN")
    if not HF_TOKEN:
        print("⚠️ HF_TOKEN not found in environment")
        print("Please set: export HF_TOKEN=your_token_here")
        print("Or add it to a .env file in this directory")
        return

    api = HfApi(token=HF_TOKEN)

    # CLI arguments forwarded verbatim to the training script (SCRIPT_URL).
    # All numeric values are stringified because they are passed as argv.
    script_args = [
        "--base-model",
        BASE_MODEL,
        "--dataset",
        DATASET,
        "--output-model",
        OUTPUT_MODEL,
        "--lora-r",
        str(LORA_R),
        "--lora-alpha",
        str(LORA_ALPHA),
        "--lora-dropout",
        str(LORA_DROPOUT),
        "--learning-rate",
        str(LEARNING_RATE),
        "--batch-size",
        str(BATCH_SIZE),
        "--gradient-accumulation",
        str(GRADIENT_ACCUMULATION),
        "--max-steps",
        str(MAX_STEPS),
        "--logging-steps",
        "10",
        "--save-steps",
        "100",
        "--eval-steps",
        "100",
    ]

    # Human-readable summary of what is about to be submitted.
    print("=" * 80)
    print("Submitting Unsloth VLM Fine-tuning Job to HF Jobs")
    print("=" * 80)
    print("\n📦 Configuration:")
    print(f"   Base Model: {BASE_MODEL}")
    print(f"   Dataset: {DATASET}")
    print(f"   Output: {OUTPUT_MODEL}")
    print("\n🎛️ Training Settings:")
    print(f"   Max Steps: {MAX_STEPS:,}")
    print(f"   Batch Size: {BATCH_SIZE}")
    print(f"   Grad Accum: {GRADIENT_ACCUMULATION}")
    print(f"   Effective BS: {BATCH_SIZE * GRADIENT_ACCUMULATION}")
    print(f"   Learning Rate: {LEARNING_RATE}")
    print("\n🔧 LoRA Settings:")
    print(f"   Rank (r): {LORA_R}")
    print(f"   Alpha: {LORA_ALPHA}")
    print(f"   Dropout: {LORA_DROPOUT}")
    print("\n💻 Infrastructure:")
    print(f"   GPU: {GPU_FLAVOR}")
    print(f"   Timeout: {TIMEOUT}")
    print("\n🚀 Submitting job...")

    # Dependencies are empty because the UV script declares its own
    # (PEP 723 inline metadata); HF_TOKEN goes in `secrets` so it is not
    # exposed as a plain environment variable in the job config.
    job = api.run_uv_job(
        script=SCRIPT_URL,
        script_args=script_args,
        dependencies=[],
        flavor=GPU_FLAVOR,
        timeout=TIMEOUT,
        env={
            "HF_HUB_ENABLE_HF_TRANSFER": "1",  # faster Hub up/downloads
        },
        secrets={
            "HF_TOKEN": HF_TOKEN,
        },
    )

    print("\n✅ Job submitted successfully!")
    print("\n📊 Job Details:")
    print(f"   Job ID: {job.id}")
    print(f"   Status: {job.status}")
    print(f"   URL: https://huggingface.co/jobs/{job.id}")
    print("\n💡 Monitor your job:")
    print(f"   • Web: https://huggingface.co/jobs/{job.id}")
    print(f"   • CLI: hfjobs status {job.id}")
    print(f"   • Logs: hfjobs logs {job.id} --follow")
    print("\n🎯 Your model will be available at:")
    print(f"   https://huggingface.co/{OUTPUT_MODEL}")
    print("\n" + "=" * 80)

    return job
|
|
|
|
| |
| |
| |
|
|
|
|
def main():
    """Main entry point: submit the job and print a monitoring snippet."""
    job = submit_training_job()

    # Only show the programmatic-monitoring snippet when submission succeeded
    # (submit_training_job returns None when HF_TOKEN is missing).
    if job:
        print("\n📝 To monitor this job programmatically:")
        print(f"""
from huggingface_hub import HfApi

api = HfApi()
job = api.get_job("{job.id}")
print(job.status)  # Check status
print(job.logs())  # View logs
""")


if __name__ == "__main__":
    main()
|
|