| """ |
| PolyGuard GRPO Training Job β runs on Hugging Face Jobs cloud GPU. |
| |
| This script: |
| 1. Clones the project from GitHub |
| 2. Installs the polyguard-rl package |
| 3. Loads the SFT adapter from Hub (or base model if no adapter) |
| 4. Generates GRPO rollout prompts from the PolyGuard environment |
| 5. Runs GRPO training with 4 grouped reward functions |
| 6. Pushes the final adapter to the Hugging Face Hub |
| |
| Submit via CLI: |
| hf jobs uv run \ |
| --flavor a10g-large \ |
| --timeout 4h \ |
| --secrets HF_TOKEN \ |
| "https://huggingface.co/TheJackBright/polyguard-training-scripts/resolve/main/hf_grpo_train.py" |
| |
Environment variables:
    HF_TOKEN          : HF write token (required, passed via --secrets)
    GITHUB_REPO       : project repo to clone (default: https://github.com/Vishwa-docs/Meta_PyTorch_Scalar_OpenEnv-Hackathon.git)
    SFT_MODEL_ID      : SFT adapter on Hub (default: TheJackBright/polyguard-qwen-sft)
    BASE_MODEL_NAME   : base model under the adapter (default: Qwen/Qwen2.5-1.5B-Instruct)
    HUB_MODEL_ID      : output GRPO model repo (default: TheJackBright/polyguard-qwen-grpo)
    N_EPISODES        : GRPO rollout episodes (default: 256)
    EPOCHS            : training epochs (default: 2)
    BATCH_SIZE        : per-device train batch size (default: 2)
    GRAD_ACCUM        : gradient accumulation steps (default: 8)
    MAX_PROMPT_LEN    : max prompt tokens (default: 512)
    MAX_COMPLETION_LEN: max completion tokens (default: 512)
    GROUP_SIZE        : GRPO group size (default: 4)
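
Individual settings can be overridden at submit time, e.g. (assuming your
hf CLI version supports repeated --env KEY=VALUE flags; check
`hf jobs uv run --help` if unsure):
    hf jobs uv run \
        --flavor a10g-large \
        --timeout 4h \
        --secrets HF_TOKEN \
        --env N_EPISODES=128 \
        --env GROUP_SIZE=8 \
        "https://huggingface.co/TheJackBright/polyguard-training-scripts/resolve/main/hf_grpo_train.py"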
| """ |
from __future__ import annotations

import inspect
import json
import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Callable, Dict, List

GITHUB_REPO = os.environ.get("GITHUB_REPO", "https://github.com/Vishwa-docs/Meta_PyTorch_Scalar_OpenEnv-Hackathon.git")
SFT_MODEL_ID = os.environ.get("SFT_MODEL_ID", "TheJackBright/polyguard-qwen-sft")
BASE_MODEL_NAME = os.environ.get("BASE_MODEL_NAME", "Qwen/Qwen2.5-1.5B-Instruct")
HUB_MODEL_ID = os.environ.get("HUB_MODEL_ID", "TheJackBright/polyguard-qwen-grpo")
N_EPISODES = int(os.environ.get("N_EPISODES", "256"))
EPOCHS = int(os.environ.get("EPOCHS", "2"))
BATCH_SIZE = int(os.environ.get("BATCH_SIZE", "2"))
GRAD_ACCUM = int(os.environ.get("GRAD_ACCUM", "8"))
MAX_PROMPT_LEN = int(os.environ.get("MAX_PROMPT_LEN", "512"))
MAX_COMPLETION_LEN = int(os.environ.get("MAX_COMPLETION_LEN", "512"))
GROUP_SIZE = int(os.environ.get("GROUP_SIZE", "4"))
SEED = 42
OUTPUT_DIR = "/tmp/polyguard_grpo_output"
PROMPTS_PATH = "/tmp/polyguard_grpo_prompts.jsonl"

print("=" * 60)
print("PolyGuard GRPO Training on HF Jobs")
print(f" SFT checkpoint: {SFT_MODEL_ID}")
print(f" Hub output: {HUB_MODEL_ID}")
print(f" Episodes: {N_EPISODES}")
print(f" Epochs: {EPOCHS}")
print(f" Group size: {GROUP_SIZE}")
print("=" * 60)

print("\n[1/5] Cloning project from GitHub...")
clone_dir = Path("/tmp/polyguard_project")
if not clone_dir.exists():
    subprocess.run(
        ["git", "clone", "--depth=1", GITHUB_REPO, str(clone_dir)],
        check=True,
    )
else:
    print(" Already cloned.")

polyguard_rl_dir = clone_dir / "polyguard-rl"
print("\n[2/5] Installing polyguard-rl...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-e", str(polyguard_rl_dir), "--quiet"],
    check=True,
)
if str(polyguard_rl_dir) not in sys.path:
    sys.path.insert(0, str(polyguard_rl_dir))
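# Run from the package root, presumably so relative data/config paths inside
# the project resolve (an assumption; the script does not state the reason).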
os.chdir(polyguard_rl_dir)

print(f"\n[3/5] Building GRPO rollout dataset ({N_EPISODES} episodes)...")

from app.env.env_core import PolyGuardEnv
from app.models.policy.prompt_templates import build_planner_prompt
from app.training.openenv_wrapper import PolyGuardEnvWrapper

env = PolyGuardEnv()
prompts: List[Dict[str, Any]] = []

for ep_idx in range(N_EPISODES):
    seed_i = SEED + ep_idx
    obs, _ = env.reset(seed=seed_i)
    prompt_text = build_planner_prompt(obs)
    prompts.append({
        "prompt": prompt_text,
        "seed": seed_i,
        "episode": ep_idx,
        "difficulty": getattr(obs, "difficulty", "medium"),
    })

print(f" Built {len(prompts)} GRPO prompts.")
with open(PROMPTS_PATH, "w") as f:
    for row in prompts:
        f.write(json.dumps(row) + "\n")
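# The JSONL dump is a debugging/audit artifact; training below consumes the
# in-memory `prompts` list directly.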

print(f"\n[4/5] Loading SFT model from {SFT_MODEL_ID}...")

import torch
from datasets import Dataset
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import GRPOConfig, GRPOTrainer

dtype = torch.bfloat16 if (torch.cuda.is_available() and torch.cuda.is_bf16_supported()) else torch.float16
print(f" CUDA: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f" GPU: {torch.cuda.get_device_name(0)}")

tokenizer = AutoTokenizer.from_pretrained(SFT_MODEL_ID, trust_remote_code=True)
if tokenizer.pad_token is None:
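    # Some checkpoints ship without a pad token; fall back to EOS so padding
    # and batched generation work.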
    tokenizer.pad_token = tokenizer.eos_token

try:
    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL_NAME,
        torch_dtype=dtype,
        device_map="auto",
        trust_remote_code=True,
    )
    model = PeftModel.from_pretrained(base_model, SFT_MODEL_ID)
    print(" Loaded SFT adapter on top of base model.")
except Exception as e:
    print(f" SFT load failed ({e}), falling back to base model...")
    model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL_NAME,
        torch_dtype=dtype,
        device_map="auto",
        trust_remote_code=True,
    )
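# Note: this fallback trains the base model's full weights rather than a LoRA
# adapter, so the artifact pushed to the Hub will differ accordingly.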


env_wrapper = PolyGuardEnvWrapper(score_timeout_s=30.0)


def build_polyguard_reward_fn(group_name: str) -> Callable:
    from app.training.grpo_rewards import make_group_reward_fn
    return make_group_reward_fn(env_wrapper=env_wrapper, group_name=group_name)


from app.training.grpo_rewards import REWARD_GROUPS

_group_fns = {g: build_polyguard_reward_fn(g) for g in REWARD_GROUPS}


def combined_reward_fn(
    prompts: List[str],
    completions: List[str],
    **kwargs,
) -> List[float]:
    # Group weights sum to 1.0, so the combined score stays in [0, 1]
    # (assuming each group reward is itself in [0, 1]).
    weights = {
        "format_schema": 0.20,
        "clinical_safety": 0.45,
        "process_grounding": 0.20,
        "anti_hack": 0.15,
    }
    n = len(prompts)
    totals = [0.0] * n
    for group, fn in _group_fns.items():
        group_rewards = fn(prompts=prompts, completions=completions, **kwargs)
        w = weights.get(group, 0.25)
        for i, r in enumerate(group_rewards):
            totals[i] += w * float(r)

    # Clamp to (0.001, 0.999) so no completion scores exactly 0 or 1.
    return [max(0.001, min(0.999, round(v, 3))) for v in totals]


ds = Dataset.from_list([{"prompt": p["prompt"]} for p in prompts]).shuffle(seed=SEED)

# Keys the installed trl version's GRPOConfig does not accept are filtered out below.
grpo_config_kwargs: Dict[str, Any] = {
    "output_dir": OUTPUT_DIR,
    "num_train_epochs": EPOCHS,
    "per_device_train_batch_size": BATCH_SIZE,
    "gradient_accumulation_steps": GRAD_ACCUM,
    "learning_rate": 1e-5,
    "bf16": dtype == torch.bfloat16,
    "fp16": dtype == torch.float16,
    "logging_steps": 5,
    "save_steps": 50,
    "save_total_limit": 2,
    "seed": SEED,
    "report_to": ["trackio"],
    "run_name": "polyguard-grpo-qwen",
    "project": "polyguard-training",
    "push_to_hub": True,
    "hub_model_id": HUB_MODEL_ID,
    "hub_strategy": "every_save",
    "num_generations": GROUP_SIZE,
    "max_prompt_length": MAX_PROMPT_LEN,
    "max_completion_length": MAX_COMPLETION_LEN,
    "temperature": 0.9,
    "beta": 0.1,
}

grpo_params = set(inspect.signature(GRPOConfig).parameters)
grpo_config = GRPOConfig(**{k: v for k, v in grpo_config_kwargs.items() if k in grpo_params})


trainer_kwargs: Dict[str, Any] = {
    "model": model,
    "args": grpo_config,
    "train_dataset": ds,
    "reward_funcs": combined_reward_fn,
}
trainer_params = set(inspect.signature(GRPOTrainer).parameters)
# Newer trl versions take `processing_class` where older ones took `tokenizer`.
if "processing_class" in trainer_params:
    trainer_kwargs["processing_class"] = tokenizer
elif "tokenizer" in trainer_params:
    trainer_kwargs["tokenizer"] = tokenizer

trainer = GRPOTrainer(**{k: v for k, v in trainer_kwargs.items() if k in trainer_params})

print(f"\n GRPO dataset size: {len(ds)}")
print(f" Group size: {GROUP_SIZE}")
print(" Starting GRPO training...\n")

train_result = trainer.train()

print("\n GRPO training complete.")
print(f" Steps: {train_result.global_step}")

print(f"\n[5/5] Pushing GRPO model to Hub: {HUB_MODEL_ID}...")
trainer.push_to_hub()
tokenizer.push_to_hub(HUB_MODEL_ID)

print("\n" + "=" * 60)
print("GRPO training complete!")
print(f"Model saved to: https://huggingface.co/{HUB_MODEL_ID}")
print("=" * 60)
|