# /// script
# dependencies = [
#     "torch>=2.2.0",
#     "transformers>=4.40.0",
#     "trl>=1.2.0",
#     "peft>=0.10.0",
#     "accelerate>=0.27.0",
#     "bitsandbytes>=0.43.0",
#     "datasets>=2.18.0",
#     "huggingface-hub>=0.22.0",
#     "trackio",
#     "pydantic>=2.0",
#     "numpy>=1.24",
#     "pandas>=2.0",
#     "matplotlib>=3.8",
#     "tqdm>=4.60",
#     "networkx>=3.0",
#     "scipy>=1.10",
#     "fastapi>=0.100",
#     "uvicorn>=0.22",
#     "httpx>=0.24",
#     "pyyaml>=6.0",
# ]
# ///
"""
PolyGuard GRPO Training Job - runs on a Hugging Face Jobs cloud GPU.
This script:
1. Clones the project from GitHub
2. Installs the polyguard-rl package
3. Loads the SFT adapter from Hub (or base model if no adapter)
4. Generates GRPO rollout prompts from the PolyGuard environment
5. Runs GRPO training with 4 grouped reward functions
6. Pushes the final adapter to the Hugging Face Hub
Submit via CLI:
    hf jobs uv run \
        --flavor a10g-large \
        --timeout 4h \
        --secrets HF_TOKEN \
        "https://huggingface.co/TheJackBright/polyguard-training-scripts/resolve/main/hf_grpo_train.py"
Environment variables:
    HF_TOKEN           : HF write token (required, passed via --secrets)
    GITHUB_REPO        : project repo to clone (default: the Meta_PyTorch_Scalar_OpenEnv-Hackathon repo)
    SFT_MODEL_ID       : SFT adapter on Hub (default: TheJackBright/polyguard-qwen-sft)
    BASE_MODEL_NAME    : base model (default: Qwen/Qwen2.5-1.5B-Instruct)
    HUB_MODEL_ID       : output GRPO model repo (default: TheJackBright/polyguard-qwen-grpo)
    N_EPISODES         : GRPO rollout episodes (default: 256)
    EPOCHS             : training epochs (default: 2)
    BATCH_SIZE         : per-device train batch size (default: 2)
    GRAD_ACCUM         : gradient accumulation steps (default: 8)
    MAX_PROMPT_LEN     : max prompt tokens (default: 512)
    MAX_COMPLETION_LEN : max completion tokens (default: 512)
    GROUP_SIZE         : GRPO group size (default: 4)
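Any of these can be overridden at submit time. For example (this assumes
the `--env` flag of `hf jobs uv run`; check your huggingface_hub CLI version):
    hf jobs uv run \
        --flavor a10g-large \
        --timeout 4h \
        --secrets HF_TOKEN \
        --env N_EPISODES=512 \
        --env GROUP_SIZE=8 \
        "https://huggingface.co/TheJackBright/polyguard-training-scripts/resolve/main/hf_grpo_train.py"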
"""
from __future__ import annotations
import inspect
import json
import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Callable, Dict, List
# ─── Config from environment ────────────────────────────────────────────────
GITHUB_REPO = os.environ.get("GITHUB_REPO", "https://github.com/Vishwa-docs/Meta_PyTorch_Scalar_OpenEnv-Hackathon.git")
SFT_MODEL_ID = os.environ.get("SFT_MODEL_ID", "TheJackBright/polyguard-qwen-sft")
BASE_MODEL_NAME = os.environ.get("BASE_MODEL_NAME", "Qwen/Qwen2.5-1.5B-Instruct")
HUB_MODEL_ID = os.environ.get("HUB_MODEL_ID", "TheJackBright/polyguard-qwen-grpo")
N_EPISODES = int(os.environ.get("N_EPISODES", "256"))
EPOCHS = int(os.environ.get("EPOCHS", "2"))
BATCH_SIZE = int(os.environ.get("BATCH_SIZE", "2"))
GRAD_ACCUM = int(os.environ.get("GRAD_ACCUM", "8"))
MAX_PROMPT_LEN = int(os.environ.get("MAX_PROMPT_LEN", "512"))
MAX_COMPLETION_LEN = int(os.environ.get("MAX_COMPLETION_LEN", "512"))
GROUP_SIZE = int(os.environ.get("GROUP_SIZE", "4"))
SEED = 42
OUTPUT_DIR = "/tmp/polyguard_grpo_output"
PROMPTS_PATH = "/tmp/polyguard_grpo_prompts.jsonl"
print("=" * 60)
print("PolyGuard GRPO Training on HF Jobs")
print(f" SFT checkpoint: {SFT_MODEL_ID}")
print(f" Hub output: {HUB_MODEL_ID}")
print(f" Episodes: {N_EPISODES}")
print(f" Epochs: {EPOCHS}")
print(f" Group size: {GROUP_SIZE}")
print("=" * 60)
# ─── Step 1: Clone repo and install polyguard-rl ────────────────────────────
print("\n[1/5] Cloning project from GitHub...")
clone_dir = Path("/tmp/polyguard_project")
if not clone_dir.exists():
    subprocess.run(
        ["git", "clone", "--depth=1", GITHUB_REPO, str(clone_dir)],
        check=True,
    )
else:
    print(" Already cloned.")
polyguard_rl_dir = clone_dir / "polyguard-rl"
print("\n[2/5] Installing polyguard-rl...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-e", str(polyguard_rl_dir), "--quiet"],
    check=True,
)
if str(polyguard_rl_dir) not in sys.path:
    sys.path.insert(0, str(polyguard_rl_dir))
os.chdir(polyguard_rl_dir)
# ─── Step 2: Build GRPO prompt dataset ──────────────────────────────────────
print(f"\n[3/5] Building GRPO rollout dataset ({N_EPISODES} episodes)...")
from app.env.env_core import PolyGuardEnv # noqa: E402
from app.models.policy.prompt_templates import build_planner_prompt # noqa: E402
from app.training.openenv_wrapper import PolyGuardEnvWrapper # noqa: E402
env = PolyGuardEnv()
prompts: List[Dict[str, Any]] = []
for ep_idx in range(N_EPISODES):
    seed_i = SEED + ep_idx
    obs, _ = env.reset(seed=seed_i)
    prompt_text = build_planner_prompt(obs)
    prompts.append({
        "prompt": prompt_text,
        "seed": seed_i,
        "episode": ep_idx,
        "difficulty": getattr(obs, "difficulty", "medium"),
    })
print(f" Built {len(prompts)} GRPO prompts.")
with open(PROMPTS_PATH, "w") as f:
    for row in prompts:
        f.write(json.dumps(row) + "\n")
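# Each JSONL row has the shape (illustrative values):
# {"prompt": "...", "seed": 42, "episode": 0, "difficulty": "medium"}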
# ─── Step 3: Load model from SFT checkpoint ─────────────────────────────────
print(f"\n[4/5] Loading SFT model from {SFT_MODEL_ID}...")
import torch
from datasets import Dataset
from huggingface_hub import HfApi
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import GRPOConfig, GRPOTrainer
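# Prefer bfloat16 where the GPU supports it (Ampere or newer); fall back to
# float16 on older cards so mixed-precision training still works.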
dtype = torch.bfloat16 if (torch.cuda.is_available() and torch.cuda.is_bf16_supported()) else torch.float16
print(f" CUDA: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f" GPU: {torch.cuda.get_device_name(0)}")
tokenizer = AutoTokenizer.from_pretrained(SFT_MODEL_ID, trust_remote_code=True)
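# Some causal-LM tokenizers ship without a pad token; reuse EOS so batched
# padding and generation work during rollouts.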
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# Try loading SFT adapter, fall back to base model
try:
    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL_NAME,
        torch_dtype=dtype,
        device_map="auto",
        trust_remote_code=True,
    )
    model = PeftModel.from_pretrained(base_model, SFT_MODEL_ID)
    print(" Loaded SFT adapter on top of base model.")
except Exception as e:
    print(f" SFT load failed ({e}), falling back to base model...")
    model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL_NAME,
        torch_dtype=dtype,
        device_map="auto",
        trust_remote_code=True,
    )
# ─── Step 4: Build grouped reward function ──────────────────────────────────
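# A single shared wrapper scores every rollout against the PolyGuard
# environment; the 30 s cap keeps one hung scoring call from stalling a step.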
env_wrapper = PolyGuardEnvWrapper(score_timeout_s=30.0)
def build_polyguard_reward_fn(group_name: str) -> Callable:
    from app.training.grpo_rewards import make_group_reward_fn
    return make_group_reward_fn(env_wrapper=env_wrapper, group_name=group_name)
# TRL GRPO expects a single reward function that returns List[float]
# We combine the 4 reward groups into a weighted aggregate
from app.training.grpo_rewards import REWARD_GROUPS # noqa: E402
_group_fns = {g: build_polyguard_reward_fn(g) for g in REWARD_GROUPS}
def combined_reward_fn(
    prompts: List[str],
    completions: List[str],
    **kwargs,
) -> List[float]:
    weights = {
        "format_schema": 0.20,
        "clinical_safety": 0.45,
        "process_grounding": 0.20,
        "anti_hack": 0.15,
    }
    n = len(prompts)
    totals = [0.0] * n
    for group, fn in _group_fns.items():
        group_rewards = fn(prompts=prompts, completions=completions, **kwargs)
        w = weights.get(group, 0.25)
        for i, r in enumerate(group_rewards):
            totals[i] += w * float(r)
    # Clamp to [0.001, 0.999]
    return [max(0.001, min(0.999, round(v, 3))) for v in totals]
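# Sanity check on the weights (worked example): a completion scoring 1.0 on
# all four groups totals 0.20 + 0.45 + 0.20 + 0.15 = 1.00, which the clamp
# caps at 0.999; a uniform 0.5 across groups lands at exactly 0.500.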
# ─── Step 5: GRPO Training ──────────────────────────────────────────────────
ds = Dataset.from_list([{"prompt": p["prompt"]} for p in prompts]).shuffle(seed=SEED)
grpo_config_kwargs: Dict[str, Any] = {
    "output_dir": OUTPUT_DIR,
    "num_train_epochs": EPOCHS,
    "per_device_train_batch_size": BATCH_SIZE,
    "gradient_accumulation_steps": GRAD_ACCUM,
    "learning_rate": 1e-5,
    "bf16": dtype == torch.bfloat16,
    "fp16": dtype == torch.float16,
    "logging_steps": 5,
    "save_steps": 50,
    "save_total_limit": 2,
    "seed": SEED,
    "report_to": ["trackio"],
    "run_name": "polyguard-grpo-qwen",
    "project": "polyguard-training",
    "push_to_hub": True,
    "hub_model_id": HUB_MODEL_ID,
    "hub_strategy": "every_save",
    "num_generations": GROUP_SIZE,
    "max_prompt_length": MAX_PROMPT_LEN,
    "max_completion_length": MAX_COMPLETION_LEN,
    "temperature": 0.9,
    "beta": 0.1,
}
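# GRPOConfig's accepted fields vary across TRL releases; filter the kwargs
# against the constructor signature so unsupported options are dropped
# silently instead of raising TypeError.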
grpo_params = set(inspect.signature(GRPOConfig).parameters)
grpo_config = GRPOConfig(**{k: v for k, v in grpo_config_kwargs.items() if k in grpo_params})
trainer_kwargs: Dict[str, Any] = {
    "model": model,
    "args": grpo_config,
    "train_dataset": ds,
    "reward_funcs": combined_reward_fn,
}
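# Newer TRL releases take the tokenizer via `processing_class`, older ones
# via `tokenizer`; introspect the constructor and pass whichever it accepts.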
trainer_params = set(inspect.signature(GRPOTrainer).parameters)
if "processing_class" in trainer_params:
trainer_kwargs["processing_class"] = tokenizer
elif "tokenizer" in trainer_params:
trainer_kwargs["tokenizer"] = tokenizer
trainer = GRPOTrainer(**{k: v for k, v in trainer_kwargs.items() if k in trainer_params})
print(f"\n GRPO dataset size: {len(ds)}")
print(f" Group size: {GROUP_SIZE}")
print(f" Starting GRPO training...\n")
train_result = trainer.train()
print("\n GRPO training complete.")
print(f" Steps: {train_result.global_step}")
print(f"\n[5/5] Pushing GRPO model to Hub: {HUB_MODEL_ID}...")
trainer.push_to_hub()
tokenizer.push_to_hub(HUB_MODEL_ID)
print("\n" + "=" * 60)
print("GRPO training complete!")
print(f"Model saved to: https://huggingface.co/{HUB_MODEL_ID}")
print("=" * 60)