"""GRPO training loop using Unsloth + TRL + W&B.
Requires the ``[train]`` optional dependency group. Importing this module on
a machine without the heavy ML deps installed will fail at module load,
which is the documented contract — local development tools (env server,
verifier, demo UI) live in lighter modules and remain usable.
Run via::
python -m physix.training.loop \
--model Qwen/Qwen2.5-1.5B-Instruct \
--output-dir runs/physix-1.5b-rl \
--num-steps 300
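or, warm-starting from a previous run's pushed adapter (the repo id here
is illustrative; see ``--lora-adapter-repo`` below)::
python -m physix.training.loop \
--model Qwen/Qwen2.5-1.5B-Instruct \
--lora-adapter-repo user/physix-1.5b-rl-ckpt \
--output-dir runs/physix-1.5b-rl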
Environment variables:
- ``WANDB_PROJECT`` (default ``physix-live``)
- ``HUGGINGFACE_HUB_TOKEN`` if pushing the adapter to the Hub
"""
from __future__ import annotations
import argparse
import logging
import os
from pathlib import Path
from typing import Literal, Optional
import torch
from datasets import Dataset
from pydantic import BaseModel, ConfigDict
from transformers import AutoTokenizer, TrainerCallback, TrainerControl, TrainerState
from transformers import TrainingArguments as HFTrainingArguments
from physix.systems import SUPPORTED_SYSTEMS
from physix.training.dataset import (
DatasetSpec,
build_training_dataset,
)
from physix.training.reward_fns import make_reward_funcs
from physix.training.scorer import Scorer
# IMPORTANT: Unsloth's GRPO patches must be applied *before* importing
# ``GRPOTrainer`` so its kernels are swapped in. Without this, the trainer
# falls back to the stock TRL path and Unsloth's optimisations are bypassed
# (and on recent versions the import will hard-fail). Keep this block
# directly above the ``trl`` import — order matters.
#
# Version note: this requires ``trl<=0.24.0``. Newer TRL versions ship
# ``trl.experimental.openenv`` which Unsloth's ``patch_trl_openenv``
# hook tries to ``inspect.getsource()`` on; that fails with ``OSError:
# could not get source code`` and crashes ``PatchFastRL``. ``trl==0.24.0``
# is the pinned upper bound declared in unsloth's pyproject.toml.
from unsloth import FastLanguageModel, PatchFastRL # noqa: E402
PatchFastRL("GRPO", FastLanguageModel)
from trl import GRPOConfig, GRPOTrainer # noqa: E402 (must come after PatchFastRL)
_log = logging.getLogger(__name__)
Ablation = Literal["no_progress", "no_simplicity", "no_format"]
SaveMethod = Literal["lora", "merged_16bit", "merged_4bit"]
class TrainingConfig(BaseModel):
"""All hyperparameters in one place; the CLI populates this."""
model_config = ConfigDict(frozen=True)
model_name: str = "Qwen/Qwen2.5-1.5B-Instruct"
#: Optional path to the merged SFT model produced by the warm-start step
#: (``sft.py``). When set, GRPO loads this checkpoint in place of the cold
#: base model. Without it the base model rarely produces any reward signal
#: in early steps.
sft_checkpoint: Optional[str] = None
#: Optional Hub repo id (or local path) of an existing LoRA adapter to
#: warm-start GRPO from — e.g. a previous GRPO run that was interrupted
#: and pushed checkpoints to ``hub_checkpoint_repo_id``. When set, the
#: base ``model_name`` is loaded and this adapter is applied as the
#: starting trainable LoRA (skipping the fresh ``get_peft_model`` call).
#: SFT is unnecessary in this case (the adapter is already downstream
#: of an SFT warm-start), so leave ``sft_checkpoint`` unset when using
#: this flag.
lora_adapter_repo: Optional[str] = None
output_dir: str = "runs/physix-1.5b-rl"
max_seq_length: int = 2048
lora_r: int = 16
lora_alpha: int = 32
learning_rate: float = 5.0e-6
temperature: float = 0.9
max_completion_length: int = 256
beta: float = 0.04
num_generations: int = 4
per_device_train_batch_size: int = 1
gradient_accumulation_steps: int = 8
num_steps: int = 300
#: Stop early if ``reward_std`` stays below 0.05 for this many consecutive
#: logged steps. Set to 0 to disable early stopping.
early_stop_patience: int = 50
seed: int = 0
instances_per_system: int = 32
#: Subset of system IDs to train on. Defaults to all SUPPORTED_SYSTEMS.
#: Pass a single ID (e.g. ``("damped_spring",)``) for focused single-task runs.
system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS
ablation: Optional[Ablation] = None
wandb_project: str = "physix-live"
wandb_run_name: Optional[str] = None
push_to_hub: bool = False
hub_repo_id: Optional[str] = None
#: HF repo to push LoRA checkpoints to every save_steps during GRPO.
#: Separate from hub_repo_id (which receives the final merged model).
#: Set this to enable mid-run checkpoint persistence and W&B artifact logging.
hub_checkpoint_repo_id: Optional[str] = None
#: Path to a Trainer checkpoint dir to resume GRPO from (e.g. from a
#: previous run killed mid-training). Set automatically by train.sh.
resume_from_checkpoint: Optional[str] = None
#: How to persist the final adapter. ``"lora"`` saves only the adapter
#: weights (small, requires the base model at load time). ``"merged_16bit"``
#: merges the adapter into the base and saves a deployable bf16/fp16
#: checkpoint (large, but loadable as a normal HF model — what you want
#: for Hub pushes and Ollama exports).
save_method: SaveMethod = "merged_16bit"
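# A minimal programmatic sketch (values illustrative, not tuned): the CLI in
# ``_parse_args`` is the usual entry point, but for smoke tests the config
# can be built directly, e.g.
#
#     config = TrainingConfig(
#         system_ids=("damped_spring",),   # single system for a fast loop
#         num_steps=20,
#         early_stop_patience=0,           # disable early stopping
#     )
#     train(config)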
def train(config: TrainingConfig) -> None:
"""Run a full GRPO training loop with the given configuration."""
_configure_logging()
import wandb
run_name = config.wandb_run_name or f"physix-grpo-{config.num_steps}steps"
wandb.init(
project=config.wandb_project,
name=run_name,
config=config.model_dump(),
tags=["grpo", "physix", config.model_name.split("/")[-1]],
resume="allow",
)
# Pin a few high-signal pointers into the run summary right away so the
# W&B "Overview" tab shows them prominently (no scrolling, no hunting).
if config.hub_checkpoint_repo_id:
ckpt_url = f"https://huggingface.co/{config.hub_checkpoint_repo_id}"
wandb.run.summary["checkpoint/repo"] = config.hub_checkpoint_repo_id
wandb.run.summary["checkpoint/repo_url"] = ckpt_url
if config.hub_repo_id:
wandb.run.summary["model/final_repo"] = config.hub_repo_id
wandb.run.summary["model/final_url"] = (
f"https://huggingface.co/{config.hub_repo_id}"
)
if config.lora_adapter_repo:
wandb.run.summary["resume/from_adapter"] = config.lora_adapter_repo
wandb.run.summary["resume/from_url"] = (
f"https://huggingface.co/{config.lora_adapter_repo}"
)
# If a parent W&B run is named (set by the orchestrator script),
# surface it prominently so the lineage is one click away.
parent_run = os.environ.get("WANDB_RESUMED_FROM")
if parent_run:
wandb.run.summary["resume/parent_wandb_run"] = parent_run
wandb.run.summary["resume/parent_wandb_url"] = (
f"https://wandb.ai/{wandb.run.entity}/{wandb.run.project}/runs/{parent_run}"
)
print(
f"\n[wandb] WARM-STARTED run — adapter "
f"https://huggingface.co/{config.lora_adapter_repo}\n",
flush=True,
)
_log.info("Loading model %s with Unsloth (4-bit, LoRA-%d)", config.model_name, config.lora_r)
model, tokenizer = _load_model_and_tokenizer(config)
train_dataset = _build_and_format_dataset(config, tokenizer)
reward_funcs = _select_reward_funcs(config.ablation)
grpo_config = _build_grpo_config(config)
callbacks = []
if config.early_stop_patience > 0:
callbacks.append(_RewardConvergenceCallback(patience=config.early_stop_patience))
_log.info(
"Early stopping enabled: will stop if reward_std < 0.05 for %d consecutive steps",
config.early_stop_patience,
)
if config.hub_checkpoint_repo_id:
callbacks.append(_WandbCheckpointCallback(config.hub_checkpoint_repo_id))
_log.info(
"Checkpoint hub push enabled → %s (every %d steps)",
config.hub_checkpoint_repo_id,
grpo_config.save_steps,
)
trainer = GRPOTrainer(
model=model,
processing_class=tokenizer,
args=grpo_config,
train_dataset=train_dataset,
reward_funcs=reward_funcs,
callbacks=callbacks or None,
)
if config.resume_from_checkpoint:
_log.info("Resuming from checkpoint: %s", config.resume_from_checkpoint)
_log.info("Starting GRPO training for %d steps", config.num_steps)
trainer.train(resume_from_checkpoint=config.resume_from_checkpoint)
_log_reward_summary(trainer)
_render_training_curves(trainer, config)
_log.info("Saving adapter (%s) to %s", config.save_method, config.output_dir)
_save_artifacts(model, tokenizer, config)
wandb.finish()
def _log_reward_summary(trainer: "GRPOTrainer") -> None:
"""Emit a final reward-signal summary at end of training.
Pulls the first and last ``log_history`` entries that contain reward keys
and prints the first → last delta for every reward metric found. If *no*
reward keys are present we hard-fail — that means the reward functions
never produced a non-NaN value, which is a real bug worth surfacing.
Note on ``train/loss``: this scalar IS the GRPO surrogate objective
(advantage-weighted token log-probabilities, plus the KL-to-ref penalty
when ``beta > 0``). Per the TRL docs (``trl/docs/source/grpo_trainer.md``)
the ``Trainer`` superclass logs the full surrogate as ``loss``, not just
the KL term. So ``train/loss`` collapsing without ``train/reward`` rising
is a real failure mode — typically a sign of reward hacking or saturated
advantages — and should be debugged, not dismissed.
"""
history = getattr(trainer.state, "log_history", []) or []
reward_entries = [
entry for entry in history
if any(k.startswith("rewards/") or k == "reward" for k in entry)
]
if not reward_entries:
_log.error(
"No reward metrics logged during training. This usually means "
"every rollout failed to parse. Check `train/reward` in W&B and "
"the most recent completion samples."
)
raise RuntimeError(
"GRPO produced no reward metrics — training silently failed."
)
last = reward_entries[-1]
first = reward_entries[0]
_log.info("=" * 60)
_log.info("GRPO reward summary (first → last logged step):")
for key in sorted(last):
if key.startswith("rewards/") or key == "reward":
v0 = first.get(key)
v1 = last.get(key)
if isinstance(v0, (int, float)) and isinstance(v1, (int, float)):
_log.info(" %-40s %.4f → %.4f (Δ=%+.4f)", key, v0, v1, v1 - v0)
_log.info("-" * 60)
_log.info("Interpretation guide:")
_log.info(" train/loss — full GRPO surrogate (policy + KL*beta).")
_log.info(" Should DECREASE as advantages get exploited.")
_log.info(" train/reward — mean episode reward across rollouts.")
_log.info(" Should INCREASE; this is the headline curve.")
_log.info(" train/kl — KL(policy || ref). Should grow slowly.")
_log.info(" rewards/*/mean — per-component reward (match, simplicity, …).")
_log.info("Loss-down WITHOUT reward-up is a red flag (reward hacking or")
_log.info("advantage saturation).")
_log.info("=" * 60)
def _render_training_curves(
trainer: "GRPOTrainer",
config: TrainingConfig,
) -> None:
"""Render the headline training curves to PNG and ship them.
Why we do this in-process at end of training (instead of pulling from
W&B post-hoc):
1. The competition's automated validation requires PNG plots committed
to the public repo at submission time. Wandb-only links don't count.
2. ``trainer.state.log_history`` already contains every metric the
Trainer logged step-by-step — no API roundtrip needed.
3. We can also push the PNGs to the model Hub repo so they're discoverable
from the model card without a separate deploy step.
Renders three curves:
- ``loss.png`` — ``train/loss`` over global step.
GRPO surrogate; SHOULD trend down.
- ``reward.png`` — ``reward`` (or ``train/reward``) over step
with ±1σ band. SHOULD trend up.
- ``reward_components.png`` — overlay of every ``rewards/<name>/mean``
so reward hacking shows up visually
(e.g. ``simplicity`` rising while
``match`` regresses).
Failures are logged and swallowed — a missing plot must not crash a
successful training run, since the model artefact is still useful.
"""
try:
import matplotlib
matplotlib.use("Agg") # headless / no display server in HF Jobs
import matplotlib.pyplot as plt
except Exception as exc: # noqa: BLE001
_log.warning("matplotlib unavailable, skipping curve PNGs: %s", exc)
return
history = list(getattr(trainer.state, "log_history", []) or [])
if not history:
_log.warning("No log_history found — cannot render curves.")
return
plots_dir = Path(config.output_dir) / "plots"
plots_dir.mkdir(parents=True, exist_ok=True)
def _series(metric: str) -> tuple[list[int], list[float]]:
xs: list[int] = []
ys: list[float] = []
for entry in history:
if metric in entry and "step" in entry:
value = entry[metric]
if isinstance(value, (int, float)):
xs.append(int(entry["step"]))
ys.append(float(value))
return xs, ys
rendered: list[Path] = []
# 1) Loss — the GRPO surrogate.
steps_l, losses = _series("loss")
if steps_l:
fig, ax = plt.subplots(figsize=(8, 4.5))
ax.plot(steps_l, losses, color="#d62728", linewidth=1.8)
ax.set_xlabel("training step")
ax.set_ylabel("GRPO surrogate loss")
ax.set_title("PhysiX GRPO — train/loss (lower is better)")
ax.grid(alpha=0.3)
path = plots_dir / "loss.png"
fig.tight_layout()
fig.savefig(path, dpi=140)
plt.close(fig)
rendered.append(path)
else:
_log.warning("No 'loss' entries in log_history.")
# 2) Reward — headline curve (with ±std band when available).
steps_r, rewards = _series("reward")
_, reward_std = _series("reward_std")
if steps_r:
fig, ax = plt.subplots(figsize=(8, 4.5))
ax.plot(steps_r, rewards, color="#2ca02c", linewidth=2.0, label="mean reward")
if reward_std and len(reward_std) == len(rewards):
import numpy as np
r = np.asarray(rewards)
s = np.asarray(reward_std)
ax.fill_between(steps_r, r - s, r + s, color="#2ca02c", alpha=0.18,
label="±1σ across rollouts")
ax.set_xlabel("training step")
ax.set_ylabel("mean reward (sum of components)")
ax.set_title("PhysiX GRPO — train/reward (higher is better)")
ax.legend(loc="best")
ax.grid(alpha=0.3)
path = plots_dir / "reward.png"
fig.tight_layout()
fig.savefig(path, dpi=140)
plt.close(fig)
rendered.append(path)
else:
_log.warning("No 'reward' entries in log_history.")
# 3) Per-component reward overlay — exposes reward hacking patterns.
component_keys = sorted({
k for entry in history for k in entry
if k.startswith("rewards/") and k.endswith("/mean")
})
if component_keys:
fig, ax = plt.subplots(figsize=(8, 4.5))
for k in component_keys:
xs, ys = _series(k)
if xs:
label = k.removeprefix("rewards/").removesuffix("/mean")
ax.plot(xs, ys, linewidth=1.6, label=label)
ax.set_xlabel("training step")
ax.set_ylabel("component mean reward")
ax.set_title("PhysiX GRPO — per-component reward (rewards/*/mean)")
ax.legend(loc="best", fontsize=8)
ax.grid(alpha=0.3)
path = plots_dir / "reward_components.png"
fig.tight_layout()
fig.savefig(path, dpi=140)
plt.close(fig)
rendered.append(path)
if not rendered:
_log.warning("No PNGs rendered — log_history had no recognised metrics.")
return
_log.info("Rendered %d curve PNG(s) to %s", len(rendered), plots_dir)
# Log the PNGs as wandb.Images so they appear in the run's Media tab,
# and persist to the run summary as a reference table.
try:
import wandb
if wandb.run is not None:
wandb.log({
f"plots/{p.stem}": wandb.Image(str(p)) for p in rendered
})
_log.info("Logged %d plot(s) to wandb.Media", len(rendered))
except Exception as exc: # noqa: BLE001
_log.warning("Could not log plots to wandb: %s", exc)
# Push PNGs to the final Hub model repo under ``plots/`` so the model
# card can render them and ``sync-plots.sh`` can pull them locally.
if config.push_to_hub and config.hub_repo_id:
try:
from huggingface_hub import HfApi, create_repo
api = HfApi(token=os.environ.get("HUGGINGFACE_HUB_TOKEN"))
create_repo(
repo_id=config.hub_repo_id,
repo_type="model",
exist_ok=True,
token=os.environ.get("HUGGINGFACE_HUB_TOKEN"),
)
for p in rendered:
api.upload_file(
path_or_fileobj=str(p),
path_in_repo=f"plots/{p.name}",
repo_id=config.hub_repo_id,
repo_type="model",
commit_message=f"plots: {p.name}",
)
_log.info(
"Pushed %d plot(s) to https://huggingface.co/%s/tree/main/plots",
len(rendered),
config.hub_repo_id,
)
except Exception as exc: # noqa: BLE001
_log.warning("Could not push plots to Hub: %s", exc)
def _load_model_and_tokenizer(
config: TrainingConfig,
) -> tuple[FastLanguageModel, AutoTokenizer]:
"""Load Qwen via Unsloth in 4-bit and attach a LoRA adapter.
If ``config.sft_checkpoint`` is set, the merged SFT checkpoint is loaded
in place of the raw base model before GRPO starts. This gives GRPO a warm
base policy that already knows the JSON format and equation grammar, so
early rollouts produce meaningful reward signal instead of all scoring zero.
"""
if config.lora_adapter_repo:
# Resume path: load the base model and attach the existing LoRA
# adapter via PEFT. We deliberately do NOT call
# ``FastLanguageModel.from_pretrained(model_name=adapter_repo)``
# because the adapter's ``adapter_config.json`` may carry a stale
# ``base_model_name_or_path`` pointing at a path that only existed
# inside the previous training container (e.g. ``/tmp/physix-sft/merged``).
# PEFT's ``load_adapter`` ignores that field — it adapts onto whatever
# base we hand it.
_log.info(
"Resuming from existing LoRA adapter %s on top of %s",
config.lora_adapter_repo,
config.model_name,
)
model, tokenizer = FastLanguageModel.from_pretrained(
model_name=config.model_name,
max_seq_length=config.max_seq_length,
load_in_4bit=True,
dtype=None,
)
# Wrap the base in a fresh trainable LoRA, then overwrite its
# weights with the saved adapter. We use the adapter's own r/alpha
# by relying on PEFT's ``load_adapter`` resolving from the repo's
# adapter_config.json. The dummy ``get_peft_model`` call is just to
# turn the model into a ``PeftModel`` instance whose ``load_adapter``
# method accepts a hub repo id.
model = FastLanguageModel.get_peft_model(
model,
r=config.lora_r,
lora_alpha=config.lora_alpha,
target_modules=[
"q_proj", "k_proj", "v_proj", "o_proj",
"gate_proj", "up_proj", "down_proj",
],
bias="none",
use_gradient_checkpointing="unsloth",
random_state=config.seed,
)
# Overwrite the freshly-initialised LoRA weights with the saved ones.
# ``adapter_name='default'`` matches what ``get_peft_model`` creates.
model.load_adapter(
config.lora_adapter_repo,
adapter_name="default",
is_trainable=True,
)
_log.info("Adapter loaded; LoRA is trainable and ready for GRPO.")
return model, tokenizer
if config.sft_checkpoint:
_log.info(
"Loading SFT-warmed model from %s (GRPO will refine from here)",
config.sft_checkpoint,
)
model, tokenizer = FastLanguageModel.from_pretrained(
model_name=config.sft_checkpoint,
max_seq_length=config.max_seq_length,
load_in_4bit=True,
dtype=None,
)
else:
_log.warning(
"No --sft-checkpoint supplied. Starting GRPO from cold base model. "
"Early reward signal will be near-zero; consider running sft.py first."
)
model, tokenizer = FastLanguageModel.from_pretrained(
model_name=config.model_name,
max_seq_length=config.max_seq_length,
load_in_4bit=True,
dtype=None,
)
model = FastLanguageModel.get_peft_model(
model,
r=config.lora_r,
lora_alpha=config.lora_alpha,
target_modules=[
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
],
bias="none",
use_gradient_checkpointing="unsloth",
random_state=config.seed,
)
return model, tokenizer
def _build_and_format_dataset(
config: TrainingConfig,
tokenizer: AutoTokenizer,
) -> Dataset:
spec = DatasetSpec(
system_ids=config.system_ids,
instances_per_system=config.instances_per_system,
seed=config.seed,
)
dataset = build_training_dataset(spec)
_log.info(
"Built training dataset: %d rows across %d systems (%s)",
len(dataset),
len(config.system_ids),
", ".join(config.system_ids),
)
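# ``apply_chat_template`` renders each message list into the model's chat
# markup. For Qwen2.5 that is ChatML-style, so a formatted prompt looks
# roughly like this (illustrative; the real template comes from the
# tokenizer):
#
#     <|im_start|>user
#     ...<|im_end|>
#     <|im_start|>assistant
#
# ``add_generation_prompt=True`` appends that trailing assistant header so
# the rollout starts generating the answer immediately.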
def _apply_chat_template(example: dict[str, object]) -> dict[str, object]:
formatted = tokenizer.apply_chat_template(
example["prompt"],
tokenize=False,
add_generation_prompt=True,
)
return {"prompt": formatted}
return dataset.map(_apply_chat_template)
def _select_reward_funcs(ablation: Optional[Ablation]) -> list[object]:
"""Return the GRPO reward function set.
Default set (5 functions, summed by GRPOTrainer into the advantage):
- ``reward_match`` — raw R² (linear).
- ``reward_match_dense`` — sqrt(R²); dense low-value gradient.
- ``reward_correctness`` — binary cliff at R² ≥ 0.70.
- ``reward_simplicity`` — gated on R² ≥ 0.10 (anti-hack).
- ``reward_format`` — 1.0 only if parsed AND simulated.
Why this composition: empirically (RCA from W&B run 5kuqns9x) the
previous ``{match, progress, simplicity, format}`` mix had a
progress-equals-match duplicate (single-turn ``previous_r_match=0``)
AND let the model farm format+simplicity by emitting trivial
parseable equations. The new set both removes the duplicate and
triple-weights correctness via three different correctness-shaped
signals (match, match_dense, correctness_bonus) so that physical
accuracy dominates the GRPO advantage.
Ablations strip one signal at a time (used by the experiment matrix,
not by the main runs).
"""
scorer = Scorer()
funcs = make_reward_funcs(scorer)
full = [
funcs["match"],
funcs["match_dense"],
funcs["correctness"],
funcs["simplicity"],
funcs["format"],
]
if ablation is None:
return full
if ablation == "no_simplicity":
return [funcs["match"], funcs["match_dense"], funcs["correctness"], funcs["format"]]
if ablation == "no_format":
return [funcs["match"], funcs["match_dense"], funcs["correctness"], funcs["simplicity"]]
if ablation == "no_progress":
# Backward-compat alias: ``progress`` no longer exists; the new
# reward set already excludes it. Treat ``no_progress`` as the
# full default set so old job configs keep working without surprises.
return full
raise ValueError(
f"Unknown ablation {ablation!r}. Choose from "
"no_progress | no_simplicity | no_format | None."
)
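# How the returned list is consumed (per TRL's GRPO docs): the trainer calls
# every reward function on each batch of completions and sums their outputs
# elementwise into the total reward before computing group advantages. A
# sketch with hypothetical per-function scores for one completion:
#
#     scores = {"match": 0.55, "match_dense": 0.74, "correctness": 0.0,
#               "simplicity": 0.3, "format": 1.0}
#     total = sum(scores.values())  # 2.59, the quantity `reward` averages
#
# This is why one saturated component (e.g. format) can mask regressions in
# the others, and why the per-component reward curves matter.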
class _RewardConvergenceCallback(TrainerCallback):
"""Stop training early when the GRPO reward has converged.
Convergence criterion: ``reward_std`` (std of total reward across the
rollout batch) stays below ``min_std`` for ``patience`` consecutive
logged steps. When ``reward_std ≈ 0`` every generation scores the
same, so the GRPO advantage estimates are all zero and the policy
gradient vanishes — continuing burns compute without learning.
The callback also logs the early-stop event to W&B so the decision
is visible on the run page.
"""
def __init__(self, patience: int = 50, min_std: float = 0.05) -> None:
self._patience = patience
self._min_std = min_std
self._flat_steps: int = 0
def on_log(
self,
args: HFTrainingArguments,
state: TrainerState,
control: TrainerControl,
logs: dict | None = None,
**kwargs,
) -> None:
if not logs:
return
reward_std = logs.get("reward_std")
if reward_std is None:
return
if reward_std < self._min_std:
self._flat_steps += 1
else:
self._flat_steps = 0
if self._flat_steps >= self._patience:
step = state.global_step
msg = (
f"[early-stop] reward_std < {self._min_std} for "
f"{self._flat_steps} consecutive steps at step {step}. "
"Stopping training — policy has converged."
)
print(f"\n{msg}\n", flush=True)
_log.info(msg)
try:
import wandb
if wandb.run is not None:
wandb.run.summary["early_stop/step"] = step
wandb.run.summary["early_stop/reason"] = (
f"reward_std < {self._min_std} for {self._flat_steps} steps"
)
wandb.log({"early_stop/triggered": 1}, step=step)
except Exception as exc: # noqa: BLE001
_log.debug("Could not log early-stop event to W&B: %s", exc)
control.should_training_stop = True
class _WandbCheckpointCallback(TrainerCallback):
"""Make checkpoints first-class in W&B.
After every Trainer save, this callback:
1. Resolves the latest commit hash on the Hub repo (best-effort — the
trainer's built-in hub push (``hub_strategy="checkpoint"``) uploads
asynchronously, so we may briefly see an older commit; that is fine,
it self-corrects on the next save).
2. Updates the W&B run summary with persistent, prominent keys
(visible in the "Overview" tab of the run):
- ``checkpoint/last_step``
- ``checkpoint/last_commit``
- ``checkpoint/repo_url``
- ``checkpoint/last_url``
3. Logs a step-indexed scalar ``checkpoint/step`` so a chart appears
on the W&B run page (one tick per save).
4. Maintains a running ``checkpoint_history`` ``wandb.Table`` so every
saved checkpoint is browsable as a sortable table directly on the
run page (Tables tab).
5. Prints a banner to stdout (visible in ``hf jobs logs``) with the
direct URL — so the checkpoint is also impossible to miss in the
job logs.
No model bytes are uploaded to W&B; the actual weights live on the HF
Hub checkpoint repo. We never crash training if any of this fails.
"""
def __init__(self, hub_checkpoint_repo_id: str) -> None:
self._repo = hub_checkpoint_repo_id
self._repo_url = f"https://huggingface.co/{hub_checkpoint_repo_id}"
self._table = None # lazy: wandb may not be initialised at __init__
def on_train_begin(
self,
args: HFTrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
) -> None:
# Pin the repo URL into the run config + summary at the very start
# so the link is visible on the W&B "Overview" panel from step 0.
try:
import wandb
if wandb.run is None:
return
wandb.run.summary["checkpoint/repo_url"] = self._repo_url
wandb.run.summary["checkpoint/repo"] = self._repo
wandb.config.update(
{"checkpoint_repo_url": self._repo_url, "checkpoint_repo": self._repo},
allow_val_change=True,
)
print(
f"\n[wandb] Checkpoint repo pinned in run summary: {self._repo_url}\n",
flush=True,
)
# Stash the W&B run id at the *root* of the checkpoint repo so a
# future re-launch can find it without W&B API calls. It lives
# alongside the checkpoints and costs ~36 bytes. We do this once at
# train begin instead of on every save to avoid hundreds of
# redundant commits.
self._publish_wandb_run_id(wandb.run.id)
except Exception as exc: # noqa: BLE001
_log.warning("Could not pin checkpoint repo to W&B summary: %s", exc)
def _publish_wandb_run_id(self, run_id: str) -> None:
try:
import tempfile
from huggingface_hub import HfApi, create_repo
token = os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN")
api = HfApi(token=token)
create_repo(self._repo, exist_ok=True, repo_type="model", token=token)
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
tmp.write(run_id)
tmp_path = tmp.name
api.upload_file(
path_or_fileobj=tmp_path,
path_in_repo="wandb_run_id.txt",
repo_id=self._repo,
repo_type="model",
commit_message=f"Pin W&B run id {run_id}",
token=token,
)
print(f"[wandb] Published run_id={run_id} to {self._repo_url}/wandb_run_id.txt", flush=True)
except Exception as exc: # noqa: BLE001
_log.warning("Could not publish wandb run id (non-fatal): %s", exc)
def on_save(
self,
args: HFTrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
) -> None:
try:
import wandb
if wandb.run is None:
return
step = state.global_step
commit_sha = self._latest_commit_sha()
short = (commit_sha or "pending")[:8]
tree_url = (
f"{self._repo_url}/tree/{commit_sha}"
if commit_sha
else f"{self._repo_url}/tree/main"
)
# 1. Persistent summary keys (top-of-run, always visible).
wandb.run.summary["checkpoint/last_step"] = step
wandb.run.summary["checkpoint/last_commit"] = commit_sha or "pending"
wandb.run.summary["checkpoint/last_url"] = tree_url
# 2. Step-indexed scalar so a small chart appears on the run page.
wandb.log({"checkpoint/step": step}, step=step)
# 3. Running history table.
if self._table is None:
self._table = wandb.Table(
columns=["step", "commit", "url", "repo"]
)
self._table.add_data(step, commit_sha or "pending", tree_url, self._repo)
# Re-log the entire table each time so the latest version shows.
wandb.log({"checkpoint_history": self._table}, step=step)
# 4. Pointer-only W&B Artifact (~200 bytes JSON). Doesn't upload
# weights — those are on the Hub already — but makes every
# checkpoint a first-class, addressable W&B artifact that can
# be looked up later by `wandb artifact get`. Side effect:
# populates the run's "Artifacts" panel with one entry per save.
if commit_sha:
from physix.training.checkpoints import (
CheckpointHandle,
log_link_artifact_to_wandb,
)
handle = CheckpointHandle(
repo_id=self._repo,
subfolder=f"checkpoint-{step}",
revision=commit_sha,
step=step,
)
log_link_artifact_to_wandb(
handle,
artifact_name="physix-grpo-checkpoint",
)
# 5. Stdout banner — also visible in `hf jobs logs`.
print(
"\n"
"================ CHECKPOINT SAVED ================\n"
f" step : {step}\n"
f" commit: {short}\n"
f" url : {tree_url}\n"
f" repo : {self._repo_url}\n"
"==================================================\n",
flush=True,
)
_log.info(
"W&B checkpoint metadata logged: step=%d commit=%s",
step,
short,
)
except Exception as exc: # noqa: BLE001
_log.warning(
"W&B checkpoint callback skipped at step %d: %s. "
"Training continues; the actual checkpoint is still pushed "
"to the HF Hub by the trainer's PushToHubCallback.",
state.global_step,
exc,
)
def _latest_commit_sha(self) -> Optional[str]:
"""Best-effort fetch of the most recent commit on the checkpoint repo.
Uses ``HfApi.list_repo_commits`` if available; returns ``None`` on
any failure. The async ``git push`` may not be done at the instant
``on_save`` fires, so we may see the *previous* checkpoint's commit;
that's acceptable — it self-corrects on the next save.
"""
try:
from huggingface_hub import HfApi
api = HfApi(token=os.environ.get("HUGGINGFACE_HUB_TOKEN"))
commits = api.list_repo_commits(repo_id=self._repo, repo_type="model")
if commits:
return commits[0].commit_id
except Exception as exc: # noqa: BLE001
_log.debug("Could not fetch latest commit sha: %s", exc)
return None
def _build_grpo_config(config: TrainingConfig) -> GRPOConfig:
# Note on the metrics this run will produce in W&B (per TRL docs):
# train/loss — the GRPO surrogate objective being minimized.
# = -E[advantage * logπ(action|state)] + β * KL.
# Should DECREASE as the policy exploits advantages.
# train/reward — mean total reward per rollout. Should INCREASE.
# train/kl — KL(policy || reference). Penalised with weight β; should grow slowly.
# rewards/<f>/mean — per-component reward (one per reward function).
#
# ``train/loss`` going to ~0 is fine *only* if ``train/reward`` rises in
# lockstep; that just means the advantages got fully exploited. Loss
# collapsing without reward growth is reward hacking, broken parsing, or a
# saturated KL anchor. We surface both failure modes via _log_reward_summary
# at end of training AND via _render_training_curves, which renders the
# curves to PNG.
effective_batch = (
config.per_device_train_batch_size * config.gradient_accumulation_steps
)
if effective_batch % config.num_generations != 0:
raise ValueError(
f"effective_batch_size ({effective_batch}) must be divisible by "
f"num_generations ({config.num_generations}). Adjust "
"per_device_train_batch_size, gradient_accumulation_steps, or "
"num_generations."
)
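# Worked example with the defaults above: per_device_train_batch_size=1 and
# gradient_accumulation_steps=8 give effective_batch=8, which is divisible
# by num_generations=4, i.e. two prompts per optimiser step with four
# completions scored per prompt.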
hub_kwargs: dict = {}
if config.hub_checkpoint_repo_id:
hub_kwargs = dict(
push_to_hub=True,
hub_model_id=config.hub_checkpoint_repo_id,
hub_strategy="checkpoint",
hub_token=os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN"),
)
return GRPOConfig(
output_dir=config.output_dir,
learning_rate=config.learning_rate,
per_device_train_batch_size=config.per_device_train_batch_size,
gradient_accumulation_steps=config.gradient_accumulation_steps,
num_train_epochs=1,
max_steps=config.num_steps,
num_generations=config.num_generations,
max_completion_length=config.max_completion_length,
max_prompt_length=config.max_seq_length - config.max_completion_length,
temperature=config.temperature,
beta=config.beta,
logging_steps=1,
save_strategy="steps",
save_steps=max(50, config.num_steps // 6),
report_to=["wandb"],
run_name=config.wandb_run_name,
seed=config.seed,
bf16=torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
fp16=not torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
**hub_kwargs,
)
def _save_artifacts(
model: FastLanguageModel,
tokenizer: AutoTokenizer,
config: TrainingConfig,
) -> None:
"""Persist the trained adapter via Unsloth's save path.
``save_pretrained_merged`` dispatches on ``save_method``:
- ``"lora"``: writes only the adapter weights (small; requires the base
model at load time).
- ``"merged_16bit"``: merges LoRA into base and writes a standard HF
checkpoint in bf16/fp16 (large; loadable without Unsloth, exportable to
GGUF for Ollama).
- ``"merged_4bit"``: same merge but quantised back to 4-bit.
Hub pushes use the same ``save_method`` so the on-disk artifact and the
Hub artifact are byte-identical.
"""
out_path = Path(config.output_dir)
out_path.mkdir(parents=True, exist_ok=True)
save_dir = out_path / config.save_method
model.save_pretrained_merged(
save_directory=str(save_dir),
tokenizer=tokenizer,
save_method=config.save_method,
)
if config.push_to_hub and config.hub_repo_id:
_log.info("Pushing %s artifact to Hugging Face Hub: %s", config.save_method, config.hub_repo_id)
model.push_to_hub_merged(
config.hub_repo_id,
tokenizer,
save_method=config.save_method,
token=os.environ.get("HUGGINGFACE_HUB_TOKEN"),
)
def _configure_logging() -> None:
logging.basicConfig(
level=os.environ.get("PHYSIX_LOG_LEVEL", "INFO"),
format="[%(asctime)s] %(levelname)s %(name)s | %(message)s",
)
def _parse_args() -> TrainingConfig:
parser = argparse.ArgumentParser(description="Train PhysiX-Live with GRPO.")
parser.add_argument("--model", default="Qwen/Qwen2.5-1.5B-Instruct")
parser.add_argument("--output-dir", default="runs/physix-1.5b-rl")
parser.add_argument("--num-steps", type=int, default=300)
parser.add_argument("--learning-rate", type=float, default=5.0e-6)
parser.add_argument("--num-generations", type=int, default=4)
parser.add_argument("--max-completion-length", type=int, default=256,
help="Max tokens per rollout completion. Shorter = faster generation.")
parser.add_argument("--lora-r", type=int, default=16)
parser.add_argument("--instances-per-system", type=int, default=32)
parser.add_argument(
"--system-ids",
default=None,
help=(
"Comma-separated list of system IDs to train on "
"(e.g. 'damped_spring' or 'free_fall,simple_pendulum'). "
"Defaults to all SUPPORTED_SYSTEMS when omitted."
),
)
parser.add_argument(
"--ablation",
choices=("no_progress", "no_simplicity", "no_format"),
default=None,
)
parser.add_argument(
"--save-method",
choices=("lora", "merged_16bit", "merged_4bit"),
default="merged_16bit",
help="How to persist the final adapter (merged_16bit is deployable).",
)
parser.add_argument("--sft-checkpoint", default=None,
help="Path to a merged SFT model from sft.py to warm-start from.")
parser.add_argument(
"--lora-adapter-repo",
default=None,
help=(
"Hub repo id (or local path) of an existing LoRA adapter to warm-start "
"GRPO from — e.g. a previous run's checkpoint at "
"user/physix-1.5b-rl-ckpt. Mutually exclusive with --sft-checkpoint."
),
)
parser.add_argument("--wandb-project", default="physix-live")
parser.add_argument("--wandb-run-name", default=None)
parser.add_argument("--push-to-hub", action="store_true")
parser.add_argument("--hub-repo-id", default=None)
parser.add_argument(
"--hub-checkpoint-repo-id",
default=None,
help="HF repo to push LoRA checkpoints to every save_steps (e.g. user/physix-ckpt).",
)
parser.add_argument(
"--resume-from-checkpoint",
default=None,
help="Path to a Trainer checkpoint directory to resume GRPO from.",
)
parser.add_argument(
"--early-stop-patience",
type=int,
default=50,
help=(
"Stop training early if reward_std stays below 0.05 for this many "
"consecutive steps (policy converged, GRPO advantage ≈ 0). "
"Set to 0 to disable."
),
)
parser.add_argument("--seed", type=int, default=0)
args = parser.parse_args()
if args.sft_checkpoint and args.lora_adapter_repo:
parser.error(
"--sft-checkpoint and --lora-adapter-repo are mutually exclusive. "
"Use --lora-adapter-repo to resume from a prior GRPO run, or "
"--sft-checkpoint for a fresh GRPO from a merged SFT model."
)
system_ids = (
tuple(s.strip() for s in args.system_ids.split(",") if s.strip())
if args.system_ids
else SUPPORTED_SYSTEMS
)
return TrainingConfig(
model_name=args.model,
sft_checkpoint=args.sft_checkpoint,
lora_adapter_repo=args.lora_adapter_repo,
output_dir=args.output_dir,
num_steps=args.num_steps,
learning_rate=args.learning_rate,
num_generations=args.num_generations,
max_completion_length=args.max_completion_length,
lora_r=args.lora_r,
instances_per_system=args.instances_per_system,
system_ids=system_ids,
ablation=args.ablation,
save_method=args.save_method,
wandb_project=args.wandb_project,
wandb_run_name=args.wandb_run_name,
push_to_hub=args.push_to_hub,
hub_repo_id=args.hub_repo_id,
hub_checkpoint_repo_id=args.hub_checkpoint_repo_id,
resume_from_checkpoint=args.resume_from_checkpoint,
early_stop_patience=args.early_stop_patience,
seed=args.seed,
)
def main() -> None:
config = _parse_args()
os.environ.setdefault("WANDB_PROJECT", config.wandb_project)
train(config)
if __name__ == "__main__":
main()