"""SFT warm-start before GRPO training.
Trains Qwen2.5-1.5B-Instruct for 2 epochs on supervised (prompt, completion)
pairs where the completion is the ground-truth equation in the action JSON
format the env expects. This is the essential bootstrap step: without it a
cold 1.5B model outputs LaTeX / incoherent text on ~80% of turns, yielding
near-zero GRPO advantages and a flat loss curve that wastes GPU credits.
After SFT the model:
- Emits valid JSON with ``equation``, ``params``, and ``rationale`` on >90% of turns.
- Writes equations in the ASCII grammar (``d2y/dt2 = ...``), not LaTeX.
- Knows the per-system equation family (gravity, drag, pendulum, spring).
Then GRPO refines physics accuracy via the verifiable R² reward.
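A target completion, with illustrative parameter values (the exact state
symbol and parameter names depend on the system), looks like::

    {"equation": "d2x/dt2 = -(k/m)*x - (c/m)*dx/dt",
     "params": {"k": 3.5, "m": 1.0, "c": 0.4},
     "rationale": "Ground-truth equation for damped spring."}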
Run::
python -m physix.training.sft \
--model Qwen/Qwen2.5-1.5B-Instruct \
--output-dir runs/physix-1.5b-sft \
--epochs 2 \
--instances-per-system 32
Typical runtime: 5-8 min on an A10G, 3-4 min on an A100.
"""
from __future__ import annotations
import argparse
import json
import logging
import os
from pathlib import Path
import numpy as np
from datasets import Dataset
from physix.systems import (
SUPPORTED_SYSTEMS,
SYSTEM_REGISTRY,
get_system,
)
from physix.systems.base import PhysicalSystem, TrajectoryData
from physix.training.prompt import build_prompt
from physix.models import DEFAULT_MAX_TURNS, PhysiXObservation
_log = logging.getLogger(__name__)
# ─── Dataset ──────────────────────────────────────────────────────────────────
def _gt_completion(system: PhysicalSystem) -> str:
"""Build the ground-truth completion JSON for one system.
We include the system's sampled parameters so the model learns that the
``params`` field must contain the symbols it references in the equation.
The SFT target is the *exact* JSON string the env's verifier accepts;
GRPO will later teach the model to refine parameter values per trajectory.
"""
import re as _re
eq = system.ground_truth_equation()
# Extract all identifier tokens that appear in the equation, then keep
# only those that are declared as system parameters. We use a proper
# identifier regex (not split-on-whitespace) so symbols inside function
# calls like sin(theta) and fractions like -(g/L) are caught.
reserved = set(system.state_variables) | {"dt", "d", "t", "sin", "cos",
"tan", "exp", "log", "sqrt", "abs"}
eq_tokens = set(_re.findall(r'\b([A-Za-z_][A-Za-z0-9_]*)\b', eq))
    relevant_keys = (eq_tokens & set(system.parameters)) - reserved
relevant = {k: round(system.parameters[k], 4) for k in sorted(relevant_keys)}
return json.dumps({
"equation": eq,
"params": relevant,
"rationale": (
f"Ground-truth equation for {system.system_id.replace('_', ' ')}."
),
})
def build_sft_dataset(
system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS,
instances_per_system: int = 32,
seed: int = 0,
) -> Dataset:
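    """Build the supervised (prompt, completion) dataset for the SFT warm-start.

    Each row pairs the chat-message prompt from ``build_prompt`` for a freshly
    simulated trajectory with the ground-truth completion JSON from
    ``_gt_completion``, giving ``len(system_ids) * instances_per_system`` rows.
    """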
if not system_ids:
raise ValueError("system_ids must be non-empty.")
unknown = [sid for sid in system_ids if sid not in SYSTEM_REGISTRY]
if unknown:
raise ValueError(
f"Unknown system_ids in build_sft_dataset: {unknown!r}. "
f"Registered: {sorted(SYSTEM_REGISTRY)!r}."
)
rng = np.random.default_rng(seed)
rows: list[dict] = []
for system_id in system_ids:
system = get_system(system_id)
for _ in range(instances_per_system):
trajectory = system.simulate(rng)
obs = _build_obs(system, trajectory)
prompt_messages = build_prompt(obs)
completion = _gt_completion(system)
rows.append({"prompt": prompt_messages, "completion": completion})
_log.info(
"Built SFT dataset: %d rows across %d systems (%s)",
len(rows),
len(system_ids),
", ".join(system_ids),
)
return Dataset.from_list(rows)
def _build_obs(system: PhysicalSystem, trajectory: TrajectoryData) -> PhysiXObservation:
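    """Build the turn-0 observation (empty history, no prior reward) fed to the prompt builder."""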
return PhysiXObservation(
done=False,
reward=None,
trajectory=trajectory.to_observation_samples(),
state_variables=list(system.state_variables),
hint=system.hint(system.parameters),
history=[],
mismatch_summary="",
turn=0,
turn_remaining=DEFAULT_MAX_TURNS,
system_id=system.system_id,
stats=trajectory.stats(),
reward_breakdown={},
)
# ─── Training ─────────────────────────────────────────────────────────────────
def train_sft(
model_name: str = "Qwen/Qwen2.5-1.5B-Instruct",
output_dir: str = "runs/physix-1.5b-sft",
epochs: int = 2,
max_seq_length: int = 2048,
lora_r: int = 16,
lora_alpha: int = 32,
per_device_batch_size: int = 2,
gradient_accumulation_steps: int = 4,
learning_rate: float = 2e-5,
instances_per_system: int = 32,
system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS,
seed: int = 0,
wandb_run_name: str | None = None,
hub_checkpoint_repo_id: str | None = None,
hub_token: str | None = None,
) -> None:
_configure_logging()
# Heavy imports: only available in [train] env.
import wandb
from unsloth import FastLanguageModel
from trl import SFTTrainer, SFTConfig
# Force a fresh W&B run for SFT regardless of any inherited WANDB_RUN_ID
# / WANDB_RESUME env vars (those are intended for the GRPO stage). If we
# let wandb.init() try to resume a foreign run id it will block for ~90s
# fetching that run's history before giving up.
for stale in ("WANDB_RUN_ID", "WANDB_RESUME"):
os.environ.pop(stale, None)
wandb.init(
project=os.environ.get("WANDB_PROJECT", "physix-live"),
name=wandb_run_name or f"physix-sft-{epochs}ep",
config={
"stage": "sft",
"model_name": model_name,
"epochs": epochs,
"lora_r": lora_r,
"lora_alpha": lora_alpha,
"learning_rate": learning_rate,
"per_device_batch_size": per_device_batch_size,
"gradient_accumulation_steps": gradient_accumulation_steps,
"instances_per_system": instances_per_system,
"seed": seed,
},
tags=["sft", "physix", model_name.split("/")[-1]],
)
_log.info("Loading model %s (4-bit, LoRA-%d)", model_name, lora_r)
model, tokenizer = FastLanguageModel.from_pretrained(
model_name=model_name,
max_seq_length=max_seq_length,
load_in_4bit=True,
dtype=None,
)
model = FastLanguageModel.get_peft_model(
model,
r=lora_r,
lora_alpha=lora_alpha,
target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
"gate_proj", "up_proj", "down_proj"],
bias="none",
use_gradient_checkpointing="unsloth",
random_state=seed,
)
    dataset = build_sft_dataset(
        system_ids=system_ids,
        instances_per_system=instances_per_system,
        seed=seed,
    )
def _format_row(row: dict) -> dict:
"""Combine prompt + completion into a single training string."""
messages = row["prompt"] + [{"role": "assistant", "content": row["completion"]}]
text = tokenizer.apply_chat_template(
messages, tokenize=False, add_generation_prompt=False
)
return {"text": text}
formatted = dataset.map(_format_row, remove_columns=["prompt", "completion"])
_log.info("SFT dataset ready: %d rows", len(formatted))
import torch
sft_config = SFTConfig(
output_dir=output_dir,
num_train_epochs=epochs,
per_device_train_batch_size=per_device_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
learning_rate=learning_rate,
max_seq_length=max_seq_length,
dataset_text_field="text",
packing=True,
logging_steps=1,
save_strategy="epoch",
report_to=["wandb"],
seed=seed,
        # Prefer bf16 when the GPU supports it; otherwise fall back to fp16.
        bf16=torch.cuda.is_available() and torch.cuda.is_bf16_supported(),
        fp16=torch.cuda.is_available() and not torch.cuda.is_bf16_supported(),
)
trainer = SFTTrainer(
model=model,
tokenizer=tokenizer,
args=sft_config,
train_dataset=formatted,
)
_log.info("Starting SFT for %d epochs on %d examples", epochs, len(formatted))
trainer.train()
# We save as merged_16bit (full model + config + tokenizer) rather than
# "lora" (adapter weights only). GRPO's downstream
# ``FastLanguageModel.from_pretrained(sft_checkpoint)`` needs a complete
# model directory — config.json + tokenizer + weights — to load. A bare
# adapter shard makes Unsloth raise "No config file found". The merged
# checkpoint is ~3 GB (1.5B params × 2 bytes) which is fine on /tmp.
out_path = Path(output_dir) / "merged"
out_path.mkdir(parents=True, exist_ok=True)
model.save_pretrained_merged(
save_directory=str(out_path),
tokenizer=tokenizer,
save_method="merged_16bit",
)
_log.info("SFT model (merged 16-bit) saved → %s", out_path)
if hub_checkpoint_repo_id:
# Push the merged SFT model to the same checkpoint repo GRPO uses,
# under a fixed `sft/` subfolder. Re-runs overwrite the subfolder
# but produce a new commit, so the revision SHA still uniquely
# identifies *this* SFT result.
from physix.training.checkpoints import (
SFT_SUBFOLDER,
log_link_artifact_to_wandb,
push_checkpoint_to_hub,
)
try:
handle = push_checkpoint_to_hub(
local_dir=out_path,
repo_id=hub_checkpoint_repo_id,
subfolder=SFT_SUBFOLDER,
commit_message=(
f"SFT merged_16bit: {model_name} | "
f"epochs={epochs} lora_r={lora_r}"
),
token=hub_token,
)
_log.info("SFT checkpoint pushed to Hub: %s", handle.hub_url)
wandb.run.summary["sft/hub_repo"] = handle.repo_id
wandb.run.summary["sft/hub_url"] = handle.hub_url
wandb.run.summary["sft/hub_revision"] = handle.revision
log_link_artifact_to_wandb(
handle,
artifact_name="physix-sft-checkpoint",
extra={"model_name": model_name, "epochs": epochs, "lora_r": lora_r},
)
except Exception as exc: # noqa: BLE001
# Don't kill SFT just because the hub push failed; the GRPO step
# downstream can fall back to the local /tmp checkpoint.
_log.error("SFT hub push failed (non-fatal): %s", exc)
wandb.finish()
# ─── CLI ──────────────────────────────────────────────────────────────────────
def _configure_logging() -> None:
logging.basicConfig(
level=os.environ.get("PHYSIX_LOG_LEVEL", "INFO"),
format="[%(asctime)s] %(levelname)s %(name)s | %(message)s",
)
def main() -> None:
parser = argparse.ArgumentParser(description="SFT warm-start for PhysiX RLVR.")
parser.add_argument("--model", default="Qwen/Qwen2.5-1.5B-Instruct")
parser.add_argument("--output-dir", default="runs/physix-1.5b-sft")
parser.add_argument("--epochs", type=int, default=2)
parser.add_argument("--instances-per-system", type=int, default=32)
parser.add_argument(
"--system-ids",
default=None,
help=(
"Comma-separated list of system IDs to include in the SFT dataset "
"(e.g. 'damped_spring'). Defaults to all SUPPORTED_SYSTEMS."
),
)
parser.add_argument("--lora-r", type=int, default=32)
parser.add_argument("--learning-rate", type=float, default=2e-5)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--wandb-run-name", default=None,
help="Override W&B run name. Defaults to physix-sft-{epochs}ep.")
parser.add_argument(
"--hub-checkpoint-repo-id",
default=None,
help=(
"If set, push the merged SFT model to <repo>/sft on the Hub "
"and log a pointer-only artifact to W&B."
),
)
args = parser.parse_args()
system_ids = (
tuple(s.strip() for s in args.system_ids.split(",") if s.strip())
if args.system_ids
else SUPPORTED_SYSTEMS
)
os.environ.setdefault("WANDB_PROJECT", "physix-live")
train_sft(
model_name=args.model,
output_dir=args.output_dir,
epochs=args.epochs,
lora_r=args.lora_r,
learning_rate=args.learning_rate,
instances_per_system=args.instances_per_system,
system_ids=system_ids,
seed=args.seed,
wandb_run_name=args.wandb_run_name,
hub_checkpoint_repo_id=args.hub_checkpoint_repo_id,
hub_token=os.environ.get("HF_TOKEN"),
)
if __name__ == "__main__":
main()