"""Build the prompt dataset for GRPO training.
Responsibility: enumerate the curriculum of physical systems, simulate each
one a configurable number of times, and emit a :class:`datasets.Dataset`
whose rows contain everything the training loop needs:
- ``prompt``: the chat-format string passed to the model
- ``system_id``, ``state_variables``, ``parameters``, ``initial_conditions``,
``timestamps``, ``observed``: the system context the scorer needs
- ``previous_r_match``: 0.0 at turn-0 (we train on first-turn prompts; the
iterative refinement skill emerges from the model's general ability to
read history at inference time)
Multi-turn prompts can be added later by extending this builder; the
hackathon scope deliberately keeps it to turn-0 prompts.
"""
from __future__ import annotations
from collections.abc import Iterable
import numpy as np
from datasets import Dataset
from pydantic import BaseModel, ConfigDict
from physix.models import DEFAULT_MAX_TURNS, PhysiXObservation
from physix.systems import (
SYSTEM_REGISTRY,
SUPPORTED_SYSTEMS,
SystemTier,
get_system,
list_systems_by_tier,
)
from physix.systems.base import PhysicalSystem, TrajectoryData
from physix.training.prompt import build_prompt
class DatasetSpec(BaseModel):
    """Configuration for :func:`build_training_dataset`.

    Frozen (immutable) so a spec can be shared and hashed safely; build a
    new instance to vary any field.
    """
    model_config = ConfigDict(frozen=True)
    # Registered system identifiers to generate prompts for; defaults to
    # every supported system. Validated by _validate_system_ids.
    system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS
    # Number of simulated instances (dataset rows) per system.
    instances_per_system: int = 32
    # Seed for the single numpy Generator that drives all simulations.
    seed: int = 0
class EvalDatasetSpec(BaseModel):
    """Held-out evaluation set, drawn separately so seeds do not overlap."""
    model_config = ConfigDict(frozen=True)
    # Tiers also seen during training; their eval rows measure in-distribution
    # performance.
    train_tiers: tuple[SystemTier, ...] = (SystemTier.TIER_1, SystemTier.TIER_2)
    # Tiers never trained on; their rows are flagged is_held_out=True by
    # build_eval_dataset to measure generalization.
    held_out_tiers: tuple[SystemTier, ...] = (SystemTier.TIER_3,)
    # Number of simulated instances (dataset rows) per system.
    instances_per_system: int = 8
    seed: int = 1_000_000  # large to avoid overlap with training seeds
def build_training_dataset(spec: DatasetSpec | None = None) -> Dataset:
    """Assemble the turn-0 GRPO training dataset.

    One row is produced per (system, instance) pair: the chat prompt plus
    the full system context the scorer needs.
    """
    if spec is None:
        spec = DatasetSpec()
    _validate_system_ids(spec.system_ids)
    # A single generator is threaded through every simulation so the whole
    # dataset is reproducible from spec.seed alone.
    rng = np.random.default_rng(spec.seed)
    rows = [
        _build_row(sid, rng)
        for sid in spec.system_ids
        for _ in range(spec.instances_per_system)
    ]
    return Dataset.from_list(rows)
def _validate_system_ids(system_ids: tuple[str, ...]) -> None:
    """Fail fast when the spec names zero systems or an unregistered one."""
    # Guard: an empty curriculum is always a configuration mistake.
    if not system_ids:
        raise ValueError(
            "DatasetSpec.system_ids must be non-empty. "
            f"Available: {sorted(SYSTEM_REGISTRY)!r}."
        )
    unknown = [sid for sid in system_ids if sid not in SYSTEM_REGISTRY]
    if not unknown:
        return
    raise ValueError(
        f"Unknown system_ids in DatasetSpec: {unknown!r}. "
        f"Registered: {sorted(SYSTEM_REGISTRY)!r}."
    )
def build_eval_dataset(spec: EvalDatasetSpec | None = None) -> Dataset:
    """Build a held-out evaluation dataset spanning held-out tiers too.

    Rows whose system belongs to ``spec.held_out_tiers`` carry
    ``is_held_out=True`` so downstream reporting can split metrics into
    in-distribution vs. generalization regimes.
    """
    spec = spec or EvalDatasetSpec()
    rng = np.random.default_rng(spec.seed)
    # Hoisted out of the loop: the original recomputed the held-out system
    # list (and linearly scanned it) once per emitted row. A frozenset gives
    # one computation and O(1) membership tests, with identical results.
    held_out = frozenset(_list_systems(spec.held_out_tiers))
    rows: list[dict[str, object]] = []
    for system_id in _list_systems(spec.train_tiers + spec.held_out_tiers):
        for _ in range(spec.instances_per_system):
            row = _build_row(system_id, rng)
            row["is_held_out"] = system_id in held_out
            rows.append(row)
    return Dataset.from_list(rows)
def _list_systems(tiers: Iterable[SystemTier]) -> list[str]:
    """Flatten the system ids registered under *tiers*, preserving order."""
    return [
        system_id
        for tier in tiers
        for system_id in list_systems_by_tier(tier)
    ]
def _build_row(system_id: str, rng: np.random.Generator) -> dict[str, object]:
    """Simulate *system_id* once and package the result as a dataset row."""
    system = get_system(system_id)
    trajectory = system.simulate(rng)
    observation = _build_observation(system, trajectory)
    # Per-variable observed series, converted to plain lists so the row is
    # Arrow-serializable.
    observed = {
        var: trajectory.states[var].tolist() for var in system.state_variables
    }
    row: dict[str, object] = {
        # Chat-format list of {"role", "content"} dicts.
        "prompt": build_prompt(observation),
        "system_id": system_id,
        "state_variables": list(system.state_variables),
        "parameters": dict(system.parameters),
        "initial_conditions": dict(system.initial_conditions),
        "timestamps": trajectory.timestamps.tolist(),
        "observed": observed,
        # Turn-0 prompt: no prior attempt, hence no previous match score.
        "previous_r_match": 0.0,
    }
    return row
def _build_observation(
    system: PhysicalSystem,
    trajectory: TrajectoryData,
) -> PhysiXObservation:
    """Construct a turn-0 :class:`PhysiXObservation` for a fresh system.

    :class:`PhysiXEnvironment` is deliberately bypassed: its lifecycle
    (history, convergence flag, episode budget) is irrelevant when we only
    need one standalone observation for dataset construction.
    """
    samples = trajectory.to_observation_samples()
    hint = system.hint(system.parameters)
    # Turn-0 defaults: no reward yet, empty history, full turn budget.
    return PhysiXObservation(
        done=False,
        reward=None,
        trajectory=samples,
        state_variables=list(system.state_variables),
        hint=hint,
        history=[],
        mismatch_summary="",
        turn=0,
        turn_remaining=DEFAULT_MAX_TURNS,
        system_id=system.system_id,
        stats=trajectory.stats(),
        reward_breakdown={},
    )