# Pratyush-01's picture
# Sync physix/ to merged tree (post train/ merge, pre 4ep/500step run)
# 88a3c69 verified
"""Single-completion scorer used by both training and evaluation.
Responsibility: given the agent's raw completion text plus the system context
(state variables, parameters, IC, observed trajectory), compute the same
4-component :class:`RewardBreakdown` the env produces during a normal
``step()`` call. This is the bridge between TRL's "reward function over a
batch of completions" interface and our env's verifier pipeline.
Caching: a :class:`Scorer` instance memoises by a caller-supplied integer
``cache_key`` (typically derived from ``(dataset_index, completion)``) so
per-component reward functions can each ask the scorer for the *same*
completion without re-running parse + simulate four times.
"""
from __future__ import annotations
import numpy as np
from pydantic import BaseModel, ConfigDict, Field
from physix.models import RewardBreakdown
from physix.training.prompt import parse_completion
from physix.verifier import (
ParseError,
SimulationError,
compute_match,
compute_reward,
parse_equation,
simulate_hypothesis,
)
def _drop_none(mapping: object) -> dict[str, float]:
"""Return ``{k: float(v)}`` for keys whose value is not ``None``.
HuggingFace ``Dataset`` columns are schema-unified across rows, so a
row that lacks a key gets ``None`` for it. We drop those at ingest
so per-row dicts only contain the keys that actually apply to the
row's system.
"""
if not isinstance(mapping, dict):
return {}
return {str(k): float(v) for k, v in mapping.items() if v is not None}
class SystemContext(BaseModel):
    """Per-prompt context the scorer needs to evaluate completions.

    Fields map 1:1 onto dataset columns at training time.
    """

    # Frozen: a context is read-only once hydrated. arbitrary_types_allowed
    # lets pydantic hold raw np.ndarray fields without a validator.
    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    system_id: str
    state_variables: tuple[str, ...]
    parameters: dict[str, float] = Field(default_factory=dict)
    initial_conditions: dict[str, float] = Field(default_factory=dict)
    timestamps: np.ndarray
    observed: dict[str, np.ndarray] = Field(default_factory=dict)
    previous_r_match: float = 0.0

    @classmethod
    def from_row(cls, row: dict[str, object]) -> "SystemContext":
        """Hydrate from a HuggingFace dataset row.

        Two non-obvious transforms happen here:

        1. **Lists -> arrays.** The dataset stores trajectories as plain
           Python lists for JSON serialisability; we lift them back into
           ``np.ndarray`` so the verifier's NumPy code path works.
        2. **Strip ``None`` fillers.** ``Dataset.from_list`` schema-unifies
           rows across all systems: a ``free_fall`` row ends up with
           ``parameters={'g': 9.81, 'mass': 3.4, 'k': None, 'L': None, ...}``
           because *other* systems define those keys. Left as-is, ``None``
           values would (a) inflate the verifier's allowed-symbol set, so
           the model could "validly" reference parameters that don't
           exist for this system, and (b) crash the simulator on
           substitution. We drop them at ingest, restoring per-system
           parameter sets.
        """
        variables = tuple(row.get("state_variables", ()))
        raw_observed = row.get("observed", {})
        trajectories: dict[str, np.ndarray] = {}
        if isinstance(raw_observed, dict):
            # Keep only trajectories for this system's state variables.
            trajectories = {
                str(name): np.asarray(series, dtype=float)
                for name, series in raw_observed.items()
                if name in variables
            }
        return cls(
            system_id=str(row.get("system_id", "")),
            state_variables=variables,
            parameters=_drop_none(row.get("parameters", {})),
            initial_conditions=_drop_none(row.get("initial_conditions", {})),
            timestamps=np.asarray(row.get("timestamps", []), dtype=float),
            observed=trajectories,
            previous_r_match=float(row.get("previous_r_match", 0.0)),
        )
class Scorer:
    """Completion scorer with optional per-batch memoisation.

    Carries no state other than the memoisation cache, which is meant to
    live for a single training step (see :meth:`reset`).
    """

    def __init__(self) -> None:
        # cache_key -> breakdown; populated only when callers pass a key.
        self._cache: dict[int, RewardBreakdown] = {}

    def reset(self) -> None:
        """Drop all memoised results (call once per training step)."""
        self._cache.clear()

    def score(
        self,
        completion: str,
        context: SystemContext,
        *,
        cache_key: int | None = None,
    ) -> RewardBreakdown:
        """Score one completion. Optionally memoise by ``cache_key``."""
        if cache_key is None:
            return self._score_uncached(completion, context)
        breakdown = self._cache.get(cache_key)
        if breakdown is None:
            breakdown = self._score_uncached(completion, context)
            self._cache[cache_key] = breakdown
        return breakdown

    # --------------------------------------------------------------- internals
    def _score_uncached(
        self,
        completion: str,
        context: SystemContext,
    ) -> RewardBreakdown:
        """Run the full parse -> simulate -> match -> reward pipeline once."""
        action = parse_completion(completion)

        # The equation may legally reference both the system's parameters
        # and any the agent supplied itself.
        allowed_params = frozenset(action.params or {}) | frozenset(context.parameters)
        try:
            parsed = parse_equation(
                action.equation,
                state_variables=context.state_variables,
                parameter_names=allowed_params,
            )
        except ParseError:
            # Unparseable equation: nothing to simulate or match.
            return compute_reward(
                parse_succeeded=False,
                r_match=0.0,
                operator_count=0,
                previous_r_match=context.previous_r_match,
            )

        # The agent's params take precedence over the system's; agent is
        # allowed to use system parameter names like ``g`` if it provides
        # values, but if it omits them we fall back to ground-truth values
        # (which is fine — the agent's structural correctness is what we
        # primarily score).
        merged_parameters = dict(context.parameters)
        merged_parameters.update(action.params or {})

        try:
            predicted = simulate_hypothesis(
                parsed,
                state_variables=context.state_variables,
                parameters=merged_parameters,
                initial_conditions=context.initial_conditions,
                timestamps=context.timestamps,
            )
        except SimulationError:
            # Equation parsed but the simulator could not produce a usable
            # trajectory (NaN/inf, stiff blow-up, dimension mismatch, …).
            # We mark ``simulation_succeeded=False`` so ``compute_reward``
            # zeros out *every* component including ``format`` — otherwise
            # the model gets paid for "looks valid but doesn't work".
            return compute_reward(
                parse_succeeded=True,
                simulation_succeeded=False,
                r_match=0.0,
                operator_count=parsed.operator_count,
                previous_r_match=context.previous_r_match,
            )

        match_score = compute_match(
            observed=context.observed,
            predicted=predicted,
            state_variables=context.state_variables,
        )
        return compute_reward(
            parse_succeeded=True,
            simulation_succeeded=True,
            r_match=match_score,
            operator_count=parsed.operator_count,
            previous_r_match=context.previous_r_match,
        )