"""Single-completion scorer used by both training and evaluation.
Responsibility: given the agent's raw completion text plus the system context
(state variables, parameters, initial conditions, observed trajectory), compute the same
4-component :class:`RewardBreakdown` the env produces during a normal
``step()`` call. This is the bridge between TRL's "reward function over a
batch of completions" interface and our env's verifier pipeline.
Caching: a :class:`Scorer` instance memoises by ``(cache_key, completion)``,
where ``cache_key`` is typically the dataset index, so the per-component
reward functions can each ask the scorer for the *same* completion without
re-running parse + simulate four times. Call :meth:`Scorer.reset` once per
training step so entries from the previous batch are discarded.
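
A sketch of that pattern (the wrapper ``match_reward`` and the ``r_match``
attribute are assumptions for illustration; ``Scorer``, ``SystemContext`` and
``score`` are the real API)::

    scorer = Scorer()

    def match_reward(completion, row, index):
        # one of the four per-component TRL reward functions
        breakdown = scorer.score(
            completion, SystemContext.from_row(row), cache_key=index
        )
        return breakdown.r_match  # assumed field name on RewardBreakdown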
"""
from __future__ import annotations
import numpy as np
from pydantic import BaseModel, ConfigDict, Field
from physix.models import RewardBreakdown
from physix.training.prompt import parse_completion
from physix.verifier import (
ParseError,
SimulationError,
compute_match,
compute_reward,
parse_equation,
simulate_hypothesis,
)
def _drop_none(mapping: object) -> dict[str, float]:
"""Return ``{k: float(v)}`` for keys whose value is not ``None``.
HuggingFace ``Dataset`` columns are schema-unified across rows, so a
row that lacks a key gets ``None`` for it. We drop those at ingest
so per-row dicts only contain the keys that actually apply to the
row's system.
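
    A minimal illustrative call::

        >>> _drop_none({"g": 9.81, "k": None})
        {'g': 9.81}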
"""
if not isinstance(mapping, dict):
return {}
return {str(k): float(v) for k, v in mapping.items() if v is not None}
class SystemContext(BaseModel):
"""Per-prompt context the scorer needs to evaluate completions.
These fields correspond 1:1 with dataset columns at training time.
"""
model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)
system_id: str
state_variables: tuple[str, ...]
parameters: dict[str, float] = Field(default_factory=dict)
initial_conditions: dict[str, float] = Field(default_factory=dict)
timestamps: np.ndarray
observed: dict[str, np.ndarray] = Field(default_factory=dict)
previous_r_match: float = 0.0
@classmethod
def from_row(cls, row: dict[str, object]) -> "SystemContext":
"""Hydrate from a HuggingFace dataset row.
Two non-obvious transforms happen here:
1. **Lists -> arrays.** The dataset stores trajectories as plain
Python lists for JSON serialisability; we lift them back into
``np.ndarray`` so the verifier's NumPy code path works.
2. **Strip ``None`` fillers.** ``Dataset.from_list`` schema-unifies
rows across all systems: a ``free_fall`` row ends up with
``parameters={'g': 9.81, 'mass': 3.4, 'k': None, 'L': None, ...}``
because *other* systems define those keys. Left as-is, ``None``
values would (a) inflate the verifier's allowed-symbol set, so
the model could "validly" reference parameters that don't
exist for this system, and (b) crash the simulator on
substitution. We drop them at ingest, restoring per-system
parameter sets.
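
        A minimal illustrative row (field names follow the dataset columns,
        values are hypothetical)::

            row = {
                "system_id": "free_fall",
                "state_variables": ["y"],
                "parameters": {"g": 9.81, "k": None, "L": None},
                "initial_conditions": {"y": 10.0},
                "timestamps": [0.0, 0.1, 0.2],
                "observed": {"y": [10.0, 9.95, 9.80]},
                "previous_r_match": 0.0,
            }
            context = SystemContext.from_row(row)
            # context.parameters == {"g": 9.81}; the ``None`` fillers are gone
            # and timestamps/observed are now float ``np.ndarray`` values.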
"""
state_variables = tuple(row.get("state_variables", ()))
observed: dict[str, np.ndarray] = {}
observed_raw = row.get("observed", {})
if isinstance(observed_raw, dict):
for key, values in observed_raw.items():
if key not in state_variables:
continue
observed[str(key)] = np.asarray(values, dtype=float)
return cls(
system_id=str(row.get("system_id", "")),
state_variables=state_variables,
parameters=_drop_none(row.get("parameters", {})),
initial_conditions=_drop_none(row.get("initial_conditions", {})),
timestamps=np.asarray(row.get("timestamps", []), dtype=float),
observed=observed,
previous_r_match=float(row.get("previous_r_match", 0.0)),
)
class Scorer:
"""Stateless completion scorer with optional per-batch memoisation."""
def __init__(self) -> None:
        self._cache: dict[tuple[int, str], RewardBreakdown] = {}
def reset(self) -> None:
"""Clear the memoisation cache (call once per training step)."""
self._cache.clear()
def score(
self,
completion: str,
context: SystemContext,
*,
cache_key: int | None = None,
) -> RewardBreakdown:
"""Score one completion. Optionally memoise by ``cache_key``."""
if cache_key is not None and cache_key in self._cache:
return self._cache[cache_key]
breakdown = self._score_uncached(completion, context)
if cache_key is not None:
self._cache[cache_key] = breakdown
return breakdown
# --------------------------------------------------------------- internals
def _score_uncached(
self,
completion: str,
context: SystemContext,
) -> RewardBreakdown:
action = parse_completion(completion)
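        # The equation may legitimately reference both the system's parameters
        # and any parameters the agent declared itself, so the allowed-symbol
        # set is the union of the two.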
parameter_names = frozenset(action.params or {}) | frozenset(context.parameters)
try:
parsed = parse_equation(
action.equation,
state_variables=context.state_variables,
parameter_names=parameter_names,
)
except ParseError:
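            # No usable equation could be extracted, so report the parse
            # failure with a zero match and zero operator count.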
return compute_reward(
parse_succeeded=False,
r_match=0.0,
operator_count=0,
previous_r_match=context.previous_r_match,
)
        # The agent's params take precedence over the system's: the agent may
        # reuse system parameter names like ``g`` as long as it supplies its
        # own values, and any it omits fall back to the ground-truth values.
        # That is acceptable because the agent's structural correctness is
        # what we primarily score.
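        # For example (hypothetical values): parameters={"g": 9.81} and
        # action.params={"g": 9.7} merge to {"g": 9.7}.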
merged_parameters = {**context.parameters, **(action.params or {})}
try:
predicted = simulate_hypothesis(
parsed,
state_variables=context.state_variables,
parameters=merged_parameters,
initial_conditions=context.initial_conditions,
timestamps=context.timestamps,
)
except SimulationError:
# Equation parsed but the simulator could not produce a usable
# trajectory (NaN/inf, stiff blow-up, dimension mismatch, …).
# We mark ``simulation_succeeded=False`` so ``compute_reward``
# zeros out *every* component including ``format`` — otherwise
# the model gets paid for "looks valid but doesn't work".
return compute_reward(
parse_succeeded=True,
simulation_succeeded=False,
r_match=0.0,
operator_count=parsed.operator_count,
previous_r_match=context.previous_r_match,
)
r_match = compute_match(
observed=context.observed,
predicted=predicted,
state_variables=context.state_variables,
)
return compute_reward(
parse_succeeded=True,
simulation_succeeded=True,
r_match=r_match,
operator_count=parsed.operator_count,
previous_r_match=context.previous_r_match,
)