anti-hack rewards: gate simplicity on match>=.10, format on sim success, +match_dense +correctness_bonus, drop progress

- physix/server/environment.py +2 -0
- physix/training/loop.py +37 -6
- physix/training/reward_fns.py +76 -16
- physix/training/scorer.py +7 -0
- physix/verifier/reward.py +87 -30
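
Before the per-file diffs, a minimal sketch of what the five-function set pays
out for three archetypal rollouts (illustrative only: the r_match values are
hypothetical and the binary simplicity term stands in for the real
operator-count score; the authoritative logic is in physix/verifier/reward.py
below):

    import math

    MATCH_GATE_FOR_SIMPLICITY = 0.10
    CORRECTNESS_BONUS_THRESHOLD = 0.70

    def reward_vector(r_match: float, simulated: bool) -> dict[str, float]:
        # Parsed-but-uncomputable output earns nothing, format included.
        if not simulated:
            return dict.fromkeys(
                ("match", "match_dense", "correctness", "simplicity", "format"), 0.0
            )
        return {
            "match": r_match,                             # raw R² (linear)
            "match_dense": math.sqrt(max(r_match, 0.0)),  # dense low-end gradient
            "correctness": float(r_match >= CORRECTNESS_BONUS_THRESHOLD),
            "simplicity": float(r_match >= MATCH_GATE_FOR_SIMPLICITY),
            "format": 1.0,                                # parsed AND simulated
        }

    print(reward_vector(0.00, simulated=False))  # crashed the simulator: all zeros
    print(reward_vector(0.02, simulated=True))   # trivial dx/dt = 0: simplicity gated off
    print(reward_vector(0.85, simulated=True))   # genuinely close: correctness cliff fires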
physix/server/environment.py
CHANGED

@@ -193,6 +193,7 @@ class PhysiXEnvironment(Environment[PhysiXAction, PhysiXObservation, PhysiXState
             _log.debug("simulate_hypothesis failed: %s", exc)
             breakdown = compute_reward(
                 parse_succeeded=True,
+                simulation_succeeded=False,
                 r_match=0.0,
                 operator_count=parsed.operator_count,
                 previous_r_match=self._state.last_r_match,
@@ -220,6 +221,7 @@ class PhysiXEnvironment(Environment[PhysiXAction, PhysiXObservation, PhysiXState
             breakdown = compute_reward(
                 parse_succeeded=True,
+                simulation_succeeded=True,
                 r_match=r_match,
                 operator_count=parsed.operator_count,
                 previous_r_match=self._state.last_r_match,
physix/training/loop.py
CHANGED

@@ -377,17 +377,48 @@ def _build_and_format_dataset(


 def _select_reward_funcs(ablation: Optional[Ablation]) -> list[object]:
-    """Return the …
+    """Return the GRPO reward function set.
+
+    Default set (5 functions, summed by GRPOTrainer into the advantage):
+
+    - ``reward_match`` — raw R² (linear).
+    - ``reward_match_dense`` — sqrt(R²); dense low-value gradient.
+    - ``reward_correctness`` — binary cliff at R² ≥ 0.70.
+    - ``reward_simplicity`` — gated on R² ≥ 0.10 (anti-hack).
+    - ``reward_format`` — 1.0 only if parsed AND simulated.
+
+    Why this composition: empirically (RCA from W&B run 5kuqns9x) the
+    previous ``{match, progress, simplicity, format}`` mix had a
+    progress-equals-match duplicate (single-turn ``previous_r_match=0``)
+    AND let the model farm format+simplicity by emitting trivial
+    parseable equations. The new set both removes the duplicate and
+    triple-weights correctness via three different correctness-shaped
+    signals (match, match_dense, correctness_bonus) so that physical
+    accuracy dominates the GRPO advantage.
+
+    Ablations strip one signal at a time (used by the experiment matrix,
+    not by the main runs).
+    """
     scorer = Scorer()
     funcs = make_reward_funcs(scorer)
+    full = [
+        funcs["match"],
+        funcs["match_dense"],
+        funcs["correctness"],
+        funcs["simplicity"],
+        funcs["format"],
+    ]
     if ablation is None:
-        return …
-    if ablation == "no_progress":
-        return [funcs["match"], funcs["simplicity"], funcs["format"]]
+        return full
     if ablation == "no_simplicity":
-        return [funcs["match"], funcs["progress"], funcs["format"]]
+        return [funcs["match"], funcs["match_dense"], funcs["correctness"], funcs["format"]]
     if ablation == "no_format":
-        return [funcs["match"], funcs["progress"], funcs["simplicity"]]
+        return [funcs["match"], funcs["match_dense"], funcs["correctness"], funcs["simplicity"]]
+    if ablation == "no_progress":
+        # Backward-compat alias: ``progress`` no longer exists, the new
+        # reward set already excludes it. Treat ``no_progress`` as the
+        # full default set so old job configs still work without surprise.
+        return full
     raise ValueError(
         f"Unknown ablation {ablation!r}. Choose from "
         "no_progress | no_simplicity | no_format | None."
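
How the selected list is consumed (a sketch, not code from this commit: the
model id, config, and dataset variable are placeholders; TRL's GRPOTrainer
calls each reward function per batch, sums the returned rewards into the
advantage, and logs each under its __name__):

    from trl import GRPOConfig, GRPOTrainer

    reward_funcs = _select_reward_funcs(ablation=None)  # the 5-function default

    trainer = GRPOTrainer(
        model="Qwen/Qwen2.5-1.5B-Instruct",  # placeholder model id
        reward_funcs=reward_funcs,           # each returns list[float] per batch
        args=GRPOConfig(output_dir="out"),   # minimal config; real runs set more
        train_dataset=train_dataset,         # from _build_and_format_dataset above
    )
    trainer.train()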
physix/training/reward_fns.py
CHANGED

@@ -1,9 +1,9 @@
 """TRL-compatible reward functions for GRPO training.
 
 Responsibility: expose a stateless reward function for each independent
-reward …
-…
-…
+reward signal. Internally each component delegates to a shared
+:class:`Scorer` so a single completion is parsed and simulated exactly
+once per training step regardless of how many reward functions query it.
 
 The TRL signature for a reward function is::
@@ -12,6 +12,22 @@ The TRL signature for a reward function is::
 where ``prompts`` and ``completions`` are batched lists. Extra columns from
 the training dataset arrive as keyword arguments — we expect the columns
 listed in :class:`SystemContext` to be present.
+
+Reward set design (anti-hack, RCA from W&B run 5kuqns9x):
+
+- ``reward_match`` — raw R² on the trajectory (linear).
+- ``reward_match_dense`` — sqrt(R²); denser gradient at low values.
+- ``reward_correctness`` — binary cliff at R² ≥ 0.70; pushes past plateau.
+- ``reward_simplicity`` — gated on R² ≥ 0.10 (no free reward for trivial
+  equations).
+- ``reward_format`` — 1.0 only if the equation parsed *and* simulated.
+  No partial credit for parseable but uncomputable garbage.
+
+The legacy ``reward_progress`` is intentionally absent. In single-turn
+GRPO every dataset row carries ``previous_r_match=0``, which made
+``progress = max(0, match - 0) = match`` for every rollout — a perfect
+duplicate of ``reward_match`` that diluted advantage estimation.
 """
 
 from __future__ import annotations
@@ -20,16 +36,15 @@ from collections.abc import Callable, Sequence
 from typing import Any
 
 from physix.training.scorer import Scorer, SystemContext
+from physix.verifier.reward import correctness_bonus, match_dense
 
 
 RewardFunction = Callable[..., list[float]]
 
 
-# …
-# …
-# …
-# pre-computed total would double-count every component.
-_COMPONENT_NAMES: tuple[str, ...] = ("match", "progress", "simplicity", "format")
+#: Components that read directly from the :class:`RewardBreakdown` produced
+#: by :class:`Scorer.score`. ``progress`` is omitted (see module docstring).
+_BREAKDOWN_COMPONENTS: tuple[str, ...] = ("match", "simplicity", "format")
 
 
 def make_reward_funcs(
@@ -41,17 +56,25 @@ def make_reward_funcs(
     logs them individually to W&B under
     ``train/rewards/reward_<component>/mean``.
 
-    The scorer is shared across all …
-    …
-    …
-    …
+    The scorer is shared across all functions; calling ``scorer.reset()``
+    between steps avoids unbounded cache growth and ensures each
+    completion is parsed + simulated exactly once per step regardless of
+    how many reward functions query it.
+
+    Returns a dict whose keys are:
+
+    - ``match`` / ``simplicity`` / ``format`` — direct reads from the
+      :class:`RewardBreakdown`. ``simplicity`` is internally gated on
+      match ≥ 0.10 and ``format`` on simulation success.
+    - ``match_dense`` — ``sqrt(match)`` for denser low-value gradient.
+    - ``correctness`` — binary 1.0 above an R² threshold (``0.70``).
 
-    …
-    …
+    All functions share the scorer cache, so they cost one parse +
+    simulate per completion combined, not five.
     """
     shared = scorer if scorer is not None else Scorer()
 
-    def …
+    def _make_breakdown_reader(component: str) -> RewardFunction:
         def _reward_fn(
             prompts: Sequence[Any],
             completions: Sequence[str],
@@ -73,7 +96,44 @@ def make_reward_funcs(
         _reward_fn.__name__ = f"reward_{component}"
         return _reward_fn
 
-    …
+    def _reward_match_dense(
+        prompts: Sequence[Any],
+        completions: Sequence[str],
+        **kwargs: Any,
+    ) -> list[float]:
+        del prompts
+        shared.reset()
+        contexts = _hydrate_contexts(len(completions), kwargs)
+        out: list[float] = []
+        for i, completion in enumerate(completions):
+            b = shared.score(completion=completion, context=contexts[i], cache_key=i)
+            out.append(match_dense(b.match))
+        return out
+
+    _reward_match_dense.__name__ = "reward_match_dense"
+
+    def _reward_correctness(
+        prompts: Sequence[Any],
+        completions: Sequence[str],
+        **kwargs: Any,
+    ) -> list[float]:
+        del prompts
+        shared.reset()
+        contexts = _hydrate_contexts(len(completions), kwargs)
+        out: list[float] = []
+        for i, completion in enumerate(completions):
+            b = shared.score(completion=completion, context=contexts[i], cache_key=i)
+            out.append(correctness_bonus(b.match))
+        return out
+
+    _reward_correctness.__name__ = "reward_correctness"
+
+    funcs: dict[str, RewardFunction] = {
+        name: _make_breakdown_reader(name) for name in _BREAKDOWN_COMPONENTS
+    }
+    funcs["match_dense"] = _reward_match_dense
+    funcs["correctness"] = _reward_correctness
+    return funcs
 
 
 def _hydrate_contexts(batch_size: int, kwargs: dict[str, Any]) -> list[SystemContext]:
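
A usage sketch for the factory output (the extra keyword columns are
hypothetical here; the real set is whatever :class:`SystemContext` declares,
which this diff does not show):

    from physix.training.reward_fns import make_reward_funcs
    from physix.training.scorer import Scorer

    funcs = make_reward_funcs(Scorer())
    assert set(funcs) == {"match", "match_dense", "correctness", "simplicity", "format"}

    # TRL calls every reward function with the same batched arguments.
    scores = funcs["match_dense"](
        prompts=["<system prompt>"],
        completions=["dx/dt = -k * x"],
        **dataset_columns,  # placeholder: one list per SystemContext column
    )
    # -> list[float]: one sqrt(R²) per completion. The shared Scorer cache
    #    means each completion is parsed + simulated once even when all
    #    five functions run on the same batch.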
physix/training/scorer.py
CHANGED

@@ -162,8 +162,14 @@ class Scorer:
                 timestamps=context.timestamps,
             )
         except SimulationError:
+            # Equation parsed but the simulator could not produce a usable
+            # trajectory (NaN/inf, stiff blow-up, dimension mismatch, …).
+            # We mark ``simulation_succeeded=False`` so ``compute_reward``
+            # zeros out *every* component including ``format`` — otherwise
+            # the model gets paid for "looks valid but doesn't work".
             return compute_reward(
                 parse_succeeded=True,
+                simulation_succeeded=False,
                 r_match=0.0,
                 operator_count=parsed.operator_count,
                 previous_r_match=context.previous_r_match,
@@ -176,6 +182,7 @@ class Scorer:
         )
         return compute_reward(
             parse_succeeded=True,
+            simulation_succeeded=True,
             r_match=r_match,
             operator_count=parsed.operator_count,
             previous_r_match=context.previous_r_match,
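
The new flag is easy to exercise in isolation (a sketch; the operator count
and previous-match values are arbitrary):

    from physix.verifier.reward import compute_reward

    b = compute_reward(
        parse_succeeded=True,
        simulation_succeeded=False,  # the SimulationError path above
        r_match=0.0,
        operator_count=3,
        previous_r_match=0.0,
    )
    assert b.format == 0.0 and b.total == 0.0  # no pay for "parsed but crashed"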
physix/verifier/reward.py
CHANGED

@@ -1,20 +1,44 @@
-"""Compose the …
+"""Compose the multi-component reward (anti-hack design).
 
 Responsibility:
 
 - Take pre-computed ``r_match`` (from :mod:`physix.verifier.metrics`),
   ``operator_count`` (from :mod:`physix.verifier.parser`), the previous turn's
-  …
-  …
-- Return a :class:`RewardBreakdown` with all …
-  total.
-…
-…
-…
+  match (for env-driven progress; unused in single-turn GRPO), and flags
+  for whether parsing and simulation succeeded.
+- Return a :class:`RewardBreakdown` with all components plus the legacy
+  weighted total.
+
+Anti-hack invariants (RCA from W&B run 5kuqns9x):
+
+1. **``format`` requires simulation success, not just parse success.** A
+   syntactically valid equation that crashes the simulator is a failure,
+   not a half-success. Otherwise the model learns to emit nonsense that
+   is "almost runnable" and farm format reward.
+2. **``simplicity`` is gated on ``r_match >= MATCH_GATE_FOR_SIMPLICITY``.**
+   An empty/trivial equation (e.g. ``dx/dt = 0``) parses, simulates, and
+   earns ``simplicity=1`` for being short — but produces a trajectory
+   wildly off the truth. We must not pay reward for "elegant nonsense".
+   The gate forces the model to be at least somewhat physically right
+   before it is allowed to bank simplicity.
+3. **``correctness_bonus`` is a binary cliff at high R².** Provides a
+   strong terminal signal once the model is genuinely close to the
+   true equation — pushes past the plateau of "decent but not right".
+4. **``match_dense = sqrt(r_match)``** gives non-trivial gradient at low
+   R² values where raw ``r_match`` is near zero and gradient-starved.
+
+The legacy 4-tuple (match/progress/simplicity/format) remains in the
+breakdown for backward compatibility with the demo/UI/eval code; the
+GRPO trainer subscribes only to the components it cares about via
+``make_reward_funcs`` in ``physix.training.reward_fns``.
+
+This module owns no NumPy/SciPy dependencies. It only knows about scalars.
 """
 
 from __future__ import annotations
 
+import math
+
 from physix.models import REWARD_WEIGHTS, RewardBreakdown
@@ -23,6 +47,18 @@ from physix.models import REWARD_WEIGHTS, RewardBreakdown
 #: operator scores ~1.0.
 SIMPLICITY_OPERATOR_CAP: int = 12
 
+#: Minimum ``r_match`` required to earn any simplicity reward. Below this
+#: threshold the equation is judged as "wrong physics" regardless of how
+#: short it is, so simplicity collapses to 0. Eliminates the "output
+#: ``dx/dt = 0`` for free reward" exploit.
+MATCH_GATE_FOR_SIMPLICITY: float = 0.10
+
+#: ``r_match`` threshold above which the binary correctness_bonus fires.
+#: Calibrated against typical R² distributions from the verifier — 0.70
+#: requires the agent to capture the dominant dynamics, not just the
+#: initial condition or a constant approximation.
+CORRECTNESS_BONUS_THRESHOLD: float = 0.70
+
 
 def compute_reward(
     *,
@@ -30,25 +66,38 @@ def compute_reward(
     r_match: float,
     operator_count: int,
     previous_r_match: float,
+    simulation_succeeded: bool = True,
 ) -> RewardBreakdown:
-    """Compose the …
+    """Compose the reward components for one turn.
 
-    …
-    we still surface a non-trivial signal so the agent can learn that
-    parse-failures are bad without all rewards collapsing to NaN.
-    """
-    r_format = 1.0 if parse_succeeded else 0.0
+    Failure modes (return all-zero except where noted):
+
+    - Parse failure: everything 0. The agent emitted unparseable output.
+    - Simulation failure: everything 0. The equation parsed but produced
+      a non-runnable system (NaN/inf, integration blowup, etc.).
+    - Trivial-but-runnable output (``r_match < MATCH_GATE_FOR_SIMPLICITY``):
+      ``format=1`` (we acknowledge it parsed and ran), but ``simplicity=0``.
+    """
 
-    …
+    if not parse_succeeded or not simulation_succeeded:
+        # Hard zero. No partial credit for parseable-but-broken or
+        # parseable-but-uncomputable output. (Empirically, leaving any
+        # partial credit here produces immediate reward hacking.)
         return RewardBreakdown(
             match=0.0,
             progress=0.0,
             simplicity=0.0,
-            format=…
-            total=…
+            format=0.0,
+            total=0.0,
         )
 
-    …
+    # Past this point: the equation parsed AND simulated. ``r_match`` is
+    # a legitimate signal of physical correctness.
+    r_format = 1.0
+    r_simplicity = (
+        _simplicity_score(operator_count)
+        if r_match >= MATCH_GATE_FOR_SIMPLICITY
+        else 0.0
+    )
     r_progress = _progress_score(r_match=r_match, previous_r_match=previous_r_match)
 
     total = _weighted_total(
@@ -67,6 +116,25 @@ def compute_reward(
     )
 
 
+def correctness_bonus(r_match: float) -> float:
+    """Binary cliff: 1.0 iff ``r_match >= CORRECTNESS_BONUS_THRESHOLD``."""
+    return 1.0 if r_match >= CORRECTNESS_BONUS_THRESHOLD else 0.0
+
+
+def match_dense(r_match: float) -> float:
+    """Square-root-shaped match reward.
+
+    ``sqrt(R²)`` magnifies small but non-zero matches — the model gets a
+    meaningful gradient even when raw ``r_match`` is 0.05 or 0.10
+    (near-zero linear value but ``sqrt(0.10) ≈ 0.32``). Saturates at 1.0.
+    """
+    if r_match <= 0.0:
+        return 0.0
+    if r_match >= 1.0:
+        return 1.0
+    return float(math.sqrt(r_match))
+
+
 def _simplicity_score(operator_count: int) -> float:
     """Map raw operator count to a [0, 1] score where smaller is better."""
     if operator_count <= 0:
@@ -77,18 +145,7 @@ def _simplicity_score(operator_count: int) -> float:
 
 
 def _progress_score(*, r_match: float, previous_r_match: float) -> float:
-    """Score for improving the physics fit over the previous turn.
-
-    Compares ``r_match`` (R² of the current hypothesis) against
-    ``previous_r_match`` (R² of the last hypothesis that actually parsed).
-
-    Critically, we compare match-against-match, not match-against-total.
-    The old code used ``previous_total``, which includes simplicity and format
-    bonuses — a model that parsed cleanly on turn 1 (earning total=0.30 from
-    format+simplicity alone, match=0) would never see ``r_progress > 0`` on
-    turn 2 even if its match improved from 0 → 0.25, because 0.25 < 0.30.
-    That incorrectly killed the improvement signal.
-    """
+    """Score for improving the physics fit over the previous turn (env only)."""
     delta = r_match - previous_r_match
     if delta <= 0.0:
         return 0.0