# NOTE(review): the three lines below were non-code residue scraped from a
# Hugging Face file-page header; preserved as comments so the module parses.
# Author: Pratyush-01
# Commit: "anti-hack rewards: gate simplicity on match>=.10, format on sim
#          success, +match_dense +correctness_bonus, drop progress"
# Revision: 0eddd67 (verified)
"""Compose the multi-component reward (anti-hack design).
Responsibility:
- Take pre-computed ``r_match`` (from :mod:`physix.verifier.metrics`),
``operator_count`` (from :mod:`physix.verifier.parser`), the previous turn's
match (for env-driven progress; unused in single-turn GRPO), and flags
for whether parsing and simulation succeeded.
- Return a :class:`RewardBreakdown` with all components plus the legacy
weighted total.
Anti-hack invariants (RCA from W&B run 5kuqns9x):
1. **`format` requires simulation success, not just parse success.** A
syntactically valid equation that crashes the simulator is a failure,
not a half-success. Otherwise the model learns to emit nonsense that
is "almost runnable" and farm format reward.
2. **`simplicity` is gated on `r_match >= MATCH_GATE`.** An empty/trivial
equation (e.g. ``dx/dt = 0``) parses, simulates, and earns
``simplicity=1`` for being short — but produces a trajectory wildly
off the truth. We must not pay reward for "elegant nonsense". The
gate forces the model to be at least somewhat physically right
before it is allowed to bank simplicity.
3. **`correctness_bonus` is a binary cliff at high R².** Provides a
strong terminal signal once the model is genuinely close to the
true equation — pushes past the plateau of "decent but not right".
4. **`match_dense = sqrt(r_match)`** gives non-trivial gradient at low
R² values where raw ``r_match`` is near zero and gradient-starved.
The legacy 4-tuple (match/progress/simplicity/format) remains in the
breakdown for backward compatibility with the demo/UI/eval code; the
GRPO trainer subscribes only to the components it cares about via
``make_reward_funcs`` in ``physix.training.reward_fns``.
This module owns no NumPy/SciPy dependencies. It only knows about scalars.
"""
from __future__ import annotations
import math
from physix.models import REWARD_WEIGHTS, RewardBreakdown
#: Operator-count cap used to normalise ``r_simplicity``. An equation with
#: this many operators or more scores 0.0 on simplicity; an equation with one
#: operator scores ~1.0. Consumed by :func:`_simplicity_score`.
SIMPLICITY_OPERATOR_CAP: int = 12
#: Minimum ``r_match`` required to earn any simplicity reward. Below this
#: threshold the equation is judged as "wrong physics" regardless of how
#: short it is, so simplicity collapses to 0. Eliminates the "output
#: ``dx/dt = 0`` for free reward" exploit. Consumed by :func:`compute_reward`.
MATCH_GATE_FOR_SIMPLICITY: float = 0.10
#: ``r_match`` threshold above which the binary correctness_bonus fires.
#: Calibrated against typical R² distributions from the verifier — 0.70
#: requires the agent to capture the dominant dynamics, not just the
#: initial condition or a constant approximation. Consumed by
#: :func:`correctness_bonus`.
CORRECTNESS_BONUS_THRESHOLD: float = 0.70
def compute_reward(
    *,
    parse_succeeded: bool,
    r_match: float,
    operator_count: int,
    previous_r_match: float,
    simulation_succeeded: bool = True,
) -> RewardBreakdown:
    """Compose the reward components for one turn.

    Failure modes (return all-zero except where noted):
    - Parse failure: everything 0. The agent emitted unparseable output.
    - Simulation failure: everything 0. The equation parsed but produced
      a non-runnable system (NaN/inf, integration blowup, etc.).
    - Trivial-but-runnable output (``r_match < MATCH_GATE_FOR_SIMPLICITY``):
      ``format=1`` (we acknowledge it parsed and ran), but ``simplicity=0``.
    """
    runnable = parse_succeeded and simulation_succeeded
    if not runnable:
        # Hard zero. No partial credit for parseable-but-broken or
        # parseable-but-uncomputable output. (Empirically, leaving any
        # partial credit here produces immediate reward hacking.)
        return RewardBreakdown(
            match=0.0,
            progress=0.0,
            simplicity=0.0,
            format=0.0,
            total=0.0,
        )
    # Parsed AND simulated: ``r_match`` is a legitimate signal of
    # physical correctness from here on.
    r_format = 1.0
    # Anti-hack gate: no simplicity payout until the physics is at least
    # minimally right (see MATCH_GATE_FOR_SIMPLICITY).
    gate_open = r_match >= MATCH_GATE_FOR_SIMPLICITY
    r_simplicity = _simplicity_score(operator_count) if gate_open else 0.0
    r_progress = _progress_score(
        r_match=r_match, previous_r_match=previous_r_match
    )
    return RewardBreakdown(
        match=r_match,
        progress=r_progress,
        simplicity=r_simplicity,
        format=r_format,
        total=_weighted_total(
            match=r_match,
            progress=r_progress,
            simplicity=r_simplicity,
            format=r_format,
        ),
    )
def correctness_bonus(r_match: float) -> float:
    """Binary cliff: 1.0 iff ``r_match >= CORRECTNESS_BONUS_THRESHOLD``."""
    # float(bool) yields exactly 1.0 / 0.0 — the binary terminal signal.
    cleared = r_match >= CORRECTNESS_BONUS_THRESHOLD
    return float(cleared)
def match_dense(r_match: float) -> float:
    """Square-root-shaped match reward.

    ``sqrt(R²)`` magnifies small but non-zero matches — the model gets a
    meaningful gradient even when raw ``r_match`` is 0.05 or 0.10
    (near-zero linear value but ``sqrt(0.10) ≈ 0.32``). Saturates at 1.0.
    """
    # Clamp into [0, 1] first (sqrt(0)=0 and sqrt(1)=1, so the clamped
    # endpoints reproduce the floor/ceiling of the original piecewise form).
    clamped = min(1.0, max(0.0, r_match))
    return float(math.sqrt(clamped))
def _simplicity_score(operator_count: int) -> float:
    """Map raw operator count to a [0, 1] score where smaller is better."""
    # Linear ramp: 1.0 at zero operators, 0.0 at (or beyond) the cap;
    # clamping covers both the <=0 and >=cap guard cases of the piecewise form.
    fraction_of_cap = operator_count / SIMPLICITY_OPERATOR_CAP
    return min(1.0, max(0.0, 1.0 - fraction_of_cap))
def _progress_score(*, r_match: float, previous_r_match: float) -> float:
"""Score for improving the physics fit over the previous turn (env only)."""
delta = r_match - previous_r_match
if delta <= 0.0:
return 0.0
if delta >= 1.0:
return 1.0
return float(delta)
def _weighted_total(
    *,
    match: float,
    progress: float,
    simplicity: float,
    format: float,
) -> float:
    # Legacy linear combination; the weights live in
    # ``physix.models.REWARD_WEIGHTS``. Term order matches the original
    # expression so float summation is bit-identical.
    components = (
        ("match", match),
        ("progress", progress),
        ("simplicity", simplicity),
        ("format", format),
    )
    return float(sum(REWARD_WEIGHTS[name] * value for name, value in components))