# Provenance: initial physix-live source for HF Jobs training
# (uploaded by Pratyush-01, commit b99b9ee, verified). Hub page header
# converted to a comment so the module stays importable.
"""Prompt rendering and completion parsing for PhysiX-Live.
Responsibility:
- :func:`render_observation_for_prompt`: serialise a :class:`PhysiXObservation`
into a compact, token-efficient string the agent can read.
- :func:`build_prompt`: combine the system message, grammar hint, and the
current observation into a single chat-formatted prompt.
- :func:`parse_completion`: parse a raw model completion (which may contain a
JSON object inside arbitrary text) into a :class:`PhysiXAction`.
This module imports nothing from :mod:`torch`, :mod:`unsloth`, or :mod:`trl`
so it can be tested on any machine.
"""
from __future__ import annotations
import json
import re
from typing import Any
from physix.models import (
DEFAULT_MAX_TURNS,
PhysiXAction,
PhysiXObservation,
)
from physix.verifier.parser import GRAMMAR_HINT
# System prompt: task framing, the equation grammar, and a strict output-key
# contract ("equation" / "params" / "rationale") with a worked example. The
# key names here must match the aliases the parser accepts below.
SYSTEM_MESSAGE: str = (
    "You are an expert physicist. Your task is to discover the equation of "
    "motion that produced an observed trajectory. Each turn you propose a "
    "candidate equation; the environment simulates it and tells you how well "
    "the prediction matches observation. Refine your guess across turns based "
    "on the residual feedback. Keep equations as simple as possible.\n\n"
    + GRAMMAR_HINT
    + "\n\n"
    "Output a single JSON object with exactly these keys: "
    '"equation" (string, required), "params" (object of name->number, '
    'optional), "rationale" (short string, optional). Do not rename the '
    'keys: always emit "equation", never "eqn"/"ode"/"formula"/"expr". '
    'Example: {"equation": "d2y/dt2 = -9.81", "params": {}, '
    '"rationale": "free fall"}'
)

# Maximum number of trajectory samples shipped to the agent. We downsample
# from 100 to 12 to keep prompt size bounded; statistics carry the rest.
_TRAJECTORY_DOWNSAMPLE_COUNT: int = 12

# Maximum number of prior history entries surfaced. With 8 turns max budget,
# 7 prior turns is the upper bound; we cap at 5 to stay token-efficient.
_HISTORY_CAP: int = 5
def render_observation_for_prompt(obs: PhysiXObservation) -> str:
    """Serialise ``obs`` into the compact text block the agent reads.

    Emits, in order: a metadata section (SYSTEM_ID / STATE_VARIABLES /
    HINT / STATS), the downsampled TRAJECTORY table, a HISTORY block when
    prior turns exist, and the TURN footer. Sections are joined with
    blank lines.

    HISTORY deliberately spells the field ``equation=`` (never a
    shorthand like ``eqn=``): chat-tuned models mimic the field name
    they see in-context when emitting their next JSON reply, so the
    in-prompt name must match the key the completion parser reads —
    drift here silently produces ``{"eqn": ...}`` outputs and scores
    every post-first turn ``r_format=0`` even when the equation is
    perfect.
    """
    parts = [_render_metadata_block(obs), _render_trajectory_block(obs)]
    if obs.history:
        parts.append(_render_history_block(obs))
    parts.append(_render_turn_footer(obs))
    return "\n\n".join(parts)
def build_prompt(obs: PhysiXObservation) -> list[dict[str, str]]:
    """Assemble the chat-format prompt (system + user) for the model.

    Returns the standard ``[{"role": "system", "content": ...},
    {"role": "user", "content": ...}]`` list shape expected by Hugging
    Face chat-template tokenisers.
    """
    user_message = {"role": "user", "content": render_observation_for_prompt(obs)}
    return [{"role": "system", "content": SYSTEM_MESSAGE}, user_message]
#: Field names we accept for the equation payload, in priority order. The
#: canonical key is ``equation`` and the system prompt asks for it
#: explicitly, but mid-strength chat models routinely substitute one of
#: these synonyms — especially after the first turn, where the model has
#: latched onto a different naming convention from its own pretraining
#: corpus. Treating these as missing produced silent ``r_format=0`` runs
#: even when the underlying equation was perfect; matching them
#: explicitly closes that hole without weakening the verifier (the
#: equation grammar itself remains strict).
#: NOTE: lookups run against keys lowercased by ``_lowercase_keys``, so
#: every alias in these tuples must be spelled in lowercase.
_EQUATION_KEYS: tuple[str, ...] = (
    "equation",
    "eqn",
    "ode",
    "formula",
    "expression",
    "expr",
)

#: Same idea for the optional rationale payload. We never gate on this so
#: the cost of being permissive is zero.
_RATIONALE_KEYS: tuple[str, ...] = (
    "rationale",
    "reasoning",
    "explanation",
    "thought",
    "thoughts",
)

#: And for the params dict. Some models emit ``parameters`` instead.
_PARAMS_KEYS: tuple[str, ...] = ("params", "parameters", "constants")
def parse_completion(completion: str) -> PhysiXAction:
    """Turn a raw model completion into a :class:`PhysiXAction`.

    Extracts the first JSON object from the completion (markdown fences
    and surrounding scratchpad text are tolerated) and copies its fields
    verbatim into the action. The equation string is never rewritten or
    normalised here: the grammar lives in :mod:`physix.verifier.parser`,
    and any deviation must surface there as a parse error so the env can
    score ``r_format=0`` and feed the failure back to the agent on the
    next turn. Rewriting upstream would silently change the agent's
    output and obscure that signal.

    Field-name aliases (``eqn``/``ode``/``formula``/...) are accepted in
    addition to the canonical ``equation``: refusing them produced runs
    where every post-first turn scored ``r_format=0`` because the model
    latched onto a shorthand spelling. Accepting the synonyms is cheap
    defense-in-depth against future drift.

    When no JSON object can be found at all (free-form prose, or JSON
    even :meth:`raw_decode` rejects), the returned action carries an
    **empty** equation — so the verifier reports a clean
    ``Empty equation payload`` error — and the raw text, truncated, is
    preserved in ``rationale`` for UI / training logs. The raw text is
    never fed to the equation parser as if it were an equation.
    """
    payload = _extract_json_payload(completion)
    if payload is None:
        # No JSON at all: empty equation -> clean parse error downstream;
        # keep (capped) raw text so logs still show what was emitted.
        return PhysiXAction(equation="", rationale=completion.strip()[:500])
    fields = _lowercase_keys(payload)
    raw_params = _first_value(fields, _PARAMS_KEYS) or {}
    return PhysiXAction(
        equation=_first_string_value(fields, _EQUATION_KEYS),
        params=_coerce_params(raw_params),
        rationale=_first_string_value(fields, _RATIONALE_KEYS),
    )
def _lowercase_keys(payload: dict[str, Any]) -> dict[str, Any]:
"""Return ``payload`` with top-level keys lowercased.
Some models emit ``"Equation"`` / ``"EQN"``; lowercasing once means
the lookup tables above stay declarative.
"""
return {str(k).lower(): v for k, v in payload.items()}
def _first_value(payload: dict[str, Any], keys: tuple[str, ...]) -> Any:
for key in keys:
if key in payload:
return payload[key]
return None
def _first_string_value(payload: dict[str, Any], keys: tuple[str, ...]) -> str:
    """Like :func:`_first_value` but stringified and stripped; '' when absent."""
    found = _first_value(payload, keys)
    return "" if found is None else str(found).strip()
def _render_metadata_block(obs: PhysiXObservation) -> str:
state_vars = ", ".join(obs.state_variables) or "(none)"
stats_text = " ".join(f"{k}={v:.3g}" for k, v in obs.stats.items())
return (
f"SYSTEM_ID: {obs.system_id or 'unknown'}\n"
f"STATE_VARIABLES: {state_vars}\n"
f"HINT: {obs.hint}\n"
f"STATS: {stats_text}"
)
def _render_trajectory_block(obs: PhysiXObservation) -> str:
    """Render the downsampled trajectory, one ``t=... var=...`` row per sample.

    Only state variables actually present in a sample dict are emitted,
    so partially-populated samples render without KeyErrors.
    """
    points = _downsample(obs.trajectory, _TRAJECTORY_DOWNSAMPLE_COUNT)
    rows = [f"TRAJECTORY ({len(points)} samples downsampled from {len(obs.trajectory)}):"]
    for point in points:
        fields = [f"t={point['t']:.3f}"]
        fields.extend(
            f"{name}={point[name]:.3f}"
            for name in obs.state_variables
            if name in point
        )
        rows.append(" " + " ".join(fields))
    return "\n".join(rows)
#: Order in which reward components are surfaced to the model. Match
#: matters most (it's the headline accuracy signal); format is last
#: because once it stabilises the others dominate the gradient. Stable
#: order also matters for the model's in-context retrieval: a fixed
#: column position is a reliable cue across turns.
#: Consumed by ``_format_reward_components``, which always emits all
#: four names (defaulting to 0 when a component is absent).
_REWARD_COMPONENT_ORDER: tuple[str, ...] = (
    "match",
    "progress",
    "simplicity",
    "format",
)
def _render_history_block(obs: PhysiXObservation) -> str:
    """Render the most recent ``_HISTORY_CAP`` turns as the HISTORY block.

    The field name is ``equation=`` rather than ``eqn=`` on purpose:
    chat-tuned models mimic the most-recent token spelling when emitting
    their own JSON, so this must use the same key the parser expects in
    the model's reply.

    Each turn shows the scalar ``reward=`` total plus the dense
    component breakdown (``match``/``progress``/``simplicity``/
    ``format``) — the same values the GRPO trainer optimises — so the
    model can attribute its gains and losses turn-over-turn: e.g. see
    ``format=0.0`` after a parse error and prioritise grammar fixes, or
    a stuck ``progress=0.0`` plateau and try a structurally different
    equation instead of tweaking coefficients.
    """
    rendered = ["HISTORY:"]
    for entry in obs.history[-_HISTORY_CAP:]:
        total = float(entry.get("reward_total", 0.0))
        breakdown = _format_reward_components(entry.get("reward_components"))
        equation_text = entry.get("equation", "")
        rendered.append(
            f" turn={entry.get('turn')} reward={total:.3f} "
            f"[{breakdown}] equation=`{equation_text}`"
        )
        mismatch = entry.get("mismatch_summary", "")
        if mismatch:
            rendered.append(f" mismatch: {mismatch}")
    return "\n".join(rendered)
def _format_reward_components(components: Any) -> str:
    """Render ``{match, progress, simplicity, format}`` as one compact line.

    All four fields are always emitted in :data:`_REWARD_COMPONENT_ORDER`,
    defaulting to ``0.00`` when absent or non-numeric, so the model never
    has to guess why a column is missing. A non-dict payload renders as
    all zeros.
    """
    source = components if isinstance(components, dict) else {}
    columns: list[str] = []
    for name in _REWARD_COMPONENT_ORDER:
        raw = source.get(name, 0.0)
        try:
            numeric = float(raw)
        except (TypeError, ValueError):
            numeric = 0.0
        columns.append(f"{name}={numeric:.2f}")
    return " ".join(columns)
def _render_turn_footer(obs: PhysiXObservation) -> str:
total = obs.turn + obs.turn_remaining or DEFAULT_MAX_TURNS
return (
f"TURN: {obs.turn + 1} / {total} ({obs.turn_remaining} remaining)\n"
"Emit the next hypothesis as JSON."
)
def _downsample(samples: list[dict[str, float]], target: int) -> list[dict[str, float]]:
if len(samples) <= target:
return samples
step = max(1, len(samples) // target)
indices = list(range(0, len(samples), step))[:target]
if indices[-1] != len(samples) - 1:
indices[-1] = len(samples) - 1
return [samples[i] for i in indices]
# Shared decoder instance; its raw_decode() can parse a JSON value that
# starts partway through a larger string (used by _extract_json_payload).
_JSON_DECODER = json.JSONDecoder()
def _extract_json_payload(text: str) -> dict[str, Any] | None:
"""Find the first ``{...}`` block in ``text`` that parses as a JSON object.
Uses :meth:`json.JSONDecoder.raw_decode` so that braces appearing inside
JSON *string* values (e.g. LaTeX like ``"\\frac{d vy}{dt}"``) do not
confuse the scanner — a regex-based brace matcher would mis-balance
here and return the whole completion as a malformed equation.
"""
candidate = _strip_code_fences(text)
for i, ch in enumerate(candidate):
if ch != "{":
continue
try:
payload, _ = _JSON_DECODER.raw_decode(candidate[i:])
except json.JSONDecodeError:
continue
if isinstance(payload, dict):
return payload
return None
def _strip_code_fences(text: str) -> str:
"""Remove Markdown code-fence wrappers (```json``` / ```python``` / ```).
This is *not* equation rewriting — it strips the outer fence syntax
only, so the JSON-aware extractor below can find the object payload.
"""
text = re.sub(r"```(?:json|python)?\s*", "", text)
text = text.replace("```", "")
return text
def _coerce_params(params_raw: Any) -> dict[str, float]:
"""Best-effort coercion of a raw params payload into ``dict[str, float]``."""
if not isinstance(params_raw, dict):
return {}
out: dict[str, float] = {}
for key, value in params_raw.items():
try:
out[str(key)] = float(value)
except (TypeError, ValueError):
continue
return out