"""Prompt rendering and completion parsing for PhysiX-Live.
Responsibility:
- :func:`render_observation_for_prompt`: serialise a :class:`PhysiXObservation`
into a compact, token-efficient string the agent can read.
- :func:`build_prompt`: combine the system message, grammar hint, and the
current observation into a single chat-formatted prompt.
- :func:`parse_completion`: parse a raw model completion (which may contain a
JSON object inside arbitrary text) into a :class:`PhysiXAction`.
This module imports nothing from :mod:`torch`, :mod:`unsloth`, or :mod:`trl`
so it can be tested on any machine.
"""
from __future__ import annotations
import json
import re
from typing import Any
from physix.models import (
DEFAULT_MAX_TURNS,
PhysiXAction,
PhysiXObservation,
)
from physix.verifier.parser import GRAMMAR_HINT
#: Static system prompt sent as the first chat turn of every episode.
#: Concatenates the task framing, the equation grammar
#: (:data:`GRAMMAR_HINT`), and the JSON output contract. The key names it
#: demands ("equation"/"params"/"rationale") are the canonical first
#: entries in the alias tables read by :func:`parse_completion`; keep the
#: two in sync when editing either side.
SYSTEM_MESSAGE: str = (
    "You are an expert physicist. Your task is to discover the equation of "
    "motion that produced an observed trajectory. Each turn you propose a "
    "candidate equation; the environment simulates it and tells you how well "
    "the prediction matches observation. Refine your guess across turns based "
    "on the residual feedback. Keep equations as simple as possible.\n\n"
    + GRAMMAR_HINT
    + "\n\n"
    "Output a single JSON object with exactly these keys: "
    '"equation" (string, required), "params" (object of name->number, '
    'optional), "rationale" (short string, optional). Do not rename the '
    'keys: always emit "equation", never "eqn"/"ode"/"formula"/"expr". '
    'Example: {"equation": "d2y/dt2 = -9.81", "params": {}, '
    '"rationale": "free fall"}'
)
#: Maximum number of trajectory samples shipped to the agent. We downsample
#: from 100 to 12 to keep prompt size bounded; the STATS line carries the
#: aggregate information the dropped samples would have conveyed.
_TRAJECTORY_DOWNSAMPLE_COUNT: int = 12
#: Maximum number of prior history entries surfaced. With 8 turns max budget,
#: 7 prior turns is the upper bound; we cap at 5 to stay token-efficient.
_HISTORY_CAP: int = 5
def render_observation_for_prompt(obs: PhysiXObservation) -> str:
    """Serialise *obs* into the compact text block the agent reads.

    The output is a sequence of sections joined by blank lines: the
    metadata header (``SYSTEM_ID`` / ``STATE_VARIABLES`` / ``HINT`` /
    ``STATS``), the downsampled ``TRAJECTORY`` table, the ``HISTORY`` of
    prior turns (present only once at least one turn has been played),
    and the ``TURN`` footer, e.g.::

        SYSTEM_ID: free_fall_drag
        STATE_VARIABLES: y, vy
        HINT: <one-sentence physical context>
        STATS: y_min=-2.13 y_max=78.93 ... duration=6.00

        TRAJECTORY (12 samples downsampled from 100):
         t=0.000 y=78.93 vy=0.00
         ...

        HISTORY:
         turn=1 reward=0.42 [match=0.42 ...] equation=`d2y/dt2 = -9.81`
         mismatch: predicted y diverges past t=2.0s ...

        TURN: 3 / 8 (5 turns remaining)

    NOTE: HISTORY spells the field as ``equation=`` (never a shorthand
    like ``eqn=``) on purpose. Mid-strength chat models mimic the field
    name they last saw when emitting their next JSON reply, so the
    in-prompt name must match the key the parser reads; drift here
    silently produces ``{"eqn": ...}`` outputs that score every
    post-first turn ``r_format=0`` even when the equation is perfect.
    """
    parts = [_render_metadata_block(obs), _render_trajectory_block(obs)]
    if obs.history:
        parts.append(_render_history_block(obs))
    parts.append(_render_turn_footer(obs))
    return "\n\n".join(parts)
def build_prompt(obs: PhysiXObservation) -> list[dict[str, str]]:
    """Assemble the two-message chat prompt (system + user) for the model.

    Returns the standard ``[{"role": "system", ...}, {"role": "user",
    ...}]`` shape that Hugging Face chat-template tokenisers expect.
    """
    system_turn = {"role": "system", "content": SYSTEM_MESSAGE}
    user_turn = {"role": "user", "content": render_observation_for_prompt(obs)}
    return [system_turn, user_turn]
#: Field names we accept for the equation payload, in priority order. The
#: canonical key is ``equation`` and the system prompt asks for it
#: explicitly, but mid-strength chat models routinely substitute one of
#: these synonyms — especially after the first turn, where the model has
#: latched onto a different naming convention from its own pretraining
#: corpus. Treating these as missing produced silent ``r_format=0`` runs
#: even when the underlying equation was perfect; matching them
#: explicitly closes that hole without weakening the verifier (the
#: equation grammar itself remains strict). Lookup happens after
#: :func:`_lowercase_keys`, so every entry here must be lowercase.
_EQUATION_KEYS: tuple[str, ...] = (
    "equation",
    "eqn",
    "ode",
    "formula",
    "expression",
    "expr",
)
#: Same idea for the optional rationale payload. We never gate on this so
#: the cost of being permissive is zero.
_RATIONALE_KEYS: tuple[str, ...] = (
    "rationale",
    "reasoning",
    "explanation",
    "thought",
    "thoughts",
)
#: And for the params dict. Some models emit ``parameters`` instead.
_PARAMS_KEYS: tuple[str, ...] = ("params", "parameters", "constants")
def parse_completion(completion: str) -> PhysiXAction:
    """Turn a raw model completion into a :class:`PhysiXAction`.

    The job is deliberately small: locate the first JSON object in the
    completion (markdown fences and scratchpad prose around it are
    tolerated) and copy its fields into the action verbatim.

    * The ``equation`` string is never rewritten or normalised here.
      The grammar lives in :mod:`physix.verifier.parser`, and any
      deviation must surface there as a parse error so the env can
      score ``r_format=0`` and feed the failure back to the agent;
      rewriting upstream would silently mask that signal.
    * Key aliases (``eqn``/``ode``/``formula``/... — see the lookup
      tables above) are accepted in addition to the canonical
      ``equation``. Refusing them produced a confusing failure mode
      where every turn after the first scored ``r_format=0`` because
      the model latched onto a shorthand; the prompt is fixed too, but
      accepting synonyms is cheap defense-in-depth.
    * If no JSON object can be extracted at all (free-form prose, or
      JSON even the JSON-aware decoder rejects), the returned action
      carries an **empty** ``equation`` so the verifier reports a clean
      ``Empty equation payload`` error, while the raw text (truncated)
      is preserved in ``rationale`` for the UI / training logs. The raw
      text is never fed to the equation parser as if it were an
      equation — doing so produced misleading errors like ``Equation
      has no '=' sign: '{'``.
    """
    payload = _extract_json_payload(completion)
    if payload is None:
        # No JSON at all: keep the raw text for logs, truncated to a
        # sane length, and leave the equation empty on purpose.
        return PhysiXAction(equation="", rationale=completion.strip()[:500])
    fields = _lowercase_keys(payload)
    return PhysiXAction(
        equation=_first_string_value(fields, _EQUATION_KEYS),
        params=_coerce_params(_first_value(fields, _PARAMS_KEYS) or {}),
        rationale=_first_string_value(fields, _RATIONALE_KEYS),
    )
def _lowercase_keys(payload: dict[str, Any]) -> dict[str, Any]:
"""Return ``payload`` with top-level keys lowercased.
Some models emit ``"Equation"`` / ``"EQN"``; lowercasing once means
the lookup tables above stay declarative.
"""
return {str(k).lower(): v for k, v in payload.items()}
def _first_value(payload: dict[str, Any], keys: tuple[str, ...]) -> Any:
for key in keys:
if key in payload:
return payload[key]
return None
def _first_string_value(payload: dict[str, Any], keys: tuple[str, ...]) -> str:
value = _first_value(payload, keys)
if value is None:
return ""
return str(value).strip()
def _render_metadata_block(obs: PhysiXObservation) -> str:
state_vars = ", ".join(obs.state_variables) or "(none)"
stats_text = " ".join(f"{k}={v:.3g}" for k, v in obs.stats.items())
return (
f"SYSTEM_ID: {obs.system_id or 'unknown'}\n"
f"STATE_VARIABLES: {state_vars}\n"
f"HINT: {obs.hint}\n"
f"STATS: {stats_text}"
)
def _render_trajectory_block(obs: PhysiXObservation) -> str:
    """Render the downsampled TRAJECTORY section, one line per sample.

    Only state variables actually present in a sample are emitted, so a
    partially-populated sample never raises.
    """
    picked = _downsample(obs.trajectory, _TRAJECTORY_DOWNSAMPLE_COUNT)
    rows = [f"TRAJECTORY ({len(picked)} samples downsampled from {len(obs.trajectory)}):"]
    for point in picked:
        cells = [f"t={point['t']:.3f}"]
        cells.extend(f"{name}={point[name]:.3f}" for name in obs.state_variables if name in point)
        rows.append(" " + " ".join(cells))
    return "\n".join(rows)
#: Order in which reward components are surfaced to the model (rendered by
#: ``_format_reward_components``). Match matters most (it's the headline
#: accuracy signal); format is last because once it stabilises the others
#: dominate the gradient. Stable order also matters for the model's
#: in-context retrieval: a fixed column position is a reliable cue across
#: turns.
_REWARD_COMPONENT_ORDER: tuple[str, ...] = (
    "match",
    "progress",
    "simplicity",
    "format",
)
def _render_history_block(obs: PhysiXObservation) -> str:
    """Render the last ``_HISTORY_CAP`` turns as the HISTORY section.

    Two deliberate choices:

    * The field is spelled ``equation=`` rather than ``eqn=``:
      chat-tuned models mimic the most-recent token spelling when
      emitting their own JSON, so the key shown here must be the same
      one the parser expects in the model's reply.
    * Each turn's full *dense* reward breakdown is shown next to the
      scalar ``reward=`` total. The components (``match``/``progress``/
      ``simplicity``/``format``) are the same values the GRPO trainer
      optimises, so surfacing them in-context lets the model attribute
      its own gains and losses turn-over-turn instead of inferring them
      from the residual prose — e.g. ``format=0.0`` after a parse error
      says "prioritise grammar fixes", while ``match=0.62,
      progress=0.0`` on a plateau says "try a structurally different
      equation rather than tweaking coefficients".
    """
    lines = ["HISTORY:"]
    for record in obs.history[-_HISTORY_CAP:]:
        total = float(record.get("reward_total", 0.0))
        breakdown = _format_reward_components(record.get("reward_components"))
        lines.append(
            f" turn={record.get('turn')} reward={total:.3f} "
            f"[{breakdown}] equation=`{record.get('equation', '')}`"
        )
        summary = record.get("mismatch_summary", "")
        if summary:
            lines.append(f" mismatch: {summary}")
    return "\n".join(lines)
def _format_reward_components(components: Any) -> str:
    """Render ``{match, progress, simplicity, format}`` as a compact line.

    Always emits all four fields in :data:`_REWARD_COMPONENT_ORDER`,
    defaulting to ``0.00`` when ``components`` is not a dict, a field is
    absent, or a value is not a number — the model never has to guess
    why a column is missing. Components are formatted to two decimals
    to keep the bracketed breakdown compact (the scalar ``reward=``
    total in the history line carries three decimals).
    """
    if not isinstance(components, dict):
        # Missing/malformed payload: emit an explicit all-zero row.
        return " ".join(f"{name}=0.00" for name in _REWARD_COMPONENT_ORDER)
    parts: list[str] = []
    for name in _REWARD_COMPONENT_ORDER:
        try:
            value = float(components.get(name, 0.0))
        except (TypeError, ValueError):
            # Non-numeric component value: render as zero, not a crash.
            value = 0.0
        parts.append(f"{name}={value:.2f}")
    return " ".join(parts)
def _render_turn_footer(obs: PhysiXObservation) -> str:
total = obs.turn + obs.turn_remaining or DEFAULT_MAX_TURNS
return (
f"TURN: {obs.turn + 1} / {total} ({obs.turn_remaining} remaining)\n"
"Emit the next hypothesis as JSON."
)
def _downsample(samples: list[dict[str, float]], target: int) -> list[dict[str, float]]:
if len(samples) <= target:
return samples
step = max(1, len(samples) // target)
indices = list(range(0, len(samples), step))[:target]
if indices[-1] != len(samples) - 1:
indices[-1] = len(samples) - 1
return [samples[i] for i in indices]
_JSON_DECODER = json.JSONDecoder()


def _extract_json_payload(text: str) -> dict[str, Any] | None:
    """Return the first ``{...}`` span in ``text`` that parses as a JSON object.

    Uses :meth:`json.JSONDecoder.raw_decode` so braces inside JSON
    *string* values (e.g. LaTeX like ``"\\frac{d vy}{dt}"``) cannot
    confuse the scan — a regex-based brace matcher would mis-balance
    there and hand back the whole completion as a malformed equation.
    Returns ``None`` when no candidate parses as a dict.
    """
    cleaned = _strip_code_fences(text)
    position = 0
    while True:
        position = cleaned.find("{", position)
        if position == -1:
            return None
        try:
            obj, _ = _JSON_DECODER.raw_decode(cleaned[position:])
        except json.JSONDecodeError:
            position += 1
            continue
        if isinstance(obj, dict):
            return obj
        # Parsed but not an object (e.g. decoder quirk): keep scanning.
        position += 1
def _strip_code_fences(text: str) -> str:
"""Remove Markdown code-fence wrappers (```json``` / ```python``` / ```).
This is *not* equation rewriting — it strips the outer fence syntax
only, so the JSON-aware extractor below can find the object payload.
"""
text = re.sub(r"```(?:json|python)?\s*", "", text)
text = text.replace("```", "")
return text
def _coerce_params(params_raw: Any) -> dict[str, float]:
"""Best-effort coercion of a raw params payload into ``dict[str, float]``."""
if not isinstance(params_raw, dict):
return {}
out: dict[str, float] = {}
for key, value in params_raw.items():
try:
out[str(key)] = float(value)
except (TypeError, ValueError):
continue
return out