Initial physix-live source for HF Jobs training
Browse files- README.md +308 -0
- physix/__init__.py +32 -0
- physix/client.py +43 -0
- physix/models.py +138 -0
- physix/server/__init__.py +8 -0
- physix/server/app.py +117 -0
- physix/server/environment.py +278 -0
- physix/server/interactive.py +430 -0
- physix/systems/__init__.py +20 -0
- physix/systems/base.py +192 -0
- physix/systems/registry.py +48 -0
- physix/systems/tier1.py +143 -0
- physix/systems/tier2.py +128 -0
- physix/systems/tier3.py +132 -0
- physix/training/__init__.py +18 -0
- physix/training/dataset.py +153 -0
- physix/training/loop.py +536 -0
- physix/training/prompt.py +369 -0
- physix/training/reward_fns.py +107 -0
- physix/training/scorer.py +182 -0
- physix/training/sft.py +293 -0
- physix/verifier/__init__.py +33 -0
- physix/verifier/metrics.py +114 -0
- physix/verifier/mismatch.py +138 -0
- physix/verifier/parser.py +396 -0
- physix/verifier/reward.py +112 -0
- physix/verifier/simulator.py +224 -0
- pyproject.toml +48 -0
- tests/__init__.py +0 -0
- tests/test_client_ws.py +88 -0
- tests/test_dataset.py +92 -0
- tests/test_environment.py +180 -0
- tests/test_interactive_api.py +329 -0
- tests/test_parser.py +329 -0
- tests/test_prompt.py +300 -0
- tests/test_registry.py +28 -0
- tests/test_scorer.py +68 -0
- tests/test_sft_dataset.py +60 -0
README.md
ADDED
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PhysiX-Live
|
| 2 |
+
|
| 3 |
+
**One-line pitch:** an OpenEnv RL environment where a small (1.5B) language model iteratively
|
| 4 |
+
discovers equations of motion from trajectory data plus a one-sentence English hint —
|
| 5 |
+
verifier is `scipy.integrate.odeint` plus per-step R², no LLM-as-judge in the reward loop.
|
| 6 |
+
|
| 7 |
+
A submission for the **OpenEnv hackathon** (Apr 2026). The deliverables are: a clean
|
| 8 |
+
OpenEnv-compatible env, a TRL+Unsloth+GRPO training pipeline targeting Qwen2.5-1.5B with
|
| 9 |
+
LoRA-32, a React + TypeScript + Tailwind demo UI that animates trajectories side-by-side
|
| 10 |
+
for the trained vs. untrained model, and a recording script for pre-baked demo episodes.
|
| 11 |
+
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
## Repository layout
|
| 15 |
+
|
| 16 |
+
```
|
| 17 |
+
physix-live/
|
| 18 |
+
├── physix/ # Python package
|
| 19 |
+
│ ├── __init__.py # narrow public API
|
| 20 |
+
│ ├── models.py # Pydantic Action / Observation / State
|
| 21 |
+
│ ├── client.py # OpenEnv WebSocket client subclass
|
| 22 |
+
│ ├── systems/ # 8 physical systems in 3 tiers
|
| 23 |
+
│ │ ├── base.py # PhysicalSystem ABC + TrajectoryData
|
| 24 |
+
│ │ ├── tier1.py # FreeFall, FreeFallWithDrag, SimplePendulum
|
| 25 |
+
│ │ ├── tier2.py # DampedPendulum, SpringMass, DampedSpring
|
| 26 |
+
│ │ ├── tier3.py # ProjectileWithDrag, ChargedInBField (held out)
|
| 27 |
+
│ │ └── registry.py # system_id -> factory mapping
|
| 28 |
+
│ ├── verifier/ # scoring pipeline
|
| 29 |
+
│ │ ├── parser.py # SymPy whitelisted parser
|
| 30 |
+
│ │ ├── simulator.py # scipy.odeint forward sim
|
| 31 |
+
│ │ ├── metrics.py # per-step R²
|
| 32 |
+
│ │ ├── mismatch.py # English residual summary
|
| 33 |
+
│ │ └── reward.py # 4-component reward composition
|
| 34 |
+
│ ├── server/ # FastAPI + OpenEnv
|
| 35 |
+
│ │ ├── environment.py # PhysiXEnvironment subclass
|
| 36 |
+
│ │ ├── interactive.py # session-based REST router (`/interactive/*`)
|
| 37 |
+
│ │ └── app.py # FastAPI factory + CLI entry point
|
| 38 |
+
│ └── training/ # GRPO training pipeline
|
| 39 |
+
│ ├── prompt.py # observation -> prompt, completion -> action
|
| 40 |
+
│ ├── scorer.py # single-completion scorer (training + eval)
|
| 41 |
+
│ ├── reward_fns.py # TRL-compatible reward callables
|
| 42 |
+
│ ├── dataset.py # build training / eval datasets
|
| 43 |
+
│ └── loop.py # Unsloth + TRL GRPO loop (cloud A100)
|
| 44 |
+
├── frontend/ # React + TS + Tailwind demo UI
|
| 45 |
+
│ └── src/
|
| 46 |
+
│ ├── App.tsx # tabs: "Run with LLM" + "Manual"
|
| 47 |
+
│ ├── components/ # RunWithLlmPane, InteractivePane, …
|
| 48 |
+
│ ├── hooks/ # useLlmEpisodeRunner, useInteractiveSession
|
| 49 |
+
│ ├── lib/ # interactiveClient, trajectory, format
|
| 50 |
+
│ └── types/physix.ts
|
| 51 |
+
└── tests/ # full pipeline coverage incl. /interactive/*
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
---
|
| 55 |
+
|
| 56 |
+
## What the env does (one episode end-to-end)
|
| 57 |
+
|
| 58 |
+
```mermaid
|
| 59 |
+
sequenceDiagram
|
| 60 |
+
participant Agent
|
| 61 |
+
participant Env as PhysiXEnvironment
|
| 62 |
+
participant Sim as scipy.odeint
|
| 63 |
+
participant Verifier
|
| 64 |
+
|
| 65 |
+
Env->>Agent: reset(): observed trajectory + hint
|
| 66 |
+
loop up to 8 turns
|
| 67 |
+
Agent->>Env: step(SymPy eqn + params + rationale)
|
| 68 |
+
Env->>Sim: simulate from hypothesis
|
| 69 |
+
Sim-->>Verifier: predicted trajectory
|
| 70 |
+
Verifier-->>Env: r_match + r_progress + r_simplicity + r_format
|
| 71 |
+
Env->>Agent: obs (mismatch summary, history) + reward
|
| 72 |
+
alt r_match > 0.93 or budget exhausted
|
| 73 |
+
Env-->>Agent: done=True
|
| 74 |
+
end
|
| 75 |
+
end
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
**Action space:** the agent emits structured text in a constrained SymPy grammar
|
| 79 |
+
(`d2y/dt2 = -9.81 + 0.05 * vy**2`). Allowed operators: `+ - * / **`. Allowed
|
| 80 |
+
functions: `sin cos tan exp log sqrt abs`. Parse failures score `r_format = 0`.
|
| 81 |
+
|
| 82 |
+
**Reward:** four independent components (each in `[0, 1]`), weighted into a total.
|
| 83 |
+
|
| 84 |
+
| Component | Weight | What it measures |
|
| 85 |
+
|---|---:|---|
|
| 86 |
+
| `r_match` | 0.5 | Per-step R² between observed and predicted trajectory |
|
| 87 |
+
| `r_progress` | 0.2 | Improvement over prior turn (dense per-turn shaping) |
|
| 88 |
+
| `r_simplicity` | 0.2 | 1 − normalised operator count (Occam's razor) |
|
| 89 |
+
| `r_format` | 0.1 | Binary: SymPy parses + dimensional consistency |
|
| 90 |
+
|
| 91 |
+
The reward is fully verifiable — the env never calls an LLM-as-judge.
|
| 92 |
+
|
| 93 |
+
---
|
| 94 |
+
|
| 95 |
+
## Quick start
|
| 96 |
+
|
| 97 |
+
### 1. Install (Python)
|
| 98 |
+
|
| 99 |
+
Requires Python 3.10+. Inside a fresh conda env or venv:
|
| 100 |
+
|
| 101 |
+
```bash
|
| 102 |
+
pip install -e . # base deps (env server, verifier, client)
|
| 103 |
+
pip install -e ".[dev]" # + pytest, ruff
|
| 104 |
+
pip install -e ".[demo]" # + ollama (live LLM episodes via /interactive/llm-step)
|
| 105 |
+
pip install -e ".[train]" # + torch, transformers, trl, unsloth, wandb
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
Notes:
|
| 109 |
+
- `[train]` requires CUDA. Install it on the cloud A100 box, not on your laptop.
|
| 110 |
+
- `[demo]` adds the `ollama` Python client used by the server when the UI's
|
| 111 |
+
"Run with LLM" pane drives an episode. Start `ollama serve` and pull the
|
| 112 |
+
base model once with `ollama pull qwen2.5:1.5b-instruct`.
|
| 113 |
+
- The repo ships a `.vscode/settings.json` that pins the workspace's Python
|
| 114 |
+
interpreter to `~/miniconda3/envs/openenv_run/bin/python`. If your venv
|
| 115 |
+
lives somewhere else and your IDE shows "import could not be resolved",
|
| 116 |
+
update that path or run **Python: Select Interpreter** from the command
|
| 117 |
+
palette.
|
| 118 |
+
|
| 119 |
+
### 2. Run the test suite
|
| 120 |
+
|
| 121 |
+
```bash
|
| 122 |
+
pytest tests/ # 30 tests, ~3 seconds
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
### 3. Boot the env server locally
|
| 126 |
+
|
| 127 |
+
```bash
|
| 128 |
+
python -m physix.server.app --host 127.0.0.1 --port 8000
|
| 129 |
+
# or
|
| 130 |
+
uvicorn physix.server.app:app --host 127.0.0.1 --port 8000
|
| 131 |
+
```
|
| 132 |
+
|
| 133 |
+
The server exposes:
|
| 134 |
+
|
| 135 |
+
- OpenEnv endpoints: `/reset`, `/step`, `/state`, `/schema`, `/health`. These
|
| 136 |
+
are stateless — each request gets a fresh env. Fine for headless agents.
|
| 137 |
+
- A stateful WebSocket at `/ws` (used by the Python `PhysiXEnv` client).
|
| 138 |
+
- A bespoke session-based REST router at `/interactive/*` (see
|
| 139 |
+
`physix/server/interactive.py`) used by the demo UI. It maintains
|
| 140 |
+
in-process sessions so a browser can drive a multi-turn episode by
|
| 141 |
+
POSTing equations.
|
| 142 |
+
|
| 143 |
+
CORS is enabled out of the box for `http://localhost:5173` and
`http://127.0.0.1:5173` (the Vite dev server). Override with
`PHYSIX_CORS_ORIGINS=https://your-host.example` (a comma-separated list, or
`*` for any origin, dev only).
|
| 146 |
+
|
| 147 |
+
For sustained Python-side interaction use the WebSocket client:
|
| 148 |
+
|
| 149 |
+
```python
|
| 150 |
+
import asyncio
|
| 151 |
+
from physix import PhysiXEnv, PhysiXAction
|
| 152 |
+
|
| 153 |
+
async def main():
|
| 154 |
+
async with PhysiXEnv(base_url="http://127.0.0.1:8000") as env:
|
| 155 |
+
result = await env.reset(system_id="free_fall_drag", seed=42)
|
| 156 |
+
result = await env.step(
|
| 157 |
+
PhysiXAction(equation="d2y/dt2 = -9.81 + 0.05 * vy**2")
|
| 158 |
+
)
|
| 159 |
+
print(result.observation.reward_breakdown)
|
| 160 |
+
|
| 161 |
+
asyncio.run(main())
|
| 162 |
+
```
|
| 163 |
+
|
| 164 |
+
### 4. Run the demo UI
|
| 165 |
+
|
| 166 |
+
```bash
|
| 167 |
+
cd frontend
|
| 168 |
+
pnpm install
|
| 169 |
+
pnpm dev # http://localhost:5173
|
| 170 |
+
```
|
| 171 |
+
|
| 172 |
+
The UI has two tabs, both backed by the same live env server:
|
| 173 |
+
|
| 174 |
+
- **Run with LLM** — pick a system + an Ollama model tag, click ▶ Run, and
|
| 175 |
+
watch the model propose ODEs turn-by-turn. Each call hits
|
| 176 |
+
`POST /interactive/sessions/:id/llm-step`, which builds the env's prompt,
|
| 177 |
+
calls the local Ollama daemon, parses the reply, scores it via the
|
| 178 |
+
verifier, and streams the resulting turn back to the page. Pause anytime.
|
| 179 |
+
- **Manual** — submit equations yourself. No LLM in the loop. Same scoring
|
| 180 |
+
pipeline, useful for building intuition for the verifier.
|
| 181 |
+
|
| 182 |
+
The UI expects the env server to be reachable on the URL in
|
| 183 |
+
`VITE_PHYSIX_API_URL` (default `http://localhost:8000`). For the LLM tab,
|
| 184 |
+
you also need a local Ollama daemon (`ollama serve`) with the model tag
|
| 185 |
+
pulled in advance:
|
| 186 |
+
|
| 187 |
+
```bash
|
| 188 |
+
ollama pull qwen2.5:1.5b-instruct
|
| 189 |
+
# or, after exporting your merged adapter to GGUF and building a Modelfile:
|
| 190 |
+
ollama create physix-trained:latest -f Modelfile
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
There are no pre-recorded episodes to regenerate. Every turn shown in the
|
| 194 |
+
UI is a real LLM call against the live env.
|
| 195 |
+
|
| 196 |
+
### 5. Train (cloud A100)
|
| 197 |
+
|
| 198 |
+
```bash
|
| 199 |
+
WANDB_PROJECT=physix-live python -m physix.training.loop \
|
| 200 |
+
--model Qwen/Qwen2.5-1.5B-Instruct \
|
| 201 |
+
--output-dir runs/physix-1.5b-rl \
|
| 202 |
+
--num-steps 300
|
| 203 |
+
|
| 204 |
+
# Run an ablation:
|
| 205 |
+
python -m physix.training.loop --num-steps 300 --ablation no_progress
|
| 206 |
+
```
|
| 207 |
+
|
| 208 |
+
After training, push the merged adapter to the Hub. By default the loop
|
| 209 |
+
saves a `merged_16bit` artifact (LoRA merged into the base, written as a
|
| 210 |
+
standard HF checkpoint) so it can be loaded without Unsloth and exported to
|
| 211 |
+
GGUF for Ollama:
|
| 212 |
+
|
| 213 |
+
```bash
|
| 214 |
+
python -m physix.training.loop \
|
| 215 |
+
--num-steps 300 \
|
| 216 |
+
--save-method merged_16bit \
|
| 217 |
+
--push-to-hub --hub-repo-id you/physix-1.5b-rl
|
| 218 |
+
```
|
| 219 |
+
|
| 220 |
+
Pass `--save-method lora` if you want the small adapter-only artifact
|
| 221 |
+
instead. The training loop calls `unsloth.PatchFastRL("GRPO", FastLanguageModel)`
|
| 222 |
+
before importing `GRPOTrainer` — required for Unsloth's GRPO kernels to be
|
| 223 |
+
swapped in.
|
| 224 |
+
|
| 225 |
+
---
|
| 226 |
+
|
| 227 |
+
## Adding a new physical system
|
| 228 |
+
|
| 229 |
+
The framework generalises beyond the 8 shipped systems. Adding a new one is
|
| 230 |
+
about 50 lines:
|
| 231 |
+
|
| 232 |
+
```python
|
| 233 |
+
# physix/systems/tier2.py (or your own module)
|
| 234 |
+
import numpy as np
|
| 235 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 236 |
+
|
| 237 |
+
class CoupledOscillators(PhysicalSystem):
|
| 238 |
+
system_id: str = "coupled_oscillators"
|
| 239 |
+
tier: SystemTier = SystemTier.TIER_2
|
| 240 |
+
state_variables: tuple[str, ...] = ("x1", "vx1", "x2", "vx2")
|
| 241 |
+
hint_template: str = "Two masses coupled by a spring; observe both positions."
|
| 242 |
+
|
| 243 |
+
def sample_parameters(self, rng):
|
| 244 |
+
return {"k": float(rng.uniform(2, 10)), "k_c": float(rng.uniform(0.5, 2))}
|
| 245 |
+
|
| 246 |
+
def sample_initial_conditions(self, rng):
|
| 247 |
+
return {"x1": float(rng.uniform(0.5, 1)), "vx1": 0.0, "x2": 0.0, "vx2": 0.0}
|
| 248 |
+
|
| 249 |
+
def rhs(self, t, state, params):
|
| 250 |
+
x1, vx1, x2, vx2 = state
|
| 251 |
+
return np.array([
|
| 252 |
+
vx1, -params["k"] * x1 + params["k_c"] * (x2 - x1),
|
| 253 |
+
vx2, -params["k"] * x2 + params["k_c"] * (x1 - x2),
|
| 254 |
+
])
|
| 255 |
+
|
| 256 |
+
def ground_truth_equation(self) -> str:
|
| 257 |
+
return "d2x1/dt2 = -k*x1 + k_c*(x2-x1); d2x2/dt2 = -k*x2 + k_c*(x1-x2)"
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
`PhysicalSystem` is a Pydantic model with an `ABCMeta` mixin — subclasses
|
| 261 |
+
declare overridden fields as plain class-level annotations and pydantic
|
| 262 |
+
treats them as field overrides. No `@dataclass` decorator needed.
|
| 263 |
+
|
| 264 |
+
Then register it in `physix/systems/registry.py`:
|
| 265 |
+
|
| 266 |
+
```python
|
| 267 |
+
SYSTEM_REGISTRY["coupled_oscillators"] = CoupledOscillators
|
| 268 |
+
```
|
| 269 |
+
|
| 270 |
+
That's it — the env, parser, simulator, scorer, and training loop all pick it
|
| 271 |
+
up automatically.
|
| 272 |
+
|
| 273 |
+
---
|
| 274 |
+
|
| 275 |
+
## Themes (OpenEnv hackathon rubric)
|
| 276 |
+
|
| 277 |
+
- **Primary: World-Modeling** — the agent literally builds an internal model
|
| 278 |
+
of physical dynamics from data + context, refines it, and is scored against
|
| 279 |
+
ground truth.
|
| 280 |
+
- **Primary: Long-Horizon** — episodes are 5-8 turns of stateful refinement;
|
| 281 |
+
earlier hypotheses condition later ones via the prompt history.
|
| 282 |
+
- **Secondary: Self-Improvement** — curriculum from 1-D undamped (Tier 1)
|
| 283 |
+
through 1-D damped (Tier 2) to 2-D coupled (Tier 3, held out).
|
| 284 |
+
|
| 285 |
+
---
|
| 286 |
+
|
| 287 |
+
## Honest framing
|
| 288 |
+
|
| 289 |
+
We do **not** claim:
|
| 290 |
+
|
| 291 |
+
- The env discovers genuinely new physics.
|
| 292 |
+
- A 1.5B model beats GPT-4o on equation discovery.
|
| 293 |
+
- The model learns physics from scratch.
|
| 294 |
+
|
| 295 |
+
We **do** claim:
|
| 296 |
+
|
| 297 |
+
- The same 1.5B converges in fewer turns *after* RL training than *before*.
|
| 298 |
+
- The trained model generalises to held-out 2-D systems (Tier 3).
|
| 299 |
+
- The trained model uses NL hints meaningfully (ablate the hint, performance drops).
|
| 300 |
+
|
| 301 |
+
This calibrated framing is part of the storytelling axis (30%) — judges trust
|
| 302 |
+
self-comparison numbers more than claims to beat frontier models.
|
| 303 |
+
|
| 304 |
+
---
|
| 305 |
+
|
| 306 |
+
## License
|
| 307 |
+
|
| 308 |
+
MIT.
|
physix/__init__.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""PhysiX-Live: OpenEnv environment for iterative equation discovery.
|
| 2 |
+
|
| 3 |
+
Public API:
|
| 4 |
+
|
| 5 |
+
- :class:`PhysiXEnv`: the OpenEnv client (HTTP/WebSocket).
|
| 6 |
+
- :class:`PhysiXAction`, :class:`PhysiXObservation`, :class:`PhysiXState`:
|
| 7 |
+
the env's wire-protocol Pydantic models.
|
| 8 |
+
- :class:`RewardBreakdown`: 4-component reward record.
|
| 9 |
+
- :data:`GRAMMAR_HINT`: machine-generated DSL description for the LLM
|
| 10 |
+
system prompt (single source of truth: :mod:`physix.verifier.parser`).
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from physix.client import PhysiXEnv
|
| 14 |
+
from physix.models import (
|
| 15 |
+
PhysiXAction,
|
| 16 |
+
PhysiXObservation,
|
| 17 |
+
PhysiXState,
|
| 18 |
+
RewardBreakdown,
|
| 19 |
+
)
|
| 20 |
+
from physix.verifier.parser import GRAMMAR_HINT
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# Package version; bump on release (PEP 440 format).
__version__ = "0.1.0"

# Explicit public API re-exported at package level (`from physix import *`).
# Mirrors the names imported above from physix.client / physix.models /
# physix.verifier.parser.
__all__ = [
    "PhysiXEnv",
    "PhysiXAction",
    "PhysiXObservation",
    "PhysiXState",
    "RewardBreakdown",
    "GRAMMAR_HINT",
    "__version__",
]
|
physix/client.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""HTTP/WebSocket client for the PhysiX-Live environment.
|
| 2 |
+
|
| 3 |
+
Subclasses :class:`openenv.core.EnvClient` to provide PhysiX-specific
|
| 4 |
+
serialisation and parsing. The base class handles WebSocket connection,
|
| 5 |
+
session management, and the OpenEnv wire protocol.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from typing import Any
|
| 11 |
+
|
| 12 |
+
from openenv.core import EnvClient
|
| 13 |
+
from openenv.core.client_types import StepResult
|
| 14 |
+
|
| 15 |
+
from physix.models import PhysiXAction, PhysiXObservation, PhysiXState
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class PhysiXEnv(EnvClient[PhysiXAction, PhysiXObservation, PhysiXState]):
    """Client for the PhysiX-Live OpenEnv environment.

    The base :class:`openenv.core.EnvClient` owns the WebSocket connection,
    session management, and the wire protocol; this subclass supplies only
    the PhysiX-specific (de)serialisation hooks.

    Example::

        >>> async with PhysiXEnv(base_url="http://localhost:8000") as env:
        ...     result = await env.reset()
        ...     while not result.done:
        ...         action = agent.predict(result.observation)
        ...         result = await env.step(action)
    """

    def _step_payload(self, action: PhysiXAction) -> dict[str, Any]:
        # Serialise the whole action; explicit None values stay on the wire.
        return action.model_dump(exclude_none=False)

    def _parse_result(self, payload: dict[str, Any]) -> StepResult[PhysiXObservation]:
        # A missing or null "observation" key falls back to an empty dict so
        # the observation is built entirely from its field defaults.
        raw_obs = payload.get("observation", {}) or {}
        return StepResult(
            observation=PhysiXObservation(**raw_obs),
            reward=payload.get("reward"),
            done=payload.get("done", False),
        )

    def _parse_state(self, payload: dict[str, Any]) -> PhysiXState:
        return PhysiXState(**payload)
|
physix/models.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pydantic schemas + constants. Behaviour lives elsewhere."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Any
|
| 6 |
+
|
| 7 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 8 |
+
|
| 9 |
+
from openenv.core.env_server import Action, Observation, State
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
#: Per-episode turn budget. Episodes terminate earlier if r_match crosses
#: :data:`CONVERGENCE_THRESHOLD`.
DEFAULT_MAX_TURNS: int = 8
# r_match value at or above which an episode is treated as converged.
CONVERGENCE_THRESHOLD: float = 0.93

#: Reward component weights. Ablations only edit this dict.
# Keys match the four component fields of RewardBreakdown; weights sum to 1.0.
REWARD_WEIGHTS: dict[str, float] = {
    "match": 0.5,
    "progress": 0.2,
    "simplicity": 0.2,
    "format": 0.1,
}
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class PhysiXAction(Action):
    """One agent step.

    Fields have defaults so tests can construct partial actions, and so
    the env can fabricate a ``format=0`` no-op action for completions
    that fail to parse JSON. The LLM is expected to fill all three;
    an empty string / dict is fine when irrelevant.
    """

    # The ODE hypothesis, e.g. "d2y/dt2 = -9.81 + 0.05 * vy**2".
    equation: str = Field(default="", description="ODE in the verifier DSL")
    # Constant values substituted for free symbols appearing in `equation`.
    params: dict[str, float] = Field(
        default_factory=dict,
        description="Numerical substitutions for free symbols on the RHS",
    )
    # Free-text explanation accompanying the hypothesis.
    rationale: str = Field(default="")
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class PhysiXObservation(Observation):
    """What the agent sees per step. Inherits ``done`` / ``reward`` from
    :class:`openenv.core.env_server.Observation`."""

    # Each element is one timestep: {"t": ..., "<state_var>": ..., ...}.
    trajectory: list[dict[str, float]] = Field(
        default_factory=list,
        description="Observed (noisy) trajectory as list of timestep dicts",
    )
    state_variables: list[str] = Field(
        default_factory=list,
        description="Names of state-variable keys present in trajectory[i] (excluding t)",
    )
    hint: str = Field(default="", description="One-sentence physical-context string")
    # Shape of each entry matches HistoryEntry.as_dict().
    history: list[dict[str, Any]] = Field(
        default_factory=list,
        description="Prior turns surfaced back so the agent can refine",
    )
    mismatch_summary: str = Field(
        default="",
        description="English description of where last prediction diverged",
    )
    turn: int = Field(default=0, ge=0, description="0-indexed turn counter")
    turn_remaining: int = Field(
        default=DEFAULT_MAX_TURNS,
        ge=0,
        description="Turns left in the episode budget",
    )
    system_id: str = Field(default="", description="Stable id of underlying system")
    stats: dict[str, float] = Field(
        default_factory=dict, description="Aggregate trajectory statistics"
    )
    # Keys match RewardBreakdown.as_dict(): match/progress/simplicity/format/total.
    reward_breakdown: dict[str, float] = Field(
        default_factory=dict,
        description="Four reward components from the previous step",
    )
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class PhysiXState(State):
    """Episode-level state. The ground-truth equation lives here for logging;
    it is *never* surfaced to the agent. ``last_reward_total`` feeds the
    per-turn ``progress`` reward delta."""

    system_id: str = Field(default="")
    # Ground truth for logging/eval only; excluded from observations.
    ground_truth_equation: str = Field(default="")
    ground_truth_params: dict[str, float] = Field(default_factory=dict)
    # Previous turn's totals, used to compute the per-turn progress delta.
    last_reward_total: float = Field(default=0.0)
    last_r_match: float = Field(default=0.0)
    # True once r_match crosses CONVERGENCE_THRESHOLD (ends the episode early).
    converged: bool = Field(default=False)
    max_turns: int = Field(default=DEFAULT_MAX_TURNS, ge=1)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class HistoryEntry(BaseModel):
    """One previous turn, surfaced back to the agent on the next step."""

    model_config = ConfigDict(extra="forbid")

    turn: int
    equation: str
    params: dict[str, float]
    reward_total: float
    reward_components: dict[str, float]
    mismatch_summary: str

    def as_dict(self) -> dict[str, Any]:
        """Plain-dict view with reward numbers rounded to 4 decimal places."""
        rounded_components = {
            name: round(value, 4) for name, value in self.reward_components.items()
        }
        return {
            "turn": self.turn,
            "equation": self.equation,
            "params": self.params,
            "reward_total": round(self.reward_total, 4),
            "reward_components": rounded_components,
            "mismatch_summary": self.mismatch_summary,
        }
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class RewardBreakdown(BaseModel):
    """4-component reward, each in ``[0, 1]``. ``total`` is the weighted sum
    using :data:`REWARD_WEIGHTS`."""

    model_config = ConfigDict(extra="forbid")

    match: float = 0.0
    progress: float = 0.0
    simplicity: float = 0.0
    format: float = 0.0
    total: float = 0.0

    def as_dict(self) -> dict[str, float]:
        """Return all components plus the total as a plain dict (key order fixed)."""
        return {
            name: getattr(self, name)
            for name in ("match", "progress", "simplicity", "format", "total")
        }
|
physix/server/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""HTTP server layer for the PhysiX-Live environment.
|
| 2 |
+
|
| 3 |
+
Submodules:
|
| 4 |
+
- :mod:`physix.server.environment` defines the OpenEnv ``Environment`` subclass
|
| 5 |
+
that owns episode lifecycle and reward dispatch.
|
| 6 |
+
- :mod:`physix.server.app` builds the FastAPI application and exposes a
|
| 7 |
+
``main()`` entry point for the ``physix-server`` console script.
|
| 8 |
+
"""
|
physix/server/app.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI app + ``physix-server`` console-script entry point.
|
| 2 |
+
|
| 3 |
+
Mounts the OpenEnv stateless endpoints (``/reset`` etc.) plus the bespoke
|
| 4 |
+
``/interactive/*`` router that maintains in-process sessions for browsers.
|
| 5 |
+
CORS allows the Vite dev origin out of the box; override with the
|
| 6 |
+
``PHYSIX_CORS_ORIGINS`` env var (comma-separated, or ``*`` for any).
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import argparse
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
|
| 15 |
+
import uvicorn
|
| 16 |
+
from fastapi import FastAPI, Request
|
| 17 |
+
from fastapi.exceptions import RequestValidationError
|
| 18 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 19 |
+
from fastapi.responses import JSONResponse
|
| 20 |
+
from openenv.core.env_server import create_fastapi_app
|
| 21 |
+
from starlette.exceptions import HTTPException as StarletteHTTPException
|
| 22 |
+
|
| 23 |
+
from physix.models import PhysiXAction, PhysiXObservation
|
| 24 |
+
from physix.server.environment import PhysiXEnvironment
|
| 25 |
+
from physix.server.interactive import build_interactive_router
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Vite dev-server origins allowed when PHYSIX_CORS_ORIGINS is unset.
_DEFAULT_CORS_ORIGINS = (
    "http://localhost:5173",
    "http://127.0.0.1:5173",
)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def build_app() -> FastAPI:
    """Assemble the FastAPI app: OpenEnv routes, CORS, error handlers,
    and the session-based ``/interactive/*`` router."""
    application = create_fastapi_app(
        env=PhysiXEnvironment,
        action_cls=PhysiXAction,
        observation_cls=PhysiXObservation,
    )
    _install_cors(application)
    _install_error_handlers(application)
    application.include_router(build_interactive_router())
    return application
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _install_cors(app: FastAPI) -> None:
    """Attach CORS middleware.

    Origins come from the comma-separated ``PHYSIX_CORS_ORIGINS`` env var,
    falling back to the Vite dev-server defaults.
    """
    raw = os.environ.get("PHYSIX_CORS_ORIGINS", "")
    if raw:
        origins = [part.strip() for part in raw.split(",") if part.strip()]
    else:
        origins = list(_DEFAULT_CORS_ORIGINS)
    # The CORS spec forbids combining a wildcard origin with credentials,
    # so credentials are disabled when the config is exactly ["*"].
    allow_all = origins == ["*"]
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"] if allow_all else origins,
        allow_credentials=not allow_all,
        allow_methods=["*"],
        allow_headers=["*"],
    )
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _install_error_handlers(app: FastAPI) -> None:
    """Make sure 4xx/5xx responses still carry the request's CORS headers.

    Starlette's ``CORSMiddleware`` only annotates responses produced by
    successful route handlers; raw ``HTTPException`` responses skip the
    middleware and reach the browser without ``Access-Control-Allow-Origin``,
    which the browser surfaces as a generic network error rather than the
    real status code (e.g. 502 "Ollama not reachable" was reported as 404
    in the dev UI). Re-emitting through ``JSONResponse`` runs the middleware.
    """

    @app.exception_handler(StarletteHTTPException)
    async def _http_exc(_request: Request, exc: StarletteHTTPException) -> JSONResponse:
        # Preserve status, detail, and any headers (e.g. WWW-Authenticate).
        response = JSONResponse(
            status_code=exc.status_code,
            content={"detail": exc.detail},
            headers=exc.headers,
        )
        return response

    @app.exception_handler(RequestValidationError)
    async def _validation_exc(
        _request: Request, exc: RequestValidationError
    ) -> JSONResponse:
        # Mirror FastAPI's default 422 payload shape.
        return JSONResponse(status_code=422, content={"detail": exc.errors()})
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# Module-level ASGI app so `uvicorn physix.server.app:app` works.
app: FastAPI = build_app()
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def main() -> None:
    """CLI entry point: parse flags (env-var defaults) and launch uvicorn."""
    arg_parser = argparse.ArgumentParser(
        description="Run the PhysiX-Live env server."
    )
    arg_parser.add_argument(
        "--host", default=os.environ.get("PHYSIX_HOST", "0.0.0.0")
    )
    arg_parser.add_argument(
        "--port", type=int, default=int(os.environ.get("PORT", "8000"))
    )
    arg_parser.add_argument(
        "--log-level", default=os.environ.get("PHYSIX_LOG_LEVEL", "info")
    )
    arg_parser.add_argument(
        "--reload",
        action="store_true",
        help="Auto-reload on source changes. Use during development only.",
    )
    cli = arg_parser.parse_args()

    # logging wants the upper-case level name; uvicorn the lower-case one.
    logging.basicConfig(level=cli.log_level.upper())
    uvicorn.run(
        "physix.server.app:app",
        host=cli.host,
        port=cli.port,
        log_level=cli.log_level,
        reload=cli.reload,
    )


if __name__ == "__main__":
    main()
|
physix/server/environment.py
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenEnv :class:`Environment` subclass for PhysiX-Live.
|
| 2 |
+
|
| 3 |
+
Owns one episode's lifecycle (state + budget + termination) and orchestrates
|
| 4 |
+
the parser/simulator/metrics/reward modules. No scoring logic lives here.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import logging
|
| 10 |
+
import uuid
|
| 11 |
+
from typing import Any, Optional
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
from openenv.core.env_server import Environment
|
| 15 |
+
|
| 16 |
+
from physix.models import (
|
| 17 |
+
CONVERGENCE_THRESHOLD,
|
| 18 |
+
DEFAULT_MAX_TURNS,
|
| 19 |
+
HistoryEntry,
|
| 20 |
+
PhysiXAction,
|
| 21 |
+
PhysiXObservation,
|
| 22 |
+
PhysiXState,
|
| 23 |
+
RewardBreakdown,
|
| 24 |
+
)
|
| 25 |
+
from physix.systems import PhysicalSystem, SystemTier, get_system, list_systems_by_tier
|
| 26 |
+
from physix.systems.base import TrajectoryData
|
| 27 |
+
from physix.verifier import (
|
| 28 |
+
ParseError,
|
| 29 |
+
SimulationError,
|
| 30 |
+
compute_match,
|
| 31 |
+
compute_reward,
|
| 32 |
+
parse_equation,
|
| 33 |
+
residual_summary,
|
| 34 |
+
simulate_hypothesis,
|
| 35 |
+
summarize_mismatch,
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
_log = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class PhysiXEnvironment(Environment[PhysiXAction, PhysiXObservation, PhysiXState]):
    """OpenEnv environment that drives one episode of equation discovery.

    One instance owns one episode at a time: the sampled physical system, its
    simulated ground-truth trajectory, the turn budget, and the per-turn
    history.  Scoring is delegated to the parser/simulator/metrics/reward
    modules in ``physix.verifier``; no scoring logic lives here.
    """

    def __init__(
        self,
        *,
        max_turns: int = DEFAULT_MAX_TURNS,
        train_tiers: tuple[SystemTier, ...] = (SystemTier.TIER_1, SystemTier.TIER_2),
        seed: Optional[int] = None,
    ) -> None:
        """Configure the episode budget, the system pool, and the RNG.

        Args:
            max_turns: Hard cap on hypotheses per episode.
            train_tiers: Tiers sampled from when ``reset`` is not given an
                explicit ``system_id``.
            seed: Seeds the NumPy generator used for system sampling and
                trajectory simulation; ``None`` means nondeterministic.
        """
        super().__init__()
        self._max_turns = max_turns
        self._train_tiers = train_tiers
        self._rng = np.random.default_rng(seed)

        # Placeholder episode state until reset() starts a real episode.
        self._state = PhysiXState(max_turns=max_turns)
        self._system: Optional[PhysicalSystem] = None
        self._trajectory: Optional[TrajectoryData] = None
        self._history: list[HistoryEntry] = []

    def reset(
        self,
        seed: Optional[int] = None,
        episode_id: Optional[str] = None,
        **kwargs: Any,
    ) -> PhysiXObservation:
        """Start a new episode. Pass ``system_id=`` to force a specific system."""
        if seed is not None:
            # Re-seed so system choice and trajectory are reproducible.
            self._rng = np.random.default_rng(seed)

        forced_id = kwargs.get("system_id")
        chosen_id = forced_id or self._sample_training_system_id()

        self._system = get_system(chosen_id)
        self._trajectory = self._system.simulate(self._rng)
        self._history = []

        self._state = PhysiXState(
            episode_id=episode_id or str(uuid.uuid4()),
            step_count=0,
            system_id=chosen_id,
            ground_truth_equation=self._system.ground_truth_equation(),
            ground_truth_params=dict(self._system.parameters),
            last_reward_total=0.0,
            converged=False,
            max_turns=self._max_turns,
        )

        # Turn 0: no hypothesis yet, so blank feedback and a zero breakdown.
        return self._build_observation(
            mismatch_summary="",
            reward_breakdown=RewardBreakdown(),
        )

    def step(
        self,
        action: PhysiXAction,
        timeout_s: Optional[float] = None,
        **kwargs: Any,
    ) -> PhysiXObservation:
        """Score one hypothesis and advance the episode by one turn.

        Raises:
            RuntimeError: If called before :meth:`reset`.
        """
        del timeout_s, kwargs  # accepted for OpenEnv API conformance, unused

        if self._system is None or self._trajectory is None:
            raise RuntimeError("step() called before reset(); call reset() first.")

        # The turn counts against the budget even if parsing/simulation fails.
        self._state.step_count = self._state.step_count + 1

        breakdown, mismatch_text = self._score_hypothesis(action)
        self._record_history(action, breakdown, mismatch_text)

        self._state.last_reward_total = breakdown.total
        self._state.last_r_match = breakdown.match
        if breakdown.match >= CONVERGENCE_THRESHOLD:
            # Sticky flag: once converged, the episode stays done.
            self._state.converged = True

        return self._build_observation(
            mismatch_summary=mismatch_text,
            reward_breakdown=breakdown,
        )

    @property
    def state(self) -> PhysiXState:
        # Current (mutable) episode state.
        return self._state

    def current_observation(self) -> Optional[PhysiXObservation]:
        """Re-render the observation an external driver should feed to the
        agent for the *next* turn (i.e. before calling :meth:`step`).

        Used by the interactive HTTP router to build prompts mid-session.
        Returns ``None`` before :meth:`reset` has been called.
        """
        if self._system is None or self._trajectory is None:
            return None
        # Echo the most recent turn's feedback, or blanks on a fresh episode.
        last = self._history[-1] if self._history else None
        breakdown = (
            RewardBreakdown(**last.reward_components)
            if last is not None
            else RewardBreakdown()
        )
        mismatch = last.mismatch_summary if last is not None else ""
        return self._build_observation(
            mismatch_summary=mismatch,
            reward_breakdown=breakdown,
        )

    @property
    def current_trajectory(self) -> Optional[TrajectoryData]:
        # Ground-truth trajectory of the active episode (None before reset).
        return self._trajectory

    @property
    def current_system(self) -> Optional[PhysicalSystem]:
        # Active physical system of the episode (None before reset).
        return self._system

    def _is_done(self) -> bool:
        # The episode ends on convergence or when the turn budget is spent.
        if self._state.converged:
            return True
        return self._state.step_count >= self._max_turns

    def _score_hypothesis(
        self,
        action: PhysiXAction,
    ) -> tuple[RewardBreakdown, str]:
        """Parse, simulate, and compare one hypothesis against ground truth.

        Parse and simulation failures are converted into zero-match reward
        breakdowns with an error string as feedback instead of raising.
        Returns ``(breakdown, feedback_text)``.
        """
        assert self._system is not None
        assert self._trajectory is not None

        # Only the names the agent actually supplied are legal parameters.
        parameter_names = frozenset(action.params or {})

        try:
            parsed = parse_equation(
                action.equation,
                state_variables=self._system.state_variables,
                parameter_names=parameter_names,
            )
        except ParseError as exc:
            _log.debug("parse_equation failed: %s", exc)
            breakdown = compute_reward(
                parse_succeeded=False,
                r_match=0.0,
                operator_count=0,
                previous_r_match=self._state.last_r_match,
            )
            return breakdown, f"Parse error: {exc}"

        try:
            predicted = simulate_hypothesis(
                parsed,
                state_variables=self._system.state_variables,
                parameters=dict(action.params or {}),
                initial_conditions=self._trajectory.initial_conditions,
                timestamps=self._trajectory.timestamps,
            )
        except SimulationError as exc:
            _log.debug("simulate_hypothesis failed: %s", exc)
            # Parsed fine but failed to integrate: keep the operator count.
            breakdown = compute_reward(
                parse_succeeded=True,
                r_match=0.0,
                operator_count=parsed.operator_count,
                previous_r_match=self._state.last_r_match,
            )
            return breakdown, f"Simulation error: {exc}"

        r_match = compute_match(
            observed=self._trajectory.states,
            predicted=predicted,
            state_variables=self._system.state_variables,
        )
        residuals = residual_summary(
            timestamps=self._trajectory.timestamps,
            observed=self._trajectory.states,
            predicted=predicted,
            state_variables=self._system.state_variables,
        )
        mismatch_text = summarize_mismatch(
            observed=self._trajectory.states,
            predicted=predicted,
            state_variables=self._system.state_variables,
            timestamps=self._trajectory.timestamps,
            summary=residuals,
        )

        breakdown = compute_reward(
            parse_succeeded=True,
            r_match=r_match,
            operator_count=parsed.operator_count,
            previous_r_match=self._state.last_r_match,
        )
        return breakdown, mismatch_text

    def _record_history(
        self,
        action: PhysiXAction,
        breakdown: RewardBreakdown,
        mismatch_text: str,
    ) -> None:
        """Append this turn's attempt and its outcome to the episode history."""
        entry = HistoryEntry(
            turn=self._state.step_count,
            equation=action.equation,
            params=dict(action.params or {}),
            reward_total=breakdown.total,
            reward_components=breakdown.as_dict(),
            mismatch_summary=mismatch_text,
        )
        self._history.append(entry)

    def _build_observation(
        self,
        *,
        mismatch_summary: str,
        reward_breakdown: RewardBreakdown,
    ) -> PhysiXObservation:
        """Assemble the full observation payload for the current turn."""
        assert self._system is not None
        assert self._trajectory is not None

        return PhysiXObservation(
            done=self._is_done(),
            reward=reward_breakdown.total,
            trajectory=self._trajectory.to_observation_samples(),
            state_variables=list(self._system.state_variables),
            hint=self._system.hint(self._state.ground_truth_params),
            history=[entry.as_dict() for entry in self._history],
            mismatch_summary=mismatch_summary,
            turn=self._state.step_count,
            turn_remaining=max(0, self._max_turns - self._state.step_count),
            system_id=self._state.system_id,
            stats=self._trajectory.stats(),
            reward_breakdown=reward_breakdown.as_dict(),
        )

    def _sample_training_system_id(self) -> str:
        """Uniformly sample a system id from the configured training tiers.

        Raises:
            RuntimeError: If the configured tiers contain no systems.
        """
        candidates: list[str] = []
        for tier in self._train_tiers:
            candidates.extend(list_systems_by_tier(tier))
        if not candidates:
            raise RuntimeError(
                f"No training systems found for tiers {self._train_tiers!r}."
            )
        idx = int(self._rng.integers(0, len(candidates)))
        return candidates[idx]
|
physix/server/interactive.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Session-based REST router for browser-driven episodes."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
import threading
|
| 7 |
+
import time
|
| 8 |
+
import uuid
|
| 9 |
+
from collections.abc import Callable
|
| 10 |
+
from typing import Optional
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
from fastapi import APIRouter, HTTPException
|
| 14 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 15 |
+
|
| 16 |
+
from physix.models import (
|
| 17 |
+
DEFAULT_MAX_TURNS,
|
| 18 |
+
PhysiXAction,
|
| 19 |
+
PhysiXObservation,
|
| 20 |
+
)
|
| 21 |
+
from physix.server.environment import PhysiXEnvironment
|
| 22 |
+
from physix.systems import list_supported_systems, list_systems
|
| 23 |
+
from physix.systems.base import PhysicalSystem, TrajectoryData
|
| 24 |
+
from physix.training.prompt import build_prompt, parse_completion
|
| 25 |
+
from physix.verifier.parser import parse_equation
|
| 26 |
+
from physix.verifier.simulator import simulate_hypothesis
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
_log = logging.getLogger(__name__)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class InteractiveResetRequest(BaseModel):
    """Request body for creating an interactive session."""

    model_config = ConfigDict(extra="forbid")  # reject unknown keys

    # Force a specific system; None lets the server pick one.
    system_id: Optional[str] = Field(
        default=None,
        description="Force a specific system. None = sample at random.",
    )
    # Optional RNG seed for reproducible system choice / trajectory.
    seed: Optional[int] = None
    # Per-episode turn budget (bounded server-side).
    max_turns: int = Field(default=DEFAULT_MAX_TURNS, ge=1, le=32)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class SystemDescriptor(BaseModel):
    """Identifier plus state-variable names for one physical system."""

    model_config = ConfigDict(frozen=True)  # immutable value object

    # Registry identifier of the system.
    system_id: str
    # Names of the observable state variables.
    state_variables: tuple[str, ...]
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class InteractiveStartResponse(BaseModel):
    """Session-creation response: id, first observation, and system info."""

    session_id: str
    observation: PhysiXObservation
    system: SystemDescriptor
    max_turns: int
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class LlmStepRequest(BaseModel):
    """Server-side LLM call. Browser names a model tag; server hits Ollama."""

    model_config = ConfigDict(extra="forbid")

    # Ollama model tag to chat with.
    model: str = "qwen2.5:1.5b-instruct"
    # Sampling temperature forwarded to Ollama.
    temperature: float = Field(default=0.4, ge=0.0, le=2.0)
    # Generation cap, forwarded as Ollama's "num_predict".
    max_tokens: int = Field(default=2048, ge=64, le=8192)
    # Optional Ollama host override; None uses the client default.
    host: Optional[str] = None
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class LlmModelInfo(BaseModel):
    """A single locally-pulled Ollama model tag."""

    model_config = ConfigDict(frozen=True)

    # Model tag as reported by the daemon.
    name: str
    # On-disk size, when the daemon reports it.
    size_bytes: Optional[int] = None
    # Human-readable parameter count, when reported.
    parameter_size: Optional[str] = None
    # Model family, when reported.
    family: Optional[str] = None
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class LlmModelsResponse(BaseModel):
    """Local Ollama models, or an in-band error when they can't be listed."""

    models: list[LlmModelInfo] = Field(default_factory=list)
    # Human-readable explanation when the daemon/package is unavailable.
    error: Optional[str] = None
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class LlmStepResponse(BaseModel):
    """Outcome of one LLM-driven environment step."""

    # Environment observation after applying the parsed action.
    observation: PhysiXObservation
    # Best-effort forward simulation of the hypothesis; empty on failure.
    predicted_trajectory: list[dict[str, float]] = Field(default_factory=list)
    # Action parsed from the model's completion.
    action: PhysiXAction
    # Verbatim model output, for display/debugging.
    raw_completion: str
    # Wall-clock seconds spent in the LLM call.
    latency_s: float
    # Model tag that produced the completion.
    model: str
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class SessionSummary(BaseModel):
    """Lightweight status snapshot of an interactive session."""

    session_id: str
    system_id: str
    # Turns consumed so far.
    turn: int
    max_turns: int
    converged: bool
    # True once converged or the turn budget is exhausted.
    done: bool
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class _Session:
    """One live interactive episode: environment, metadata, and a lock."""

    # Slots keep per-session overhead small and catch typo'd attributes.
    __slots__ = ("env", "system_id", "max_turns", "lock")

    def __init__(self, env: PhysiXEnvironment, system_id: str, max_turns: int) -> None:
        self.env = env
        self.system_id = system_id
        self.max_turns = max_turns
        # Serialises concurrent requests that touch this session's env.
        self.lock = threading.Lock()
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class InteractiveSessionStore:
    """Threadsafe in-memory map of session id -> live session."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._sessions: dict[str, _Session] = {}

    def create(
        self,
        *,
        system_id: Optional[str],
        seed: Optional[int],
        max_turns: int,
    ) -> tuple[str, _Session, PhysiXObservation]:
        """Spin up a fresh environment, reset it, and register the session."""
        environment = PhysiXEnvironment(seed=seed, max_turns=max_turns)
        first_obs = environment.reset(seed=seed, system_id=system_id)
        new_session = _Session(
            env=environment,
            system_id=environment.state.system_id,
            max_turns=max_turns,
        )
        new_id = uuid.uuid4().hex
        with self._lock:
            self._sessions[new_id] = new_session
        return new_id, new_session, first_obs

    def get(self, session_id: str) -> _Session:
        """Look up a session, failing the request with a 404 when absent."""
        with self._lock:
            found = self._sessions.get(session_id)
        if found is None:
            raise HTTPException(status_code=404, detail="Unknown session_id.")
        return found

    def delete(self, session_id: str) -> None:
        """Forget a session; unknown ids are silently ignored."""
        with self._lock:
            self._sessions.pop(session_id, None)

    def __len__(self) -> int:
        with self._lock:
            return len(self._sessions)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
LlmPolicy = Callable[[list[dict[str, str]]], str]
|
| 151 |
+
LlmPolicyFactory = Callable[[LlmStepRequest], LlmPolicy]
|
| 152 |
+
LlmModelsLister = Callable[[], LlmModelsResponse]
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def default_ollama_models_lister() -> LlmModelsResponse:
    """Enumerate locally pulled Ollama models, reporting failures in-band."""
    try:
        import ollama  # type: ignore[import-not-found]
    except ImportError:
        return LlmModelsResponse(
            models=[],
            error=(
                "The 'ollama' Python package is not installed on the server. "
                "Install with: pip install -e '.[demo]'"
            ),
        )

    try:
        response = ollama.Client().list()
    except Exception as exc:  # noqa: BLE001 — surfaced in the response body
        return LlmModelsResponse(
            models=[],
            error=(
                f"Could not reach the local Ollama daemon ({exc}). "
                "Is 'ollama serve' running?"
            ),
        )

    # The SDK may return an attribute-style object or a plain dict.
    entries = getattr(response, "models", None)
    if entries is None and isinstance(response, dict):
        entries = response.get("models", [])

    infos: list[LlmModelInfo] = []
    for item in entries or []:
        tag = _model_attr(item, "model") or _model_attr(item, "name")
        if not isinstance(tag, str) or not tag:
            # Skip malformed entries rather than failing the whole listing.
            continue
        meta = _model_attr(item, "details")
        infos.append(
            LlmModelInfo(
                name=tag,
                size_bytes=_coerce_int(_model_attr(item, "size")),
                parameter_size=_model_attr(meta, "parameter_size"),
                family=_model_attr(meta, "family"),
            )
        )

    return LlmModelsResponse(models=sorted(infos, key=lambda m: m.name))
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def _model_attr(obj: object, key: str) -> object:
|
| 203 |
+
if obj is None:
|
| 204 |
+
return None
|
| 205 |
+
if isinstance(obj, dict):
|
| 206 |
+
return obj.get(key)
|
| 207 |
+
return getattr(obj, key, None)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def _coerce_int(value: object) -> Optional[int]:
|
| 211 |
+
if value is None:
|
| 212 |
+
return None
|
| 213 |
+
try:
|
| 214 |
+
return int(value)
|
| 215 |
+
except (TypeError, ValueError):
|
| 216 |
+
return None
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def default_ollama_policy_factory(request: LlmStepRequest) -> LlmPolicy:
    """Build a chat-completion callable backed by a local Ollama daemon.

    A missing ``ollama`` package surfaces immediately as HTTP 503 (server
    misconfiguration); a failed chat call surfaces as HTTP 502 (upstream
    unreachable) from the returned policy.
    """
    try:
        import ollama  # type: ignore[import-not-found]
    except ImportError as exc:  # pragma: no cover
        raise HTTPException(
            status_code=503,
            detail=(
                "The 'ollama' Python package is not installed on the server. "
                "Install with: pip install -e '.[demo]'"
            ),
        ) from exc

    client = ollama.Client(host=request.host) if request.host else ollama.Client()

    def _policy(prompt: list[dict[str, str]]) -> str:
        """Send the chat prompt to Ollama and return the raw completion text."""
        try:
            response = client.chat(
                model=request.model,
                messages=prompt,
                # Ask Ollama for JSON-only output.
                format="json",
                options={
                    "temperature": request.temperature,
                    "num_predict": request.max_tokens,
                },
            )
        except Exception as exc:  # noqa: BLE001 — surfaced as 502
            raise HTTPException(
                status_code=502,
                detail=(
                    f"Ollama call failed for model {request.model!r}: {exc}. "
                    "Is 'ollama serve' running and the model pulled "
                    f"('ollama pull {request.model}')?"
                ),
            ) from exc
        return str(response["message"]["content"])

    return _policy
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def build_interactive_router(
    store: Optional[InteractiveSessionStore] = None,
    *,
    policy_factory: LlmPolicyFactory = default_ollama_policy_factory,
    models_lister: LlmModelsLister = default_ollama_models_lister,
) -> APIRouter:
    """Build the ``/interactive`` REST router.

    Args:
        store: Session store to use; a fresh in-memory store when ``None``.
        policy_factory: Builds the LLM callable for ``llm-step``
            (injectable for tests).
        models_lister: Lists locally available LLM models
            (injectable for tests).
    """
    sessions = store if store is not None else InteractiveSessionStore()
    router = APIRouter(prefix="/interactive", tags=["Interactive"])

    @router.get("/models", response_model=LlmModelsResponse)
    def list_local_models() -> LlmModelsResponse:
        # Errors are reported in-band (response.error), not as HTTP errors.
        return models_lister()

    @router.get("/systems", response_model=list[SystemDescriptor])
    def list_public_systems() -> list[SystemDescriptor]:
        # NOTE(review): local import, though the module already imports from
        # physix.systems at top level — confirm whether this avoids a cycle.
        from physix.systems import get_system

        out: list[SystemDescriptor] = []
        for system_id in list_supported_systems():
            system = get_system(system_id)
            out.append(
                SystemDescriptor(
                    system_id=system.system_id,
                    state_variables=system.state_variables,
                )
            )
        return out

    @router.post("/sessions", response_model=InteractiveStartResponse)
    def start_session(payload: InteractiveResetRequest) -> InteractiveStartResponse:
        from physix.systems import get_system

        # Validate an explicitly requested system before creating anything.
        if payload.system_id is not None and payload.system_id not in list_systems():
            raise HTTPException(
                status_code=400, detail=f"Unknown system_id {payload.system_id!r}."
            )
        chosen_system_id = payload.system_id
        if chosen_system_id is None:
            # No explicit choice: sample from the supported (demo) systems,
            # seeded when the caller supplied a seed.
            demo_ids = list_supported_systems()
            if demo_ids:
                rng = (
                    np.random.default_rng(payload.seed)
                    if payload.seed is not None
                    else np.random.default_rng()
                )
                chosen_system_id = str(rng.choice(demo_ids))
        session_id, session, observation = sessions.create(
            system_id=chosen_system_id,
            seed=payload.seed,
            max_turns=payload.max_turns,
        )
        system = get_system(session.system_id)
        return InteractiveStartResponse(
            session_id=session_id,
            observation=observation,
            system=SystemDescriptor(
                system_id=system.system_id,
                state_variables=system.state_variables,
            ),
            max_turns=session.max_turns,
        )

    @router.post(
        "/sessions/{session_id}/llm-step", response_model=LlmStepResponse
    )
    def llm_step_session(
        session_id: str, payload: LlmStepRequest
    ) -> LlmStepResponse:
        session = sessions.get(session_id)
        # The per-session lock serialises concurrent steps on one episode.
        with session.lock:
            _ensure_budget(session)

            current_obs = session.env.current_observation()
            if current_obs is None:
                raise HTTPException(
                    status_code=500, detail="Session has no current observation."
                )

            policy = policy_factory(payload)
            t0 = time.perf_counter()
            raw_completion = policy(build_prompt(current_obs))
            latency_s = time.perf_counter() - t0

            action = parse_completion(raw_completion)
            observation = session.env.step(action)
            # Best-effort overlay data; [] on failure (env reward is truth).
            predicted = _safe_predict(session.env, action)

            return LlmStepResponse(
                observation=observation,
                predicted_trajectory=predicted,
                action=action,
                raw_completion=raw_completion,
                latency_s=latency_s,
                model=payload.model,
            )

    @router.delete("/sessions/{session_id}", status_code=204)
    def end_session(session_id: str) -> None:
        # Idempotent: deleting an unknown session is a no-op 204.
        sessions.delete(session_id)

    @router.get("/sessions/{session_id}", response_model=SessionSummary)
    def get_session(session_id: str) -> SessionSummary:
        session = sessions.get(session_id)
        return SessionSummary(
            session_id=session_id,
            system_id=session.system_id,
            turn=session.env.state.step_count,
            max_turns=session.max_turns,
            converged=session.env.state.converged,
            done=(
                session.env.state.converged
                or session.env.state.step_count >= session.max_turns
            ),
        )

    return router
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def _ensure_budget(session: _Session) -> None:
    """Raise HTTP 409 when the session has no turns left to spend."""
    remaining = session.max_turns - session.env.state.step_count
    if remaining <= 0:
        raise HTTPException(
            status_code=409,
            detail="Episode budget already exhausted; start a new session.",
        )
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
def _safe_predict(
    env: PhysiXEnvironment, action: PhysiXAction
) -> list[dict[str, float]]:
    """Forward-simulate the user's hypothesis for the UI overlay.

    Returns ``[]`` on parse / simulation failure — the env's reward is
    authoritative; this is best-effort visualisation only.
    """
    system: Optional[PhysicalSystem] = env.current_system
    trajectory: Optional[TrajectoryData] = env.current_trajectory
    if system is None or trajectory is None:
        # No active episode to predict against.
        return []

    # Unlike the env's scorer, also admit the system's own parameter names
    # so hypotheses written with ground-truth symbols still render.
    parameter_names = frozenset(action.params or {}) | frozenset(system.parameters)
    try:
        parsed = parse_equation(
            action.equation,
            state_variables=system.state_variables,
            parameter_names=parameter_names,
        )
    except Exception as exc:  # noqa: BLE001
        _log.debug("predict parse failed: %s", exc)
        return []

    # Action-supplied parameter values override the system defaults.
    merged = {**system.parameters, **(action.params or {})}
    try:
        predicted = simulate_hypothesis(
            parsed,
            state_variables=system.state_variables,
            parameters=merged,
            initial_conditions=trajectory.initial_conditions,
            timestamps=trajectory.timestamps,
        )
    except Exception as exc:  # noqa: BLE001
        _log.debug("predict simulate failed: %s", exc)
        return []

    # Round for compact JSON; abandon the whole overlay on any non-finite
    # sample so the UI never receives NaN/inf.
    samples: list[dict[str, float]] = []
    for i, t in enumerate(trajectory.timestamps):
        sample: dict[str, float] = {"t": round(float(t), 5)}
        for var in system.state_variables:
            value = predicted[var][i]
            if not np.isfinite(value):
                return []
            sample[var] = round(float(value), 5)
        samples.append(sample)
    return samples
|
physix/systems/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 2 |
+
from physix.systems.registry import (
|
| 3 |
+
SUPPORTED_SYSTEMS,
|
| 4 |
+
SYSTEM_REGISTRY,
|
| 5 |
+
get_system,
|
| 6 |
+
list_supported_systems,
|
| 7 |
+
list_systems,
|
| 8 |
+
list_systems_by_tier,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
__all__ = [
|
| 12 |
+
"PhysicalSystem",
|
| 13 |
+
"SystemTier",
|
| 14 |
+
"SYSTEM_REGISTRY",
|
| 15 |
+
"SUPPORTED_SYSTEMS",
|
| 16 |
+
"get_system",
|
| 17 |
+
"list_systems",
|
| 18 |
+
"list_systems_by_tier",
|
| 19 |
+
"list_supported_systems",
|
| 20 |
+
]
|
physix/systems/base.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Abstract base class and shared types for physical systems.
|
| 2 |
+
|
| 3 |
+
A physical system is responsible for three things and three things only:
|
| 4 |
+
|
| 5 |
+
1. Exposing **stable metadata** (id, tier, state-variable names, NL hint).
|
| 6 |
+
2. Generating a **noisy trajectory** for one episode given an RNG seed.
|
| 7 |
+
3. Reporting its **ground-truth equation** as a canonical SymPy expression
|
| 8 |
+
for logging and verification.
|
| 9 |
+
|
| 10 |
+
Reward computation, simulation of the *agent's* hypotheses, residual analysis,
|
| 11 |
+
and natural-language mismatch summarisation all live in :mod:`physix.verifier`.
|
| 12 |
+
Systems do not import anything from the verifier; the dependency runs in one
|
| 13 |
+
direction.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
|
| 18 |
+
from abc import ABC, ABCMeta, abstractmethod
|
| 19 |
+
from enum import Enum
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 23 |
+
from pydantic._internal._model_construction import ModelMetaclass
|
| 24 |
+
from scipy.integrate import odeint
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class SystemTier(str, Enum):
    """Curriculum difficulty tier for a physical system.

    Tier 3 is deliberately excluded from training so that evaluation on it
    supports a generalisation claim ("converges on systems it never
    trained on").
    """

    TIER_1 = "tier1"
    TIER_2 = "tier2"
    TIER_3 = "tier3"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class TrajectoryData(BaseModel):
    """One episode's numerical trajectory together with its initial conditions.

    Carrying ``initial_conditions`` lets the verifier re-simulate the agent's
    hypothesis from the *same* starting point, so residuals are directly
    comparable.
    """

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    timestamps: np.ndarray
    states: dict[str, np.ndarray]
    initial_conditions: dict[str, float]
    state_variables: tuple[str, ...]

    def to_observation_samples(self, decimals: int = 5) -> list[dict[str, float]]:
        """Serialise the trajectory as one JSON-friendly dict per timestep."""
        rows: list[dict[str, float]] = []
        for idx, ts in enumerate(self.timestamps):
            row: dict[str, float] = {"t": round(float(ts), decimals)}
            row.update(
                (name, round(float(self.states[name][idx]), decimals))
                for name in self.state_variables
            )
            rows.append(row)
        return rows

    def stats(self) -> dict[str, float]:
        """Aggregate timing and per-variable statistics for the stats panel."""
        ts = self.timestamps
        # dt is only meaningful with at least two samples.
        step = float(ts[1] - ts[0]) if len(ts) > 1 else 0.0
        summary: dict[str, float] = {
            "duration": float(ts[-1] - ts[0]),
            "n_timesteps": float(len(ts)),
            "dt": step,
        }
        for name in self.state_variables:
            series = self.states[name]
            summary[f"{name}_min"] = float(np.min(series))
            summary[f"{name}_max"] = float(np.max(series))
            summary[f"{name}_std"] = float(np.std(series))
        return summary
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class _AbstractModelMeta(ModelMetaclass, ABCMeta):
    """Metaclass union so :class:`PhysicalSystem` is both a pydantic model and
    a true ABC (i.e. instantiating the base or a subclass that fails to
    implement an abstract method raises ``TypeError``).

    NOTE(review): ``ModelMetaclass`` is imported from pydantic's private
    ``_internal`` package, which may move between pydantic minor releases —
    confirm against the pinned pydantic version.
    """
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class PhysicalSystem(BaseModel, ABC, metaclass=_AbstractModelMeta):
    """Abstract physical system.

    Concrete subclasses must:

    - Override :attr:`system_id`, :attr:`tier`, :attr:`state_variables`, and
      :attr:`hint_template`.
    - Implement :meth:`sample_parameters` to draw random episode parameters.
    - Implement :meth:`sample_initial_conditions` to draw random initial state.
    - Implement :meth:`rhs` returning the time derivatives.
    - Implement :meth:`ground_truth_equation` returning a canonical SymPy
      string of the system's equation of motion.

    The base class implements :meth:`simulate` once for all subclasses by
    delegating to ``scipy.integrate.odeint`` against :meth:`rhs`.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Stable metadata; subclasses override these defaults.
    system_id: str = ""
    tier: SystemTier = SystemTier.TIER_1
    state_variables: tuple[str, ...] = ()
    hint_template: str = ""

    # Episode shape: simulated time span and number of samples.
    duration: float = 10.0
    n_timesteps: int = 100
    #: Gaussian noise applied to each observed sample, expressed as a fraction
    #: of the per-variable *standard deviation* of the clean trajectory. Using
    #: std (rather than range) avoids the pathology where a system with a
    #: large total excursion (e.g. free fall) produces overwhelming noise.
    noise_std: float = 0.02

    # Filled in by simulate() after each episode (see cache note below).
    parameters: dict[str, float] = Field(default_factory=dict)
    initial_conditions: dict[str, float] = Field(default_factory=dict)

    # ------------------------------------------------------------------ API

    @abstractmethod
    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Draw a fresh set of physical parameters for one episode."""

    @abstractmethod
    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Draw fresh initial conditions for one episode."""

    @abstractmethod
    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """Time derivatives at time ``t``.

        ``state`` is laid out in the order of :attr:`state_variables`. The
        return value must be a 1-D array of the same length.
        """

    @abstractmethod
    def ground_truth_equation(self) -> str:
        """Canonical SymPy-grammar string of the equation of motion.

        Used for logging only. Never surfaced to the agent during training
        or inference.
        """

    def hint(self, parameters: dict[str, float]) -> str:
        """Render the natural-language hint for the agent.

        Default behaviour formats :attr:`hint_template` against
        ``parameters``. Subclasses may override for more flexibility.
        """
        try:
            return self.hint_template.format(**parameters)
        # Template placeholders missing from `parameters` fall back to the
        # raw template rather than failing the episode.
        except (KeyError, IndexError):
            return self.hint_template

    # -------------------------------------------------------------- behaviour

    def simulate(self, rng: np.random.Generator) -> TrajectoryData:
        """Generate one noisy trajectory for an episode.

        Side effect: caches the sampled ``parameters`` and
        ``initial_conditions`` on ``self`` for downstream access.
        """
        params = self.sample_parameters(rng)
        ic = self.sample_initial_conditions(rng)

        timestamps = np.linspace(0.0, self.duration, self.n_timesteps)
        initial_state = np.array([ic[var] for var in self.state_variables], dtype=float)

        # scipy.integrate.odeint expects f(state, t, *args); we wrap to put
        # `t` second and pass params via closure.
        def _rhs_wrapper(state: np.ndarray, t: float) -> np.ndarray:
            return self.rhs(t, state, params)

        clean = odeint(_rhs_wrapper, initial_state, timestamps, full_output=False)

        states: dict[str, np.ndarray] = {}
        for col, var in enumerate(self.state_variables):
            clean_col = clean[:, col]
            # Floor the noise scale so a constant trajectory (std ~ 0) still
            # receives a well-defined, tiny amount of noise.
            scale = max(float(np.std(clean_col)), 1e-6)
            noise = rng.normal(0.0, self.noise_std * scale, size=clean_col.shape)
            states[var] = clean_col + noise

        # Cache for downstream access (parameters surfaced to the env).
        self.parameters = params
        self.initial_conditions = ic

        return TrajectoryData(
            timestamps=timestamps,
            states=states,
            initial_conditions=ic,
            state_variables=self.state_variables,
        )
|
physix/systems/registry.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Callable
|
| 4 |
+
|
| 5 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 6 |
+
from physix.systems.tier1 import FreeFall, FreeFallWithDrag, SimplePendulum
|
| 7 |
+
from physix.systems.tier2 import DampedPendulum, DampedSpring, SpringMass
|
| 8 |
+
from physix.systems.tier3 import ChargedInBField, ProjectileWithDrag
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Factory signature: a zero-argument callable returning a fresh system.
SystemFactory = Callable[[], PhysicalSystem]


# Maps stable system ids to their classes; every lookup via get_system()
# constructs a brand-new instance so episodes never share cached state.
SYSTEM_REGISTRY: dict[str, SystemFactory] = {
    "free_fall": FreeFall,
    "free_fall_drag": FreeFallWithDrag,
    "simple_pendulum": SimplePendulum,
    "damped_pendulum": DampedPendulum,
    "spring_mass": SpringMass,
    "damped_spring": DampedSpring,
    "projectile_drag": ProjectileWithDrag,
    "charged_b_field": ChargedInBField,
}

# Subset of the registry surfaced by list_supported_systems().
# NOTE(review): presumably the demo/UI whitelist — confirm against the server.
SUPPORTED_SYSTEMS: tuple[str, ...] = (
    "free_fall",
    "damped_spring",
    "simple_pendulum",
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def get_system(system_id: str) -> PhysicalSystem:
    """Instantiate a fresh system for ``system_id``.

    Raises:
        KeyError: if ``system_id`` is not registered; the message lists the
            valid ids.
    """
    try:
        factory = SYSTEM_REGISTRY[system_id]
    except KeyError:
        valid = ", ".join(sorted(SYSTEM_REGISTRY))
        raise KeyError(f"Unknown system_id={system_id!r}. Valid: {valid}") from None
    return factory()
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def list_systems() -> list[str]:
    """All registered system ids, in registration order."""
    return [*SYSTEM_REGISTRY]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def list_systems_by_tier(tier: SystemTier) -> list[str]:
    """Ids of registered systems whose tier equals ``tier``.

    Instantiates each registered system to read its ``tier`` field.
    """
    matching: list[str] = []
    for system_id, factory in SYSTEM_REGISTRY.items():
        if factory().tier == tier:
            matching.append(system_id)
    return matching
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def list_supported_systems() -> list[str]:
    """Supported ids that are actually present in the registry, in
    SUPPORTED_SYSTEMS order."""
    return list(filter(SYSTEM_REGISTRY.__contains__, SUPPORTED_SYSTEMS))
|
physix/systems/tier1.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tier-1 physical systems: simple, single-variable, no damping."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class FreeFall(PhysicalSystem):
    """Constant-gravity free fall with no air resistance.

    Governing equation: ``d2y/dt2 = -g``.
    State layout: ``y`` (vertical position), ``vy`` (vertical velocity).
    """

    system_id: str = "free_fall"
    tier: SystemTier = SystemTier.TIER_1
    state_variables: tuple[str, ...] = ("y", "vy")
    duration: float = 3.0  # kept short so the object never reaches y = 0
    hint_template: str = (
        "Object dropped near Earth's surface in vacuum. "
        "Mass {mass:.1f} kg, released from rest at altitude {y0:.1f} m."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Fixed g; random mass (mass never enters the EOM — see rhs)."""
        mass = float(rng.uniform(0.5, 5.0))
        return {"g": 9.81, "mass": mass}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Release from rest at a random altitude."""
        altitude = float(rng.uniform(20.0, 60.0))
        return {"y": altitude, "vy": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """dy/dt = vy; dvy/dt = -g."""
        velocity = state[1]
        return np.array([velocity, -params["g"]], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2y/dt2 = -g"

    def hint(self, parameters: dict[str, float]) -> str:
        """Format the hint with this episode's mass and release altitude."""
        ic = self.initial_conditions or {"y": 30.0}  # fallback before any episode
        return self.hint_template.format(mass=parameters["mass"], y0=ic["y"])
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class FreeFallWithDrag(PhysicalSystem):
    """Free fall with quadratic air drag — the demo system.

    Governing equation: ``d2y/dt2 = -g + k * vy**2``. Since ``vy`` is
    negative throughout the descent, the squared term always opposes the
    fall.
    """

    system_id: str = "free_fall_drag"
    tier: SystemTier = SystemTier.TIER_1
    state_variables: tuple[str, ...] = ("y", "vy")
    duration: float = 6.0  # long enough for terminal-velocity onset to be visible
    hint_template: str = (
        "Object dropped from altitude {y0:.1f} m, mass {mass:.1f} kg, "
        "in air. Air resistance may be non-negligible."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Fixed g; random mass and drag coefficient."""
        mass = float(rng.uniform(1.0, 3.0))
        # Drag range tuned so terminal velocity is reached within ~5 s for
        # the altitudes sampled below.
        drag = float(rng.uniform(0.02, 0.10))
        return {"g": 9.81, "mass": mass, "k": drag}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Release from rest at a random altitude."""
        altitude = float(rng.uniform(40.0, 80.0))
        return {"y": altitude, "vy": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """dy/dt = vy; dvy/dt = -g + k*vy^2 (descent-only, so vy*vy suffices)."""
        velocity = state[1]
        accel = -params["g"] + params["k"] * velocity * velocity
        return np.array([velocity, accel], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2y/dt2 = -g + k * vy**2"

    def hint(self, parameters: dict[str, float]) -> str:
        """Format the hint with this episode's mass and release altitude."""
        ic = self.initial_conditions or {"y": 50.0}  # fallback before any episode
        return self.hint_template.format(mass=parameters["mass"], y0=ic["y"])
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class SimplePendulum(PhysicalSystem):
    """Frictionless idealised pendulum, valid at small or large amplitude.

    Governing equation: ``d2theta/dt2 = -(g / L) * sin(theta)``.
    """

    system_id: str = "simple_pendulum"
    tier: SystemTier = SystemTier.TIER_1
    state_variables: tuple[str, ...] = ("theta", "dtheta")
    hint_template: str = (
        "Simple pendulum of length {L:.2f} m swinging in vacuum. "
        "No friction, no air resistance."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Fixed g; random pendulum length."""
        length = float(rng.uniform(0.5, 2.0))
        return {"g": 9.81, "L": length}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Release from rest at a random angle in roughly 17-57 degrees."""
        angle = float(rng.uniform(0.3, 1.0))
        return {"theta": angle, "dtheta": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """dtheta/dt = omega; domega/dt = -(g/L) sin(theta)."""
        theta, omega = state
        angular_accel = -(params["g"] / params["L"]) * np.sin(theta)
        return np.array([omega, angular_accel], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2theta/dt2 = -(g / L) * sin(theta)"
|
physix/systems/tier2.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tier-2 physical systems: damped or with a second active force term."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class DampedPendulum(PhysicalSystem):
    """Pendulum whose swing decays under linear angular damping.

    Governing equation: ``d2theta/dt2 = -(g/L)*sin(theta) - b*dtheta``.
    """

    system_id: str = "damped_pendulum"
    tier: SystemTier = SystemTier.TIER_2
    state_variables: tuple[str, ...] = ("theta", "dtheta")
    hint_template: str = (
        "Pendulum of length {L:.2f} m. Oscillation amplitude visibly decreases "
        "over time, suggesting linear angular damping."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Fixed g; random length and damping coefficient."""
        length = float(rng.uniform(0.5, 2.0))
        damping = float(rng.uniform(0.05, 0.30))
        return {"g": 9.81, "L": length, "b": damping}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Release from rest at a random moderate angle."""
        angle = float(rng.uniform(0.3, 1.0))
        return {"theta": angle, "dtheta": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """Restoring torque plus a damping term proportional to omega."""
        theta, omega = state
        restoring = -(params["g"] / params["L"]) * np.sin(theta)
        angular_accel = restoring - params["b"] * omega
        return np.array([omega, angular_accel], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2theta/dt2 = -(g/L)*sin(theta) - b*dtheta"
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class SpringMass(PhysicalSystem):
    """Frictionless mass-on-a-spring (undamped harmonic oscillator).

    Governing equation: ``d2x/dt2 = -(k/m) * x``.
    """

    system_id: str = "spring_mass"
    tier: SystemTier = SystemTier.TIER_2
    state_variables: tuple[str, ...] = ("x", "vx")
    hint_template: str = (
        "Mass {m:.2f} kg attached to a spring of stiffness {k:.2f} N/m, "
        "frictionless surface."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Random spring stiffness and mass."""
        stiffness = float(rng.uniform(2.0, 20.0))
        mass = float(rng.uniform(0.5, 2.0))
        return {"k": stiffness, "m": mass}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Release from rest at a random displacement."""
        displacement = float(rng.uniform(0.5, 2.0))
        return {"x": displacement, "vx": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """dx/dt = vx; dvx/dt = -(k/m) x."""
        position, velocity = state
        accel = -(params["k"] / params["m"]) * position
        return np.array([velocity, accel], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2x/dt2 = -(k/m) * x"
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class DampedSpring(PhysicalSystem):
    """Harmonic oscillator with viscous damping.

    Governing equation: ``d2x/dt2 = -(k/m)*x - (c/m)*vx``.
    """

    system_id: str = "damped_spring"
    tier: SystemTier = SystemTier.TIER_2
    state_variables: tuple[str, ...] = ("x", "vx")
    hint_template: str = (
        "Mass {m:.2f} kg on a spring of stiffness {k:.2f} N/m with viscous "
        "damping coefficient {c:.2f}. Oscillation amplitude decays over time."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Random stiffness, mass, and damping coefficient."""
        stiffness = float(rng.uniform(2.0, 20.0))
        mass = float(rng.uniform(0.5, 2.0))
        damping = float(rng.uniform(0.1, 1.0))
        return {"k": stiffness, "m": mass, "c": damping}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Release from rest at a random displacement."""
        displacement = float(rng.uniform(0.5, 2.0))
        return {"x": displacement, "vx": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """Spring restoring force plus velocity-proportional damping."""
        position, velocity = state
        accel = -(params["k"] / params["m"]) * position - (params["c"] / params["m"]) * velocity
        return np.array([velocity, accel], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2x/dt2 = -(k/m)*x - (c/m)*vx"
|
physix/systems/tier3.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tier-3 physical systems: held out of training to support a generalisation
|
| 2 |
+
claim ("converges on systems it never trained on")."""
|
| 3 |
+
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ProjectileWithDrag(PhysicalSystem):
    """Planar projectile subject to quadratic air drag.

    Governing equations::

        d2x/dt2 = -k * |v| * vx
        d2y/dt2 = -g - k * |v| * vy

    with ``|v| = sqrt(vx**2 + vy**2)``.
    """

    system_id: str = "projectile_drag"
    tier: SystemTier = SystemTier.TIER_3
    state_variables: tuple[str, ...] = ("x", "y", "vx", "vy")
    duration: float = 5.0  # roughly one flight for the sampled launch speeds
    hint_template: str = (
        "Projectile launched at angle {angle_deg:.0f} degrees with initial "
        "speed {v0:.1f} m/s. Air drag is non-negligible."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Fixed g; small random drag coefficient."""
        return {"g": 9.81, "k": float(rng.uniform(0.005, 0.02))}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Launch from the origin at a random speed and elevation angle."""
        launch_speed = float(rng.uniform(15.0, 30.0))
        launch_angle = float(rng.uniform(np.deg2rad(30.0), np.deg2rad(70.0)))
        return {
            "x": 0.0,
            "y": 0.0,
            "vx": float(launch_speed * np.cos(launch_angle)),
            "vy": float(launch_speed * np.sin(launch_angle)),
        }

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """Drag magnitude scales with speed and opposes the velocity vector."""
        vx, vy = state[2], state[3]
        speed = float(np.sqrt(vx * vx + vy * vy))
        return np.array(
            [
                vx,
                vy,
                -params["k"] * speed * vx,
                -params["g"] - params["k"] * speed * vy,
            ],
            dtype=float,
        )

    def ground_truth_equation(self) -> str:
        # Both equations joined in one string; the verifier splits on ';'
        # or newlines.
        return (
            "d2x/dt2 = -k*sqrt(vx**2 + vy**2)*vx; "
            "d2y/dt2 = -g - k*sqrt(vx**2 + vy**2)*vy"
        )

    def hint(self, parameters: dict[str, float]) -> str:
        """Recover launch speed/angle from the cached initial conditions."""
        ic = self.initial_conditions
        if not ic:
            return self.hint_template  # no episode simulated yet
        launch_speed = float(np.sqrt(ic["vx"] ** 2 + ic["vy"] ** 2))
        elevation_deg = float(np.rad2deg(np.arctan2(ic["vy"], ic["vx"])))
        return self.hint_template.format(angle_deg=elevation_deg, v0=launch_speed)
+
|
| 82 |
+
class ChargedInBField(PhysicalSystem):
    """Charged particle circling in a uniform magnetic field along z.

    Governing equations (B = B_z * ẑ, velocity confined to the xy-plane)::

        d2x/dt2 = (q*B/m) * vy
        d2y/dt2 = -(q*B/m) * vx
    """

    system_id: str = "charged_b_field"
    tier: SystemTier = SystemTier.TIER_3
    state_variables: tuple[str, ...] = ("x", "y", "vx", "vy")
    hint_template: str = (
        "Charged particle in a uniform magnetic field. Charge-to-mass ratio "
        "q/m = {qm:.2f}, field strength {B:.2f} T."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Unit charge of random sign; random mass and field strength."""
        sign = float(rng.choice([-1.0, 1.0]))
        mass = float(rng.uniform(0.5, 2.0))
        field = float(rng.uniform(0.5, 2.0))
        return {"q": sign, "m": mass, "B": field}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Start at the origin with a random in-plane velocity."""
        return {
            "x": 0.0,
            "y": 0.0,
            "vx": float(rng.uniform(0.5, 2.0)),
            "vy": float(rng.uniform(-2.0, 2.0)),
        }

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """Cyclotron motion; the sign of omega encodes the rotation sense."""
        vx, vy = state[2], state[3]
        omega = params["q"] * params["B"] / params["m"]
        return np.array([vx, vy, omega * vy, -omega * vx], dtype=float)

    def ground_truth_equation(self) -> str:
        return (
            "d2x/dt2 = (q*B/m)*vy; "
            "d2y/dt2 = -(q*B/m)*vx"
        )

    def hint(self, parameters: dict[str, float]) -> str:
        """Surface only q/m and B — not q and m separately."""
        charge_to_mass = parameters["q"] / parameters["m"]
        return self.hint_template.format(qm=charge_to_mass, B=parameters["B"])
|
physix/training/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Training utilities for PhysiX-Live.
|
| 2 |
+
|
| 3 |
+
The ``loop`` submodule pulls in heavy ML deps (torch, unsloth, trl) and is
|
| 4 |
+
imported lazily on demand. The lighter prompt + scorer surface is exposed
|
| 5 |
+
here so callers without CUDA can still build datasets and score completions.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from physix.training.prompt import (
|
| 9 |
+
build_prompt,
|
| 10 |
+
parse_completion,
|
| 11 |
+
render_observation_for_prompt,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
"build_prompt",
|
| 16 |
+
"parse_completion",
|
| 17 |
+
"render_observation_for_prompt",
|
| 18 |
+
]
|
physix/training/dataset.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Build the prompt dataset for GRPO training.
|
| 2 |
+
|
| 3 |
+
Responsibility: enumerate the curriculum of physical systems, simulate each
|
| 4 |
+
one a configurable number of times, and emit a :class:`datasets.Dataset`
|
| 5 |
+
whose rows contain everything the training loop needs:
|
| 6 |
+
|
| 7 |
+
- ``prompt``: the chat-format string passed to the model
|
| 8 |
+
- ``system_id``, ``state_variables``, ``parameters``, ``initial_conditions``,
|
| 9 |
+
``timestamps``, ``observed``: the system context the scorer needs
|
| 10 |
+
- ``previous_total``: 0.0 at turn-0 (we train on first-turn prompts; the
|
| 11 |
+
iterative refinement skill emerges from the model's general ability to
|
| 12 |
+
read history at inference time)
|
| 13 |
+
|
| 14 |
+
Multi-turn prompts can be added later by extending this builder; the
|
| 15 |
+
hackathon scope deliberately keeps it to turn-0 prompts.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
from __future__ import annotations
|
| 19 |
+
|
| 20 |
+
from collections.abc import Iterable
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
from datasets import Dataset
|
| 24 |
+
from pydantic import BaseModel, ConfigDict
|
| 25 |
+
|
| 26 |
+
from physix.models import DEFAULT_MAX_TURNS, PhysiXObservation
|
| 27 |
+
from physix.systems import (
|
| 28 |
+
SYSTEM_REGISTRY,
|
| 29 |
+
SUPPORTED_SYSTEMS,
|
| 30 |
+
SystemTier,
|
| 31 |
+
get_system,
|
| 32 |
+
list_systems_by_tier,
|
| 33 |
+
)
|
| 34 |
+
from physix.systems.base import PhysicalSystem, TrajectoryData
|
| 35 |
+
from physix.training.prompt import build_prompt
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class DatasetSpec(BaseModel):
    """Configuration for :func:`build_training_dataset`.

    Frozen (immutable) so one spec instance can be shared safely between
    builds without risk of mutation.
    """

    model_config = ConfigDict(frozen=True)

    # Which registered systems to simulate; defaults to every supported one.
    system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS
    # How many independently simulated instances (dataset rows) per system.
    instances_per_system: int = 32
    # Seed for the single numpy Generator that drives all simulations.
    seed: int = 0
|
| 47 |
+
|
| 48 |
+
class EvalDatasetSpec(BaseModel):
    """Held-out evaluation set, drawn separately so seeds do not overlap."""

    model_config = ConfigDict(frozen=True)

    # Tiers that also appear in training (in-distribution eval rows).
    train_tiers: tuple[SystemTier, ...] = (SystemTier.TIER_1, SystemTier.TIER_2)
    # Tiers excluded from training; rows from these are flagged is_held_out.
    held_out_tiers: tuple[SystemTier, ...] = (SystemTier.TIER_3,)
    instances_per_system: int = 8
    seed: int = 1_000_000  # large to avoid overlap with training seeds
|
| 58 |
+
|
| 59 |
+
def build_training_dataset(spec: DatasetSpec | None = None) -> Dataset:
    """Build the GRPO training dataset.

    Each row contains one (system, instance) prompt at turn 0.
    """
    resolved = spec or DatasetSpec()
    _validate_system_ids(resolved.system_ids)
    generator = np.random.default_rng(resolved.seed)

    # Outer loop over systems, inner over instances — same row order as
    # iterating system-by-system.
    rows = [
        _build_row(sid, generator)
        for sid in resolved.system_ids
        for _ in range(resolved.instances_per_system)
    ]
    return Dataset.from_list(rows)
|
| 74 |
+
|
| 75 |
+
def _validate_system_ids(system_ids: tuple[str, ...]) -> None:
    """Fail fast if the spec references an unregistered system."""
    if len(system_ids) == 0:
        raise ValueError(
            "DatasetSpec.system_ids must be non-empty. "
            f"Available: {sorted(SYSTEM_REGISTRY)!r}."
        )
    unknown = [candidate for candidate in system_ids if candidate not in SYSTEM_REGISTRY]
    if not unknown:
        return
    raise ValueError(
        f"Unknown system_ids in DatasetSpec: {unknown!r}. "
        f"Registered: {sorted(SYSTEM_REGISTRY)!r}."
    )
|
| 89 |
+
|
| 90 |
+
def build_eval_dataset(spec: EvalDatasetSpec | None = None) -> Dataset:
    """Build a held-out evaluation dataset spanning held-out tiers too.

    Rows from ``spec.held_out_tiers`` carry ``is_held_out=True`` so
    downstream evaluation can split in-distribution vs. generalization
    performance.
    """
    spec = spec or EvalDatasetSpec()
    rng = np.random.default_rng(spec.seed)

    # Hoisted out of the loop: the original recomputed the held-out list
    # (and did an O(n) membership scan on it) for every single row. A
    # frozenset built once makes the per-row check O(1).
    held_out = frozenset(_list_systems(spec.held_out_tiers))

    rows: list[dict[str, object]] = []
    for system_id in _list_systems(spec.train_tiers + spec.held_out_tiers):
        for _ in range(spec.instances_per_system):
            row = _build_row(system_id, rng)
            row["is_held_out"] = system_id in held_out
            rows.append(row)
    return Dataset.from_list(rows)
|
| 103 |
+
|
| 104 |
+
def _list_systems(tiers: Iterable[SystemTier]) -> list[str]:
    """Flatten the system ids of *tiers* into one list, preserving tier order."""
    return [
        system_id
        for tier in tiers
        for system_id in list_systems_by_tier(tier)
    ]
|
| 110 |
+
|
| 111 |
+
def _build_row(system_id: str, rng: np.random.Generator) -> dict[str, object]:
    """Generate one (prompt + system context) row for a given system."""
    system = get_system(system_id)
    trajectory = system.simulate(rng)
    observation = _build_observation(system, trajectory)

    observed_series = {
        name: trajectory.states[name].tolist() for name in system.state_variables
    }
    return {
        # chat list of {"role", "content"} dicts
        "prompt": build_prompt(observation),
        "system_id": system_id,
        "state_variables": list(system.state_variables),
        "parameters": dict(system.parameters),
        "initial_conditions": dict(system.initial_conditions),
        "timestamps": trajectory.timestamps.tolist(),
        "observed": observed_series,
        "previous_r_match": 0.0,
    }
|
| 130 |
+
|
| 131 |
+
def _build_observation(
    system: PhysicalSystem,
    trajectory: TrajectoryData,
) -> PhysiXObservation:
    """Construct a turn-0 :class:`PhysiXObservation` for a fresh system.

    We bypass :class:`PhysiXEnvironment` here because its lifecycle (history,
    convergence flag, episode budget) is irrelevant for dataset construction.
    """
    return PhysiXObservation(
        done=False,
        reward=None,  # nothing has been scored yet at turn 0
        trajectory=trajectory.to_observation_samples(),
        state_variables=list(system.state_variables),
        hint=system.hint(system.parameters),
        history=[],  # fresh episode: no prior guesses
        mismatch_summary="",
        turn=0,
        turn_remaining=DEFAULT_MAX_TURNS,
        system_id=system.system_id,
        stats=trajectory.stats(),
        reward_breakdown={},
    )
|
physix/training/loop.py
ADDED
|
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""GRPO training loop using Unsloth + TRL + W&B.
|
| 2 |
+
|
| 3 |
+
Requires the ``[train]`` optional dependency group. Importing this module on
|
| 4 |
+
a machine without the heavy ML deps installed will fail at module load,
|
| 5 |
+
which is the documented contract — local development tools (env server,
|
| 6 |
+
verifier, demo UI) live in lighter modules and remain usable.
|
| 7 |
+
|
| 8 |
+
Run via::
|
| 9 |
+
|
| 10 |
+
python -m physix.training.loop \
|
| 11 |
+
--model Qwen/Qwen2.5-1.5B-Instruct \
|
| 12 |
+
--output-dir runs/physix-1.5b-rl \
|
| 13 |
+
--num-steps 300
|
| 14 |
+
|
| 15 |
+
Environment variables:
|
| 16 |
+
|
| 17 |
+
- ``WANDB_PROJECT`` (default ``physix-live``)
|
| 18 |
+
- ``HUGGINGFACE_HUB_TOKEN`` if pushing the adapter to the Hub
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
from __future__ import annotations
|
| 22 |
+
|
| 23 |
+
import argparse
|
| 24 |
+
import logging
|
| 25 |
+
import os
|
| 26 |
+
from pathlib import Path
|
| 27 |
+
from typing import Literal, Optional
|
| 28 |
+
|
| 29 |
+
import torch
|
| 30 |
+
from datasets import Dataset
|
| 31 |
+
from pydantic import BaseModel, ConfigDict
|
| 32 |
+
from transformers import AutoTokenizer, TrainerCallback, TrainerControl, TrainerState
|
| 33 |
+
from transformers import TrainingArguments as HFTrainingArguments
|
| 34 |
+
|
| 35 |
+
from physix.systems import SUPPORTED_SYSTEMS
|
| 36 |
+
from physix.training.dataset import (
|
| 37 |
+
DatasetSpec,
|
| 38 |
+
build_training_dataset,
|
| 39 |
+
)
|
| 40 |
+
from physix.training.reward_fns import make_reward_funcs
|
| 41 |
+
from physix.training.scorer import Scorer
|
| 42 |
+
|
| 43 |
+
# IMPORTANT: Unsloth's GRPO patches must be applied *before* importing
|
| 44 |
+
# ``GRPOTrainer`` so its kernels are swapped in. Without this, the trainer
|
| 45 |
+
# falls back to the stock TRL path and Unsloth's optimisations are bypassed
|
| 46 |
+
# (and on recent versions the import will hard-fail). Keep this block
|
| 47 |
+
# directly above the ``trl`` import — order matters.
|
| 48 |
+
#
|
| 49 |
+
# Version note: this requires ``trl<=0.24.0``. Newer TRL versions ship
|
| 50 |
+
# ``trl.experimental.openenv`` which Unsloth's ``patch_trl_openenv``
|
| 51 |
+
# hook tries to ``inspect.getsource()`` on; that fails with ``OSError:
|
| 52 |
+
# could not get source code`` and crashes ``PatchFastRL``. ``trl==0.24.0``
|
| 53 |
+
# is the pinned upper bound declared in unsloth's pyproject.toml.
|
| 54 |
+
from unsloth import FastLanguageModel, PatchFastRL # noqa: E402
|
| 55 |
+
|
| 56 |
+
PatchFastRL("GRPO", FastLanguageModel)
|
| 57 |
+
|
| 58 |
+
from trl import GRPOConfig, GRPOTrainer # noqa: E402 (must come after PatchFastRL)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
_log = logging.getLogger(__name__)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
Ablation = Literal["no_progress", "no_simplicity", "no_format"]
|
| 65 |
+
SaveMethod = Literal["lora", "merged_16bit", "merged_4bit"]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class TrainingConfig(BaseModel):
    """All hyperparameters in one place; the CLI populates this."""

    model_config = ConfigDict(frozen=True)

    #: Base model to fine-tune (HF Hub id).
    model_name: str = "Qwen/Qwen2.5-1.5B-Instruct"
    #: Optional path to a LoRA adapter produced by the SFT warm-start step.
    #: When set, the base model is loaded and the adapter weights are applied
    #: before GRPO begins. Without this the cold base model rarely produces
    #: any reward signal in early steps.
    sft_checkpoint: Optional[str] = None
    output_dir: str = "runs/physix-1.5b-rl"
    #: Prompt + completion token budget per sample.
    max_seq_length: int = 2048
    #: LoRA rank and scaling factor.
    lora_r: int = 16
    lora_alpha: int = 32
    learning_rate: float = 5.0e-6
    #: Sampling temperature for GRPO rollouts.
    temperature: float = 0.9
    #: Max tokens generated per rollout completion.
    max_completion_length: int = 256
    #: KL-penalty coefficient in the GRPO objective.
    beta: float = 0.04
    #: Completions sampled per prompt (the GRPO "group" size).
    num_generations: int = 4
    per_device_train_batch_size: int = 1
    gradient_accumulation_steps: int = 8
    #: Optimizer steps (passed to GRPOConfig.max_steps).
    num_steps: int = 300
    seed: int = 0
    #: Forwarded to DatasetSpec.instances_per_system.
    instances_per_system: int = 32
    #: Which reward component (if any) to drop for an ablation run.
    ablation: Optional[Ablation] = None
    wandb_project: str = "physix-live"
    wandb_run_name: Optional[str] = None
    push_to_hub: bool = False
    hub_repo_id: Optional[str] = None
    #: HF repo to push LoRA checkpoints to every save_steps during GRPO.
    #: Separate from hub_repo_id (which receives the final merged model).
    #: Set this to enable mid-run checkpoint persistence and W&B artifact logging.
    hub_checkpoint_repo_id: Optional[str] = None
    #: Path to a Trainer checkpoint dir to resume GRPO from (e.g. from a
    #: previous run killed mid-training). Set automatically by train.sh.
    resume_from_checkpoint: Optional[str] = None
    #: How to persist the final adapter. ``"lora"`` saves only the adapter
    #: weights (small, requires the base model at load time). ``"merged_16bit"``
    #: merges the adapter into the base and saves a deployable bf16/fp16
    #: checkpoint (large, but loadable as a normal HF model — what you want
    #: for Hub pushes and Ollama exports).
    save_method: SaveMethod = "merged_16bit"
|
| 112 |
+
|
| 113 |
+
def train(config: TrainingConfig) -> None:
    """Run a full GRPO training loop with the given configuration.

    Orchestration order: logging → W&B run → model load (optionally
    SFT-warmed) → prompt dataset → reward functions → GRPO config →
    training → reward summary → artifact save.
    """
    _configure_logging()

    # Local import: wandb is only required when actually training.
    import wandb

    run_name = config.wandb_run_name or f"physix-grpo-{config.num_steps}steps"
    wandb.init(
        project=config.wandb_project,
        name=run_name,
        config=config.model_dump(),
        tags=["grpo", "physix", config.model_name.split("/")[-1]],
        resume="allow",  # lets a restarted job reattach to the same run
    )

    _log.info("Loading model %s with Unsloth (4-bit, LoRA-%d)", config.model_name, config.lora_r)
    model, tokenizer = _load_model_and_tokenizer(config)
    train_dataset = _build_and_format_dataset(config, tokenizer)

    reward_funcs = _select_reward_funcs(config.ablation)

    grpo_config = _build_grpo_config(config)

    callbacks = []
    if config.hub_checkpoint_repo_id:
        callbacks.append(_WandbCheckpointCallback(config.hub_checkpoint_repo_id))
        _log.info(
            "Checkpoint hub push enabled → %s (every %d steps)",
            config.hub_checkpoint_repo_id,
            grpo_config.save_steps,
        )

    trainer = GRPOTrainer(
        model=model,
        processing_class=tokenizer,
        args=grpo_config,
        train_dataset=train_dataset,
        reward_funcs=reward_funcs,
        # Normalize an empty callback list to None.
        callbacks=callbacks or None,
    )

    if config.resume_from_checkpoint:
        _log.info("Resuming from checkpoint: %s", config.resume_from_checkpoint)

    _log.info("Starting GRPO training for %d steps", config.num_steps)
    trainer.train(resume_from_checkpoint=config.resume_from_checkpoint)

    # Summarise the reward signal; raises if no reward was ever logged.
    _log_reward_summary(trainer)

    _log.info("Saving adapter (%s) to %s", config.save_method, config.output_dir)
    _save_artifacts(model, tokenizer, config)
    wandb.finish()
|
| 166 |
+
|
| 167 |
+
def _log_reward_summary(trainer: "GRPOTrainer") -> None:
    """Log a first-vs-last comparison of every reward metric the run produced.

    GRPO's displayed ``train/loss`` is only the KL term and stays near zero,
    so this summary highlights the metrics that actually matter
    (``reward`` / ``rewards/*``). Raises :class:`RuntimeError` when no
    reward metric was ever logged, since that means every rollout failed.
    """
    entries = [
        step_log
        for step_log in (getattr(trainer.state, "log_history", []) or [])
        if any(key.startswith("rewards/") or key == "reward" for key in step_log)
    ]
    if not entries:
        _log.error(
            "No reward metrics logged during training. This usually means "
            "every rollout failed to parse. Check `train/reward` in W&B and "
            "the most recent completion samples."
        )
        raise RuntimeError(
            "GRPO produced no reward metrics — training silently failed."
        )

    first, last = entries[0], entries[-1]
    _log.info("=" * 60)
    _log.info("GRPO reward summary (first → last logged step):")
    for key in sorted(last):
        if not (key.startswith("rewards/") or key == "reward"):
            continue
        start_value = first.get(key)
        end_value = last.get(key)
        if isinstance(start_value, (int, float)) and isinstance(end_value, (int, float)):
            _log.info(" %-40s %.4f → %.4f (Δ=%+.4f)", key, start_value, end_value, end_value - start_value)
    _log.info("-" * 60)
    _log.info("NOTE: train/loss near zero is EXPECTED for GRPO — it is only")
    _log.info("the KL-term contribution (beta=%.3f). The model learns via the",
              trainer.args.beta)
    _log.info("advantage-weighted policy gradient, which doesn't appear in")
    _log.info("the displayed loss scalar. Trust `train/reward` and `rewards/*`.")
    _log.info("=" * 60)
|
| 211 |
+
|
| 212 |
+
def _load_model_and_tokenizer(
    config: TrainingConfig,
) -> tuple[FastLanguageModel, AutoTokenizer]:
    """Load Qwen via Unsloth in 4-bit and attach a LoRA adapter.

    If ``config.sft_checkpoint`` is set, the SFT adapter weights are merged
    on top of the base model before GRPO starts. This gives GRPO a warm base
    policy that already knows the JSON format and equation grammar, so early
    rollouts produce meaningful reward signal instead of all scoring zero.
    """
    if config.sft_checkpoint:
        _log.info(
            "Loading SFT-warmed model from %s (GRPO will refine from here)",
            config.sft_checkpoint,
        )
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=config.sft_checkpoint,
            max_seq_length=config.max_seq_length,
            load_in_4bit=True,
            # dtype=None — presumably lets Unsloth pick the compute dtype;
            # TODO confirm against the Unsloth docs.
            dtype=None,
        )
    else:
        _log.warning(
            "No --sft-checkpoint supplied. Starting GRPO from cold base model. "
            "Early reward signal will be near-zero; consider running sft.py first."
        )
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=config.model_name,
            max_seq_length=config.max_seq_length,
            load_in_4bit=True,
            dtype=None,
        )
        # Cold start: attach a fresh LoRA adapter to the bare base model.
        # NOTE(review): placed in the else-branch on the assumption that an
        # SFT checkpoint already carries its adapter — confirm this matches
        # the original layout.
        model = FastLanguageModel.get_peft_model(
            model,
            r=config.lora_r,
            lora_alpha=config.lora_alpha,
            target_modules=[
                "q_proj",
                "k_proj",
                "v_proj",
                "o_proj",
                "gate_proj",
                "up_proj",
                "down_proj",
            ],
            bias="none",
            use_gradient_checkpointing="unsloth",
            random_state=config.seed,
        )
    return model, tokenizer
|
| 263 |
+
|
| 264 |
+
def _build_and_format_dataset(
    config: TrainingConfig,
    tokenizer: AutoTokenizer,
) -> Dataset:
    """Build the turn-0 prompt dataset and render each chat prompt to a string."""
    dataset = build_training_dataset(
        DatasetSpec(
            system_ids=SUPPORTED_SYSTEMS,
            instances_per_system=config.instances_per_system,
            seed=config.seed,
        )
    )
    _log.info(
        "Built training dataset: %d rows across %d systems (%s)",
        len(dataset),
        len(SUPPORTED_SYSTEMS),
        ", ".join(SUPPORTED_SYSTEMS),
    )

    def _render(row: dict[str, object]) -> dict[str, object]:
        # Turn the chat-message list into the flat string GRPOTrainer expects.
        rendered = tokenizer.apply_chat_template(
            row["prompt"],
            tokenize=False,
            add_generation_prompt=True,
        )
        return {"prompt": rendered}

    return dataset.map(_render)
|
| 291 |
+
|
| 292 |
+
def _select_reward_funcs(ablation: Optional[Ablation]) -> list[object]:
    """Return the list of reward functions, dropping one for ablation runs."""
    funcs = make_reward_funcs(Scorer())
    # Map each ablation flag to the component it removes; None keeps all four.
    dropped_component = {
        None: None,
        "no_progress": "progress",
        "no_simplicity": "simplicity",
        "no_format": "format",
    }
    if ablation not in dropped_component:
        raise ValueError(
            f"Unknown ablation {ablation!r}. Choose from "
            "no_progress | no_simplicity | no_format | None."
        )
    excluded = dropped_component[ablation]
    return [
        funcs[name]
        for name in ("match", "progress", "simplicity", "format")
        if name != excluded
    ]
|
| 309 |
+
|
| 310 |
+
class _WandbCheckpointCallback(TrainerCallback):
    """Logs a lightweight W&B artifact reference after each Trainer checkpoint.

    The artifact does not re-upload model weights — it simply records the
    HuggingFace Hub URL of the checkpoint so the W&B run links back to it.
    """

    def __init__(self, hub_checkpoint_repo_id: str) -> None:
        """Remember the Hub repo the trainer pushes checkpoints to."""
        self._repo = hub_checkpoint_repo_id

    def on_save(
        self,
        args: HFTrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ) -> None:
        # The W&B artifact stores ONLY a URL reference back to the HF Hub
        # checkpoint — no model bytes are uploaded to wandb. This is purely
        # a convenience link in the W&B run page.
        #
        # Failure of this callback must NEVER crash training. The trainer's
        # own PushToHubCallback runs the actual git push asynchronously, so
        # at the instant `on_save` fires the Hub URL almost always 404s
        # (push hasn't finished yet). We try once, swallow any error, and
        # carry on. Loss of a wandb-side hyperlink is not a training bug.
        try:
            import wandb

            if wandb.run is None:
                # No active run (e.g. wandb disabled) — nothing to link.
                return
            step = state.global_step
            artifact = wandb.Artifact(
                name="grpo-checkpoint",
                type="model",
                metadata={"step": step, "hub_repo": self._repo},
            )
            artifact.add_reference(
                f"https://huggingface.co/{self._repo}/tree/main/checkpoint-{step}"
            )
            wandb.log_artifact(artifact, aliases=[f"step-{step}", "latest"])
            _log.info(
                "W&B artifact link logged for checkpoint-%d → %s", step, self._repo
            )
        except Exception as exc:  # noqa: BLE001 — see comment above
            _log.warning(
                "W&B checkpoint-link callback skipped at step %d: %s. "
                "Training continues; this only affects the wandb hyperlink, "
                "the actual checkpoint is still pushed to the HF Hub by "
                "the trainer's PushToHubCallback.",
                state.global_step,
                exc,
            )
|
| 364 |
+
|
| 365 |
+
def _build_grpo_config(config: TrainingConfig) -> GRPOConfig:
    """Translate :class:`TrainingConfig` into a TRL :class:`GRPOConfig`.

    Validates that the effective batch size is divisible by
    ``num_generations`` (required by the ValueError below) and wires up
    optional mid-run checkpoint pushes to the HF Hub.
    """
    # NOTE on "train/loss → 0" — this is expected GRPO behaviour, not a bug.
    # The scalar TRL logs as `train/loss` is *only* the KL-divergence term
    # weighted by beta; the advantage-weighted policy-gradient term that
    # actually drives learning contributes gradients but is not in the
    # displayed loss. At step 0, policy == reference → KL = 0 → loss = 0.
    # As the policy drifts, loss rises slightly (with beta=0.04 typically
    # to ~0.001–0.05). The signal you care about is `train/rewards/*` and
    # `train/reward`, not `train/loss`. See:
    # https://github.com/huggingface/trl/issues/2703
    # https://github.com/huggingface/open-r1/issues/239
    effective_batch = (
        config.per_device_train_batch_size * config.gradient_accumulation_steps
    )
    if effective_batch % config.num_generations != 0:
        raise ValueError(
            f"effective_batch_size ({effective_batch}) must be divisible by "
            f"num_generations ({config.num_generations}). Adjust "
            "per_device_train_batch_size, gradient_accumulation_steps, or "
            "num_generations."
        )
    hub_kwargs: dict = {}
    if config.hub_checkpoint_repo_id:
        hub_kwargs = dict(
            push_to_hub=True,
            hub_model_id=config.hub_checkpoint_repo_id,
            hub_strategy="checkpoint",
            hub_token=os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN"),
        )

    return GRPOConfig(
        output_dir=config.output_dir,
        learning_rate=config.learning_rate,
        per_device_train_batch_size=config.per_device_train_batch_size,
        gradient_accumulation_steps=config.gradient_accumulation_steps,
        num_train_epochs=1,
        max_steps=config.num_steps,
        num_generations=config.num_generations,
        max_completion_length=config.max_completion_length,
        # Remaining token budget after reserving the completion length.
        max_prompt_length=config.max_seq_length - config.max_completion_length,
        temperature=config.temperature,
        beta=config.beta,
        logging_steps=1,
        save_strategy="steps",
        # At least every 50 steps, but no more than ~6 checkpoints per run.
        save_steps=max(50, config.num_steps // 6),
        report_to=["wandb"],
        run_name=config.wandb_run_name,
        seed=config.seed,
        # Prefer bf16 when the GPU supports it; otherwise fall back to fp16.
        bf16=torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
        fp16=not torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
        **hub_kwargs,
    )
|
| 418 |
+
|
| 419 |
+
def _save_artifacts(
    model: FastLanguageModel,
    tokenizer: AutoTokenizer,
    config: TrainingConfig,
) -> None:
    """Persist the trained adapter via Unsloth's save path.

    ``save_pretrained_merged`` dispatches on ``save_method``:

    - ``"lora"``: writes only the adapter weights (small; requires the base
      model at load time).
    - ``"merged_16bit"``: merges LoRA into base and writes a standard HF
      checkpoint in bf16/fp16 (large; loadable without Unsloth, exportable to
      GGUF for Ollama).
    - ``"merged_4bit"``: same merge but quantised back to 4-bit.

    Hub pushes use the same ``save_method`` so the on-disk artifact and the
    Hub artifact are byte-identical.
    """
    out_path = Path(config.output_dir)
    out_path.mkdir(parents=True, exist_ok=True)

    # One subdirectory per save method, e.g. runs/.../merged_16bit.
    save_dir = out_path / config.save_method
    model.save_pretrained_merged(
        save_directory=str(save_dir),
        tokenizer=tokenizer,
        save_method=config.save_method,
    )

    # Final-model push is independent of the mid-run checkpoint pushes.
    if config.push_to_hub and config.hub_repo_id:
        _log.info("Pushing %s artifact to Hugging Face Hub: %s", config.save_method, config.hub_repo_id)
        model.push_to_hub_merged(
            config.hub_repo_id,
            tokenizer,
            save_method=config.save_method,
            token=os.environ.get("HUGGINGFACE_HUB_TOKEN"),
        )
|
| 457 |
+
|
| 458 |
+
def _configure_logging() -> None:
|
| 459 |
+
logging.basicConfig(
|
| 460 |
+
level=os.environ.get("PHYSIX_LOG_LEVEL", "INFO"),
|
| 461 |
+
format="[%(asctime)s] %(levelname)s %(name)s | %(message)s",
|
| 462 |
+
)
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
def _parse_args() -> TrainingConfig:
    """Parse CLI flags into a :class:`TrainingConfig`.

    Hyperparameters without a dedicated flag (e.g. ``temperature``, ``beta``,
    ``max_seq_length``, ``lora_alpha``) fall back to the TrainingConfig
    defaults.
    """
    parser = argparse.ArgumentParser(description="Train PhysiX-Live with GRPO.")
    parser.add_argument("--model", default="Qwen/Qwen2.5-1.5B-Instruct")
    parser.add_argument("--output-dir", default="runs/physix-1.5b-rl")
    parser.add_argument("--num-steps", type=int, default=300)
    parser.add_argument("--learning-rate", type=float, default=5.0e-6)
    parser.add_argument("--num-generations", type=int, default=4)
    parser.add_argument("--max-completion-length", type=int, default=256,
                        help="Max tokens per rollout completion. Shorter = faster generation.")
    parser.add_argument("--lora-r", type=int, default=16)
    parser.add_argument("--instances-per-system", type=int, default=32)
    parser.add_argument(
        "--ablation",
        choices=("no_progress", "no_simplicity", "no_format"),
        default=None,
    )
    parser.add_argument(
        "--save-method",
        choices=("lora", "merged_16bit", "merged_4bit"),
        default="merged_16bit",
        help="How to persist the final adapter (merged_16bit is deployable).",
    )
    parser.add_argument("--sft-checkpoint", default=None,
                        help="Path to a LoRA adapter from sft.py to warm-start from.")
    parser.add_argument("--wandb-project", default="physix-live")
    parser.add_argument("--wandb-run-name", default=None)
    parser.add_argument("--push-to-hub", action="store_true")
    parser.add_argument("--hub-repo-id", default=None)
    parser.add_argument(
        "--hub-checkpoint-repo-id",
        default=None,
        help="HF repo to push LoRA checkpoints to every save_steps (e.g. user/physix-ckpt).",
    )
    parser.add_argument(
        "--resume-from-checkpoint",
        default=None,
        help="Path to a Trainer checkpoint directory to resume GRPO from.",
    )
    parser.add_argument("--seed", type=int, default=0)

    args = parser.parse_args()

    return TrainingConfig(
        model_name=args.model,
        sft_checkpoint=args.sft_checkpoint,
        output_dir=args.output_dir,
        num_steps=args.num_steps,
        learning_rate=args.learning_rate,
        num_generations=args.num_generations,
        max_completion_length=args.max_completion_length,
        lora_r=args.lora_r,
        instances_per_system=args.instances_per_system,
        ablation=args.ablation,
        save_method=args.save_method,
        wandb_project=args.wandb_project,
        wandb_run_name=args.wandb_run_name,
        push_to_hub=args.push_to_hub,
        hub_repo_id=args.hub_repo_id,
        hub_checkpoint_repo_id=args.hub_checkpoint_repo_id,
        resume_from_checkpoint=args.resume_from_checkpoint,
        seed=args.seed,
    )
|
| 528 |
+
|
| 529 |
+
def main() -> None:
    """CLI entry point: parse the training config and launch GRPO training."""
    cfg = _parse_args()
    # setdefault so an explicitly exported WANDB_PROJECT wins over the flag.
    os.environ.setdefault("WANDB_PROJECT", cfg.wandb_project)
    train(cfg)


if __name__ == "__main__":
    main()
|
physix/training/prompt.py
ADDED
|
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Prompt rendering and completion parsing for PhysiX-Live.
|
| 2 |
+
|
| 3 |
+
Responsibility:
|
| 4 |
+
|
| 5 |
+
- :func:`render_observation_for_prompt`: serialise a :class:`PhysiXObservation`
|
| 6 |
+
into a compact, token-efficient string the agent can read.
|
| 7 |
+
- :func:`build_prompt`: combine the system message, grammar hint, and the
|
| 8 |
+
current observation into a single chat-formatted prompt.
|
| 9 |
+
- :func:`parse_completion`: parse a raw model completion (which may contain a
|
| 10 |
+
JSON object inside arbitrary text) into a :class:`PhysiXAction`.
|
| 11 |
+
|
| 12 |
+
This module imports nothing from :mod:`torch`, :mod:`unsloth`, or :mod:`trl`
|
| 13 |
+
so it can be tested on any machine.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
|
| 18 |
+
import json
|
| 19 |
+
import re
|
| 20 |
+
from typing import Any
|
| 21 |
+
|
| 22 |
+
from physix.models import (
|
| 23 |
+
DEFAULT_MAX_TURNS,
|
| 24 |
+
PhysiXAction,
|
| 25 |
+
PhysiXObservation,
|
| 26 |
+
)
|
| 27 |
+
from physix.verifier.parser import GRAMMAR_HINT
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# System prompt shared by every episode. The verifier's grammar hint is
# embedded verbatim so the model sees the exact equation syntax the parser
# accepts; the JSON-key instructions name the canonical "equation" key that
# parse_completion reads (aliases are tolerated there as defense-in-depth).
SYSTEM_MESSAGE: str = (
    "You are an expert physicist. Your task is to discover the equation of "
    "motion that produced an observed trajectory. Each turn you propose a "
    "candidate equation; the environment simulates it and tells you how well "
    "the prediction matches observation. Refine your guess across turns based "
    "on the residual feedback. Keep equations as simple as possible.\n\n"
    + GRAMMAR_HINT
    + "\n\n"
    "Output a single JSON object with exactly these keys: "
    '"equation" (string, required), "params" (object of name->number, '
    'optional), "rationale" (short string, optional). Do not rename the '
    'keys: always emit "equation", never "eqn"/"ode"/"formula"/"expr". '
    'Example: {"equation": "d2y/dt2 = -9.81", "params": {}, '
    '"rationale": "free fall"}'
)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# Maximum number of trajectory samples shipped to the agent. We downsample
# from 100 to 12 to keep prompt size bounded; statistics carry the rest.
# (_downsample always forces the final sample into the window, so the agent
# sees the trajectory endpoint regardless of stride.)
_TRAJECTORY_DOWNSAMPLE_COUNT: int = 12

# Maximum number of prior history entries surfaced. With 8 turns max budget,
# 7 prior turns is the upper bound; we cap at 5 to stay token-efficient.
_HISTORY_CAP: int = 5
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def render_observation_for_prompt(obs: PhysiXObservation) -> str:
    """Serialise an observation into the compact text block shown to the agent.

    The rendered string stacks four sections separated by blank lines:
    metadata (SYSTEM_ID / STATE_VARIABLES / HINT / STATS), a downsampled
    TRAJECTORY table, a HISTORY block (only when prior turns exist), and the
    TURN footer with the emit instruction.

    The HISTORY block spells the field as the literal ``equation=`` (never a
    shorthand like ``eqn=``): chat models mimic the field name they see
    in-context when emitting their next JSON, so the in-prompt spelling must
    match the key the parser reads. Drift here silently yields
    ``{"eqn": ...}`` outputs that score ``r_format=0`` on every later turn.
    """
    blocks: list[str] = [
        _render_metadata_block(obs),
        _render_trajectory_block(obs),
    ]
    if obs.history:
        blocks.append(_render_history_block(obs))
    blocks.append(_render_turn_footer(obs))
    return "\n\n".join(blocks)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def build_prompt(obs: PhysiXObservation) -> list[dict[str, str]]:
    """Assemble the two-message chat prompt (system + user) for the model.

    Returns the standard ``[{"role": ..., "content": ...}, ...]`` list that
    Hugging Face chat-template tokenisers consume directly.
    """
    system_turn = {"role": "system", "content": SYSTEM_MESSAGE}
    user_turn = {"role": "user", "content": render_observation_for_prompt(obs)}
    return [system_turn, user_turn]
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
#: Field names we accept for the equation payload, in priority order. The
#: canonical key is ``equation`` and the system prompt asks for it
#: explicitly, but mid-strength chat models routinely substitute one of
#: these synonyms — especially after the first turn, where the model has
#: latched onto a different naming convention from its own pretraining
#: corpus. Treating these as missing produced silent ``r_format=0`` runs
#: even when the underlying equation was perfect; matching them
#: explicitly closes that hole without weakening the verifier (the
#: equation grammar itself remains strict).
#: NOTE: entries must be lowercase — lookups run on _lowercase_keys() output.
_EQUATION_KEYS: tuple[str, ...] = (
    "equation",
    "eqn",
    "ode",
    "formula",
    "expression",
    "expr",
)

#: Same idea for the optional rationale payload. We never gate on this so
#: the cost of being permissive is zero.
_RATIONALE_KEYS: tuple[str, ...] = (
    "rationale",
    "reasoning",
    "explanation",
    "thought",
    "thoughts",
)

#: And for the params dict. Some models emit ``parameters`` instead.
_PARAMS_KEYS: tuple[str, ...] = ("params", "parameters", "constants")
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def parse_completion(completion: str) -> PhysiXAction:
    """Turn a raw model completion into a :class:`PhysiXAction`.

    The job is deliberately small: locate the first JSON object in the
    completion (which may sit inside markdown fences or scratchpad prose)
    and copy its fields into the action verbatim.

    The ``equation`` string is never rewritten or normalised here — the
    grammar lives in :mod:`physix.verifier.parser`, and any deviation must
    surface there as a parse error so the env scores ``r_format=0`` and
    feeds the failure back to the agent. Rewriting upstream would silently
    change the agent's output and hide that signal.

    Key aliases (``eqn``/``ode``/``formula``/...) are accepted alongside
    the canonical ``equation``: rejecting them caused every post-first turn
    to score ``r_format=0`` whenever the model latched onto a shorthand.
    The prompt now uses the canonical key too, but accepting synonyms is
    cheap defense-in-depth.

    When no JSON object can be extracted at all, the returned action has an
    **empty** ``equation`` (so the verifier reports a clean ``Empty
    equation payload`` error) while the raw text is preserved in
    ``rationale`` for logs/UI. The raw prose is never fed to the equation
    parser as if it were an equation — doing so produced misleading errors
    like ``Equation has no '=' sign: '{'``.
    """
    payload = _extract_json_payload(completion)
    if payload is None:
        return PhysiXAction(equation="", rationale=completion.strip()[:500])

    fields = _lowercase_keys(payload)
    raw_params = _first_value(fields, _PARAMS_KEYS) or {}
    return PhysiXAction(
        equation=_first_string_value(fields, _EQUATION_KEYS),
        params=_coerce_params(raw_params),
        rationale=_first_string_value(fields, _RATIONALE_KEYS),
    )
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _lowercase_keys(payload: dict[str, Any]) -> dict[str, Any]:
|
| 192 |
+
"""Return ``payload`` with top-level keys lowercased.
|
| 193 |
+
|
| 194 |
+
Some models emit ``"Equation"`` / ``"EQN"``; lowercasing once means
|
| 195 |
+
the lookup tables above stay declarative.
|
| 196 |
+
"""
|
| 197 |
+
return {str(k).lower(): v for k, v in payload.items()}
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _first_value(payload: dict[str, Any], keys: tuple[str, ...]) -> Any:
|
| 201 |
+
for key in keys:
|
| 202 |
+
if key in payload:
|
| 203 |
+
return payload[key]
|
| 204 |
+
return None
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def _first_string_value(payload: dict[str, Any], keys: tuple[str, ...]) -> str:
|
| 208 |
+
value = _first_value(payload, keys)
|
| 209 |
+
if value is None:
|
| 210 |
+
return ""
|
| 211 |
+
return str(value).strip()
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def _render_metadata_block(obs: PhysiXObservation) -> str:
    """Render SYSTEM_ID / STATE_VARIABLES / HINT / STATS as four lines."""
    variables = ", ".join(obs.state_variables) or "(none)"
    stats = " ".join(f"{key}={value:.3g}" for key, value in obs.stats.items())
    lines = [
        f"SYSTEM_ID: {obs.system_id or 'unknown'}",
        f"STATE_VARIABLES: {variables}",
        f"HINT: {obs.hint}",
        f"STATS: {stats}",
    ]
    return "\n".join(lines)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def _render_trajectory_block(obs: PhysiXObservation) -> str:
    """Render the downsampled trajectory as a small fixed-width table."""
    picked = _downsample(obs.trajectory, _TRAJECTORY_DOWNSAMPLE_COUNT)
    rows = [
        f"TRAJECTORY ({len(picked)} samples downsampled from {len(obs.trajectory)}):"
    ]
    for point in picked:
        fields: list[str] = [f"t={point['t']:.3f}"]
        # Only variables actually present in this sample are printed.
        fields.extend(
            f"{name}={point[name]:.3f}"
            for name in obs.state_variables
            if name in point
        )
        rows.append("  " + " ".join(fields))
    return "\n".join(rows)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
#: Order in which reward components are surfaced to the model. Match
#: matters most (it's the headline accuracy signal); format is last
#: because once it stabilises the others dominate the gradient. Stable
#: order also matters for the model's in-context retrieval: a fixed
#: column position is a reliable cue across turns.
#: Consumed by _format_reward_components, which always emits all four
#: names in this order (0.00 default for absent components).
_REWARD_COMPONENT_ORDER: tuple[str, ...] = (
    "match",
    "progress",
    "simplicity",
    "format",
)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def _render_history_block(obs: PhysiXObservation) -> str:
    """Render the last ``_HISTORY_CAP`` turns of feedback for the agent.

    The field is spelled ``equation=`` (never ``eqn=``) on purpose:
    chat-tuned models copy the most recent token spelling when emitting
    their own JSON, so the in-prompt name must match the key the parser
    reads on the next turn.

    Alongside the scalar ``reward=`` total, each entry surfaces the full
    dense breakdown (``match``/``progress``/``simplicity``/``format``) —
    the same values the GRPO trainer optimises. Seeing them in-context
    lets the model attribute gains and losses directly: ``format=0.0``
    after a parse error says "fix the grammar", while a stuck
    ``match`` with ``progress=0.0`` says "try a structurally different
    equation rather than retuning coefficients".
    """
    rendered = ["HISTORY:"]
    for entry in obs.history[-_HISTORY_CAP:]:
        total = float(entry.get("reward_total", 0.0))
        breakdown = _format_reward_components(entry.get("reward_components"))
        rendered.append(
            f"  turn={entry.get('turn')} reward={total:.3f} "
            f"[{breakdown}] equation=`{entry.get('equation', '')}`"
        )
        note = entry.get("mismatch_summary", "")
        if note:
            rendered.append(f"    mismatch: {note}")
    return "\n".join(rendered)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def _format_reward_components(components: Any) -> str:
    """Render ``{match, progress, simplicity, format}`` as one compact line.

    All four fields from :data:`_REWARD_COMPONENT_ORDER` are always
    emitted, defaulting to ``0.00`` when absent or non-numeric, so the
    model never has to guess why a column is missing. Two-decimal
    formatting matches the server's history serialisation precision.
    """
    if not isinstance(components, dict):
        components = {}
    rendered: list[str] = []
    for name in _REWARD_COMPONENT_ORDER:
        raw = components.get(name, 0.0)
        try:
            numeric = float(raw)
        except (TypeError, ValueError):
            numeric = 0.0
        rendered.append(f"{name}={numeric:.2f}")
    return " ".join(rendered)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _render_turn_footer(obs: PhysiXObservation) -> str:
    """Render the ``TURN: k / total (m remaining)`` footer plus the emit cue.

    ``obs.turn`` is 0-indexed (the footer prints ``obs.turn + 1``) and,
    per the module docstring's example ``TURN: 3 / 8 (5 turns
    remaining)``, ``obs.turn_remaining`` counts the turns left *after*
    the current one. The total budget is therefore
    ``(turn + 1) + turn_remaining``. The previous expression
    ``obs.turn + obs.turn_remaining or DEFAULT_MAX_TURNS`` was off by
    one — on the final turn (remaining=0) it rendered e.g. ``TURN: 8 / 7``.
    """
    if obs.turn == 0 and obs.turn_remaining == 0:
        # Unpopulated observation (both fields at their 0 defaults): fall
        # back to the default budget, as the old `or` fallback intended,
        # instead of rendering "1 / 1".
        total = DEFAULT_MAX_TURNS
    else:
        total = obs.turn + 1 + obs.turn_remaining
    return (
        f"TURN: {obs.turn + 1} / {total} ({obs.turn_remaining} remaining)\n"
        "Emit the next hypothesis as JSON."
    )
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def _downsample(samples: list[dict[str, float]], target: int) -> list[dict[str, float]]:
|
| 314 |
+
if len(samples) <= target:
|
| 315 |
+
return samples
|
| 316 |
+
step = max(1, len(samples) // target)
|
| 317 |
+
indices = list(range(0, len(samples), step))[:target]
|
| 318 |
+
if indices[-1] != len(samples) - 1:
|
| 319 |
+
indices[-1] = len(samples) - 1
|
| 320 |
+
return [samples[i] for i in indices]
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
# Shared decoder instance: _extract_json_payload uses its raw_decode() so a
# JSON object can be parsed starting at an arbitrary offset, tolerating
# trailing non-JSON text after the object.
_JSON_DECODER = json.JSONDecoder()
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def _extract_json_payload(text: str) -> dict[str, Any] | None:
    """Return the first ``{...}`` span in ``text`` that decodes to a JSON object.

    Uses :meth:`json.JSONDecoder.raw_decode` rather than brace counting, so
    braces inside JSON *string* values (e.g. LaTeX like ``"\\frac{d vy}{dt}"``)
    cannot unbalance the scan — a regex matcher would mis-pair them and hand
    the whole completion back as a malformed equation.
    """
    stripped = _strip_code_fences(text)
    start = 0
    while True:
        start = stripped.find("{", start)
        if start == -1:
            return None
        try:
            obj, _ = _JSON_DECODER.raw_decode(stripped[start:])
        except json.JSONDecodeError:
            start += 1
            continue
        if isinstance(obj, dict):
            return obj
        # Decoded to a non-dict (shouldn't happen starting at "{", but be
        # safe): keep scanning from the next character.
        start += 1
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def _strip_code_fences(text: str) -> str:
|
| 349 |
+
"""Remove Markdown code-fence wrappers (```json``` / ```python``` / ```).
|
| 350 |
+
|
| 351 |
+
This is *not* equation rewriting — it strips the outer fence syntax
|
| 352 |
+
only, so the JSON-aware extractor below can find the object payload.
|
| 353 |
+
"""
|
| 354 |
+
text = re.sub(r"```(?:json|python)?\s*", "", text)
|
| 355 |
+
text = text.replace("```", "")
|
| 356 |
+
return text
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def _coerce_params(params_raw: Any) -> dict[str, float]:
|
| 360 |
+
"""Best-effort coercion of a raw params payload into ``dict[str, float]``."""
|
| 361 |
+
if not isinstance(params_raw, dict):
|
| 362 |
+
return {}
|
| 363 |
+
out: dict[str, float] = {}
|
| 364 |
+
for key, value in params_raw.items():
|
| 365 |
+
try:
|
| 366 |
+
out[str(key)] = float(value)
|
| 367 |
+
except (TypeError, ValueError):
|
| 368 |
+
continue
|
| 369 |
+
return out
|
physix/training/reward_fns.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""TRL-compatible reward functions for GRPO training.
|
| 2 |
+
|
| 3 |
+
Responsibility: expose a stateless reward function for each independent
|
| 4 |
+
reward component, plus an aggregated total. Internally each component delegates
|
| 5 |
+
to a shared :class:`Scorer` so a single completion is parsed and simulated
|
| 6 |
+
exactly once per training step regardless of how many components TRL queries.
|
| 7 |
+
|
| 8 |
+
The TRL signature for a reward function is::
|
| 9 |
+
|
| 10 |
+
def reward_func(*, prompts, completions, **kwargs) -> list[float]: ...
|
| 11 |
+
|
| 12 |
+
where ``prompts`` and ``completions`` are batched lists. Extra columns from
|
| 13 |
+
the training dataset arrive as keyword arguments — we expect the columns
|
| 14 |
+
listed in :class:`SystemContext` to be present.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
from __future__ import annotations
|
| 18 |
+
|
| 19 |
+
from collections.abc import Callable, Sequence
|
| 20 |
+
from typing import Any
|
| 21 |
+
|
| 22 |
+
from physix.training.scorer import Scorer, SystemContext
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
RewardFunction = Callable[..., list[float]]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# The four independent reward components passed to GRPOTrainer.
# NOTE: "total" is intentionally excluded — GRPOTrainer *sums* the outputs
# of all reward functions into a single advantage signal, so passing a
# pre-computed total would double-count every component.
# These names must match attributes on the scorer's RewardBreakdown, since
# each reward function reads its value via getattr(breakdown, component).
_COMPONENT_NAMES: tuple[str, ...] = ("match", "progress", "simplicity", "format")
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def make_reward_funcs(
    scorer: Scorer | None = None,
) -> dict[str, RewardFunction]:
    """Build a fresh dict of reward functions wired to a shared scorer.

    Each function is named ``reward_<component>`` so TRL's GRPO trainer
    logs them individually to W&B under
    ``train/rewards/reward_<component>/mean``.

    The scorer is shared across all four functions so each completion is
    parsed + simulated exactly once per step. BUGFIX: previously *every*
    reward function called ``shared.reset()`` at entry, so the four
    functions cleared each other's cache and each completion was scored
    four times per step — defeating the documented memoisation. Now only
    the first component's function resets (TRL invokes the reward
    functions in registration order within a step), and the cache key
    binds the completion text so a stale entry from an earlier batch can
    never be returned even if the call order ever varies.

    Args:
        scorer: optional pre-built :class:`Scorer`; a fresh one is created
            when omitted.

    Returns:
        ``{"match": fn, "progress": fn, "simplicity": fn, "format": fn}``
    """
    shared = scorer if scorer is not None else Scorer()

    def _make(component: str) -> RewardFunction:
        # Only the first registered component clears the cache; the other
        # three then hit the memoised breakdowns for the same batch.
        resets_cache = component == _COMPONENT_NAMES[0]

        def _reward_fn(
            prompts: Sequence[Any],
            completions: Sequence[str],
            **kwargs: Any,
        ) -> list[float]:
            del prompts  # kept for TRL API conformance; unused here.
            if resets_cache:
                shared.reset()
            contexts = _hydrate_contexts(len(completions), kwargs)
            out: list[float] = []
            for i, completion in enumerate(completions):
                breakdown = shared.score(
                    completion=completion,
                    context=contexts[i],
                    # Key on (index, text): content-addressed, so stale
                    # entries from a previous step cannot collide.
                    cache_key=hash((i, completion)),
                )
                out.append(getattr(breakdown, component))
            return out

        _reward_fn.__name__ = f"reward_{component}"
        return _reward_fn

    return {name: _make(name) for name in _COMPONENT_NAMES}
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _hydrate_contexts(batch_size: int, kwargs: dict[str, Any]) -> list[SystemContext]:
    """Project per-row kwargs into :class:`SystemContext` records.

    TRL passes dataset columns as kwargs where each value is a list of
    length ``batch_size``. Each expected column is sliced per row and the
    resulting dicts are handed to :func:`SystemContext.from_row`.
    """
    expected_keys = (
        "system_id",
        "state_variables",
        "parameters",
        "initial_conditions",
        "timestamps",
        "observed",
        "previous_r_match",
    )

    contexts: list[SystemContext] = []
    for i in range(batch_size):
        row: dict[str, Any] = {}
        for key in expected_keys:
            column = kwargs.get(key)
            if isinstance(column, list) and len(column) > i:
                row[key] = column[i]
            else:
                # Missing or non-batched column: pass it through as-is.
                row[key] = column
        contexts.append(SystemContext.from_row(row))
    return contexts
|
physix/training/scorer.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Single-completion scorer used by both training and evaluation.
|
| 2 |
+
|
| 3 |
+
Responsibility: given the agent's raw completion text plus the system context
|
| 4 |
+
(state variables, parameters, IC, observed trajectory), compute the same
|
| 5 |
+
4-component :class:`RewardBreakdown` the env produces during a normal
|
| 6 |
+
``step()`` call. This is the bridge between TRL's "reward function over a
|
| 7 |
+
batch of completions" interface and our env's verifier pipeline.
|
| 8 |
+
|
| 9 |
+
Caching: a :class:`Scorer` instance memoises by ``(dataset_index, completion)``
|
| 10 |
+
so per-component reward functions can each ask the scorer for the *same*
|
| 11 |
+
completion without re-running parse + simulate four times.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 18 |
+
|
| 19 |
+
from physix.models import RewardBreakdown
|
| 20 |
+
from physix.training.prompt import parse_completion
|
| 21 |
+
from physix.verifier import (
|
| 22 |
+
ParseError,
|
| 23 |
+
SimulationError,
|
| 24 |
+
compute_match,
|
| 25 |
+
compute_reward,
|
| 26 |
+
parse_equation,
|
| 27 |
+
simulate_hypothesis,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _drop_none(mapping: object) -> dict[str, float]:
|
| 32 |
+
"""Return ``{k: float(v)}`` for keys whose value is not ``None``.
|
| 33 |
+
|
| 34 |
+
HuggingFace ``Dataset`` columns are schema-unified across rows, so a
|
| 35 |
+
row that lacks a key gets ``None`` for it. We drop those at ingest
|
| 36 |
+
so per-row dicts only contain the keys that actually apply to the
|
| 37 |
+
row's system.
|
| 38 |
+
"""
|
| 39 |
+
if not isinstance(mapping, dict):
|
| 40 |
+
return {}
|
| 41 |
+
return {str(k): float(v) for k, v in mapping.items() if v is not None}
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class SystemContext(BaseModel):
    """Per-prompt context the scorer needs to evaluate completions.

    These fields correspond 1:1 with dataset columns at training time.
    """

    # Frozen so instances are safe to share across the four reward
    # functions; arbitrary_types_allowed admits the np.ndarray fields.
    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    system_id: str  # e.g. "free_fall_drag"; "" when the row lacks one
    state_variables: tuple[str, ...]  # names of the observed state channels
    parameters: dict[str, float] = Field(default_factory=dict)  # ground-truth params (None fillers dropped)
    initial_conditions: dict[str, float] = Field(default_factory=dict)
    timestamps: np.ndarray  # sample times, built via np.asarray(dtype=float) in from_row
    observed: dict[str, np.ndarray] = Field(default_factory=dict)  # variable -> observed series
    previous_r_match: float = 0.0  # prior turn's match score — presumably the progress baseline; confirm against compute_reward

    @classmethod
    def from_row(cls, row: dict[str, object]) -> "SystemContext":
        """Hydrate from a HuggingFace dataset row.

        Two non-obvious transforms happen here:

        1. **Lists -> arrays.** The dataset stores trajectories as plain
           Python lists for JSON serialisability; we lift them back into
           ``np.ndarray`` so the verifier's NumPy code path works.
        2. **Strip ``None`` fillers.** ``Dataset.from_list`` schema-unifies
           rows across all systems: a ``free_fall`` row ends up with
           ``parameters={'g': 9.81, 'mass': 3.4, 'k': None, 'L': None, ...}``
           because *other* systems define those keys. Left as-is, ``None``
           values would (a) inflate the verifier's allowed-symbol set, so
           the model could "validly" reference parameters that don't
           exist for this system, and (b) crash the simulator on
           substitution. We drop them at ingest, restoring per-system
           parameter sets.
        """
        state_variables = tuple(row.get("state_variables", ()))
        observed: dict[str, np.ndarray] = {}
        observed_raw = row.get("observed", {})
        if isinstance(observed_raw, dict):
            for key, values in observed_raw.items():
                # Ignore observed series for variables this system doesn't
                # declare (another artifact of schema unification).
                if key not in state_variables:
                    continue
                observed[str(key)] = np.asarray(values, dtype=float)

        return cls(
            system_id=str(row.get("system_id", "")),
            state_variables=state_variables,
            parameters=_drop_none(row.get("parameters", {})),
            initial_conditions=_drop_none(row.get("initial_conditions", {})),
            timestamps=np.asarray(row.get("timestamps", []), dtype=float),
            observed=observed,
            # Legacy rows stored this under "previous_total"; fall back.
            previous_r_match=float(row.get("previous_r_match", row.get("previous_total", 0.0))),
        )
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class Scorer:
|
| 100 |
+
"""Stateless completion scorer with optional per-batch memoisation."""
|
| 101 |
+
|
| 102 |
+
def __init__(self) -> None:
|
| 103 |
+
self._cache: dict[int, RewardBreakdown] = {}
|
| 104 |
+
|
| 105 |
+
    def reset(self) -> None:
        """Clear the memoisation cache (call once per training step).

        Without this, cache entries from a previous batch could be
        returned for new completions that reuse the same cache keys,
        and the cache would grow without bound.
        """
        self._cache.clear()
|
| 108 |
+
|
| 109 |
+
def score(
|
| 110 |
+
self,
|
| 111 |
+
completion: str,
|
| 112 |
+
context: SystemContext,
|
| 113 |
+
*,
|
| 114 |
+
cache_key: int | None = None,
|
| 115 |
+
) -> RewardBreakdown:
|
| 116 |
+
"""Score one completion. Optionally memoise by ``cache_key``."""
|
| 117 |
+
if cache_key is not None and cache_key in self._cache:
|
| 118 |
+
return self._cache[cache_key]
|
| 119 |
+
|
| 120 |
+
breakdown = self._score_uncached(completion, context)
|
| 121 |
+
if cache_key is not None:
|
| 122 |
+
self._cache[cache_key] = breakdown
|
| 123 |
+
return breakdown
|
| 124 |
+
|
| 125 |
+
# --------------------------------------------------------------- internals
|
| 126 |
+
|
| 127 |
+
def _score_uncached(
|
| 128 |
+
self,
|
| 129 |
+
completion: str,
|
| 130 |
+
context: SystemContext,
|
| 131 |
+
) -> RewardBreakdown:
|
| 132 |
+
action = parse_completion(completion)
|
| 133 |
+
parameter_names = frozenset(action.params or {}) | frozenset(context.parameters)
|
| 134 |
+
|
| 135 |
+
try:
|
| 136 |
+
parsed = parse_equation(
|
| 137 |
+
action.equation,
|
| 138 |
+
state_variables=context.state_variables,
|
| 139 |
+
parameter_names=parameter_names,
|
| 140 |
+
)
|
| 141 |
+
except ParseError:
|
| 142 |
+
return compute_reward(
|
| 143 |
+
parse_succeeded=False,
|
| 144 |
+
r_match=0.0,
|
| 145 |
+
operator_count=0,
|
| 146 |
+
previous_r_match=context.previous_r_match,
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
# The agent's params take precedence over the system's; agent is
|
| 150 |
+
# allowed to use system parameter names like ``g`` if it provides
|
| 151 |
+
# values, but if it omits them we fall back to ground-truth values
|
| 152 |
+
# (which is fine — the agent's structural correctness is what we
|
| 153 |
+
# primarily score).
|
| 154 |
+
merged_parameters = {**context.parameters, **(action.params or {})}
|
| 155 |
+
|
| 156 |
+
try:
|
| 157 |
+
predicted = simulate_hypothesis(
|
| 158 |
+
parsed,
|
| 159 |
+
state_variables=context.state_variables,
|
| 160 |
+
parameters=merged_parameters,
|
| 161 |
+
initial_conditions=context.initial_conditions,
|
| 162 |
+
timestamps=context.timestamps,
|
| 163 |
+
)
|
| 164 |
+
except SimulationError:
|
| 165 |
+
return compute_reward(
|
| 166 |
+
parse_succeeded=True,
|
| 167 |
+
r_match=0.0,
|
| 168 |
+
operator_count=parsed.operator_count,
|
| 169 |
+
previous_r_match=context.previous_r_match,
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
r_match = compute_match(
|
| 173 |
+
observed=context.observed,
|
| 174 |
+
predicted=predicted,
|
| 175 |
+
state_variables=context.state_variables,
|
| 176 |
+
)
|
| 177 |
+
return compute_reward(
|
| 178 |
+
parse_succeeded=True,
|
| 179 |
+
r_match=r_match,
|
| 180 |
+
operator_count=parsed.operator_count,
|
| 181 |
+
previous_r_match=context.previous_r_match,
|
| 182 |
+
)
|
physix/training/sft.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""SFT warm-start before GRPO training.
|
| 2 |
+
|
| 3 |
+
Trains Qwen2.5-1.5B-Instruct for 2 epochs on supervised (prompt, completion)
|
| 4 |
+
pairs where the completion is the ground-truth equation in the action JSON
|
| 5 |
+
format the env expects. This is the essential bootstrap step: without it a
|
| 6 |
+
cold 1.5B model outputs LaTeX / incoherent text on ~80% of turns, yielding
|
| 7 |
+
near-zero GRPO advantages and a flat loss curve that wastes GPU credits.
|
| 8 |
+
|
| 9 |
+
After SFT the model:
|
| 10 |
+
- Emits valid JSON with ``equation``, ``params``, ``rationale`` on >90% turns.
|
| 11 |
+
- Writes equations in the ASCII grammar (``d2y/dt2 = ...``), not LaTeX.
|
| 12 |
+
- Knows the per-system equation family (gravity, drag, pendulum, spring).
|
| 13 |
+
|
| 14 |
+
Then GRPO refines physics accuracy via the verifiable R² reward.
|
| 15 |
+
|
| 16 |
+
Run::
|
| 17 |
+
|
| 18 |
+
python -m physix.training.sft \
|
| 19 |
+
--model Qwen/Qwen2.5-1.5B-Instruct \
|
| 20 |
+
--output-dir runs/physix-1.5b-sft \
|
| 21 |
+
--epochs 2 \
|
| 22 |
+
--instances-per-system 32
|
| 23 |
+
|
| 24 |
+
Typical runtime: 5-8 min on an A10G, 3-4 min on an A100.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
from __future__ import annotations
|
| 28 |
+
|
| 29 |
+
import argparse
|
| 30 |
+
import json
|
| 31 |
+
import logging
|
| 32 |
+
import os
|
| 33 |
+
from pathlib import Path
|
| 34 |
+
|
| 35 |
+
import numpy as np
|
| 36 |
+
from datasets import Dataset
|
| 37 |
+
|
| 38 |
+
from physix.systems import (
|
| 39 |
+
SUPPORTED_SYSTEMS,
|
| 40 |
+
SYSTEM_REGISTRY,
|
| 41 |
+
get_system,
|
| 42 |
+
)
|
| 43 |
+
from physix.systems.base import PhysicalSystem, TrajectoryData
|
| 44 |
+
from physix.training.prompt import build_prompt
|
| 45 |
+
from physix.models import DEFAULT_MAX_TURNS, PhysiXObservation
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
_log = logging.getLogger(__name__)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# ─── Dataset ──────────────────────────────────────────────────────────────────
|
| 52 |
+
|
| 53 |
+
def _gt_completion(system: PhysicalSystem) -> str:
    """Render the supervised target: the ground-truth action JSON.

    The ``params`` field lists only the sampled system parameters that are
    actually referenced by the equation text, so the model learns to declare
    exactly the symbols it uses. GRPO later refines the numeric values per
    trajectory.
    """
    import re as _re

    equation = system.ground_truth_equation()
    # Names that can never be parameters: state variables, derivative
    # notation, and the function names the grammar allows.
    non_params = set(system.state_variables) | {
        "dt", "d", "t", "sin", "cos", "tan", "exp", "log", "sqrt", "abs",
    }
    # A real identifier regex (rather than whitespace splitting) catches
    # symbols nested inside calls like sin(theta) and fractions like -(g/L).
    identifiers = set(_re.findall(r'\b([A-Za-z_][A-Za-z0-9_]*)\b', equation))
    declared = identifiers & (set(system.parameters) - non_params)
    params = {name: round(system.parameters[name], 4) for name in sorted(declared)}
    return json.dumps({
        "equation": equation,
        "params": params,
        "rationale": (
            f"Ground-truth equation for {system.system_id.replace('_', ' ')}."
        ),
    })
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def build_sft_dataset(
    system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS,
    instances_per_system: int = 32,
    seed: int = 0,
) -> Dataset:
    """Generate the supervised (prompt, completion) corpus for SFT.

    For each requested system, ``instances_per_system`` trajectories are
    drawn from a single seeded RNG and paired with the ground-truth action
    JSON rendered by ``_gt_completion``.

    Raises:
        ValueError: if ``system_ids`` is empty or contains an id that is
            not present in ``SYSTEM_REGISTRY``.
    """
    if not system_ids:
        raise ValueError("system_ids must be non-empty.")
    unknown = [sid for sid in system_ids if sid not in SYSTEM_REGISTRY]
    if unknown:
        raise ValueError(
            f"Unknown system_ids in build_sft_dataset: {unknown!r}. "
            f"Registered: {sorted(SYSTEM_REGISTRY)!r}."
        )

    rng = np.random.default_rng(seed)
    rows: list[dict] = []
    for sid in system_ids:
        system = get_system(sid)
        for _ in range(instances_per_system):
            trajectory = system.simulate(rng)
            rows.append({
                "prompt": build_prompt(_build_obs(system, trajectory)),
                "completion": _gt_completion(system),
            })

    _log.info(
        "Built SFT dataset: %d rows across %d systems (%s)",
        len(rows),
        len(system_ids),
        ", ".join(system_ids),
    )
    return Dataset.from_list(rows)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _build_obs(system: PhysicalSystem, trajectory: TrajectoryData) -> PhysiXObservation:
    """Wrap one simulated trajectory as a fresh turn-0 observation."""
    samples = trajectory.to_observation_samples()
    summary_stats = trajectory.stats()
    return PhysiXObservation(
        done=False,
        reward=None,
        trajectory=samples,
        state_variables=list(system.state_variables),
        hint=system.hint(system.parameters),
        history=[],
        mismatch_summary="",
        turn=0,
        turn_remaining=DEFAULT_MAX_TURNS,
        system_id=system.system_id,
        stats=summary_stats,
        reward_breakdown={},
    )
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
# ─── Training ─────────────────────────────────────────────────────────────────
|
| 134 |
+
|
| 135 |
+
def train_sft(
    model_name: str = "Qwen/Qwen2.5-1.5B-Instruct",
    output_dir: str = "runs/physix-1.5b-sft",
    epochs: int = 2,
    max_seq_length: int = 2048,
    lora_r: int = 16,
    lora_alpha: int = 32,
    per_device_batch_size: int = 2,
    gradient_accumulation_steps: int = 4,
    learning_rate: float = 2e-5,
    instances_per_system: int = 32,
    seed: int = 0,
    wandb_run_name: str | None = None,
) -> None:
    """Run the SFT warm-start: build the dataset, LoRA-fine-tune, save merged.

    Args:
        model_name: HF model id, loaded 4-bit via Unsloth.
        output_dir: Run directory; the merged checkpoint is written to
            ``<output_dir>/merged``.
        epochs: Passes over the supervised dataset.
        max_seq_length: Sequence cap for both model loading and the trainer.
        lora_r: LoRA rank. NOTE(review): the CLI in ``main`` defaults
            ``--lora-r`` to 32 while this function defaults to 16 — confirm
            which is intended.
        lora_alpha: LoRA scaling factor.
        per_device_batch_size: Micro-batch size per device.
        gradient_accumulation_steps: Steps accumulated per optimizer update.
        learning_rate: Learning rate handed to ``SFTConfig``.
        instances_per_system: Trajectories sampled per physical system.
        seed: Seeds dataset sampling, LoRA init, and the trainer.
        wandb_run_name: Optional W&B run-name override.

    Side effects: starts and finishes a W&B run; writes checkpoints under
    ``output_dir``.
    """
    _configure_logging()

    # Heavy imports: only available in [train] env.
    import wandb
    from unsloth import FastLanguageModel, PatchFastRL  # noqa: F401
    from trl import SFTTrainer, SFTConfig

    # Force a fresh W&B run for SFT regardless of any inherited WANDB_RUN_ID
    # / WANDB_RESUME env vars (those are intended for the GRPO stage). If we
    # let wandb.init() try to resume a foreign run id it will block for ~90s
    # fetching that run's history before giving up.
    for stale in ("WANDB_RUN_ID", "WANDB_RESUME"):
        os.environ.pop(stale, None)

    wandb.init(
        project=os.environ.get("WANDB_PROJECT", "physix-live"),
        name=wandb_run_name or f"physix-sft-{epochs}ep",
        config={
            "stage": "sft",
            "model_name": model_name,
            "epochs": epochs,
            "lora_r": lora_r,
            "lora_alpha": lora_alpha,
            "learning_rate": learning_rate,
            "per_device_batch_size": per_device_batch_size,
            "gradient_accumulation_steps": gradient_accumulation_steps,
            "instances_per_system": instances_per_system,
            "seed": seed,
        },
        tags=["sft", "physix", model_name.split("/")[-1]],
    )

    _log.info("Loading model %s (4-bit, LoRA-%d)", model_name, lora_r)
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=model_name,
        max_seq_length=max_seq_length,
        load_in_4bit=True,
        dtype=None,
    )
    model = FastLanguageModel.get_peft_model(
        model,
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                        "gate_proj", "up_proj", "down_proj"],
        bias="none",
        use_gradient_checkpointing="unsloth",
        random_state=seed,
    )

    # Supervised (prompt, completion) rows rendered per physical system.
    dataset = build_sft_dataset(instances_per_system=instances_per_system, seed=seed)

    def _format_row(row: dict) -> dict:
        """Combine prompt + completion into a single training string."""
        messages = row["prompt"] + [{"role": "assistant", "content": row["completion"]}]
        text = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=False
        )
        return {"text": text}

    formatted = dataset.map(_format_row, remove_columns=["prompt", "completion"])
    _log.info("SFT dataset ready: %d rows", len(formatted))

    # torch is only needed here to probe GPU precision support (bf16 vs fp16).
    import torch
    sft_config = SFTConfig(
        output_dir=output_dir,
        num_train_epochs=epochs,
        per_device_train_batch_size=per_device_batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        learning_rate=learning_rate,
        max_seq_length=max_seq_length,
        dataset_text_field="text",
        packing=True,
        logging_steps=1,
        save_strategy="epoch",
        report_to=["wandb"],
        seed=seed,
        # Prefer bf16 when the GPU supports it, otherwise fp16; CPU uses neither.
        bf16=torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
        fp16=not torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
    )

    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        args=sft_config,
        train_dataset=formatted,
    )

    _log.info("Starting SFT for %d epochs on %d examples", epochs, len(formatted))
    trainer.train()

    # We save as merged_16bit (full model + config + tokenizer) rather than
    # "lora" (adapter weights only). GRPO's downstream
    # ``FastLanguageModel.from_pretrained(sft_checkpoint)`` needs a complete
    # model directory — config.json + tokenizer + weights — to load. A bare
    # adapter shard makes Unsloth raise "No config file found". The merged
    # checkpoint is ~3 GB (1.5B params × 2 bytes) which is fine on /tmp.
    out_path = Path(output_dir) / "merged"
    out_path.mkdir(parents=True, exist_ok=True)
    model.save_pretrained_merged(
        save_directory=str(out_path),
        tokenizer=tokenizer,
        save_method="merged_16bit",
    )
    _log.info("SFT model (merged 16-bit) saved → %s", out_path)
    wandb.finish()
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
# ─── CLI ──────────────────────────────────────────────────────────────────────
|
| 258 |
+
|
| 259 |
+
def _configure_logging() -> None:
    """Initialise root logging; level comes from $PHYSIX_LOG_LEVEL (default INFO)."""
    level = os.environ.get("PHYSIX_LOG_LEVEL", "INFO")
    logging.basicConfig(
        level=level,
        format="[%(asctime)s] %(levelname)s %(name)s | %(message)s",
    )
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def main() -> None:
    """CLI entry point for the SFT warm-start stage."""
    parser = argparse.ArgumentParser(description="SFT warm-start for PhysiX RLVR.")
    parser.add_argument("--model", default="Qwen/Qwen2.5-1.5B-Instruct")
    parser.add_argument("--output-dir", default="runs/physix-1.5b-sft")
    parser.add_argument("--epochs", type=int, default=2)
    parser.add_argument("--instances-per-system", type=int, default=32)
    # NOTE(review): this CLI default (32) differs from train_sft's keyword
    # default (16) — confirm which value is intended.
    parser.add_argument("--lora-r", type=int, default=32)
    parser.add_argument("--learning-rate", type=float, default=2e-5)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--wandb-run-name", default=None,
                        help="Override W&B run name. Defaults to physix-sft-{epochs}ep.")
    ns = parser.parse_args()

    os.environ.setdefault("WANDB_PROJECT", "physix-live")
    train_sft(
        model_name=ns.model,
        output_dir=ns.output_dir,
        epochs=ns.epochs,
        lora_r=ns.lora_r,
        learning_rate=ns.learning_rate,
        instances_per_system=ns.instances_per_system,
        seed=ns.seed,
        wandb_run_name=ns.wandb_run_name,
    )
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
if __name__ == "__main__":
|
| 293 |
+
main()
|
physix/verifier/__init__.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Verifier layer: parse hypothesis, simulate, score, summarise mismatch.
|
| 2 |
+
|
| 3 |
+
Public API:
|
| 4 |
+
|
| 5 |
+
- :func:`parse_equation` (``parser``): convert a SymPy-grammar string into a
|
| 6 |
+
callable ODE right-hand-side, validated against a strict whitelist.
|
| 7 |
+
- :func:`simulate_hypothesis` (``simulator``): run the parsed RHS forward in
|
| 8 |
+
time via ``scipy.integrate.odeint``.
|
| 9 |
+
- :func:`compute_match` (``metrics``): R-squared between observed and
|
| 10 |
+
predicted trajectories.
|
| 11 |
+
- :func:`summarize_mismatch` (``mismatch``): generate a one-sentence English
|
| 12 |
+
description of where prediction diverges from observation.
|
| 13 |
+
- :func:`compute_reward` (``reward``): combine all components into a
|
| 14 |
+
:class:`RewardBreakdown`.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
from physix.verifier.metrics import compute_match, residual_summary
|
| 18 |
+
from physix.verifier.mismatch import summarize_mismatch
|
| 19 |
+
from physix.verifier.parser import ParseError, ParsedEquation, parse_equation
|
| 20 |
+
from physix.verifier.reward import compute_reward
|
| 21 |
+
from physix.verifier.simulator import SimulationError, simulate_hypothesis
|
| 22 |
+
|
| 23 |
+
__all__ = [
|
| 24 |
+
"compute_match",
|
| 25 |
+
"residual_summary",
|
| 26 |
+
"summarize_mismatch",
|
| 27 |
+
"ParseError",
|
| 28 |
+
"ParsedEquation",
|
| 29 |
+
"parse_equation",
|
| 30 |
+
"compute_reward",
|
| 31 |
+
"SimulationError",
|
| 32 |
+
"simulate_hypothesis",
|
| 33 |
+
]
|
physix/verifier/metrics.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Numerical metrics over (observed, predicted) trajectory pairs.
|
| 2 |
+
|
| 3 |
+
Responsibility: compute scalar fit quality (R-squared), per-variable
|
| 4 |
+
residuals, and lightweight diagnostic statistics. Does no parsing, no
|
| 5 |
+
simulation, no English-text generation.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from collections.abc import Iterable
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ResidualSummary(BaseModel):
    """Diagnostic statistics derived from per-variable residuals.

    Consumed exclusively by :mod:`physix.verifier.mismatch` to render the
    English residual summary surfaced to the agent.
    """

    # Immutable after construction so summaries can be shared safely.
    model_config = ConfigDict(frozen=True)

    # Largest |predicted - observed| seen per state variable.
    per_variable_max_abs_residual: dict[str, float] = Field(default_factory=dict)
    # Timestamp at which that largest residual occurs.
    per_variable_t_of_max_residual: dict[str, float] = Field(default_factory=dict)
    # Mean |residual| over the final 25% of the trajectory (drift signal).
    per_variable_late_residual_mean: dict[str, float] = Field(default_factory=dict)
    # Average of the per-variable R-squared values, clipped to [0, 1].
    overall_r2: float = 0.0
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def compute_match(
    observed: dict[str, np.ndarray],
    predicted: dict[str, np.ndarray],
    state_variables: Iterable[str],
) -> float:
    """Average per-variable R-squared — the primary reward signal.

    Variables missing from either trajectory are skipped, so a hypothesis
    that gets *some* variables right still earns credit. The mean is
    clipped to ``[0, 1]``; 0.0 is returned when nothing could be compared.
    """
    scores: list[float] = []
    for var in state_variables:
        if var in observed and var in predicted:
            scores.append(_r_squared(observed[var], predicted[var]))
    if not scores:
        return 0.0
    return _clip01(float(np.mean(scores)))
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def residual_summary(
    timestamps: np.ndarray,
    observed: dict[str, np.ndarray],
    predicted: dict[str, np.ndarray],
    state_variables: Iterable[str],
) -> ResidualSummary:
    """Assemble the structured residual diagnostics consumed by mismatch.py."""
    max_abs: dict[str, float] = {}
    t_of_max: dict[str, float] = {}
    late_mean: dict[str, float] = {}
    r2s: list[float] = []

    # Mean |residual| over the last 25% of the trajectory: the signal the
    # mismatch summariser uses to detect drift / plateau-mismatch.
    late_start = int(0.75 * len(timestamps))

    for var in state_variables:
        if var not in observed or var not in predicted:
            continue
        residual = predicted[var] - observed[var]
        r2s.append(_r_squared(observed[var], predicted[var]))

        peak = int(np.argmax(np.abs(residual)))
        max_abs[var] = float(np.abs(residual[peak]))
        t_of_max[var] = float(timestamps[peak])
        late_mean[var] = float(np.mean(np.abs(residual[late_start:])))

    overall = float(np.mean(r2s)) if r2s else 0.0
    return ResidualSummary(
        per_variable_max_abs_residual=max_abs,
        per_variable_t_of_max_residual=t_of_max,
        per_variable_late_residual_mean=late_mean,
        overall_r2=_clip01(overall),
    )
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _r_squared(observed: np.ndarray, predicted: np.ndarray) -> float:
    """Coefficient of determination, floored at 0 and capped at 1.

    Degenerate cases score 0.0: mismatched shapes, a constant observed
    series (zero total variance), or a model that fits worse than the
    observed mean.
    """
    if observed.shape != predicted.shape:
        return 0.0
    ss_res = float(np.sum((observed - predicted) ** 2))
    ss_tot = float(np.sum((observed - float(np.mean(observed))) ** 2))
    if ss_tot <= 0.0:
        return 0.0
    # Inline [0, 1] clip of the classic 1 - SS_res/SS_tot.
    return min(1.0, max(0.0, 1.0 - ss_res / ss_tot))
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _clip01(value: float) -> float:
    """Clamp *value* into the closed interval [0, 1]."""
    return min(1.0, max(0.0, value))
|
physix/verifier/mismatch.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generate a one-sentence English summary of where prediction disagrees
|
| 2 |
+
with observation.
|
| 3 |
+
|
| 4 |
+
Responsibility: convert a :class:`ResidualSummary` plus the two trajectories
|
| 5 |
+
into a deterministic English string the agent can use as feedback. No LLM
|
| 6 |
+
involved; this is templated text driven by simple rules over the numerical
|
| 7 |
+
residuals.
|
| 8 |
+
|
| 9 |
+
The output is the only place in the env where structured numerical state is
|
| 10 |
+
translated into natural language for the agent. We invest in this carefully
|
| 11 |
+
because a 1.5B model reasons better over short English sentences than over
|
| 12 |
+
100-row residual tables.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
from collections.abc import Iterable
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from physix.verifier.metrics import ResidualSummary
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def summarize_mismatch(
    observed: dict[str, np.ndarray],
    predicted: dict[str, np.ndarray],
    state_variables: Iterable[str],
    timestamps: np.ndarray,
    summary: ResidualSummary,
) -> str:
    """Describe the dominant prediction/observation disagreement in one sentence.

    Strategy: short-circuit on a near-perfect fit (R-squared >= 0.93);
    otherwise pick the variable with the worst late-window residual,
    classify its residual shape (late divergence / early offset /
    oscillation / small everywhere) and render the matching templated
    sentence. Returns ``""`` when no residuals could be computed.
    """
    if summary.overall_r2 >= 0.93:
        return "Predicted and observed trajectories agree closely."

    worst = _pick_dominant_variable(summary, state_variables)
    if worst is None:
        return ""

    residual = predicted[worst] - observed[worst]
    shape = _classify_pattern(residual, timestamps)
    return _render_sentence(worst, shape, summary, timestamps)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _pick_dominant_variable(
    summary: ResidualSummary,
    state_variables: Iterable[str],
) -> str | None:
    """Return the variable with the worst late-window residual mean, or None."""
    late_means = summary.per_variable_late_residual_mean
    eligible = [var for var in state_variables if var in late_means]
    if not eligible:
        return None
    return max(eligible, key=late_means.__getitem__)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _classify_pattern(residual: np.ndarray, timestamps: np.ndarray) -> str:
    """Label the residual's dominant shape.

    One of: ``"diverges_late"`` (magnitude grows toward the end),
    ``"early_offset"`` (large near t=0 then shrinks),
    ``"phase_or_amplitude"`` (oscillates around zero with non-trivial
    amplitude), ``"uniform_small"`` (small everywhere; also the answer
    for an empty residual).
    """
    n = len(residual)
    if n == 0:
        return "uniform_small"

    magnitudes = np.abs(residual)
    head_mean = float(np.mean(magnitudes[: max(1, n // 4)]))
    tail_mean = float(np.mean(magnitudes[3 * n // 4 :]))
    mean_mag = float(np.mean(magnitudes)) or 1e-9

    # Zero-crossing count of the residual — crude oscillation proxy.
    crossings = int(np.sum(np.diff(np.sign(residual)) != 0))
    # Scale-relative floor so tiny flat residuals are never "divergent".
    span_floor = 0.05 * float(np.ptp(residual) + 1e-9)

    if tail_mean > 2.0 * head_mean and tail_mean > span_floor:
        return "diverges_late"
    if head_mean > 2.0 * tail_mean:
        return "early_offset"
    if crossings > n // 5 and mean_mag > span_floor:
        return "phase_or_amplitude"
    return "uniform_small"
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _render_sentence(
    variable: str,
    pattern: str,
    summary: ResidualSummary,
    timestamps: np.ndarray,
) -> str:
    """Turn the (variable, pattern) pair into the templated English feedback."""
    t_peak = summary.per_variable_t_of_max_residual.get(variable, 0.0)
    peak = summary.per_variable_max_abs_residual.get(variable, 0.0)

    # One sentence per recognised pattern; anything else gets the fallback.
    sentences = {
        "diverges_late": (
            f"Predicted {variable!s} diverges from observed past t={t_peak:.1f}s "
            f"(peak residual {peak:.2f}); the late-time behaviour is "
            "structurally wrong (consider a missing damping, drag, or "
            "saturation term)."
        ),
        "early_offset": (
            f"Predicted {variable!s} is offset near t=0 (peak residual "
            f"{peak:.2f} at t={t_peak:.1f}s); the dynamics align later, "
            "suggesting an initial-condition or constant-term mismatch."
        ),
        "phase_or_amplitude": (
            f"Predicted {variable!s} oscillates around the observed but is "
            f"out of phase or amplitude (peak residual {peak:.2f}); "
            "consider tuning the natural frequency or adding light damping."
        ),
    }
    fallback = (
        f"Predicted {variable!s} matches observed broadly but residual is "
        f"non-trivial (peak {peak:.2f} at t={t_peak:.1f}s); fine-tune the "
        "parameters."
    )
    return sentences.get(pattern, fallback)
|
physix/verifier/parser.py
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Parser for agent-emitted equations of motion.
|
| 2 |
+
|
| 3 |
+
RHS is parsed via Python's ``ast`` module, then walked by a whitelist visitor
|
| 4 |
+
that only permits Constant / Name / UnaryOp (+/-) / BinOp (+ - * / **) /
|
| 5 |
+
Call (bare allowed-function name, no kwargs). Anything else — Attribute,
|
| 6 |
+
Subscript, Lambda, IfExp, keyword args, etc. — raises ParseError by
|
| 7 |
+
construction. We never call sympify on raw text, so there is no eval stage
|
| 8 |
+
that can crash the trainer with an AttributeError.
|
| 9 |
+
|
| 10 |
+
Pre-transforms before AST parse:
|
| 11 |
+
- ``^`` → ``**`` (physics power notation)
|
| 12 |
+
- ``dx/dt`` / bare ``dx`` → ``vx`` when the system pairs x with vx
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
import ast
|
| 18 |
+
import re
|
| 19 |
+
|
| 20 |
+
import sympy as sp
|
| 21 |
+
from pydantic import BaseModel, ConfigDict
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class ParseError(ValueError):
    """Signals that the agent's equation text breaks the accepted grammar."""
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Whitelisted RHS functions mapped to their SymPy implementations. Both
# 'abs' and 'Abs' are accepted spellings; everything else is rejected at
# parse time by the AST visitor.
ALLOWED_FUNCTIONS: dict[str, sp.Function] = {
    "sin": sp.sin,
    "cos": sp.cos,
    "tan": sp.tan,
    "exp": sp.exp,
    "log": sp.log,
    "sqrt": sp.sqrt,
    "abs": sp.Abs,
    "Abs": sp.Abs,
}


def _build_grammar_hint() -> str:
    """Render the one-paragraph grammar description embedded in prompts."""
    # De-duplicate case variants ('abs'/'Abs') before listing.
    fn_names = sorted(set(map(str.lower, ALLOWED_FUNCTIONS)))
    return (
        "The 'equation' field is an infix ODE in plain ASCII. "
        "LHS form: 'dN<var>/dtN' where N is 1 or 2 (omit N for first "
        "order, e.g. 'dy/dt' or 'd2y/dt2'). "
        "RHS uses operators + - * / ** (or ^ for power), parentheses, "
        "the state variables listed under STATE_VARIABLES, and any "
        "names you declare in 'params'. "
        f"Allowed functions: {' '.join(fn_names)}. "
        "Velocity convention: when STATE_VARIABLES lists both 'x' and 'vx' "
        "(or 'y'/'vy', etc.), use the 'vx' name on the RHS to refer to the "
        "first time-derivative of x. The aliases 'dx/dt' and bare 'dx' are "
        "also accepted for that case. The system is autonomous: time 't' is "
        "not a valid RHS symbol. "
        "No LaTeX, no \\frac, no array indexing, no library prefixes "
        "(write 'sqrt(x)', not 'np.sqrt(x)'), no keyword arguments. "
        "Working examples appear in the HISTORY block of each subsequent turn."
    )


# Rendered once at import time.
GRAMMAR_HINT: str = _build_grammar_hint()
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
_LHS_PATTERN = re.compile(
|
| 65 |
+
r"""
|
| 66 |
+
^\s*
|
| 67 |
+
d(?P<order>\d*)
|
| 68 |
+
(?P<var>[A-Za-z_][A-Za-z0-9_]*)
|
| 69 |
+
/
|
| 70 |
+
d t
|
| 71 |
+
(?P<order2>\d*)
|
| 72 |
+
\s*$
|
| 73 |
+
""",
|
| 74 |
+
re.VERBOSE,
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
_BIN_OP_TO_SYMPY: dict[type, "callable"] = {
|
| 78 |
+
ast.Add: lambda a, b: a + b,
|
| 79 |
+
ast.Sub: lambda a, b: a - b,
|
| 80 |
+
ast.Mult: lambda a, b: a * b,
|
| 81 |
+
ast.Div: lambda a, b: a / b,
|
| 82 |
+
ast.Pow: lambda a, b: a**b,
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class Equation(BaseModel):
    """A single parsed ODE: d^order(var)/dt^order = rhs."""

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    # Name of the differentiated state variable (from the LHS).
    var: str
    # Derivative order; the grammar only admits 1 or 2.
    order: int
    # SymPy expression for the right-hand side.
    rhs: sp.Expr


class ParsedEquation(BaseModel):
    """The full, validated equation payload for one agent turn."""

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    # One entry per equation found in the payload.
    equations: tuple[Equation, ...]
    # Names of every free symbol appearing on any RHS.
    free_symbols: frozenset[str]
    # Total operator count across all equations (feeds the simplicity reward).
    operator_count: int
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def parse_equation(
    text: str,
    state_variables: tuple[str, ...],
    parameter_names: frozenset[str],
) -> ParsedEquation:
    """Parse and validate the agent's equation payload.

    Only ParseError ever escapes — callers convert it to r_format=0.
    """
    if not text or not text.strip():
        raise ParseError("Empty equation payload.")

    raw_equations = _split_equations(text)
    if not raw_equations:
        raise ParseError("No equations found in payload.")

    # RHS identifiers may be state variables or agent-declared parameters.
    legal_names = frozenset(state_variables) | parameter_names

    equations = tuple(
        _parse_one(raw, legal_names, state_variables) for raw in raw_equations
    )
    symbol_names = {
        sym.name for eq in equations for sym in eq.rhs.free_symbols
    }
    total_ops = sum(_count_operators(eq.rhs) for eq in equations)

    return ParsedEquation(
        equations=equations,
        free_symbols=frozenset(symbol_names),
        operator_count=total_ops,
    )
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _split_equations(text: str) -> list[str]:
|
| 138 |
+
parts = re.split(r"[;\n]+", text)
|
| 139 |
+
return [p.strip() for p in parts if p.strip()]
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def _parse_one(
    raw: str,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> Equation:
    """Parse a single 'LHS = RHS' equation string into an Equation."""
    lhs_text, sep, rhs_text = raw.partition("=")
    if not sep:
        raise ParseError(f"Equation has no '=' sign: {raw!r}")
    var, order = _parse_lhs(lhs_text)
    return Equation(
        var=var,
        order=order,
        rhs=_parse_rhs(rhs_text, allowed_symbols, state_variables),
    )
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _parse_lhs(lhs: str) -> tuple[str, int]:
    """Extract ``(variable, derivative order)`` from a ``dN<var>/dtN`` LHS."""
    match = _LHS_PATTERN.match(lhs)
    if match is None:
        raise ParseError(
            f"Cannot parse LHS {lhs!r}. Expected 'dN<var>/dtN' where N is "
            "1 or 2 (or empty for first order)."
        )
    top = match.group("order")
    bottom = match.group("order2")
    # 'd2y/dt' or 'dy/dt2' would be ambiguous — both digits must agree.
    if top != bottom:
        raise ParseError(
            f"LHS order mismatch in {lhs!r}: top order {top!r} vs "
            f"bottom order {bottom!r}."
        )
    if top == "":
        return match.group("var"), 1
    if top in {"1", "2"}:
        return match.group("var"), int(top)
    raise ParseError(f"Only orders 1 and 2 are supported. Got {top!r}.")
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def _parse_rhs(
    rhs: str,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> sp.Expr:
    """Pre-transform, AST-parse, and convert an RHS string to SymPy."""
    cleaned = rhs.strip()
    if not cleaned:
        raise ParseError("Empty RHS.")
    # Physics notation pre-transforms: '^' is power; derivative spellings
    # ('dx/dt', bare 'dx') become the paired velocity name.
    cleaned = _apply_velocity_alias(cleaned.replace("^", "**"), state_variables)
    try:
        tree = ast.parse(cleaned, mode="eval")
    except SyntaxError as exc:
        raise ParseError(
            f"Syntax error in RHS {cleaned!r}: {exc.msg}. "
            "Expected an infix expression like '-k*x + c*vx'."
        ) from exc
    return _ast_to_sympy(tree.body, allowed_symbols, state_variables)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def _ast_to_sympy(
    node: ast.AST,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> sp.Expr:
    """Recursively convert a whitelisted AST node into a SymPy expression.

    Accepts numeric Constant, Name, unary +/-, binary + - * / **, and calls
    to allowed functions. Every other construct raises ParseError with a
    targeted, agent-readable hint.
    """
    if isinstance(node, ast.Constant):
        value = node.value
        # bool is an int subclass, so it must be rejected explicitly.
        if isinstance(value, bool) or not isinstance(value, (int, float)):
            raise ParseError(
                f"Only numeric literals allowed on RHS; got "
                f"{value!r} ({type(value).__name__})."
            )
        return sp.Number(value)

    if isinstance(node, ast.Name):
        return _name_to_sympy(node.id, allowed_symbols, state_variables)

    if isinstance(node, ast.UnaryOp):
        inner = _ast_to_sympy(node.operand, allowed_symbols, state_variables)
        if isinstance(node.op, ast.UAdd):
            return +inner
        if isinstance(node.op, ast.USub):
            return -inner
        raise ParseError(
            f"Unsupported unary operator {type(node.op).__name__}. "
            "Allowed: + (positive), - (negation)."
        )

    if isinstance(node, ast.BinOp):
        combine = _BIN_OP_TO_SYMPY.get(type(node.op))
        if combine is None:
            raise ParseError(
                f"Unsupported binary operator {type(node.op).__name__}. "
                "Allowed: + - * / ** (also '^' as a power synonym)."
            )
        return combine(
            _ast_to_sympy(node.left, allowed_symbols, state_variables),
            _ast_to_sympy(node.right, allowed_symbols, state_variables),
        )

    if isinstance(node, ast.Call):
        return _call_to_sympy(node, allowed_symbols, state_variables)

    # --- Everything below is a deliberate rejection with a specific hint. ---

    if isinstance(node, ast.Attribute):
        raise ParseError(
            "Attribute access is not allowed in equation RHS "
            f"(saw '.{node.attr}'). Use bare function names like "
            "'sqrt(x)' or 'sin(theta)', not 'np.sqrt(x)'."
        )

    if isinstance(node, ast.Subscript):
        raise ParseError(
            "Array indexing is not allowed in equation RHS. "
            "Use named scalars declared in 'params'."
        )

    if isinstance(node, ast.Compare):
        raise ParseError(
            "Comparisons (==, <, >, etc.) are not allowed in equation RHS."
        )

    if isinstance(node, ast.BoolOp):
        raise ParseError(
            "Boolean operators ('and', 'or') are not allowed in equation RHS."
        )

    if isinstance(node, ast.IfExp):
        raise ParseError(
            "Conditional expressions ('a if cond else b') are not allowed in "
            "equation RHS."
        )

    if isinstance(node, ast.Lambda):
        raise ParseError("Lambda expressions are not allowed in equation RHS.")

    if isinstance(node, (ast.Tuple, ast.List, ast.Set, ast.Dict)):
        raise ParseError(
            f"Collection literal ({type(node).__name__}) is not allowed in "
            "equation RHS."
        )

    raise ParseError(
        f"Unsupported expression construct {type(node).__name__}. "
        "The grammar accepts: numeric literals, allowed identifiers, "
        f"+ - * / **, parentheses, and {sorted(ALLOWED_FUNCTIONS)}."
    )
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def _name_to_sympy(
    name: str,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> sp.Symbol:
    """Map an identifier to a SymPy symbol, rejecting functions and unknowns."""
    if name in ALLOWED_FUNCTIONS:
        raise ParseError(
            f"{name!r} is a function and must be called with parentheses, "
            f"e.g. {name}(x)."
        )
    if name in allowed_symbols:
        return sp.Symbol(name)
    # Attach a best-effort hint ('did you mean vx?', 't is not allowed', ...).
    hint = _explain_unknown_symbol(name, state_variables)
    raise ParseError(
        f"Unknown symbol {name!r}; allowed {sorted(allowed_symbols)!r}."
        + (f" {hint}" if hint else "")
    )
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _call_to_sympy(
    node: ast.Call,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> sp.Expr:
    """Convert a call node to SymPy, enforcing the bare-function whitelist."""
    if node.keywords:
        raise ParseError(
            "Keyword arguments are not allowed in function calls "
            "(e.g. 'sin(theta=0.1)'). Pass positional arguments only."
        )
    if any(isinstance(arg, ast.Starred) for arg in node.args):
        raise ParseError("Star-arg / unpacking ('*args') is not allowed.")

    func = node.func
    if isinstance(func, ast.Attribute):
        raise ParseError(
            "Attribute access is not allowed in equation RHS "
            f"(saw '.{func.attr}'). Use bare function names like "
            "'sqrt(x)' or 'sin(theta)', not 'np.sqrt(x)'."
        )
    if not isinstance(func, ast.Name):
        raise ParseError(
            "Only direct calls to named functions are allowed. "
            f"Use one of {sorted(ALLOWED_FUNCTIONS)}, not a computed-name call."
        )

    sympy_fn = ALLOWED_FUNCTIONS.get(func.id)
    if sympy_fn is None:
        raise ParseError(
            f"Unknown function {func.id!r}; "
            f"allowed: {sorted(ALLOWED_FUNCTIONS)}."
        )

    return sympy_fn(
        *(_ast_to_sympy(a, allowed_symbols, state_variables) for a in node.args)
    )
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def _apply_velocity_alias(rhs: str, state_variables: tuple[str, ...]) -> str:
|
| 344 |
+
aliases = _velocity_aliases(state_variables)
|
| 345 |
+
if not aliases:
|
| 346 |
+
return rhs
|
| 347 |
+
out = rhs
|
| 348 |
+
for var, velocity in aliases:
|
| 349 |
+
slash_pattern = rf"\bd{re.escape(var)}\s*/\s*dt\b"
|
| 350 |
+
out = re.sub(slash_pattern, velocity, out)
|
| 351 |
+
bare_pattern = rf"\bd{re.escape(var)}\b"
|
| 352 |
+
out = re.sub(bare_pattern, velocity, out)
|
| 353 |
+
return out
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def _velocity_aliases(state_variables: tuple[str, ...]) -> list[tuple[str, str]]:
|
| 357 |
+
state_set = set(state_variables)
|
| 358 |
+
out: list[tuple[str, str]] = []
|
| 359 |
+
for var in state_variables:
|
| 360 |
+
if not var or var.startswith(("d", "v")):
|
| 361 |
+
continue
|
| 362 |
+
velocity = f"v{var}"
|
| 363 |
+
if velocity in state_set:
|
| 364 |
+
out.append((var, velocity))
|
| 365 |
+
return out
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def _explain_unknown_symbol(name: str, state_variables: tuple[str, ...]) -> str:
|
| 369 |
+
state_set = set(state_variables)
|
| 370 |
+
if name == "t":
|
| 371 |
+
return (
|
| 372 |
+
"'t' is not allowed — the equation must be autonomous "
|
| 373 |
+
"(express forces via state variables only, no explicit time)."
|
| 374 |
+
)
|
| 375 |
+
if name.startswith("d") and len(name) > 1:
|
| 376 |
+
base = name[1:]
|
| 377 |
+
velocity = f"v{base}"
|
| 378 |
+
if velocity in state_set:
|
| 379 |
+
return (
|
| 380 |
+
f"Did you mean '{velocity}'? "
|
| 381 |
+
f"Use '{velocity}' for the velocity of '{base}'."
|
| 382 |
+
)
|
| 383 |
+
if base in state_set:
|
| 384 |
+
return (
|
| 385 |
+
f"'{name}' looks like a derivative; this system has no "
|
| 386 |
+
f"separate velocity name, write '{base}' on the RHS."
|
| 387 |
+
)
|
| 388 |
+
return ""
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def _count_operators(expr: sp.Expr) -> int:
    """Count non-leaf nodes: everything that is not a bare Symbol or Number."""
    return sum(
        1
        for node in sp.preorder_traversal(expr)
        if not isinstance(node, (sp.Symbol, sp.Number))
    )
|
physix/verifier/reward.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Compose the four-component reward.
|
| 2 |
+
|
| 3 |
+
Responsibility:
|
| 4 |
+
|
| 5 |
+
- Take pre-computed ``r_match`` (from :mod:`physix.verifier.metrics`),
|
| 6 |
+
``operator_count`` (from :mod:`physix.verifier.parser`), the previous turn's
|
| 7 |
+
total reward (from env state), and a "did parse?" flag (true iff
|
| 8 |
+
:func:`parse_equation` succeeded).
|
| 9 |
+
- Return a :class:`RewardBreakdown` with all four components and the weighted
|
| 10 |
+
total.
|
| 11 |
+
|
| 12 |
+
This module owns no NumPy/SciPy dependencies. It only knows about scalars and
|
| 13 |
+
the four reward names.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
|
| 18 |
+
from physix.models import REWARD_WEIGHTS, RewardBreakdown
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
#: Operator-count cap used to normalise ``r_simplicity``. An equation with
|
| 22 |
+
#: this many operators or more scores 0.0 on simplicity; an equation with one
|
| 23 |
+
#: operator scores ~1.0.
|
| 24 |
+
SIMPLICITY_OPERATOR_CAP: int = 12
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def compute_reward(
    *,
    parse_succeeded: bool,
    r_match: float,
    operator_count: int,
    previous_r_match: float,
) -> RewardBreakdown:
    """Compose the four-component reward for one turn.

    Args:
        parse_succeeded: True iff ``parse_equation`` accepted the payload.
        r_match: Fit score of the current hypothesis.
        operator_count: Raw operator count from the parser (simplicity input).
        previous_r_match: Match score of the last hypothesis that parsed.

    A parse failure short-circuits to an all-zero breakdown (including
    format), keeping every component a well-defined number rather than NaN.
    """
    if not parse_succeeded:
        zero_total = _weighted_total(
            match=0.0, progress=0.0, simplicity=0.0, format=0.0
        )
        return RewardBreakdown(
            match=0.0,
            progress=0.0,
            simplicity=0.0,
            format=0.0,
            total=zero_total,
        )

    simplicity = _simplicity_score(operator_count)
    progress = _progress_score(r_match=r_match, previous_r_match=previous_r_match)
    return RewardBreakdown(
        match=r_match,
        progress=progress,
        simplicity=simplicity,
        format=1.0,
        total=_weighted_total(
            match=r_match,
            progress=progress,
            simplicity=simplicity,
            format=1.0,
        ),
    )
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _simplicity_score(operator_count: int) -> float:
    """Map raw operator count to [0, 1], smaller counts scoring higher.

    Counts at or above SIMPLICITY_OPERATOR_CAP score 0.0; a non-positive
    count scores 1.0; in between the score falls off linearly.
    """
    clamped = min(max(operator_count, 0), SIMPLICITY_OPERATOR_CAP)
    return 1.0 - (clamped / SIMPLICITY_OPERATOR_CAP)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _progress_score(*, r_match: float, previous_r_match: float) -> float:
|
| 80 |
+
"""Score for improving the physics fit over the previous turn.
|
| 81 |
+
|
| 82 |
+
Compares ``r_match`` (R² of the current hypothesis) against
|
| 83 |
+
``previous_r_match`` (R² of the last hypothesis that actually parsed).
|
| 84 |
+
|
| 85 |
+
Critically, we compare match-against-match, not match-against-total.
|
| 86 |
+
The old code used ``previous_total``, which includes simplicity and format
|
| 87 |
+
bonuses — a model that parsed cleanly on turn 1 (earning total=0.30 from
|
| 88 |
+
format+simplicity alone, match=0) would never see ``r_progress > 0`` on
|
| 89 |
+
turn 2 even if its match improved from 0 → 0.25, because 0.25 < 0.30.
|
| 90 |
+
That incorrectly killed the improvement signal.
|
| 91 |
+
"""
|
| 92 |
+
delta = r_match - previous_r_match
|
| 93 |
+
if delta <= 0.0:
|
| 94 |
+
return 0.0
|
| 95 |
+
if delta >= 1.0:
|
| 96 |
+
return 1.0
|
| 97 |
+
return float(delta)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _weighted_total(
    *,
    match: float,
    progress: float,
    simplicity: float,
    format: float,
) -> float:
    """Weighted sum of the four components using REWARD_WEIGHTS."""
    # Keep the canonical component order so float accumulation is stable.
    components = (
        ("match", match),
        ("progress", progress),
        ("simplicity", simplicity),
        ("format", format),
    )
    total = 0.0
    for name, value in components:
        total += REWARD_WEIGHTS[name] * value
    return float(total)
|
physix/verifier/simulator.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Forward-simulate the agent's parsed hypothesis.
|
| 2 |
+
|
| 3 |
+
Responsibility:
|
| 4 |
+
|
| 5 |
+
- Given a :class:`ParsedEquation`, the system's state-variable layout, the
|
| 6 |
+
agent's parameter dictionary, and a set of initial conditions and
|
| 7 |
+
timestamps, run ``scipy.integrate.odeint`` to produce a predicted
|
| 8 |
+
trajectory in the same shape as the system's observed data.
|
| 9 |
+
|
| 10 |
+
What this module does not do: parse text (see :mod:`physix.verifier.parser`),
|
| 11 |
+
score the trajectory (see :mod:`physix.verifier.metrics`), or compose rewards
|
| 12 |
+
(see :mod:`physix.verifier.reward`).
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
from collections.abc import Iterable
|
| 18 |
+
from typing import Callable
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import sympy as sp
|
| 22 |
+
from scipy.integrate import odeint
|
| 23 |
+
|
| 24 |
+
from physix.verifier.parser import ParsedEquation
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class SimulationError(RuntimeError):
    """The proposed system could not be integrated numerically.

    Typical triggers: NaN/Inf emitted by ``odeint``, a singular mass matrix,
    or stiff dynamics that exhaust the integrator's step budget.
    """
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def simulate_hypothesis(
    parsed: ParsedEquation,
    state_variables: Iterable[str],
    parameters: dict[str, float],
    initial_conditions: dict[str, float],
    timestamps: np.ndarray,
) -> dict[str, np.ndarray]:
    """Integrate the agent's hypothesis forward in time.

    Args:
        parsed: Output of :func:`physix.verifier.parser.parse_equation`.
        state_variables: Variable ordering of the underlying physical system;
            determines the integration state vector layout.
        parameters: Numerical parameter values supplied by the agent.
        initial_conditions: Initial state values keyed by variable name.
        timestamps: 1-D array of times at which to record state.

    Returns:
        Dict mapping each state variable to its predicted 1-D trajectory,
        aligned with ``timestamps``.

    Raises:
        SimulationError: if the RHS cannot be compiled, the integrator
            fails, or the result contains non-finite values.
    """
    state_vars = tuple(state_variables)
    layout = _build_integration_layout(parsed, state_vars)

    try:
        derivative_fn = _compile_rhs(parsed, layout, parameters)
    except Exception as exc:  # noqa: BLE001 — surfaced as SimulationError below
        raise SimulationError(f"failed to compile RHS: {exc}") from exc

    y0 = _build_initial_state(layout, initial_conditions)

    try:
        trajectory = odeint(
            derivative_fn,
            y0,
            timestamps,
            full_output=False,
            mxstep=2000,
            rtol=1e-6,
            atol=1e-9,
        )
    except Exception as exc:  # noqa: BLE001
        # The RHS callable is model-derived code: lambdified equations can
        # raise TypeError, ZeroDivisionError, OverflowError, etc. at runtime,
        # and odeint propagates whatever the callable raises. Convert all of
        # it into a clean SimulationError (scored as r_match=0 for the turn)
        # instead of letting the route crash with a 500.
        raise SimulationError(f"odeint failed: {exc}") from exc

    if not np.all(np.isfinite(trajectory)):
        raise SimulationError("Predicted trajectory contains NaN or Inf values.")

    return _project_back_to_state_vars(trajectory, layout, state_vars)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# (ordered integration variable names, name -> slot index)
_IntegrationLayout = tuple[tuple[str, ...], dict[str, int]]


def _build_integration_layout(
    parsed: ParsedEquation,
    state_vars: tuple[str, ...],
) -> _IntegrationLayout:
    """Return ``(integration_vars, index)`` describing the state vector.

    Second-order ODEs need both the variable and its first derivative in the
    integration state: ``y -> [y, vy]``, using the ``v<name>``-is-the-first-
    derivative convention shared with :mod:`physix.systems.tier1`.
    """
    eq_by_var = {eq.var: eq for eq in parsed.equations}
    names: list[str] = []

    for var in state_vars:
        # Velocity companions (e.g. 'vy' paired with 'y') are appended next
        # to their base variable by the order-2 branch below, so skip them.
        if _is_velocity_of(var, state_vars):
            continue
        names.append(var)
        eq = eq_by_var.get(var)
        if eq is not None and eq.order == 2:
            names.append(_velocity_name(var))
        # order-1 equation or no equation at all: the bare variable suffices
        # (an unmodelled variable is treated as having zero derivative).

    # Equations whose var is absent from state_vars (e.g. the agent named a
    # derivative directly): keep them integrable rather than crashing;
    # the projection step simply won't report them.
    seen = set(names)
    for eq in parsed.equations:
        if eq.var in seen:
            continue
        names.append(eq.var)
        if eq.order == 2:
            names.append(_velocity_name(eq.var))

    return tuple(names), {name: slot for slot, name in enumerate(names)}
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def _is_velocity_of(var: str, state_vars: tuple[str, ...]) -> bool:
|
| 145 |
+
"""True if ``var`` looks like the first derivative of another state var."""
|
| 146 |
+
if var.startswith("v"):
|
| 147 |
+
return var[1:] in state_vars
|
| 148 |
+
if var.startswith("d"):
|
| 149 |
+
return var[1:] in state_vars
|
| 150 |
+
return False
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _velocity_name(var: str) -> str:
|
| 154 |
+
"""Convention for naming a first time derivative."""
|
| 155 |
+
if var.startswith("theta"):
|
| 156 |
+
return "d" + var
|
| 157 |
+
return "v" + var
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def _compile_rhs(
    parsed: ParsedEquation,
    layout: _IntegrationLayout,
    parameters: dict[str, float],
) -> Callable[[np.ndarray, float], np.ndarray]:
    """Build a Python callable ``f(state, t) -> dstate/dt`` for ``odeint``."""
    integration_vars, index = layout
    eq_by_var = {eq.var: eq for eq in parsed.equations}

    # One symbol per integration slot; lambdify each RHS once with the
    # agent's scalar parameter values substituted in up front.
    state_symbols = sp.symbols(" ".join(integration_vars))
    if not isinstance(state_symbols, tuple):
        state_symbols = (state_symbols,)
    substitutions = {sp.Symbol(name): value for name, value in parameters.items()}
    compiled = {
        var: sp.lambdify(state_symbols, eq.rhs.subs(substitutions), modules="numpy")
        for var, eq in eq_by_var.items()
    }

    def _rhs(state: np.ndarray, t: float) -> np.ndarray:
        derivs = np.zeros_like(state)
        for var, eq in eq_by_var.items():
            value = float(compiled[var](*state))
            if eq.order == 1:
                derivs[index[var]] = value
            else:  # order == 2: d(pos) = vel, d(vel) = compiled RHS
                vel_slot = index[_velocity_name(var)]
                derivs[index[var]] = state[vel_slot]
                derivs[vel_slot] = value
        return derivs

    return _rhs
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def _build_initial_state(
|
| 198 |
+
layout: _IntegrationLayout,
|
| 199 |
+
initial_conditions: dict[str, float],
|
| 200 |
+
) -> np.ndarray:
|
| 201 |
+
"""Project the IC dict into the integration variable order."""
|
| 202 |
+
integration_vars, _ = layout
|
| 203 |
+
state = np.zeros(len(integration_vars), dtype=float)
|
| 204 |
+
for i, var in enumerate(integration_vars):
|
| 205 |
+
state[i] = float(initial_conditions.get(var, 0.0))
|
| 206 |
+
return state
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def _project_back_to_state_vars(
|
| 210 |
+
result: np.ndarray,
|
| 211 |
+
layout: _IntegrationLayout,
|
| 212 |
+
state_vars: tuple[str, ...],
|
| 213 |
+
) -> dict[str, np.ndarray]:
|
| 214 |
+
"""Slice the integration output into per-system-variable trajectories."""
|
| 215 |
+
_, index = layout
|
| 216 |
+
out: dict[str, np.ndarray] = {}
|
| 217 |
+
for var in state_vars:
|
| 218 |
+
if var in index:
|
| 219 |
+
out[var] = result[:, index[var]]
|
| 220 |
+
else:
|
| 221 |
+
# The agent never modelled this variable; predict the first
|
| 222 |
+
# entry constant. This is rare and kept defensive.
|
| 223 |
+
out[var] = np.full(result.shape[0], float(result[0, 0]))
|
| 224 |
+
return out
|
pyproject.toml
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["hatchling"]
|
| 3 |
+
build-backend = "hatchling.build"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "physix-live"
|
| 7 |
+
version = "0.1.0"
|
| 8 |
+
description = "OpenEnv RL environment for iterative equation discovery from trajectory data"
|
| 9 |
+
authors = [{ name = "PhysiX-Live Team" }]
|
| 10 |
+
requires-python = ">=3.10"
|
| 11 |
+
dependencies = [
|
| 12 |
+
"openenv-core[core]>=0.2.2",
|
| 13 |
+
"numpy>=1.24",
|
| 14 |
+
"scipy>=1.10",
|
| 15 |
+
"sympy>=1.12",
|
| 16 |
+
"fastapi>=0.110",
|
| 17 |
+
"uvicorn>=0.29",
|
| 18 |
+
"pydantic>=2.5",
|
| 19 |
+
"requests>=2.31",
|
| 20 |
+
]
|
| 21 |
+
|
| 22 |
+
[project.optional-dependencies]
|
| 23 |
+
dev = ["pytest>=7.4", "ruff>=0.4"]
|
| 24 |
+
# Training stack pinned to versions verified to work together. trl is
|
| 25 |
+
# HARD-pinned: see physix-train/Dockerfile for the rationale (Unsloth's
|
| 26 |
+
# patch_trl_openenv hook crashes on trl >=0.26).
|
| 27 |
+
train = [
|
| 28 |
+
"torch>=2.4",
|
| 29 |
+
"transformers>=4.56.1",
|
| 30 |
+
"accelerate>=1.4",
|
| 31 |
+
"trl>=0.18.2,!=0.19.0,<=0.24.0",
|
| 32 |
+
"unsloth>=2025.4",
|
| 33 |
+
"wandb>=0.16",
|
| 34 |
+
"datasets>=3.0",
|
| 35 |
+
"huggingface_hub>=0.24,<1.0",
|
| 36 |
+
]
|
| 37 |
+
demo = ["ollama>=0.4"]
|
| 38 |
+
|
| 39 |
+
[project.scripts]
|
| 40 |
+
physix-server = "physix.server.app:main"
|
| 41 |
+
physix-sft = "physix.training.sft:main"
|
| 42 |
+
physix-grpo = "physix.training.loop:main"
|
| 43 |
+
|
| 44 |
+
[tool.hatch.build.targets.wheel]
|
| 45 |
+
packages = ["physix"]
|
| 46 |
+
|
| 47 |
+
[tool.pytest.ini_options]
|
| 48 |
+
testpaths = ["tests"]
|
tests/__init__.py
ADDED
|
File without changes
|
tests/test_client_ws.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""WebSocket smoke test: spin up the FastAPI server in-process and drive it
|
| 2 |
+
through :class:`physix.PhysiXEnv` over a real WebSocket connection.
|
| 3 |
+
|
| 4 |
+
This catches regressions in the wire protocol (action/observation
|
| 5 |
+
serialisation, session lifecycle) that the in-process
|
| 6 |
+
``test_environment.py`` cannot.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import asyncio
|
| 12 |
+
import contextlib
|
| 13 |
+
import socket
|
| 14 |
+
import threading
|
| 15 |
+
import time
|
| 16 |
+
from collections.abc import Iterator
|
| 17 |
+
|
| 18 |
+
import pytest
|
| 19 |
+
import uvicorn
|
| 20 |
+
|
| 21 |
+
from physix.client import PhysiXEnv
|
| 22 |
+
from physix.models import PhysiXAction
|
| 23 |
+
from physix.server.app import app
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# ---------------------------------------------------------------------------
|
| 27 |
+
# Server fixture
|
| 28 |
+
# ---------------------------------------------------------------------------
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _free_port() -> int:
|
| 32 |
+
"""Return an OS-assigned free TCP port."""
|
| 33 |
+
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
|
| 34 |
+
sock.bind(("127.0.0.1", 0))
|
| 35 |
+
return int(sock.getsockname()[1])
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@pytest.fixture(scope="module")
def server_url() -> Iterator[str]:
    """Run uvicorn in a daemon thread for the lifetime of this test module.

    Yields the base URL of the live server; shuts it down on teardown.
    """
    port = _free_port()
    server = uvicorn.Server(
        uvicorn.Config(app, host="127.0.0.1", port=port, log_level="warning")
    )

    worker = threading.Thread(target=server.run, daemon=True)
    worker.start()

    # Poll until uvicorn flags readiness; bail out after 10 seconds.
    deadline = time.time() + 10.0
    while not server.started:
        if time.time() >= deadline:
            pytest.fail("uvicorn server failed to start within timeout")
        time.sleep(0.05)

    try:
        yield f"http://127.0.0.1:{port}"
    finally:
        server.should_exit = True
        worker.join(timeout=5.0)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# ---------------------------------------------------------------------------
|
| 62 |
+
# Test
|
| 63 |
+
# ---------------------------------------------------------------------------
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
async def _drive_episode(base_url: str) -> None:
    """One reset + step against the live server, asserting reward shape.

    This is a plain coroutine helper, driven synchronously by
    :func:`test_websocket_round_trip` via ``asyncio.run``.

    BUG FIX: the original carried ``@pytest.mark.asyncio`` — but this
    project's dev dependencies do not include pytest-asyncio, so the mark
    is unregistered (``PytestUnknownMarkWarning``, a hard error under
    ``--strict-markers``), and it is dead weight anyway because the
    underscore-prefixed helper is never collected by pytest. Removed.
    """
    async with PhysiXEnv(base_url=base_url) as env:
        result = await env.reset(system_id="free_fall", seed=11)

        # Fresh episode: not done, correct system, turn counter at zero.
        assert result.done is False
        assert result.observation.system_id == "free_fall"
        assert result.observation.turn == 0
        assert len(result.observation.trajectory) > 0

        result = await env.step(
            PhysiXAction(equation="d2y/dt2 = -9.81", params={}, rationale="free fall")
        )

        # Ground-truth equation: perfect format, near-perfect match.
        breakdown = result.observation.reward_breakdown
        assert breakdown["format"] == 1.0
        assert breakdown["match"] >= 0.9
        assert result.done is True
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def test_websocket_round_trip(server_url: str) -> None:
    """Synchronous pytest entry point: run the async episode to completion."""
    episode = _drive_episode(server_url)
    asyncio.run(episode)
|
tests/test_dataset.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Unit tests for :mod:`physix.training.dataset`."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from physix.systems import SUPPORTED_SYSTEMS
|
| 8 |
+
from physix.training.dataset import (
|
| 9 |
+
DatasetSpec,
|
| 10 |
+
EvalDatasetSpec,
|
| 11 |
+
build_eval_dataset,
|
| 12 |
+
build_training_dataset,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Columns every training row must expose; checked with ``issubset`` below,
# so extra columns are permitted.
_EXPECTED_COLUMNS = {
    "prompt",
    "system_id",
    "state_variables",
    "parameters",
    "initial_conditions",
    "timestamps",
    "observed",
    "previous_r_match",
}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def test_training_dataset_has_expected_schema() -> None:
    """The built dataset exposes the required columns and row count."""
    dataset = build_training_dataset(DatasetSpec(instances_per_system=2))

    assert _EXPECTED_COLUMNS.issubset(set(dataset.column_names))
    # Default curriculum is the 3 demo systems × 2 instances = 6 rows.
    assert len(dataset) == len(SUPPORTED_SYSTEMS) * 2


def test_training_dataset_default_curriculum_is_demo_systems() -> None:
    """Default DatasetSpec must train on exactly the same systems the
    live demo exposes. Mismatches would mean GRPO improves systems we
    never benchmark — the failure mode that produced flat headline
    metrics in the v1 training run.
    """
    dataset = build_training_dataset(DatasetSpec(instances_per_system=1))
    assert set(dataset["system_id"]) == set(SUPPORTED_SYSTEMS)


def test_training_dataset_explicit_system_ids_override_default() -> None:
    """An explicit system_ids tuple replaces the default curriculum."""
    spec = DatasetSpec(
        system_ids=("free_fall", "simple_pendulum"), instances_per_system=1
    )
    dataset = build_training_dataset(spec)
    assert set(dataset["system_id"]) == {"free_fall", "simple_pendulum"}


def test_training_dataset_rejects_unknown_system_ids() -> None:
    """Unknown ids raise instead of silently building a bad curriculum."""
    with pytest.raises(ValueError, match="Unknown system_ids"):
        build_training_dataset(DatasetSpec(system_ids=("not_a_real_system",)))


def test_training_dataset_rejects_empty_system_ids() -> None:
    """An empty system_ids tuple is rejected outright."""
    with pytest.raises(ValueError, match="non-empty"):
        build_training_dataset(DatasetSpec(system_ids=()))
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def test_training_dataset_prompts_are_chat_lists() -> None:
    """Prompts are chat-format message lists: a system turn then a user turn."""
    dataset = build_training_dataset(DatasetSpec(instances_per_system=1))
    messages = dataset[0]["prompt"]

    assert isinstance(messages, list)
    assert messages[0]["role"] == "system"
    assert messages[1]["role"] == "user"
    assert "TRAJECTORY" in messages[1]["content"]


def test_eval_dataset_marks_held_out_rows() -> None:
    """Eval rows carry an is_held_out flag separating Tier 3 systems."""
    dataset = build_eval_dataset(EvalDatasetSpec(instances_per_system=1))

    held_out = [row for row in dataset if row["is_held_out"]]
    in_train = [row for row in dataset if not row["is_held_out"]]

    assert len(held_out) >= 2  # 2 Tier 3 systems
    assert len(in_train) >= 6  # 6 Tier 1+2 systems
    assert {row["system_id"] for row in held_out} == {
        "projectile_drag",
        "charged_b_field",
    }


def test_dataset_observed_arrays_match_state_variables() -> None:
    """Every state variable has a non-empty observed series in each row."""
    dataset = build_training_dataset(DatasetSpec(instances_per_system=1))

    for row in dataset:
        series = row["observed"]
        for name in row["state_variables"]:
            assert name in series
            assert len(series[name]) > 0
|
tests/test_environment.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""End-to-end smoke tests for :class:`PhysiXEnvironment`.
|
| 2 |
+
|
| 3 |
+
These tests exercise the full pipeline (parse + simulate + score + record)
|
| 4 |
+
without spinning up a FastAPI server. They serve as the first sanity check
|
| 5 |
+
that the parser, simulator, metrics, and reward composer interoperate.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import pytest
|
| 11 |
+
|
| 12 |
+
from physix.models import CONVERGENCE_THRESHOLD, PhysiXAction
|
| 13 |
+
from physix.server.environment import PhysiXEnvironment
|
| 14 |
+
from physix.systems import SystemTier
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# ---------------------------------------------------------------------------
|
| 18 |
+
# Fixtures
|
| 19 |
+
# ---------------------------------------------------------------------------
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@pytest.fixture
def env() -> PhysiXEnvironment:
    """Deterministic env restricted to Tier 1 systems, keeping tests fast."""
    tier1_only = (SystemTier.TIER_1,)
    return PhysiXEnvironment(seed=42, train_tiers=tier1_only)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# ---------------------------------------------------------------------------
|
| 29 |
+
# Tests
|
| 30 |
+
# ---------------------------------------------------------------------------
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def test_reset_returns_well_formed_observation(env: PhysiXEnvironment) -> None:
    """reset() yields a fresh, fully-populated first observation."""
    obs = env.reset(system_id="free_fall")

    assert obs.system_id == "free_fall"
    assert obs.turn == 0
    assert obs.turn_remaining > 0
    assert obs.history == []
    assert obs.mismatch_summary == ""
    assert "y" in obs.state_variables
    assert "vy" in obs.state_variables
    assert len(obs.trajectory) == 100
    assert obs.hint  # non-empty
    assert obs.done is False


def test_step_with_ground_truth_rewards_high(env: PhysiXEnvironment) -> None:
    """The exact ground-truth equation should yield r_match close to 1."""
    env.reset(system_id="free_fall")

    obs = env.step(PhysiXAction(equation="d2y/dt2 = -9.81", params={}))

    assert obs.reward_breakdown["format"] == 1.0
    assert obs.reward_breakdown["match"] >= 0.95
    assert obs.reward >= CONVERGENCE_THRESHOLD * 0.5  # weighted total floor


def test_step_with_unparseable_equation_short_circuits(
    env: PhysiXEnvironment,
) -> None:
    """A junk payload should set r_format=0 and other components to 0."""
    env.reset(system_id="free_fall")

    obs = env.step(PhysiXAction(equation="not a real equation"))

    # Every reward component is zeroed when parsing fails.
    for component in ("format", "match", "progress", "simplicity"):
        assert obs.reward_breakdown[component] == 0.0
    assert "Parse error" in obs.mismatch_summary
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def test_episode_terminates_on_convergence(env: PhysiXEnvironment) -> None:
    """High-quality match should set done=True via the convergence threshold."""
    env.reset(system_id="free_fall")
    final = env.step(PhysiXAction(equation="d2y/dt2 = -9.81"))
    assert final.done is True


def test_history_accumulates_across_turns(env: PhysiXEnvironment) -> None:
    """Each step should append exactly one history entry."""
    env.reset(system_id="free_fall_drag")

    first = env.step(PhysiXAction(equation="d2y/dt2 = -9.81"))
    assert len(first.history) == 1
    assert first.history[0]["equation"] == "d2y/dt2 = -9.81"

    if not first.done:
        second = env.step(
            PhysiXAction(equation="d2y/dt2 = -9.81 + 0.05 * vy**2"),
        )
        assert len(second.history) == 2
        assert second.history[1]["equation"] == "d2y/dt2 = -9.81 + 0.05 * vy**2"
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def test_progress_reward_rewards_improvement(env: PhysiXEnvironment) -> None:
    """A second-turn improvement should yield positive r_progress."""
    env.reset(system_id="free_fall_drag")

    # Turn 1: pure gravity (decent fit but missing drag).
    obs1 = env.step(PhysiXAction(equation="d2y/dt2 = -9.81"))
    if obs1.done:
        pytest.skip("episode converged on turn 1")

    # Turn 2: add drag (closer fit).
    obs2 = env.step(
        PhysiXAction(equation="d2y/dt2 = -9.81 + 0.05 * vy**2"),
    )

    assert obs2.reward_breakdown["match"] >= obs1.reward_breakdown["match"]
    # BUG FIX: the strict-improvement guard previously compared the new
    # turn's "match" against the previous turn's "total" — a weighted
    # composite that also mixes in format/simplicity — so the branch
    # fired (or not) on the wrong condition. Progress is defined as an
    # improvement in match, so compare match against match.
    if obs2.reward_breakdown["match"] > obs1.reward_breakdown["match"]:
        assert obs2.reward_breakdown["progress"] > 0.0
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def test_max_turns_terminates_episode() -> None:
    """When budget is exhausted with no convergence, ``done`` flips true."""
    env = PhysiXEnvironment(seed=0, max_turns=3, train_tiers=(SystemTier.TIER_1,))
    env.reset(system_id="simple_pendulum")

    final = None
    for _ in range(3):
        # Deliberately wrong-but-parseable equation.
        final = env.step(PhysiXAction(equation="d2theta/dt2 = 0"))

    assert final is not None
    assert final.done is True
    assert final.turn_remaining == 0


def test_state_property_exposes_episode_id(env: PhysiXEnvironment) -> None:
    """env.state mirrors the live episode and system identifiers."""
    obs = env.reset(system_id="free_fall")

    assert env.state.episode_id is not None
    assert env.state.episode_id  # non-empty string
    assert env.state.system_id == "free_fall"
    assert obs.system_id == env.state.system_id
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@pytest.mark.parametrize(
    "system_id, equation",
    [
        # Pendulum-like system with a sqrt of an Add — historically
        # produced a TypeError ("loop of ufunc does not support argument
        # 0 of type Add which has no callable sqrt method") that escaped
        # the simulator and 500-ed the route.
        ("simple_pendulum", "d2theta/dt2 = -sqrt(dtheta**2 + theta**2) * sin(theta)"),
        # sqrt of a guaranteed-negative quantity → numpy emits NaN.
        ("simple_pendulum", "d2theta/dt2 = -sqrt(-theta**2 - 1)"),
        # Division by zero from constant numerics in the RHS.
        ("free_fall", "d2y/dt2 = -9.81 / (y - y)"),
        # Pathological growth that overflows odeint.
        ("free_fall", "d2y/dt2 = exp(exp(exp(y)))"),
        # log of zero (-inf) propagating through the RHS.
        ("free_fall", "d2y/dt2 = log(0 * y)"),
    ],
)
def test_step_swallows_simulator_failures_as_format_zero_match_zero(
    system_id: str, equation: str
) -> None:
    """``step`` must never propagate a TypeError / overflow / NaN out of
    the simulator into the route layer. A model-emitted equation that
    parses but blows up numerically should score ``r_match=0`` cleanly,
    surface a ``Simulation error: ...`` mismatch, and let the episode
    continue. Without the broadened exception catch in
    :func:`simulate_hypothesis`, several of these would 500 the server.
    """
    local_env = PhysiXEnvironment(seed=0, train_tiers=(SystemTier.TIER_1,))
    local_env.reset(system_id=system_id)

    result = local_env.step(PhysiXAction(equation=equation))

    assert result.reward_breakdown["match"] == 0.0
    # The equation parses, so format should be 1; any "format=0" here
    # indicates parse rejection (also acceptable for these inputs).
    assert result.reward_breakdown["format"] in (0.0, 1.0)
    # Either path must produce a non-empty diagnostic string.
    assert result.mismatch_summary
|
tests/test_interactive_api.py
ADDED
|
@@ -0,0 +1,329 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""End-to-end tests for the ``/interactive/*`` router."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
from collections.abc import Iterable
|
| 7 |
+
|
| 8 |
+
import pytest
|
| 9 |
+
from fastapi import FastAPI
|
| 10 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 11 |
+
from fastapi.testclient import TestClient
|
| 12 |
+
from openenv.core.env_server import create_fastapi_app
|
| 13 |
+
|
| 14 |
+
from physix.models import PhysiXAction, PhysiXObservation
|
| 15 |
+
from physix.server.app import build_app
|
| 16 |
+
from physix.server.environment import PhysiXEnvironment
|
| 17 |
+
from physix.server.interactive import (
|
| 18 |
+
LlmModelInfo,
|
| 19 |
+
LlmModelsResponse,
|
| 20 |
+
LlmStepRequest,
|
| 21 |
+
build_interactive_router,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@pytest.fixture
def client() -> TestClient:
    """HTTP test client bound to a freshly built production app."""
    application = build_app()
    return TestClient(application)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _build_app_with_stubbed_llm(
    completions: Iterable[str],
    *,
    models_response: LlmModelsResponse | None = None,
) -> FastAPI:
    """Build a clone of the production app whose LLM policy returns
    pre-canned completion strings in order.

    Each call to the policy pops the next completion off the queue, so a
    test that wants three turns supplies three strings. Optionally
    overrides the model lister so the ``/interactive/models`` route can
    be exercised without touching the real Ollama daemon.
    """
    pending = list(completions)

    def _stub_policy(_payload: LlmStepRequest):
        # The real factory would build an Ollama-backed policy; this one
        # just replays the canned strings in FIFO order.
        def _policy(_prompt: list[dict[str, str]]) -> str:
            if not pending:
                raise AssertionError("Stubbed LLM ran out of canned completions.")
            return pending.pop(0)

        return _policy

    def _stub_lister() -> LlmModelsResponse:
        return models_response or LlmModelsResponse(models=[])

    application = create_fastapi_app(
        env=PhysiXEnvironment,
        action_cls=PhysiXAction,
        observation_cls=PhysiXObservation,
    )
    application.add_middleware(
        CORSMiddleware,
        allow_origins=["http://localhost:5173"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    router = build_interactive_router(
        policy_factory=_stub_policy,
        models_lister=_stub_lister,
    )
    application.include_router(router)
    return application
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# --- Catalogue ---
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def test_systems_endpoint_returns_supported_systems_in_order(
    client: TestClient,
) -> None:
    """/interactive/systems mirrors SUPPORTED_SYSTEMS exactly, in order."""
    from physix.systems.registry import SUPPORTED_SYSTEMS

    response = client.get("/interactive/systems")
    assert response.status_code == 200

    rows = response.json()
    ids_in_order = [entry["system_id"] for entry in rows]
    assert ids_in_order == list(SUPPORTED_SYSTEMS)

    id_set = set(ids_in_order)
    for hidden in ("projectile_drag", "charged_b_field"):
        assert hidden not in id_set
    # Sanity check: the systems we pruned for plateauing on 7B must not
    # leak through either, since the whole point of the curriculum is to
    # hide them from the demo dropdown.
    for pruned in ("free_fall_drag", "damped_pendulum"):
        assert pruned not in id_set
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
# --- Local model catalogue ---
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def test_models_endpoint_returns_injected_list() -> None:
    """Frontend reads installed model tags from the server, not a hardcoded
    list. The route must surface whatever the lister reports."""
    canned = LlmModelsResponse(
        models=[
            LlmModelInfo(name="qwen2.5:7b", size_bytes=4_700_000_000, parameter_size="7.6B"),
            LlmModelInfo(name="qwen2.5:1.5b-instruct", size_bytes=986_000_000),
        ]
    )
    app = _build_app_with_stubbed_llm([], models_response=canned)
    with TestClient(app) as client:
        response = client.get("/interactive/models")

    assert response.status_code == 200, response.text
    payload = response.json()
    assert payload["error"] is None
    names = [entry["name"] for entry in payload["models"]]
    assert names == ["qwen2.5:7b", "qwen2.5:1.5b-instruct"]
    assert payload["models"][0]["parameter_size"] == "7.6B"


def test_models_endpoint_returns_empty_with_error_when_daemon_unavailable() -> None:
    """When Ollama is unreachable the route degrades to an empty list and
    surfaces a human-readable hint, instead of 5xx-ing the page."""
    canned = LlmModelsResponse(
        models=[],
        error="Could not reach the local Ollama daemon (test). Is 'ollama serve' running?",
    )
    app = _build_app_with_stubbed_llm([], models_response=canned)
    with TestClient(app) as client:
        response = client.get("/interactive/models")

    assert response.status_code == 200
    payload = response.json()
    assert payload["models"] == []
    assert "Ollama" in payload["error"]
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# --- Session lifecycle ---
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def test_session_lifecycle_create_summary_delete(client: TestClient) -> None:
    """Create → reset observation → summary → delete → 404. The actual
    advancing of turn counter / format scoring / predicted overlay
    lives in the ``/llm-step`` tests below; this is the lifecycle
    skeleton (the only flow the UI actually exercises now that the
    manual ``/step`` route is gone)."""
    create = client.post(
        "/interactive/sessions",
        json={"system_id": "free_fall", "seed": 42, "max_turns": 4},
    )
    assert create.status_code == 200, create.text
    payload = create.json()

    session_id = payload["session_id"]
    assert isinstance(session_id, str)
    assert session_id
    assert payload["system"]["system_id"] == "free_fall"
    assert "tier" not in payload["system"]  # tier is dropped from the public schema
    assert payload["max_turns"] == 4
    first_obs = payload["observation"]
    assert first_obs["turn"] == 0
    assert first_obs["done"] is False
    assert len(first_obs["trajectory"]) == 100

    summary = client.get(f"/interactive/sessions/{session_id}").json()
    assert (summary["turn"], summary["max_turns"], summary["done"]) == (0, 4, False)

    assert client.delete(f"/interactive/sessions/{session_id}").status_code == 204
    assert client.get(f"/interactive/sessions/{session_id}").status_code == 404


def test_unknown_system_id_returns_400(client: TestClient) -> None:
    """Creating a session for an unknown system is rejected with 400."""
    response = client.post(
        "/interactive/sessions",
        json={"system_id": "no_such_system"},
    )
    assert response.status_code == 400


def test_unknown_session_id_returns_404() -> None:
    """Session-scoped routes return 404 for unknown ids, not 500."""
    app = _build_app_with_stubbed_llm([])
    with TestClient(app) as client:
        response = client.post(
            "/interactive/sessions/does-not-exist/llm-step",
            json={"model": "stub"},
        )
    assert response.status_code == 404
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
# --- LLM-step endpoint (with stubbed policy) ---
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def test_llm_step_drives_a_turn_using_injected_policy() -> None:
    """The endpoint must call the policy, parse, step, and surface the raw."""
    app = _build_app_with_stubbed_llm(
        [json.dumps({"equation": "d2y/dt2 = -9.81", "rationale": "gravity"})]
    )
    with TestClient(app) as client:
        created = client.post(
            "/interactive/sessions",
            json={"system_id": "free_fall", "seed": 0, "max_turns": 4},
        ).json()

        response = client.post(
            f"/interactive/sessions/{created['session_id']}/llm-step",
            json={"model": "stub:1.5b", "temperature": 0.1, "max_tokens": 64},
        )

    assert response.status_code == 200, response.text
    payload = response.json()
    assert payload["model"] == "stub:1.5b"
    assert payload["action"]["equation"] == "d2y/dt2 = -9.81"
    assert payload["action"]["rationale"] == "gravity"
    assert payload["observation"]["turn"] == 1
    assert payload["observation"]["reward_breakdown"]["match"] >= 0.9
    assert payload["predicted_trajectory"]
    assert payload["latency_s"] >= 0.0
    assert "d2y/dt2" in payload["raw_completion"]


def test_llm_step_runs_full_episode_with_three_canned_turns() -> None:
    """Multi-turn drive: each call pops the next completion, history grows."""
    completions = [
        json.dumps({"equation": "d2y/dt2 = -9.81", "rationale": "pure gravity"}),
        json.dumps({
            "equation": "d2y/dt2 = -9.81 + 0.1 * vy",
            "rationale": "linear drag",
        }),
        json.dumps({
            "equation": "d2y/dt2 = -9.81 + 0.05 * vy**2",
            "rationale": "quadratic drag",
        }),
    ]
    app = _build_app_with_stubbed_llm(completions)
    with TestClient(app) as client:
        session_id = client.post(
            "/interactive/sessions",
            json={"system_id": "free_fall_drag", "seed": 42, "max_turns": 8},
        ).json()["session_id"]

        bodies = []
        for _ in range(3):
            step = client.post(
                f"/interactive/sessions/{session_id}/llm-step",
                json={"model": "stub"},
            )
            assert step.status_code == 200, step.text
            bodies.append(step.json())

    equations = [entry["action"]["equation"] for entry in bodies]
    assert equations == [
        "d2y/dt2 = -9.81",
        "d2y/dt2 = -9.81 + 0.1 * vy",
        "d2y/dt2 = -9.81 + 0.05 * vy**2",
    ]
    assert [entry["observation"]["turn"] for entry in bodies] == [1, 2, 3]
    # History accumulates across turns.
    assert len(bodies[-1]["observation"]["history"]) == 3
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def test_llm_step_handles_unparseable_completion_as_format_zero() -> None:
|
| 271 |
+
"""If the model emits junk, the env scores it format=0, no 500."""
|
| 272 |
+
app = _build_app_with_stubbed_llm(["I refuse to answer."])
|
| 273 |
+
with TestClient(app) as client:
|
| 274 |
+
session_id = client.post(
|
| 275 |
+
"/interactive/sessions",
|
| 276 |
+
json={"system_id": "simple_pendulum", "seed": 0, "max_turns": 4},
|
| 277 |
+
).json()["session_id"]
|
| 278 |
+
response = client.post(
|
| 279 |
+
f"/interactive/sessions/{session_id}/llm-step",
|
| 280 |
+
json={"model": "stub"},
|
| 281 |
+
)
|
| 282 |
+
|
| 283 |
+
assert response.status_code == 200, response.text
|
| 284 |
+
body = response.json()
|
| 285 |
+
assert body["observation"]["reward_breakdown"]["format"] == 0.0
|
| 286 |
+
assert body["predicted_trajectory"] == []
|
| 287 |
+
assert body["raw_completion"] == "I refuse to answer."
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def test_llm_step_after_budget_exhaustion_returns_409() -> None:
    """Once the env has consumed its budget, llm-step is rejected too."""
    equation_payload = json.dumps({"equation": "d2theta/dt2 = 0"})
    app = _build_app_with_stubbed_llm([equation_payload, equation_payload])
    with TestClient(app) as client:
        sid = client.post(
            "/interactive/sessions",
            json={"system_id": "simple_pendulum", "seed": 1, "max_turns": 2},
        ).json()["session_id"]
        step_url = f"/interactive/sessions/{sid}/llm-step"

        # Consume the full two-turn budget.
        for _ in range(2):
            ok = client.post(step_url, json={"model": "stub"})
            assert ok.status_code == 200

        # The next step exceeds max_turns and must be rejected as a conflict.
        overflow = client.post(step_url, json={"model": "stub"})

        assert overflow.status_code == 409
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
# --- CORS ---
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def test_cors_preflight_for_dev_origin(client: TestClient) -> None:
    """OPTIONS preflight from the Vite dev server is allowed."""
    preflight_headers = {
        "Origin": "http://localhost:5173",
        "Access-Control-Request-Method": "POST",
        "Access-Control-Request-Headers": "content-type",
    }
    response = client.options("/interactive/sessions", headers=preflight_headers)

    # Starlette may answer preflights with either 200 or 204.
    assert response.status_code in (200, 204), response.text
    assert response.headers["access-control-allow-origin"] == "http://localhost:5173"
|
tests/test_parser.py
ADDED
|
@@ -0,0 +1,329 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for :mod:`physix.verifier.parser`.
|
| 2 |
+
|
| 3 |
+
The parser is the contract between LLM output and the simulator. Most
|
| 4 |
+
tests here exist because of a real failure mode caught during live
|
| 5 |
+
episodes — concretely, the velocity-alias rules and unknown-symbol
|
| 6 |
+
hints close grammar gaps that were silently scoring competent agent
|
| 7 |
+
outputs as ``r_format=0``.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
import pytest
|
| 13 |
+
import sympy as sp
|
| 14 |
+
|
| 15 |
+
from physix.verifier.parser import (
|
| 16 |
+
GRAMMAR_HINT,
|
| 17 |
+
ParseError,
|
| 18 |
+
parse_equation,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _parse(
    text: str,
    state_variables: tuple[str, ...] = ("y", "vy"),
    parameter_names: frozenset[str] = frozenset(),
):
    """Parse *text* with this file's default free-fall state names."""
    return parse_equation(
        text,
        state_variables=state_variables,
        parameter_names=parameter_names,
    )
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def test_basic_equation_round_trips():
    """A constant-acceleration equation parses to one 2nd-order ODE in y."""
    parsed = _parse("d2y/dt2 = -9.81")
    assert len(parsed.equations) == 1
    equation = parsed.equations[0]
    assert (equation.var, equation.order) == ("y", 2)
    assert sp.simplify(equation.rhs - sp.Float(-9.81)) == 0
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def test_dx_dt_alias_substitutes_for_vx_when_velocity_state_exists():
    """``dx/dt`` is a valid synonym for ``vx`` in damped-spring style systems.

    Regression: 7B produced the textbook-correct equation
    ``d2x/dt2 = -k/m * x - c/m * dx/dt`` on turn 1 and we silently
    rejected it because ``dx`` and ``dt`` were not whitelisted.
    """
    parsed = parse_equation(
        "d2x/dt2 = -k/m * x - c/m * dx/dt",
        state_variables=("x", "vx"),
        parameter_names=frozenset({"k", "c", "m"}),
    )
    names = {symbol.name for symbol in parsed.equations[0].rhs.free_symbols}
    assert "vx" in names
    assert "dx" not in names
    assert "dt" not in names


def test_bare_dx_alias_substitutes_for_vx():
    """A bare ``dx`` (without ``/dt``) is also accepted as the velocity."""
    parsed = parse_equation(
        "d2x/dt2 = -k*x - c*dx",
        state_variables=("x", "vx"),
        parameter_names=frozenset({"k", "c"}),
    )
    names = {symbol.name for symbol in parsed.equations[0].rhs.free_symbols}
    assert "vx" in names
    assert "dx" not in names


def test_dy_dt_alias_substitutes_for_vy():
    """The same alias rule applies to any ``<var>``/``v<var>`` pairing."""
    parsed = parse_equation(
        "d2y/dt2 = -9.81 - 0.1 * dy/dt",
        state_variables=("y", "vy"),
        parameter_names=frozenset(),
    )
    # After aliasing, vy is the only free symbol left on the RHS.
    assert {symbol.name for symbol in parsed.equations[0].rhs.free_symbols} == {"vy"}
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def test_alias_does_not_fire_when_velocity_state_is_named_dvar():
    """Systems like the damped pendulum name the velocity state ``dtheta``
    itself; the alias must *not* rewrite it — it is the canonical name.
    """
    parsed = parse_equation(
        "d2theta/dt2 = -9.81 * sin(theta) - b * dtheta",
        state_variables=("theta", "dtheta"),
        parameter_names=frozenset({"b"}),
    )
    names = {symbol.name for symbol in parsed.equations[0].rhs.free_symbols}
    assert "dtheta" in names
    assert "vtheta" not in names


def test_alias_only_replaces_word_boundary_matches():
    """A ``dx`` substring inside a longer identifier must be left alone.

    Parameter names like ``mu_dx`` or ``kdx`` should never be silently
    rewritten to ``mu_vx``/``kvx``.
    """
    parsed = parse_equation(
        "d2x/dt2 = -k * x + mu_dx * vx",
        state_variables=("x", "vx"),
        parameter_names=frozenset({"k", "mu_dx"}),
    )
    names = {symbol.name for symbol in parsed.equations[0].rhs.free_symbols}
    assert "mu_dx" in names
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_unknown_dx_in_system_without_paired_velocity_includes_hint():
    """With no ``vx`` state, ``dx`` cannot be aliased and must reject —
    but the error should point at the actual state name ``x``."""
    with pytest.raises(ParseError) as excinfo:
        parse_equation(
            "dx/dt = -k * dx",
            state_variables=("x",),
            parameter_names=frozenset({"k"}),
        )
    message = str(excinfo.value)
    assert "dx" in message
    assert "no separate velocity name" in message


def test_unknown_t_emits_autonomy_hint():
    """``t`` is the most common forbidden symbol; the error must explain
    why so the model stops re-emitting time-explicit RHSs across turns."""
    with pytest.raises(ParseError) as excinfo:
        parse_equation(
            "d2theta/dt2 = -k * theta + c * t",
            state_variables=("theta", "dtheta"),
            parameter_names=frozenset({"k", "c"}),
        )
    message = str(excinfo.value)
    assert "'t'" in message
    assert "autonomous" in message
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def test_grammar_hint_documents_velocity_convention():
    """The system prompt embeds GRAMMAR_HINT verbatim. Whoever opens this
    file asking *why* dx/dt is legal will find the explanation here, and
    whoever accidentally weakens the convention will trip this test.
    """
    for fragment in ("vx", "dx/dt", "autonomous"):
        assert fragment in GRAMMAR_HINT


def test_multiple_equations_split_on_semicolons_keep_alias_behaviour():
    """The alias rule must apply per-equation when payloads are stacked."""
    parsed = parse_equation(
        "dx/dt = vx; d2x/dt2 = -k * x - c * dx/dt",
        state_variables=("x", "vx"),
        parameter_names=frozenset({"k", "c"}),
    )
    assert len(parsed.equations) == 2
    names = {symbol.name for symbol in parsed.equations[1].rhs.free_symbols}
    assert "vx" in names
    assert "dx" not in names
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def test_dotted_attribute_access_is_rejected_with_clear_error():
    """Regression test for the GRPO crash on completion::

        d2theta/dt2 = ... * np.sqrt(L**2 - theta**2) / L

    Pre-v2 the parser used ``sympy.sympify``, which turned ``np`` into
    ``Symbol('np')`` and then evaluated ``.sqrt(...)`` on it during
    ``eval``, raising ``AttributeError: 'Symbol' object has no attribute
    'sqrt'`` *inside* sympy and tearing down the entire RL step. v2
    parses with ``ast.parse`` plus a whitelist visitor that rejects
    ``ast.Attribute`` (and call-with-attribute func) structurally —
    there is no "eval" stage left to crash.
    """
    with pytest.raises(ParseError) as excinfo:
        parse_equation(
            "d2theta/dt2 = -theta + np.sqrt(L**2 - theta**2) / L",
            state_variables=("theta", "dtheta"),
            parameter_names=frozenset({"L"}),
        )
    message = str(excinfo.value)
    assert "Attribute access is not allowed" in message
    # The hint should always nudge toward the bare-function form.
    assert "sqrt(x)" in message or "sqrt(" in message
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
@pytest.mark.parametrize(
    "rhs",
    [
        "math.sin(theta)",
        "numpy.cos(theta) + 1",
        "np.exp(-theta**2)",
        "scipy.special.expit(theta)",
        "theta.diff()",
    ],
)
def test_dotted_attribute_access_variants_all_rejected(rhs):
    """Defence in depth: every common ``library.fn`` shape — the
    'attribute as function' (``np.sqrt(x)``) and 'attribute as value'
    (``theta.something``) shapes alike — must reject with the same
    user-facing wording."""
    with pytest.raises(ParseError, match="Attribute access is not allowed"):
        parse_equation(
            f"d2theta/dt2 = {rhs}",
            state_variables=("theta", "dtheta"),
            parameter_names=frozenset(),
        )
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def test_decimal_literals_are_not_misread_as_attribute_access():
    """Numbers like ``1.05`` must parse as constants — they were a real
    coefficient in the failing pendulum equation, and ``ast.parse``
    correctly tokenises them as ``ast.Constant(1.05)``, not Attribute
    access.
    """
    parsed = parse_equation(
        "d2theta/dt2 = -1.05 * theta",
        state_variables=("theta", "dtheta"),
        parameter_names=frozenset(),
    )
    assert len(parsed.equations) == 1


def test_keyword_arguments_in_call_are_rejected_with_specific_hint():
    """Pre-v2, ``sin(theta=0.1)`` reached sympy's eval and raised a bare
    ``TypeError`` from inside ``parse_expr``. v2's call validator catches
    it at the AST level and hands the model a one-line fix.
    """
    with pytest.raises(ParseError, match="Keyword arguments"):
        parse_equation(
            "d2y/dt2 = sin(theta=0.1)",
            state_variables=("y", "vy"),
            parameter_names=frozenset(),
        )
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
@pytest.mark.parametrize(
    "rhs, expected_keyword",
    [
        ("vy[0]", "Array indexing"),
        ("y if vy > 0 else -y", "Conditional"),
        ("lambda x: x", "Lambda"),
        ("y == vy", "Comparisons"),
        ("y and vy", "Boolean"),
        ("(y, vy)", "Collection"),
        # A lambda nested in a call hits the call-with-non-Name-func branch,
        # not the bare Lambda branch — keep coverage for that path too.
        ("(lambda x: x)(y)", "computed-name call"),
    ],
)
def test_disallowed_constructs_each_have_targeted_error(rhs, expected_keyword):
    """The whitelist visitor must reject each non-arithmetic shape with a
    hint naming the construct — the error string is what the LLM sees on
    its next turn, so vague messages waste turns. This locks in coverage:
    adding a new shape requires adding a test *and* a branch.
    """
    with pytest.raises(ParseError) as excinfo:
        parse_equation(
            f"d2y/dt2 = {rhs}",
            state_variables=("y", "vy"),
            parameter_names=frozenset(),
        )
    assert expected_keyword in str(excinfo.value)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def test_caret_is_accepted_as_power_synonym():
    """Physics notation universally writes ``x^2`` for the square. v2
    rewrites ``^`` → ``**`` before the AST parse so the agent doesn't
    have to remember Python's XOR/power split. (Pre-v2 the grammar hint
    actively *disallowed* ``^`` — models emitted it anyway and got
    format=0 for purely cosmetic reasons.)
    """
    common = dict(
        state_variables=("y", "vy"),
        parameter_names=frozenset({"k"}),
    )
    caret_form = parse_equation("d2y/dt2 = -k * y^2", **common).equations[0]
    explicit_form = parse_equation("d2y/dt2 = -k * y**2", **common).equations[0]
    # What we really care about: the caret RHS equals the explicit ** RHS.
    assert sp.simplify(caret_form.rhs - explicit_form.rhs) == 0
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def test_only_parse_error_ever_escapes_the_parser():
    """Contract test: whatever the agent writes, the *only* exception
    type that ever leaves this module is :class:`ParseError`. The scorer
    relies on this to convert grammar failures into ``r_format = 0``
    instead of crashing the entire GRPO group (which is what zeroed both
    loss and reward in the v1 training run).

    We sweep a grab-bag of historically problematic shapes and confirm
    every one becomes ``ParseError`` — never bare ``AttributeError`` /
    ``TypeError`` / ``SyntaxError``.
    """
    pathological = [
        "np.sqrt(y)",
        "sin(theta=0.1)",
        "(lambda x: x)(y)",
        "y if vy else -y",
        "y == vy",
        "y[0]",
        "y; import os",  # legal Python, illegal here — first arm parses, second is rejected by split-then-validate
        "1.05.foo",  # decimal followed by attribute
        "y ** vy ** y ** vy ** y",  # legal but deep; just confirms no crash
    ]
    for raw in pathological:
        try:
            parse_equation(
                f"d2y/dt2 = {raw}",
                state_variables=("y", "vy"),
                parameter_names=frozenset(),
            )
        except ParseError:
            pass  # the expected outcome for most of these inputs
        except BaseException as exc:  # noqa: BLE001 — catching everything is the whole point
            raise AssertionError(
                f"Non-ParseError escaped parser for input {raw!r}: "
                f"{type(exc).__name__}: {exc}"
            ) from exc
|
tests/test_prompt.py
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Unit tests for :mod:`physix.training.prompt`."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from physix.models import PhysiXObservation
|
| 6 |
+
from physix.training.prompt import (
|
| 7 |
+
SYSTEM_MESSAGE,
|
| 8 |
+
build_prompt,
|
| 9 |
+
parse_completion,
|
| 10 |
+
render_observation_for_prompt,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _sample_observation() -> PhysiXObservation:
    """Build a mid-episode free-fall observation with one history row."""
    history_row = {
        "turn": 1,
        "equation": "d2y/dt2 = -9.81",
        "params": {},
        "reward_total": 0.42,
        "reward_components": {"match": 0.42, "progress": 0.0, "simplicity": 0.95, "format": 1.0},
        "mismatch_summary": "predicted y diverges past t=2.0s.",
    }
    samples = [
        {"t": 0.0, "y": 50.0, "vy": 0.0},
        {"t": 0.5, "y": 48.7, "vy": -4.9},
        {"t": 1.0, "y": 45.1, "vy": -9.7},
    ]
    return PhysiXObservation(
        done=False,
        reward=None,
        trajectory=samples,
        state_variables=["y", "vy"],
        hint="Object dropped from 50 m, mass 2 kg.",
        history=[history_row],
        mismatch_summary="predicted y diverges past t=2.0s.",
        turn=1,
        turn_remaining=7,
        system_id="free_fall_drag",
        stats={"y_min": 0.0, "y_max": 50.0, "duration": 6.0},
        reward_breakdown={"match": 0.42, "total": 0.40},
    )
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# ---------------------------------------------------------------------------
|
| 45 |
+
# render_observation_for_prompt
|
| 46 |
+
# ---------------------------------------------------------------------------
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def test_render_includes_metadata_block() -> None:
    rendered = render_observation_for_prompt(_sample_observation())
    for expected in (
        "SYSTEM_ID: free_fall_drag",
        "STATE_VARIABLES: y, vy",
        "HINT: Object dropped from 50 m",
        "STATS:",
    ):
        assert expected in rendered


def test_render_includes_trajectory_samples() -> None:
    rendered = render_observation_for_prompt(_sample_observation())
    assert "TRAJECTORY" in rendered
    # Value formatting may or may not pad decimals; accept either form.
    assert "y=50" in rendered or "y=50.000" in rendered
    assert "vy=" in rendered


def test_render_includes_history_when_present() -> None:
    rendered = render_observation_for_prompt(_sample_observation())
    assert "HISTORY" in rendered
    assert "turn=1" in rendered
    assert "d2y/dt2 = -9.81" in rendered
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def test_history_uses_equation_field_name_not_shorthand() -> None:
    """Regression: HISTORY originally displayed ``eqn=`` as shorthand,
    which mid-strength chat models then mimicked when emitting their own
    JSON (``{"eqn": "..."}``). The parser only reads ``equation``, so
    every post-first turn silently scored ``r_format=0`` even when the
    model's actual equation was perfect. The prompt must use the same
    field name the parser expects."""
    rendered = render_observation_for_prompt(_sample_observation())
    assert "equation=`d2y/dt2 = -9.81`" in rendered
    assert "eqn=" not in rendered
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def test_history_block_surfaces_dense_reward_components() -> None:
    """The model needs to see *which* reward component scored what so it
    can attribute its own progress turn-over-turn — e.g. push on grammar
    when ``format=0``, or try a structurally different equation when
    ``match`` plateaus while ``progress=0``. Showing only the weighted
    total hides that signal.
    """
    obs = _sample_observation()
    obs.history = [
        {
            "turn": 1,
            "equation": "d2y/dt2 = -9.81",
            "params": {},
            "reward_total": 0.42,
            "reward_components": {
                "match": 0.42,
                "progress": 0.0,
                "simplicity": 0.95,
                "format": 1.0,
            },
            "mismatch_summary": "predicted y diverges past t=2.0s.",
        }
    ]
    rendered = render_observation_for_prompt(obs)

    for expected in ("match=0.42", "progress=0.00", "simplicity=0.95", "format=1.00"):
        assert expected in rendered
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def test_history_block_tolerates_missing_reward_components() -> None:
    """Older history rows or partial breakdowns shouldn't crash render —
    missing components default to 0.00 so the column layout stays stable
    and the model can still parse the block in-context."""
    obs = _sample_observation()
    obs.history = [
        {
            "turn": 1,
            "equation": "d2y/dt2 = -9.81",
            "params": {},
            "reward_total": 0.4,
            "mismatch_summary": "",
        }
    ]
    rendered = render_observation_for_prompt(obs)

    for component in ("match", "progress", "simplicity", "format"):
        assert f"{component}=0.00" in rendered
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def test_system_message_locks_in_canonical_field_name() -> None:
    """The system prompt must explicitly forbid synonyms so the model
    doesn't drift to ``eqn``/``ode``/``formula`` on later turns."""
    assert '"equation"' in SYSTEM_MESSAGE
    assert "never" in SYSTEM_MESSAGE.lower()


def test_render_omits_history_block_when_empty() -> None:
    obs = _sample_observation()
    obs.history = []
    assert "HISTORY" not in render_observation_for_prompt(obs)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# ---------------------------------------------------------------------------
|
| 150 |
+
# build_prompt
|
| 151 |
+
# ---------------------------------------------------------------------------
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def test_build_prompt_returns_chat_pair() -> None:
    prompt = build_prompt(_sample_observation())

    assert len(prompt) == 2
    system_msg, user_msg = prompt
    assert system_msg == {"role": "system", "content": SYSTEM_MESSAGE}
    assert user_msg["role"] == "user"
    assert "TRAJECTORY" in user_msg["content"]
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
# ---------------------------------------------------------------------------
|
| 164 |
+
# parse_completion
|
| 165 |
+
# ---------------------------------------------------------------------------
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def test_parse_completion_extracts_clean_json() -> None:
    completion = '{"equation": "d2y/dt2 = -9.81", "params": {"g": 9.81}, "rationale": "free fall"}'
    action = parse_completion(completion)

    assert (action.equation, action.params, action.rationale) == (
        "d2y/dt2 = -9.81",
        {"g": 9.81},
        "free fall",
    )


def test_parse_completion_handles_code_fences() -> None:
    completion = (
        "\nHere is my hypothesis:\n"
        "```json\n"
        "{\n"
        '  "equation": "d2y/dt2 = -g + k * vy**2",\n'
        '  "params": {"g": 9.81, "k": 0.05},\n'
        '  "rationale": "added drag"\n'
        "}\n"
        "```\n"
    )
    action = parse_completion(completion)

    assert action.equation == "d2y/dt2 = -g + k * vy**2"
    assert action.params == {"g": 9.81, "k": 0.05}
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def test_parse_completion_with_no_json_yields_empty_equation() -> None:
    """With no extractable JSON object, the action must carry an *empty*
    equation (so the verifier reports a clean ``Empty equation payload``
    error and the env scores ``r_format=0``).

    The raw text is preserved in ``rationale`` so logs/UI can still show
    what the model said, but it is never fed to the equation parser as
    if it were an equation.
    """
    completion = "I think the equation is d2y/dt2 = -g but I'm not sure."
    action = parse_completion(completion)

    assert action.equation == ""
    assert action.params == {}
    assert action.rationale == completion.strip()


def test_parse_completion_coerces_string_params() -> None:
    completion = '{"equation": "d2y/dt2 = -g", "params": {"g": "9.81", "bad": "not_a_number"}}'
    action = parse_completion(completion)

    # Numeric strings are coerced; non-numeric values are silently dropped.
    assert action.params == {"g": 9.81}
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def test_parse_completion_handles_latex_braces_inside_json_string() -> None:
    """Regression: a regex-based brace matcher mis-balances when the JSON
    *string* value contains literal ``{`` / ``}`` (e.g. LaTeX
    ``\\frac{...}``). The JSON-aware extractor
    (``json.JSONDecoder.raw_decode``) handles it correctly. The equation
    field is preserved verbatim — rewriting it upstream would silently
    corrupt the agent's output and is the env's job to score, not ours."""
    completion = (
        '```json\n'
        '{ "equation": "\\\\frac{d vy}{dt} = -9.81", '
        '"params": { "g": 9.81 }, '
        '"rationale": "free fall" }\n'
        '```'
    )
    action = parse_completion(completion)

    assert (action.equation, action.params, action.rationale) == (
        "\\frac{d vy}{dt} = -9.81",
        {"g": 9.81},
        "free fall",
    )
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def test_parse_completion_picks_first_object_when_text_has_multiple() -> None:
    completion = "\n".join(
        [
            "Some thoughts here.",
            '{"equation": "dy/dt = vy", "params": {}}',
            "And then another:",
            '{"equation": "dvy/dt = -g", "params": {"g": 9.81}}',
        ]
    )
    assert parse_completion(completion).equation == "dy/dt = vy"


def test_parse_completion_accepts_eqn_synonym() -> None:
    """Regression: qwen2.5:7b emits ``{"eqn": "..."}`` after the first
    turn (mimicking the historical HISTORY block shorthand). The parser
    must accept this rather than silently scoring r_format=0."""
    action = parse_completion('{"eqn": "d2y/dt2 = -9.81", "rationale": "free fall"}')

    assert action.equation == "d2y/dt2 = -9.81"
    assert action.rationale == "free fall"
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def test_parse_completion_accepts_other_equation_synonyms() -> None:
    for key in ("ode", "formula", "expression", "expr"):
        action = parse_completion(f'{{"{key}": "dy/dt = vy"}}')
        assert action.equation == "dy/dt = vy", f"failed for key={key!r}"


def test_parse_completion_accepts_capitalised_keys() -> None:
    """Some models emit ``"Equation"`` / ``"EQN"``; keys are lowercased
    once before lookup so the alias table stays simple."""
    action = parse_completion('{"Equation": "dy/dt = vy", "Rationale": "kinematic"}')

    assert action.equation == "dy/dt = vy"
    assert action.rationale == "kinematic"


def test_parse_completion_accepts_parameters_synonym() -> None:
    action = parse_completion('{"equation": "d2y/dt2 = -g", "parameters": {"g": 9.81}}')

    assert action.params == {"g": 9.81}
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def test_parse_completion_canonical_key_wins_over_synonym() -> None:
    """Hygiene: when a payload carries both the canonical ``equation`` key
    and a synonym, the canonical key must win — the synonym fallback may
    never override a model that *did* emit ``equation``."""
    raw = '{"equation": "dy/dt = vy", "eqn": "d2y/dt2 = -9.81"}'

    parsed = parse_completion(raw)

    assert parsed.equation == "dy/dt = vy"
|
| 296 |
+
def test_parse_completion_accepts_reasoning_synonym() -> None:
    """``reasoning`` is accepted as an alias for ``rationale``."""
    parsed = parse_completion('{"equation": "dy/dt = vy", "reasoning": "kinematic"}')

    assert parsed.rationale == "kinematic"
|
tests/test_registry.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from physix.systems.registry import (
|
| 4 |
+
SUPPORTED_SYSTEMS,
|
| 5 |
+
SYSTEM_REGISTRY,
|
| 6 |
+
get_system,
|
| 7 |
+
list_supported_systems,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def test_supported_systems_is_non_empty() -> None:
    """The curriculum must declare at least one supported system."""
    assert len(SUPPORTED_SYSTEMS) >= 1
| 14 |
+
|
| 15 |
+
def test_supported_systems_only_references_registered_systems() -> None:
    """Every curriculum entry must resolve against the registry mapping."""
    for sid in SUPPORTED_SYSTEMS:
        assert sid in SYSTEM_REGISTRY
| 19 |
+
|
| 20 |
+
def test_list_supported_systems_preserves_declared_order() -> None:
    """The helper must yield system ids exactly in declaration order."""
    expected = list(SUPPORTED_SYSTEMS)
    assert list_supported_systems() == expected
| 23 |
+
|
| 24 |
+
def test_every_supported_system_instantiates_cleanly() -> None:
    """Each registered id builds a system that self-identifies correctly
    and declares at least one state variable."""
    for sid in SUPPORTED_SYSTEMS:
        built = get_system(sid)
        assert built.system_id == sid
        assert built.state_variables, f"{sid} declares no state variables"
tests/test_scorer.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Unit tests for :mod:`physix.training.scorer`."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from physix.systems import get_system
|
| 9 |
+
from physix.training.scorer import Scorer, SystemContext
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _build_context_for(system_id: str, seed: int = 5) -> SystemContext:
    """Simulate ``system_id`` with a seeded RNG and wrap the resulting
    trajectory in a fresh :class:`SystemContext` (previous_total starts
    at 0 so progress scoring has a clean baseline)."""
    target = get_system(system_id)
    trajectory = target.simulate(np.random.default_rng(seed))
    return SystemContext(
        system_id=system_id,
        state_variables=target.state_variables,
        parameters=target.parameters,
        initial_conditions=target.initial_conditions,
        timestamps=trajectory.timestamps,
        observed=trajectory.states,
        previous_total=0.0,
    )
| 26 |
+
|
| 27 |
+
def test_scorer_returns_high_match_for_ground_truth() -> None:
    """Feeding the exact governing equation should score near-perfectly."""
    ctx = _build_context_for("free_fall")

    result = Scorer().score('{"equation": "d2y/dt2 = -9.81"}', ctx)

    assert result.format == 1.0
    assert result.match >= 0.9
| 35 |
+
|
| 36 |
+
def test_scorer_returns_zero_format_on_garbage() -> None:
    """Unparseable completions earn zero format and zero match credit."""
    ctx = _build_context_for("free_fall")

    result = Scorer().score("this is not an equation", ctx)

    assert result.format == 0.0
    assert result.match == 0.0
| 44 |
+
|
| 45 |
+
def test_scorer_caches_by_key() -> None:
    """Scoring twice under the same cache key returns the identical
    breakdown object (not merely an equal one)."""
    scorer = Scorer()
    ctx = _build_context_for("simple_pendulum")
    text = '{"equation": "d2theta/dt2 = -9.81 * sin(theta)"}'

    first = scorer.score(text, ctx, cache_key=42)
    assert scorer.score(text, ctx, cache_key=42) is first
| 54 |
+
|
| 55 |
+
def test_scorer_progress_shrinks_when_previous_total_is_high() -> None:
    """progress = max(0, match - prev_total): raising prev_total can only
    shrink (never grow) the progress component of the reward."""
    scorer = Scorer()
    baseline_ctx = _build_context_for("free_fall_drag", seed=3)
    text = '{"equation": "d2y/dt2 = -9.81 + 0.05 * vy**2"}'

    baseline = scorer.score(text, baseline_ctx)
    if baseline.match < 0.5:
        pytest.skip("rng selection produced an awkward instance")

    raised_ctx = baseline_ctx.model_copy(update={"previous_total": baseline.match * 0.9})
    raised = scorer.score(text, raised_ctx)

    # Higher previous_total must never yield a larger progress term.
    assert raised.progress <= baseline.progress + 1e-9
|
tests/test_sft_dataset.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for the SFT dataset builder.
|
| 2 |
+
|
| 3 |
+
Why a dedicated file: :func:`physix.training.sft.build_sft_dataset` is
|
| 4 |
+
imported through a module that *also* imports heavy ML packages
|
| 5 |
+
(``unsloth``, ``trl``, ``wandb``) lazily inside :func:`train_sft`. Keeping
|
| 6 |
+
the dataset-only tests here documents the boundary — these tests
|
| 7 |
+
exercise the pure-Python data path and must not require GPU deps.
|
| 8 |
+
|
| 9 |
+
The whole point of changes captured by these tests: SFT must warm the
|
| 10 |
+
model on *the same systems GRPO will refine*. Any divergence between
|
| 11 |
+
SFT and GRPO curricula wastes the warm-start budget on shapes that
|
| 12 |
+
never see a reinforcement signal.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
|
| 19 |
+
import pytest
|
| 20 |
+
|
| 21 |
+
from physix.systems import SUPPORTED_SYSTEMS
|
| 22 |
+
from physix.training.sft import build_sft_dataset
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def test_build_sft_dataset_default_curriculum_size() -> None:
    """One instance per system yields exactly one row per supported system."""
    dataset = build_sft_dataset(instances_per_system=1)
    assert len(dataset) == len(SUPPORTED_SYSTEMS)
| 29 |
+
|
| 30 |
+
def test_build_sft_dataset_default_scales_with_instances_per_system() -> None:
    """Row count grows linearly with ``instances_per_system``."""
    per_system = 4
    dataset = build_sft_dataset(instances_per_system=per_system)
    assert len(dataset) == len(SUPPORTED_SYSTEMS) * per_system
| 35 |
+
|
| 36 |
+
def test_build_sft_dataset_completions_are_valid_json_action() -> None:
    """Every completion must decode as JSON carrying the full action shape."""
    for row in build_sft_dataset(instances_per_system=1):
        decoded = json.loads(row["completion"])
        assert "equation" in decoded
        assert "params" in decoded
        assert "rationale" in decoded
| 44 |
+
|
| 45 |
+
def test_build_sft_dataset_explicit_system_ids_override_default() -> None:
    """An explicit ``system_ids`` tuple replaces the default curriculum."""
    dataset = build_sft_dataset(system_ids=("free_fall",), instances_per_system=3)
    assert len(dataset) == 3
| 52 |
+
|
| 53 |
+
def test_build_sft_dataset_rejects_unknown_system_ids() -> None:
    """Unknown ids must fail fast with a descriptive ValueError."""
    with pytest.raises(ValueError, match="Unknown system_ids"):
        build_sft_dataset(system_ids=("definitely_not_a_system",))
| 57 |
+
|
| 58 |
+
def test_build_sft_dataset_rejects_empty_system_ids() -> None:
    """An empty ``system_ids`` tuple is rejected outright."""
    with pytest.raises(ValueError, match="non-empty"):
        build_sft_dataset(system_ids=())
|