7B run: anti-hack reward set + 7B profile
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- physix-live/README.md +308 -0
- physix-live/physix/__init__.py +32 -0
- physix-live/physix/__pycache__/__init__.cpython-311.pyc +0 -0
- physix-live/physix/__pycache__/adapters.cpython-311.pyc +0 -0
- physix-live/physix/__pycache__/client.cpython-311.pyc +0 -0
- physix-live/physix/__pycache__/models.cpython-311.pyc +0 -0
- physix-live/physix/client.py +43 -0
- physix-live/physix/models.py +138 -0
- physix-live/physix/server/__init__.py +8 -0
- physix-live/physix/server/__pycache__/__init__.cpython-311.pyc +0 -0
- physix-live/physix/server/__pycache__/app.cpython-311.pyc +0 -0
- physix-live/physix/server/__pycache__/environment.cpython-311.pyc +0 -0
- physix-live/physix/server/__pycache__/interactive.cpython-311.pyc +0 -0
- physix-live/physix/server/app.py +117 -0
- physix-live/physix/server/environment.py +280 -0
- physix-live/physix/server/interactive.py +430 -0
- physix-live/physix/systems/__init__.py +20 -0
- physix-live/physix/systems/__pycache__/__init__.cpython-311.pyc +0 -0
- physix-live/physix/systems/__pycache__/base.cpython-311.pyc +0 -0
- physix-live/physix/systems/__pycache__/registry.cpython-311.pyc +0 -0
- physix-live/physix/systems/__pycache__/tier1.cpython-311.pyc +0 -0
- physix-live/physix/systems/__pycache__/tier2.cpython-311.pyc +0 -0
- physix-live/physix/systems/__pycache__/tier3.cpython-311.pyc +0 -0
- physix-live/physix/systems/base.py +192 -0
- physix-live/physix/systems/registry.py +48 -0
- physix-live/physix/systems/tier1.py +143 -0
- physix-live/physix/systems/tier2.py +128 -0
- physix-live/physix/systems/tier3.py +132 -0
- physix-live/physix/training/__init__.py +18 -0
- physix-live/physix/training/__pycache__/__init__.cpython-311.pyc +0 -0
- physix-live/physix/training/__pycache__/dataset.cpython-311.pyc +0 -0
- physix-live/physix/training/__pycache__/prompt.cpython-311.pyc +0 -0
- physix-live/physix/training/__pycache__/scorer.cpython-311.pyc +0 -0
- physix-live/physix/training/__pycache__/sft.cpython-311.pyc +0 -0
- physix-live/physix/training/dataset.py +153 -0
- physix-live/physix/training/loop.py +759 -0
- physix-live/physix/training/prompt.py +369 -0
- physix-live/physix/training/reward_fns.py +167 -0
- physix-live/physix/training/scorer.py +189 -0
- physix-live/physix/training/sft.py +293 -0
- physix-live/physix/verifier/__init__.py +33 -0
- physix-live/physix/verifier/__pycache__/__init__.cpython-311.pyc +0 -0
- physix-live/physix/verifier/__pycache__/metrics.cpython-311.pyc +0 -0
- physix-live/physix/verifier/__pycache__/mismatch.cpython-311.pyc +0 -0
- physix-live/physix/verifier/__pycache__/parser.cpython-311.pyc +0 -0
- physix-live/physix/verifier/__pycache__/reward.cpython-311.pyc +0 -0
- physix-live/physix/verifier/__pycache__/simulator.cpython-311.pyc +0 -0
- physix-live/physix/verifier/metrics.py +114 -0
- physix-live/physix/verifier/mismatch.py +138 -0
- physix-live/physix/verifier/parser.py +396 -0
physix-live/README.md
ADDED
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PhysiX-Live
|
| 2 |
+
|
| 3 |
+
**One-line pitch:** an OpenEnv RL environment where a small (1.5B) language model iteratively
|
| 4 |
+
discovers equations of motion from trajectory data plus a one-sentence English hint —
|
| 5 |
+
verifier is `scipy.integrate.odeint` plus per-step R², no LLM-as-judge in the reward loop.
|
| 6 |
+
|
| 7 |
+
A submission for the **OpenEnv hackathon** (Apr 2026). The deliverables are: a clean
|
| 8 |
+
OpenEnv-compatible env, a TRL+Unsloth+GRPO training pipeline targeting Qwen2.5-1.5B with
|
| 9 |
+
LoRA-32, a React + TypeScript + Tailwind demo UI that animates trajectories side-by-side
|
| 10 |
+
for the trained vs. untrained model, and a recording script for pre-baked demo episodes.
|
| 11 |
+
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
## Repository layout
|
| 15 |
+
|
| 16 |
+
```
|
| 17 |
+
physix-live/
|
| 18 |
+
├── physix/ # Python package
|
| 19 |
+
│ ├── __init__.py # narrow public API
|
| 20 |
+
│ ├── models.py # Pydantic Action / Observation / State
|
| 21 |
+
│ ├── client.py # OpenEnv WebSocket client subclass
|
| 22 |
+
│ ├── systems/ # 8 physical systems in 3 tiers
|
| 23 |
+
│ │ ├── base.py # PhysicalSystem ABC + TrajectoryData
|
| 24 |
+
│ │ ├── tier1.py # FreeFall, FreeFallWithDrag, SimplePendulum
|
| 25 |
+
│ │ ├── tier2.py # DampedPendulum, SpringMass, DampedSpring
|
| 26 |
+
│ │ ├── tier3.py # ProjectileWithDrag, ChargedInBField (held out)
|
| 27 |
+
│ │ └── registry.py # system_id -> factory mapping
|
| 28 |
+
│ ├── verifier/ # scoring pipeline
|
| 29 |
+
│ │ ├── parser.py # SymPy whitelisted parser
|
| 30 |
+
│ │ ├── simulator.py # scipy.odeint forward sim
|
| 31 |
+
│ │ ├── metrics.py # per-step R²
|
| 32 |
+
│ │ ├── mismatch.py # English residual summary
|
| 33 |
+
│ │ └── reward.py # 4-component reward composition
|
| 34 |
+
│ ├── server/ # FastAPI + OpenEnv
|
| 35 |
+
│ │ ├── environment.py # PhysiXEnvironment subclass
|
| 36 |
+
│ │ ├── interactive.py # session-based REST router (`/interactive/*`)
|
| 37 |
+
│ │ └── app.py # FastAPI factory + CLI entry point
|
| 38 |
+
│ └── training/ # GRPO training pipeline
|
| 39 |
+
│ ├── prompt.py # observation -> prompt, completion -> action
|
| 40 |
+
│ ├── scorer.py # single-completion scorer (training + eval)
|
| 41 |
+
│ ├── reward_fns.py # TRL-compatible reward callables
|
| 42 |
+
│ ├── dataset.py # build training / eval datasets
|
| 43 |
+
│ └── loop.py # Unsloth + TRL GRPO loop (cloud A100)
|
| 44 |
+
├── frontend/ # React + TS + Tailwind demo UI
|
| 45 |
+
│ └── src/
|
| 46 |
+
│ ├── App.tsx # tabs: "Run with LLM" + "Manual"
|
| 47 |
+
│ ├── components/ # RunWithLlmPane, InteractivePane, …
|
| 48 |
+
│ ├── hooks/ # useLlmEpisodeRunner, useInteractiveSession
|
| 49 |
+
│ ├── lib/ # interactiveClient, trajectory, format
|
| 50 |
+
│ └── types/physix.ts
|
| 51 |
+
└── tests/ # full pipeline coverage incl. /interactive/*
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
---
|
| 55 |
+
|
| 56 |
+
## What the env does (one episode end-to-end)
|
| 57 |
+
|
| 58 |
+
```mermaid
|
| 59 |
+
sequenceDiagram
|
| 60 |
+
participant Agent
|
| 61 |
+
participant Env as PhysiXEnvironment
|
| 62 |
+
participant Sim as scipy.odeint
|
| 63 |
+
participant Verifier
|
| 64 |
+
|
| 65 |
+
Env->>Agent: reset(): observed trajectory + hint
|
| 66 |
+
loop up to 8 turns
|
| 67 |
+
Agent->>Env: step(SymPy eqn + params + rationale)
|
| 68 |
+
Env->>Sim: simulate from hypothesis
|
| 69 |
+
Sim-->>Verifier: predicted trajectory
|
| 70 |
+
Verifier-->>Env: r_match + r_progress + r_simplicity + r_format
|
| 71 |
+
Env->>Agent: obs (mismatch summary, history) + reward
|
| 72 |
+
alt r_match > 0.93 or budget exhausted
|
| 73 |
+
Env-->>Agent: done=True
|
| 74 |
+
end
|
| 75 |
+
end
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
**Action space:** the agent emits structured text in a constrained SymPy grammar
|
| 79 |
+
(`d2y/dt2 = -9.81 + 0.05 * vy**2`). Allowed operators: `+ - * / **`. Allowed
|
| 80 |
+
functions: `sin cos tan exp log sqrt abs`. Parse failures score `r_format = 0`.
|
| 81 |
+
|
| 82 |
+
**Reward:** four independent components (each in `[0, 1]`), weighted into a total.
|
| 83 |
+
|
| 84 |
+
| Component | Weight | What it measures |
|
| 85 |
+
|---|---:|---|
|
| 86 |
+
| `r_match` | 0.5 | Per-step R² between observed and predicted trajectory |
|
| 87 |
+
| `r_progress` | 0.2 | Improvement over prior turn (dense per-turn shaping) |
|
| 88 |
+
| `r_simplicity` | 0.2 | 1 − normalised operator count (Occam's razor) |
|
| 89 |
+
| `r_format` | 0.1 | Binary: SymPy parses + dimensional consistency |
|
| 90 |
+
|
| 91 |
+
The reward is fully verifiable — the env never calls an LLM-as-judge.
|
| 92 |
+
|
| 93 |
+
---
|
| 94 |
+
|
| 95 |
+
## Quick start
|
| 96 |
+
|
| 97 |
+
### 1. Install (Python)
|
| 98 |
+
|
| 99 |
+
Requires Python 3.10+. Inside a fresh conda env or venv:
|
| 100 |
+
|
| 101 |
+
```bash
|
| 102 |
+
pip install -e . # base deps (env server, verifier, client)
|
| 103 |
+
pip install -e ".[dev]" # + pytest, ruff
|
| 104 |
+
pip install -e ".[demo]" # + ollama (live LLM episodes via /interactive/llm-step)
|
| 105 |
+
pip install -e ".[train]" # + torch, transformers, trl, unsloth, wandb
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
Notes:
|
| 109 |
+
- `[train]` requires CUDA. Install it on the cloud A100 box, not on your laptop.
|
| 110 |
+
- `[demo]` adds the `ollama` Python client used by the server when the UI's
|
| 111 |
+
"Run with LLM" pane drives an episode. Start `ollama serve` and pull the
|
| 112 |
+
base model once with `ollama pull qwen2.5:1.5b-instruct`.
|
| 113 |
+
- The repo ships a `.vscode/settings.json` that pins the workspace's Python
|
| 114 |
+
interpreter to `~/miniconda3/envs/openenv_run/bin/python`. If your venv
|
| 115 |
+
lives somewhere else and your IDE shows "import could not be resolved",
|
| 116 |
+
update that path or run **Python: Select Interpreter** from the command
|
| 117 |
+
palette.
|
| 118 |
+
|
| 119 |
+
### 2. Run the test suite
|
| 120 |
+
|
| 121 |
+
```bash
|
| 122 |
+
pytest tests/ # 30 tests, ~3 seconds
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
### 3. Boot the env server locally
|
| 126 |
+
|
| 127 |
+
```bash
|
| 128 |
+
python -m physix.server.app --host 127.0.0.1 --port 8000
|
| 129 |
+
# or
|
| 130 |
+
uvicorn physix.server.app:app --host 127.0.0.1 --port 8000
|
| 131 |
+
```
|
| 132 |
+
|
| 133 |
+
The server exposes:
|
| 134 |
+
|
| 135 |
+
- OpenEnv endpoints: `/reset`, `/step`, `/state`, `/schema`, `/health`. These
|
| 136 |
+
are stateless — each request gets a fresh env. Fine for headless agents.
|
| 137 |
+
- A stateful WebSocket at `/ws` (used by the Python `PhysiXEnv` client).
|
| 138 |
+
- A bespoke session-based REST router at `/interactive/*` (see
|
| 139 |
+
`physix/server/interactive.py`) used by the demo UI. It maintains
|
| 140 |
+
in-process sessions so a browser can drive a multi-turn episode by
|
| 141 |
+
POSTing equations.
|
| 142 |
+
|
| 143 |
+
CORS is enabled out of the box for `http://localhost:5173` (the Vite dev
|
| 144 |
+
server). Override with `PHYSIX_CORS_ORIGINS=https://your-host.example` (or
|
| 145 |
+
`*` for any origin, dev only).
|
| 146 |
+
|
| 147 |
+
For sustained Python-side interaction use the WebSocket client:
|
| 148 |
+
|
| 149 |
+
```python
|
| 150 |
+
import asyncio
|
| 151 |
+
from physix import PhysiXEnv, PhysiXAction
|
| 152 |
+
|
| 153 |
+
async def main():
|
| 154 |
+
async with PhysiXEnv(base_url="http://127.0.0.1:8000") as env:
|
| 155 |
+
result = await env.reset(system_id="free_fall_drag", seed=42)
|
| 156 |
+
result = await env.step(
|
| 157 |
+
PhysiXAction(equation="d2y/dt2 = -9.81 + 0.05 * vy**2")
|
| 158 |
+
)
|
| 159 |
+
print(result.observation.reward_breakdown)
|
| 160 |
+
|
| 161 |
+
asyncio.run(main())
|
| 162 |
+
```
|
| 163 |
+
|
| 164 |
+
### 4. Run the demo UI
|
| 165 |
+
|
| 166 |
+
```bash
|
| 167 |
+
cd frontend
|
| 168 |
+
pnpm install
|
| 169 |
+
pnpm dev # http://localhost:5173
|
| 170 |
+
```
|
| 171 |
+
|
| 172 |
+
The UI has two tabs, both backed by the same live env server:
|
| 173 |
+
|
| 174 |
+
- **Run with LLM** — pick a system + an Ollama model tag, click ▶ Run, and
|
| 175 |
+
watch the model propose ODEs turn-by-turn. Each call hits
|
| 176 |
+
`POST /interactive/sessions/:id/llm-step`, which builds the env's prompt,
|
| 177 |
+
calls the local Ollama daemon, parses the reply, scores it via the
|
| 178 |
+
verifier, and streams the resulting turn back to the page. Pause anytime.
|
| 179 |
+
- **Manual** — submit equations yourself. No LLM in the loop. Same scoring
|
| 180 |
+
pipeline, useful for building intuition for the verifier.
|
| 181 |
+
|
| 182 |
+
The UI expects the env server to be reachable on the URL in
|
| 183 |
+
`VITE_PHYSIX_API_URL` (default `http://localhost:8000`). For the LLM tab,
|
| 184 |
+
you also need a local Ollama daemon (`ollama serve`) with the model tag
|
| 185 |
+
pulled in advance:
|
| 186 |
+
|
| 187 |
+
```bash
|
| 188 |
+
ollama pull qwen2.5:1.5b-instruct
|
| 189 |
+
# or, after exporting your merged adapter to GGUF and building a Modelfile:
|
| 190 |
+
ollama create physix-trained:latest -f Modelfile
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
There are no pre-recorded episodes to regenerate. Every turn shown in the
|
| 194 |
+
UI is a real LLM call against the live env.
|
| 195 |
+
|
| 196 |
+
### 5. Train (cloud A100)
|
| 197 |
+
|
| 198 |
+
```bash
|
| 199 |
+
WANDB_PROJECT=physix-live python -m physix.training.loop \
|
| 200 |
+
--model Qwen/Qwen2.5-1.5B-Instruct \
|
| 201 |
+
--output-dir runs/physix-1.5b-rl \
|
| 202 |
+
--num-steps 300
|
| 203 |
+
|
| 204 |
+
# Run an ablation:
|
| 205 |
+
python -m physix.training.loop --num-steps 300 --ablation no_progress
|
| 206 |
+
```
|
| 207 |
+
|
| 208 |
+
After training, push the merged adapter to the Hub. By default the loop
|
| 209 |
+
saves a `merged_16bit` artifact (LoRA merged into the base, written as a
|
| 210 |
+
standard HF checkpoint) so it can be loaded without Unsloth and exported to
|
| 211 |
+
GGUF for Ollama:
|
| 212 |
+
|
| 213 |
+
```bash
|
| 214 |
+
python -m physix.training.loop \
|
| 215 |
+
--num-steps 300 \
|
| 216 |
+
--save-method merged_16bit \
|
| 217 |
+
--push-to-hub --hub-repo-id you/physix-1.5b-rl
|
| 218 |
+
```
|
| 219 |
+
|
| 220 |
+
Pass `--save-method lora` if you want the small adapter-only artifact
|
| 221 |
+
instead. The training loop calls `unsloth.PatchFastRL("GRPO", FastLanguageModel)`
|
| 222 |
+
before importing `GRPOTrainer` — required for Unsloth's GRPO kernels to be
|
| 223 |
+
swapped in.
|
| 224 |
+
|
| 225 |
+
---
|
| 226 |
+
|
| 227 |
+
## Adding a new physical system
|
| 228 |
+
|
| 229 |
+
The framework generalises beyond the 8 shipped systems. Adding a new one is
|
| 230 |
+
about 50 lines:
|
| 231 |
+
|
| 232 |
+
```python
|
| 233 |
+
# physix/systems/tier2.py (or your own module)
|
| 234 |
+
import numpy as np
|
| 235 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 236 |
+
|
| 237 |
+
class CoupledOscillators(PhysicalSystem):
|
| 238 |
+
system_id: str = "coupled_oscillators"
|
| 239 |
+
tier: SystemTier = SystemTier.TIER_2
|
| 240 |
+
state_variables: tuple[str, ...] = ("x1", "vx1", "x2", "vx2")
|
| 241 |
+
hint_template: str = "Two masses coupled by a spring; observe both positions."
|
| 242 |
+
|
| 243 |
+
def sample_parameters(self, rng):
|
| 244 |
+
return {"k": float(rng.uniform(2, 10)), "k_c": float(rng.uniform(0.5, 2))}
|
| 245 |
+
|
| 246 |
+
def sample_initial_conditions(self, rng):
|
| 247 |
+
return {"x1": float(rng.uniform(0.5, 1)), "vx1": 0.0, "x2": 0.0, "vx2": 0.0}
|
| 248 |
+
|
| 249 |
+
def rhs(self, t, state, params):
|
| 250 |
+
x1, vx1, x2, vx2 = state
|
| 251 |
+
return np.array([
|
| 252 |
+
vx1, -params["k"] * x1 + params["k_c"] * (x2 - x1),
|
| 253 |
+
vx2, -params["k"] * x2 + params["k_c"] * (x1 - x2),
|
| 254 |
+
])
|
| 255 |
+
|
| 256 |
+
def ground_truth_equation(self) -> str:
|
| 257 |
+
return "d2x1/dt2 = -k*x1 + k_c*(x2-x1); d2x2/dt2 = -k*x2 + k_c*(x1-x2)"
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
`PhysicalSystem` is a Pydantic model with an `ABCMeta` mixin — subclasses
|
| 261 |
+
declare overridden fields as plain class-level annotations and pydantic
|
| 262 |
+
treats them as field overrides. No `@dataclass` decorator needed.
|
| 263 |
+
|
| 264 |
+
Then register it in `physix/systems/registry.py`:
|
| 265 |
+
|
| 266 |
+
```python
|
| 267 |
+
SYSTEM_REGISTRY["coupled_oscillators"] = CoupledOscillators
|
| 268 |
+
```
|
| 269 |
+
|
| 270 |
+
That's it — the env, parser, simulator, scorer, and training loop all pick it
|
| 271 |
+
up automatically.
|
| 272 |
+
|
| 273 |
+
---
|
| 274 |
+
|
| 275 |
+
## Themes (OpenEnv hackathon rubric)
|
| 276 |
+
|
| 277 |
+
- **Primary: World-Modeling** — the agent literally builds an internal model
|
| 278 |
+
of physical dynamics from data + context, refines it, and is scored against
|
| 279 |
+
ground truth.
|
| 280 |
+
- **Primary: Long-Horizon** — episodes are 5-8 turns of stateful refinement;
|
| 281 |
+
earlier hypotheses condition later ones via the prompt history.
|
| 282 |
+
- **Secondary: Self-Improvement** — curriculum from 1-D undamped (Tier 1)
|
| 283 |
+
through 1-D damped (Tier 2) to 2-D coupled (Tier 3, held out).
|
| 284 |
+
|
| 285 |
+
---
|
| 286 |
+
|
| 287 |
+
## Honest framing
|
| 288 |
+
|
| 289 |
+
We do **not** claim:
|
| 290 |
+
|
| 291 |
+
- The env discovers genuinely new physics.
|
| 292 |
+
- A 1.5B model beats GPT-4o on equation discovery.
|
| 293 |
+
- The model learns physics from scratch.
|
| 294 |
+
|
| 295 |
+
We **do** claim:
|
| 296 |
+
|
| 297 |
+
- The same 1.5B converges in fewer turns *after* RL training than *before*.
|
| 298 |
+
- The trained model generalises to held-out 2-D systems (Tier 3).
|
| 299 |
+
- The trained model uses NL hints meaningfully (ablate the hint, performance drops).
|
| 300 |
+
|
| 301 |
+
This calibrated framing is part of the storytelling axis (30%) — judges trust
|
| 302 |
+
self-comparison numbers more than claims to beat frontier models.
|
| 303 |
+
|
| 304 |
+
---
|
| 305 |
+
|
| 306 |
+
## License
|
| 307 |
+
|
| 308 |
+
MIT.
|
physix-live/physix/__init__.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""PhysiX-Live: OpenEnv environment for iterative equation discovery.
|
| 2 |
+
|
| 3 |
+
Public API:
|
| 4 |
+
|
| 5 |
+
- :class:`PhysiXEnv`: the OpenEnv client (HTTP/WebSocket).
|
| 6 |
+
- :class:`PhysiXAction`, :class:`PhysiXObservation`, :class:`PhysiXState`:
|
| 7 |
+
the env's wire-protocol Pydantic models.
|
| 8 |
+
- :class:`RewardBreakdown`: 4-component reward record.
|
| 9 |
+
- :data:`GRAMMAR_HINT`: machine-generated DSL description for the LLM
|
| 10 |
+
system prompt (single source of truth: :mod:`physix.verifier.parser`).
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from physix.client import PhysiXEnv
|
| 14 |
+
from physix.models import (
|
| 15 |
+
PhysiXAction,
|
| 16 |
+
PhysiXObservation,
|
| 17 |
+
PhysiXState,
|
| 18 |
+
RewardBreakdown,
|
| 19 |
+
)
|
| 20 |
+
from physix.verifier.parser import GRAMMAR_HINT
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
__version__ = "0.1.0"
|
| 24 |
+
__all__ = [
|
| 25 |
+
"PhysiXEnv",
|
| 26 |
+
"PhysiXAction",
|
| 27 |
+
"PhysiXObservation",
|
| 28 |
+
"PhysiXState",
|
| 29 |
+
"RewardBreakdown",
|
| 30 |
+
"GRAMMAR_HINT",
|
| 31 |
+
"__version__",
|
| 32 |
+
]
|
physix-live/physix/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (1.08 kB). View file
|
|
|
physix-live/physix/__pycache__/adapters.cpython-311.pyc
ADDED
|
Binary file (7.73 kB). View file
|
|
|
physix-live/physix/__pycache__/client.cpython-311.pyc
ADDED
|
Binary file (2.56 kB). View file
|
|
|
physix-live/physix/__pycache__/models.cpython-311.pyc
ADDED
|
Binary file (7.06 kB). View file
|
|
|
physix-live/physix/client.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""HTTP/WebSocket client for the PhysiX-Live environment.
|
| 2 |
+
|
| 3 |
+
Subclasses :class:`openenv.core.EnvClient` to provide PhysiX-specific
|
| 4 |
+
serialisation and parsing. The base class handles WebSocket connection,
|
| 5 |
+
session management, and the OpenEnv wire protocol.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from typing import Any
|
| 11 |
+
|
| 12 |
+
from openenv.core import EnvClient
|
| 13 |
+
from openenv.core.client_types import StepResult
|
| 14 |
+
|
| 15 |
+
from physix.models import PhysiXAction, PhysiXObservation, PhysiXState
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class PhysiXEnv(EnvClient[PhysiXAction, PhysiXObservation, PhysiXState]):
    """Typed client for the PhysiX-Live OpenEnv environment.

    The :class:`openenv.core.EnvClient` base handles the connection,
    session lifecycle, and wire protocol; this subclass only supplies the
    PhysiX-specific (de)serialisation hooks.

    Example::

        >>> async with PhysiXEnv(base_url="http://localhost:8000") as env:
        ...     result = await env.reset()
        ...     while not result.done:
        ...         action = agent.predict(result.observation)
        ...         result = await env.step(action)
    """

    def _step_payload(self, action: PhysiXAction) -> dict[str, Any]:
        # Serialise the whole action; explicit None values stay on the wire.
        return action.model_dump(exclude_none=False)

    def _parse_result(self, payload: dict[str, Any]) -> StepResult[PhysiXObservation]:
        # A missing or null "observation" key degrades to an all-defaults observation.
        raw_obs = payload.get("observation") or {}
        return StepResult(
            observation=PhysiXObservation(**raw_obs),
            reward=payload.get("reward"),
            done=payload.get("done", False),
        )

    def _parse_state(self, payload: dict[str, Any]) -> PhysiXState:
        return PhysiXState(**payload)
|
physix-live/physix/models.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pydantic schemas + constants. Behaviour lives elsewhere."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Any
|
| 6 |
+
|
| 7 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 8 |
+
|
| 9 |
+
from openenv.core.env_server import Action, Observation, State
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
#: Per-episode turn budget. Episodes terminate earlier if r_match crosses
#: :data:`CONVERGENCE_THRESHOLD`.
DEFAULT_MAX_TURNS: int = 8
#: r_match level at which an episode counts as converged and ends early.
CONVERGENCE_THRESHOLD: float = 0.93

#: Reward component weights. Ablations only edit this dict.
#: The four weights sum to 1.0, so the weighted total stays in [0, 1].
REWARD_WEIGHTS: dict[str, float] = {
    "match": 0.5,
    "progress": 0.2,
    "simplicity": 0.2,
    "format": 0.1,
}
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class PhysiXAction(Action):
    """A single hypothesis submitted by the agent.

    Every field carries a default: tests may build partial actions, and
    the env can fabricate a no-op action (scored ``format=0``) when a
    completion fails to parse as JSON. The LLM is expected to populate
    all three fields; an empty string / empty dict is acceptable when a
    field is irrelevant.
    """

    # ODE hypothesis written in the verifier's constrained DSL.
    equation: str = Field(default="", description="ODE in the verifier DSL")
    # Numeric values for free symbols appearing on the equation's RHS.
    params: dict[str, float] = Field(
        default_factory=dict,
        description="Numerical substitutions for free symbols on the RHS",
    )
    # Free-form explanation; carried along but not scored.
    rationale: str = ""
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class PhysiXObservation(Observation):
    """What the agent sees per step. Inherits ``done`` / ``reward`` from
    :class:`openenv.core.env_server.Observation`."""

    # Observed (noisy) trajectory: one dict per timestep.
    trajectory: list[dict[str, float]] = Field(
        default_factory=list,
        description="Observed (noisy) trajectory as list of timestep dicts",
    )
    # Keys of trajectory[i] that are state variables ("t" excluded).
    state_variables: list[str] = Field(
        default_factory=list,
        description="Names of state-variable keys present in trajectory[i] (excluding t)",
    )
    # One-sentence English physical-context hint shown to the agent.
    hint: str = Field(default="", description="One-sentence physical-context string")
    # Prior turns echoed back so the agent can refine its hypothesis.
    history: list[dict[str, Any]] = Field(
        default_factory=list,
        description="Prior turns surfaced back so the agent can refine",
    )
    # English description of where the last prediction diverged from data.
    mismatch_summary: str = Field(
        default="",
        description="English description of where last prediction diverged",
    )
    # 0-indexed turn counter for the current episode.
    turn: int = Field(default=0, ge=0, description="0-indexed turn counter")
    # Turns left before the episode budget is exhausted.
    turn_remaining: int = Field(
        default=DEFAULT_MAX_TURNS,
        ge=0,
        description="Turns left in the episode budget",
    )
    # Stable identifier of the underlying physical system.
    system_id: str = Field(default="", description="Stable id of underlying system")
    # Aggregate statistics over the observed trajectory.
    stats: dict[str, float] = Field(
        default_factory=dict, description="Aggregate trajectory statistics"
    )
    # Four reward components produced by the previous step's scoring.
    reward_breakdown: dict[str, float] = Field(
        default_factory=dict,
        description="Four reward components from the previous step",
    )
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class PhysiXState(State):
    """Server-side episode state.

    The ground truth lives here for logging/metrics only — it is *never*
    surfaced to the agent. ``last_reward_total`` is the baseline against
    which the next turn's ``progress`` reward delta is computed.
    """

    system_id: str = ""
    ground_truth_equation: str = ""
    ground_truth_params: dict[str, float] = Field(default_factory=dict)
    last_reward_total: float = 0.0
    last_r_match: float = 0.0
    converged: bool = False
    # Turn budget for this episode; must be at least one turn.
    max_turns: int = Field(default=DEFAULT_MAX_TURNS, ge=1)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class HistoryEntry(BaseModel):
    """One completed turn, echoed back to the agent on the next step."""

    model_config = ConfigDict(extra="forbid")

    turn: int
    equation: str
    params: dict[str, float]
    reward_total: float
    reward_components: dict[str, float]
    mismatch_summary: str

    def as_dict(self) -> dict[str, Any]:
        """Serialise for the observation's ``history`` list.

        Reward values are rounded to 4 decimal places to keep prompts short.
        """
        rounded_components = {
            name: round(value, 4) for name, value in self.reward_components.items()
        }
        return {
            "turn": self.turn,
            "equation": self.equation,
            "params": self.params,
            "reward_total": round(self.reward_total, 4),
            "reward_components": rounded_components,
            "mismatch_summary": self.mismatch_summary,
        }
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class RewardBreakdown(BaseModel):
    """The four reward components, each in ``[0, 1]``, plus ``total`` —
    their weighted sum under :data:`REWARD_WEIGHTS`."""

    model_config = ConfigDict(extra="forbid")

    match: float = 0.0
    progress: float = 0.0
    simplicity: float = 0.0
    format: float = 0.0
    total: float = 0.0

    def as_dict(self) -> dict[str, float]:
        """Plain-dict view with a stable key order."""
        return {
            name: getattr(self, name)
            for name in ("match", "progress", "simplicity", "format", "total")
        }
|
physix-live/physix/server/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""HTTP server layer for the PhysiX-Live environment.
|
| 2 |
+
|
| 3 |
+
Submodules:
|
| 4 |
+
- :mod:`physix.server.environment` defines the OpenEnv ``Environment`` subclass
|
| 5 |
+
that owns episode lifecycle and reward dispatch.
|
| 6 |
+
- :mod:`physix.server.app` builds the FastAPI application and exposes a
|
| 7 |
+
``main()`` entry point for the ``physix-server`` console script.
|
| 8 |
+
"""
|
physix-live/physix/server/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (534 Bytes). View file
|
|
|
physix-live/physix/server/__pycache__/app.cpython-311.pyc
ADDED
|
Binary file (6.48 kB). View file
|
|
|
physix-live/physix/server/__pycache__/environment.cpython-311.pyc
ADDED
|
Binary file (12.6 kB). View file
|
|
|
physix-live/physix/server/__pycache__/interactive.cpython-311.pyc
ADDED
|
Binary file (21.9 kB). View file
|
|
|
physix-live/physix/server/app.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI app + ``physix-server`` console-script entry point.
|
| 2 |
+
|
| 3 |
+
Mounts the OpenEnv stateless endpoints (``/reset`` etc.) plus the bespoke
|
| 4 |
+
``/interactive/*`` router that maintains in-process sessions for browsers.
|
| 5 |
+
CORS allows the Vite dev origin out of the box; override with the
|
| 6 |
+
``PHYSIX_CORS_ORIGINS`` env var (comma-separated, or ``*`` for any).
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import argparse
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
|
| 15 |
+
import uvicorn
|
| 16 |
+
from fastapi import FastAPI, Request
|
| 17 |
+
from fastapi.exceptions import RequestValidationError
|
| 18 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 19 |
+
from fastapi.responses import JSONResponse
|
| 20 |
+
from openenv.core.env_server import create_fastapi_app
|
| 21 |
+
from starlette.exceptions import HTTPException as StarletteHTTPException
|
| 22 |
+
|
| 23 |
+
from physix.models import PhysiXAction, PhysiXObservation
|
| 24 |
+
from physix.server.environment import PhysiXEnvironment
|
| 25 |
+
from physix.server.interactive import build_interactive_router
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
_DEFAULT_CORS_ORIGINS = (
|
| 29 |
+
"http://localhost:5173",
|
| 30 |
+
"http://127.0.0.1:5173",
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def build_app() -> FastAPI:
    """Assemble the complete FastAPI application.

    Builds the stateless OpenEnv app (``/reset`` etc.), layers CORS and
    CORS-aware error handlers on top, then mounts the session-based
    ``/interactive`` router used by browsers.
    """
    application = create_fastapi_app(
        env=PhysiXEnvironment,
        action_cls=PhysiXAction,
        observation_cls=PhysiXObservation,
    )
    for configure in (_install_cors, _install_error_handlers):
        configure(application)
    application.include_router(build_interactive_router())
    return application
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _install_cors(app: FastAPI) -> None:
    """Attach CORS middleware configured from ``PHYSIX_CORS_ORIGINS``.

    The env var is a comma-separated list of origins; ``*`` allows any.
    Credentials are disabled in the wildcard case because browsers reject
    ``Access-Control-Allow-Origin: *`` combined with credentials.
    """
    configured = os.environ.get("PHYSIX_CORS_ORIGINS", "")
    if configured:
        origins = [item.strip() for item in configured.split(",") if item.strip()]
    else:
        origins = list(_DEFAULT_CORS_ORIGINS)
    wildcard = origins == ["*"]
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"] if wildcard else origins,
        allow_credentials=not wildcard,
        allow_methods=["*"],
        allow_headers=["*"],
    )
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _install_error_handlers(app: FastAPI) -> None:
    """Re-emit HTTP and validation errors through the middleware stack.

    Starlette's ``CORSMiddleware`` only decorates responses that flow back
    through it; a raw ``HTTPException`` response skips the middleware and
    reaches the browser without ``Access-Control-Allow-Origin``, which the
    browser reports as a generic network error rather than the real status
    (e.g. a 502 "Ollama not reachable" was surfaced as 404 in the dev UI).
    Returning a fresh ``JSONResponse`` keeps the middleware in the loop.
    """

    async def _http_exc(_request: Request, exc: StarletteHTTPException) -> JSONResponse:
        return JSONResponse(
            status_code=exc.status_code,
            content={"detail": exc.detail},
            headers=exc.headers,
        )

    async def _validation_exc(
        _request: Request, exc: RequestValidationError
    ) -> JSONResponse:
        return JSONResponse(status_code=422, content={"detail": exc.errors()})

    # Registration via add_exception_handler is equivalent to the decorator form.
    app.add_exception_handler(StarletteHTTPException, _http_exc)
    app.add_exception_handler(RequestValidationError, _validation_exc)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# Module-level ASGI app so ``uvicorn physix.server.app:app`` (and --reload) can import it.
app: FastAPI = build_app()
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def main() -> None:
    """Console-script entry point: parse CLI flags and launch uvicorn.

    Defaults can be overridden via ``PHYSIX_HOST``, ``PORT`` and
    ``PHYSIX_LOG_LEVEL`` environment variables.
    """
    parser = argparse.ArgumentParser(description="Run the PhysiX-Live env server.")
    parser.add_argument("--host", default=os.environ.get("PHYSIX_HOST", "0.0.0.0"))
    parser.add_argument("--port", type=int, default=int(os.environ.get("PORT", "8000")))
    parser.add_argument(
        "--log-level", default=os.environ.get("PHYSIX_LOG_LEVEL", "info")
    )
    parser.add_argument(
        "--reload",
        action="store_true",
        help="Auto-reload on source changes. Use during development only.",
    )
    options = parser.parse_args()

    logging.basicConfig(level=options.log_level.upper())
    # Pass the import string (not the app object) so --reload can re-import it.
    uvicorn.run(
        "physix.server.app:app",
        host=options.host,
        port=options.port,
        log_level=options.log_level,
        reload=options.reload,
    )


if __name__ == "__main__":
    main()
|
physix-live/physix/server/environment.py
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenEnv :class:`Environment` subclass for PhysiX-Live.
|
| 2 |
+
|
| 3 |
+
Owns one episode's lifecycle (state + budget + termination) and orchestrates
|
| 4 |
+
the parser/simulator/metrics/reward modules. No scoring logic lives here.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import logging
|
| 10 |
+
import uuid
|
| 11 |
+
from typing import Any, Optional
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
from openenv.core.env_server import Environment
|
| 15 |
+
|
| 16 |
+
from physix.models import (
|
| 17 |
+
CONVERGENCE_THRESHOLD,
|
| 18 |
+
DEFAULT_MAX_TURNS,
|
| 19 |
+
HistoryEntry,
|
| 20 |
+
PhysiXAction,
|
| 21 |
+
PhysiXObservation,
|
| 22 |
+
PhysiXState,
|
| 23 |
+
RewardBreakdown,
|
| 24 |
+
)
|
| 25 |
+
from physix.systems import PhysicalSystem, SystemTier, get_system, list_systems_by_tier
|
| 26 |
+
from physix.systems.base import TrajectoryData
|
| 27 |
+
from physix.verifier import (
|
| 28 |
+
ParseError,
|
| 29 |
+
SimulationError,
|
| 30 |
+
compute_match,
|
| 31 |
+
compute_reward,
|
| 32 |
+
parse_equation,
|
| 33 |
+
residual_summary,
|
| 34 |
+
simulate_hypothesis,
|
| 35 |
+
summarize_mismatch,
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
_log = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class PhysiXEnvironment(Environment[PhysiXAction, PhysiXObservation, PhysiXState]):
    """OpenEnv environment driving one episode of equation discovery.

    Owns the episode lifecycle (state, turn budget, termination) and wires
    together the parser / simulator / metrics / reward modules. The scoring
    math itself lives in :mod:`physix.verifier`, not here.
    """

    def __init__(
        self,
        *,
        max_turns: int = DEFAULT_MAX_TURNS,
        train_tiers: tuple[SystemTier, ...] = (SystemTier.TIER_1, SystemTier.TIER_2),
        seed: Optional[int] = None,
    ) -> None:
        super().__init__()
        self._max_turns = max_turns
        self._train_tiers = train_tiers
        self._rng = np.random.default_rng(seed)

        # Placeholder state until reset() selects a system and trajectory.
        self._state = PhysiXState(max_turns=max_turns)
        self._system: Optional[PhysicalSystem] = None
        self._trajectory: Optional[TrajectoryData] = None
        self._history: list[HistoryEntry] = []

    def reset(
        self,
        seed: Optional[int] = None,
        episode_id: Optional[str] = None,
        **kwargs: Any,
    ) -> PhysiXObservation:
        """Start a new episode. Pass ``system_id=`` to force a specific system."""
        if seed is not None:
            # Re-seed so episodes are reproducible on demand.
            self._rng = np.random.default_rng(seed)

        chosen_id = kwargs.get("system_id") or self._sample_training_system_id()

        self._system = get_system(chosen_id)
        self._trajectory = self._system.simulate(self._rng)
        self._history = []

        self._state = PhysiXState(
            episode_id=episode_id or str(uuid.uuid4()),
            step_count=0,
            system_id=chosen_id,
            ground_truth_equation=self._system.ground_truth_equation(),
            ground_truth_params=dict(self._system.parameters),
            last_reward_total=0.0,
            converged=False,
            max_turns=self._max_turns,
        )

        return self._build_observation(
            mismatch_summary="",
            reward_breakdown=RewardBreakdown(),
        )

    def step(
        self,
        action: PhysiXAction,
        timeout_s: Optional[float] = None,
        **kwargs: Any,
    ) -> PhysiXObservation:
        """Score one hypothesis, record it, and advance the turn counter."""
        del timeout_s, kwargs  # accepted for OpenEnv API conformance, unused

        if self._system is None or self._trajectory is None:
            raise RuntimeError("step() called before reset(); call reset() first.")

        self._state.step_count += 1

        breakdown, mismatch = self._score_hypothesis(action)
        self._record_history(action, breakdown, mismatch)

        self._state.last_reward_total = breakdown.total
        self._state.last_r_match = breakdown.match
        # Convergence latches: once True it stays True for the episode.
        self._state.converged = (
            self._state.converged or breakdown.match >= CONVERGENCE_THRESHOLD
        )

        return self._build_observation(
            mismatch_summary=mismatch,
            reward_breakdown=breakdown,
        )

    @property
    def state(self) -> PhysiXState:
        return self._state

    def current_observation(self) -> Optional[PhysiXObservation]:
        """Re-render the observation an external driver should feed to the
        agent for the *next* turn (i.e. before calling :meth:`step`).

        Used by the interactive HTTP router to build prompts mid-session.
        Returns ``None`` before :meth:`reset` has been called.
        """
        if self._system is None or self._trajectory is None:
            return None
        if self._history:
            latest = self._history[-1]
            breakdown = RewardBreakdown(**latest.reward_components)
            mismatch = latest.mismatch_summary
        else:
            breakdown = RewardBreakdown()
            mismatch = ""
        return self._build_observation(
            mismatch_summary=mismatch,
            reward_breakdown=breakdown,
        )

    @property
    def current_trajectory(self) -> Optional[TrajectoryData]:
        return self._trajectory

    @property
    def current_system(self) -> Optional[PhysicalSystem]:
        return self._system

    def _is_done(self) -> bool:
        # Episode ends on convergence or when the turn budget is exhausted.
        return self._state.converged or self._state.step_count >= self._max_turns

    def _score_hypothesis(
        self,
        action: PhysiXAction,
    ) -> tuple[RewardBreakdown, str]:
        """Parse -> simulate -> compare; each failure stage earns a graded reward."""
        assert self._system is not None
        assert self._trajectory is not None

        variables = self._system.state_variables
        declared_params = frozenset(action.params or {})

        try:
            parsed = parse_equation(
                action.equation,
                state_variables=variables,
                parameter_names=declared_params,
            )
        except ParseError as exc:
            _log.debug("parse_equation failed: %s", exc)
            failure = compute_reward(
                parse_succeeded=False,
                r_match=0.0,
                operator_count=0,
                previous_r_match=self._state.last_r_match,
            )
            return failure, f"Parse error: {exc}"

        try:
            predicted = simulate_hypothesis(
                parsed,
                state_variables=variables,
                parameters=dict(action.params or {}),
                initial_conditions=self._trajectory.initial_conditions,
                timestamps=self._trajectory.timestamps,
            )
        except SimulationError as exc:
            _log.debug("simulate_hypothesis failed: %s", exc)
            failure = compute_reward(
                parse_succeeded=True,
                simulation_succeeded=False,
                r_match=0.0,
                operator_count=parsed.operator_count,
                previous_r_match=self._state.last_r_match,
            )
            return failure, f"Simulation error: {exc}"

        r_match = compute_match(
            observed=self._trajectory.states,
            predicted=predicted,
            state_variables=variables,
        )
        residuals = residual_summary(
            timestamps=self._trajectory.timestamps,
            observed=self._trajectory.states,
            predicted=predicted,
            state_variables=variables,
        )
        mismatch = summarize_mismatch(
            observed=self._trajectory.states,
            predicted=predicted,
            state_variables=variables,
            timestamps=self._trajectory.timestamps,
            summary=residuals,
        )

        breakdown = compute_reward(
            parse_succeeded=True,
            simulation_succeeded=True,
            r_match=r_match,
            operator_count=parsed.operator_count,
            previous_r_match=self._state.last_r_match,
        )
        return breakdown, mismatch

    def _record_history(
        self,
        action: PhysiXAction,
        breakdown: RewardBreakdown,
        mismatch_text: str,
    ) -> None:
        # One entry per step(); turn number reflects the already-incremented count.
        self._history.append(
            HistoryEntry(
                turn=self._state.step_count,
                equation=action.equation,
                params=dict(action.params or {}),
                reward_total=breakdown.total,
                reward_components=breakdown.as_dict(),
                mismatch_summary=mismatch_text,
            )
        )

    def _build_observation(
        self,
        *,
        mismatch_summary: str,
        reward_breakdown: RewardBreakdown,
    ) -> PhysiXObservation:
        assert self._system is not None
        assert self._trajectory is not None

        return PhysiXObservation(
            done=self._is_done(),
            reward=reward_breakdown.total,
            trajectory=self._trajectory.to_observation_samples(),
            state_variables=list(self._system.state_variables),
            hint=self._system.hint(self._state.ground_truth_params),
            history=[entry.as_dict() for entry in self._history],
            mismatch_summary=mismatch_summary,
            turn=self._state.step_count,
            turn_remaining=max(0, self._max_turns - self._state.step_count),
            system_id=self._state.system_id,
            stats=self._trajectory.stats(),
            reward_breakdown=reward_breakdown.as_dict(),
        )

    def _sample_training_system_id(self) -> str:
        """Uniformly pick a system id from the configured training tiers."""
        candidates = [
            system_id
            for tier in self._train_tiers
            for system_id in list_systems_by_tier(tier)
        ]
        if not candidates:
            raise RuntimeError(
                f"No training systems found for tiers {self._train_tiers!r}."
            )
        return candidates[int(self._rng.integers(0, len(candidates)))]
|
physix-live/physix/server/interactive.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Session-based REST router for browser-driven episodes."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
import threading
|
| 7 |
+
import time
|
| 8 |
+
import uuid
|
| 9 |
+
from collections.abc import Callable
|
| 10 |
+
from typing import Optional
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
from fastapi import APIRouter, HTTPException
|
| 14 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 15 |
+
|
| 16 |
+
from physix.models import (
|
| 17 |
+
DEFAULT_MAX_TURNS,
|
| 18 |
+
PhysiXAction,
|
| 19 |
+
PhysiXObservation,
|
| 20 |
+
)
|
| 21 |
+
from physix.server.environment import PhysiXEnvironment
|
| 22 |
+
from physix.systems import list_supported_systems, list_systems
|
| 23 |
+
from physix.systems.base import PhysicalSystem, TrajectoryData
|
| 24 |
+
from physix.training.prompt import build_prompt, parse_completion
|
| 25 |
+
from physix.verifier.parser import parse_equation
|
| 26 |
+
from physix.verifier.simulator import simulate_hypothesis
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
_log = logging.getLogger(__name__)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class InteractiveResetRequest(BaseModel):
    """Payload for ``POST /interactive/sessions`` (start a new episode)."""

    model_config = ConfigDict(extra="forbid")

    system_id: Optional[str] = Field(
        default=None,
        description="Force a specific system. None = sample at random.",
    )
    seed: Optional[int] = None
    max_turns: int = Field(default=DEFAULT_MAX_TURNS, ge=1, le=32)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class SystemDescriptor(BaseModel):
    """Identifier and state variables of a system, as exposed to the browser."""

    model_config = ConfigDict(frozen=True)

    system_id: str
    state_variables: tuple[str, ...]
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class InteractiveStartResponse(BaseModel):
    """Response for ``POST /interactive/sessions``: the new session plus its first observation."""

    session_id: str
    observation: PhysiXObservation
    system: SystemDescriptor
    max_turns: int
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class LlmStepRequest(BaseModel):
    """Server-side LLM call. Browser names a model tag; server hits Ollama."""

    model_config = ConfigDict(extra="forbid")

    model: str = "qwen2.5:1.5b-instruct"
    temperature: float = Field(default=0.4, ge=0.0, le=2.0)
    max_tokens: int = Field(default=2048, ge=64, le=8192)
    # Alternate Ollama host URL; None uses the ollama client's default.
    host: Optional[str] = None
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class LlmModelInfo(BaseModel):
    """A single locally-pulled Ollama model tag."""

    model_config = ConfigDict(frozen=True)

    name: str
    size_bytes: Optional[int] = None
    # parameter_size / family come from the Ollama "details" payload; may be absent.
    parameter_size: Optional[str] = None
    family: Optional[str] = None
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class LlmModelsResponse(BaseModel):
    """Model listing plus an optional human-readable error (e.g. daemon unreachable)."""

    models: list[LlmModelInfo] = Field(default_factory=list)
    error: Optional[str] = None
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class LlmStepResponse(BaseModel):
    """Result of one server-driven LLM turn against a session."""

    observation: PhysiXObservation
    # Best-effort forward simulation of the hypothesis for the UI overlay; [] on failure.
    predicted_trajectory: list[dict[str, float]] = Field(default_factory=list)
    action: PhysiXAction
    raw_completion: str
    latency_s: float
    model: str
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class SessionSummary(BaseModel):
    """Status snapshot of a live session (``GET /interactive/sessions/{id}``)."""

    session_id: str
    system_id: str
    turn: int
    max_turns: int
    converged: bool
    done: bool
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class _Session:
    """One live episode plus a per-session lock that serialises its steps."""

    __slots__ = ("env", "system_id", "max_turns", "lock")

    def __init__(self, env: PhysiXEnvironment, system_id: str, max_turns: int) -> None:
        self.env = env
        self.system_id = system_id
        self.max_turns = max_turns
        self.lock = threading.Lock()
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class InteractiveSessionStore:
    """Threadsafe in-memory map from session id to live environment."""

    def __init__(self) -> None:
        self._sessions: dict[str, _Session] = {}
        self._lock = threading.Lock()

    def create(
        self,
        *,
        system_id: Optional[str],
        seed: Optional[int],
        max_turns: int,
    ) -> tuple[str, _Session, PhysiXObservation]:
        """Spin up a fresh environment, register it, and return its first observation."""
        env = PhysiXEnvironment(seed=seed, max_turns=max_turns)
        first_obs = env.reset(seed=seed, system_id=system_id)
        new_session = _Session(
            env=env, system_id=env.state.system_id, max_turns=max_turns
        )
        token = uuid.uuid4().hex
        with self._lock:
            self._sessions[token] = new_session
        return token, new_session, first_obs

    def get(self, session_id: str) -> _Session:
        """Look up a session; raises a 404 ``HTTPException`` for unknown ids."""
        with self._lock:
            found = self._sessions.get(session_id)
        if found is None:
            raise HTTPException(status_code=404, detail="Unknown session_id.")
        return found

    def delete(self, session_id: str) -> None:
        """Drop a session; silently ignores unknown ids."""
        with self._lock:
            self._sessions.pop(session_id, None)

    def __len__(self) -> int:
        with self._lock:
            return len(self._sessions)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
# Callable signatures for the dependency-injected LLM plumbing
# (overridable via build_interactive_router's keyword arguments).
LlmPolicy = Callable[[list[dict[str, str]]], str]
LlmPolicyFactory = Callable[[LlmStepRequest], LlmPolicy]
LlmModelsLister = Callable[[], LlmModelsResponse]
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def default_ollama_models_lister() -> LlmModelsResponse:
    """List locally-pulled Ollama model tags.

    Failures (package missing, daemon unreachable) are reported in the
    response's ``error`` field rather than raised, so the UI can display them.
    """
    try:
        import ollama  # type: ignore[import-not-found]
    except ImportError:
        return LlmModelsResponse(
            models=[],
            error=(
                "The 'ollama' Python package is not installed on the server. "
                "Install with: pip install -e '.[demo]'"
            ),
        )

    try:
        listing = ollama.Client().list()
    except Exception as exc:  # noqa: BLE001 — surfaced in the response body
        return LlmModelsResponse(
            models=[],
            error=(
                f"Could not reach the local Ollama daemon ({exc}). "
                "Is 'ollama serve' running?"
            ),
        )

    # The client may return an object exposing .models or a plain dict.
    entries = getattr(listing, "models", None)
    if entries is None and isinstance(listing, dict):
        entries = listing.get("models", [])

    infos: list[LlmModelInfo] = []
    for item in entries or []:
        tag = _model_attr(item, "model") or _model_attr(item, "name")
        if not isinstance(tag, str) or not tag:
            continue
        detail = _model_attr(item, "details")
        infos.append(
            LlmModelInfo(
                name=tag,
                size_bytes=_coerce_int(_model_attr(item, "size")),
                parameter_size=_model_attr(detail, "parameter_size"),
                family=_model_attr(detail, "family"),
            )
        )

    infos.sort(key=lambda info: info.name)
    return LlmModelsResponse(models=infos)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def _model_attr(obj: object, key: str) -> object:
|
| 203 |
+
if obj is None:
|
| 204 |
+
return None
|
| 205 |
+
if isinstance(obj, dict):
|
| 206 |
+
return obj.get(key)
|
| 207 |
+
return getattr(obj, key, None)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def _coerce_int(value: object) -> Optional[int]:
    """Best-effort ``int`` conversion; ``None`` for missing or unconvertible values."""
    if value is None:
        return None
    try:
        return int(value)
    except (TypeError, ValueError):
        return None
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def default_ollama_policy_factory(request: LlmStepRequest) -> LlmPolicy:
    """Build a chat policy backed by a local Ollama daemon.

    A missing ``ollama`` package surfaces as HTTP 503 at factory time;
    failed chat calls surface as HTTP 502 with the underlying cause, so the
    browser UI sees the real error instead of a generic failure.
    """
    try:
        import ollama  # type: ignore[import-not-found]
    except ImportError as exc:  # pragma: no cover
        raise HTTPException(
            status_code=503,
            detail=(
                "The 'ollama' Python package is not installed on the server. "
                "Install with: pip install -e '.[demo]'"
            ),
        ) from exc

    client = ollama.Client(host=request.host) if request.host else ollama.Client()

    def _policy(prompt: list[dict[str, str]]) -> str:
        try:
            reply = client.chat(
                model=request.model,
                messages=prompt,
                format="json",
                options={
                    "temperature": request.temperature,
                    "num_predict": request.max_tokens,
                },
            )
        except Exception as exc:  # noqa: BLE001 — surfaced as 502
            raise HTTPException(
                status_code=502,
                detail=(
                    f"Ollama call failed for model {request.model!r}: {exc}. "
                    "Is 'ollama serve' running and the model pulled "
                    f"('ollama pull {request.model}')?"
                ),
            ) from exc
        return str(reply["message"]["content"])

    return _policy
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def build_interactive_router(
    store: Optional[InteractiveSessionStore] = None,
    *,
    policy_factory: LlmPolicyFactory = default_ollama_policy_factory,
    models_lister: LlmModelsLister = default_ollama_models_lister,
) -> APIRouter:
    """Assemble the ``/interactive`` router over an (injectable) session store.

    ``policy_factory`` and ``models_lister`` are keyword-injectable so tests
    can swap out the Ollama-backed defaults.
    """
    # Imported here (module is already loaded at import time) to keep the
    # public physix.systems surface explicit at the top of the file.
    from physix.systems import get_system

    sessions = store if store is not None else InteractiveSessionStore()
    router = APIRouter(prefix="/interactive", tags=["Interactive"])

    @router.get("/models", response_model=LlmModelsResponse)
    def list_local_models() -> LlmModelsResponse:
        return models_lister()

    @router.get("/systems", response_model=list[SystemDescriptor])
    def list_public_systems() -> list[SystemDescriptor]:
        descriptors: list[SystemDescriptor] = []
        for system_id in list_supported_systems():
            system = get_system(system_id)
            descriptors.append(
                SystemDescriptor(
                    system_id=system.system_id,
                    state_variables=system.state_variables,
                )
            )
        return descriptors

    @router.post("/sessions", response_model=InteractiveStartResponse)
    def start_session(payload: InteractiveResetRequest) -> InteractiveStartResponse:
        if payload.system_id is not None and payload.system_id not in list_systems():
            raise HTTPException(
                status_code=400, detail=f"Unknown system_id {payload.system_id!r}."
            )
        chosen_system_id = payload.system_id
        if chosen_system_id is None:
            demo_ids = list_supported_systems()
            if demo_ids:
                # default_rng(None) draws fresh OS entropy, matching an
                # explicit no-seed construction.
                rng = np.random.default_rng(payload.seed)
                chosen_system_id = str(rng.choice(demo_ids))
        session_id, session, observation = sessions.create(
            system_id=chosen_system_id,
            seed=payload.seed,
            max_turns=payload.max_turns,
        )
        system = get_system(session.system_id)
        return InteractiveStartResponse(
            session_id=session_id,
            observation=observation,
            system=SystemDescriptor(
                system_id=system.system_id,
                state_variables=system.state_variables,
            ),
            max_turns=session.max_turns,
        )

    @router.post(
        "/sessions/{session_id}/llm-step", response_model=LlmStepResponse
    )
    def llm_step_session(
        session_id: str, payload: LlmStepRequest
    ) -> LlmStepResponse:
        session = sessions.get(session_id)
        # Per-session lock serialises turns; note the LLM call runs under it.
        with session.lock:
            _ensure_budget(session)

            current_obs = session.env.current_observation()
            if current_obs is None:
                raise HTTPException(
                    status_code=500, detail="Session has no current observation."
                )

            policy = policy_factory(payload)
            started = time.perf_counter()
            raw_completion = policy(build_prompt(current_obs))
            latency_s = time.perf_counter() - started

            action = parse_completion(raw_completion)
            observation = session.env.step(action)
            predicted = _safe_predict(session.env, action)

            return LlmStepResponse(
                observation=observation,
                predicted_trajectory=predicted,
                action=action,
                raw_completion=raw_completion,
                latency_s=latency_s,
                model=payload.model,
            )

    @router.delete("/sessions/{session_id}", status_code=204)
    def end_session(session_id: str) -> None:
        sessions.delete(session_id)

    @router.get("/sessions/{session_id}", response_model=SessionSummary)
    def get_session(session_id: str) -> SessionSummary:
        session = sessions.get(session_id)
        exhausted = session.env.state.step_count >= session.max_turns
        return SessionSummary(
            session_id=session_id,
            system_id=session.system_id,
            turn=session.env.state.step_count,
            max_turns=session.max_turns,
            converged=session.env.state.converged,
            done=session.env.state.converged or exhausted,
        )

    return router
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def _ensure_budget(session: _Session) -> None:
    """Reject further steps once the episode's turn budget is spent.

    Raises a 409 so the client knows to start a new session rather than
    retry the same one.
    """
    remaining = session.max_turns - session.env.state.step_count
    if remaining <= 0:
        raise HTTPException(
            status_code=409,
            detail="Episode budget already exhausted; start a new session.",
        )
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
def _safe_predict(
    env: PhysiXEnvironment, action: PhysiXAction
) -> list[dict[str, float]]:
    """Forward-simulate the user's hypothesis for the UI overlay.

    Returns ``[]`` on parse / simulation failure — the env's reward is
    authoritative; this is best-effort visualisation only.
    """
    # Nothing to overlay until the env has an active system and trajectory
    # (e.g. the episode has not started yet).
    system: Optional[PhysicalSystem] = env.current_system
    trajectory: Optional[TrajectoryData] = env.current_trajectory
    if system is None or trajectory is None:
        return []

    # Accept both the agent-supplied symbols and the system's own parameters
    # so the parser does not reject names from either side.
    parameter_names = frozenset(action.params or {}) | frozenset(system.parameters)
    try:
        parsed = parse_equation(
            action.equation,
            state_variables=system.state_variables,
            parameter_names=parameter_names,
        )
    except Exception as exc:  # noqa: BLE001
        _log.debug("predict parse failed: %s", exc)
        return []

    # Agent-supplied parameter values take precedence over the system's.
    merged = {**system.parameters, **(action.params or {})}
    try:
        predicted = simulate_hypothesis(
            parsed,
            state_variables=system.state_variables,
            parameters=merged,
            initial_conditions=trajectory.initial_conditions,
            timestamps=trajectory.timestamps,
        )
    except Exception as exc:  # noqa: BLE001
        _log.debug("predict simulate failed: %s", exc)
        return []

    # Serialise sample-by-sample; abandon the whole overlay on any
    # non-finite value so the UI never receives NaN/inf.
    samples: list[dict[str, float]] = []
    for i, t in enumerate(trajectory.timestamps):
        sample: dict[str, float] = {"t": round(float(t), 5)}
        for var in system.state_variables:
            value = predicted[var][i]
            if not np.isfinite(value):
                return []
            sample[var] = round(float(value), 5)
        samples.append(sample)
    return samples
|
physix-live/physix/systems/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 2 |
+
from physix.systems.registry import (
|
| 3 |
+
SUPPORTED_SYSTEMS,
|
| 4 |
+
SYSTEM_REGISTRY,
|
| 5 |
+
get_system,
|
| 6 |
+
list_supported_systems,
|
| 7 |
+
list_systems,
|
| 8 |
+
list_systems_by_tier,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
# Public re-export surface of the ``physix.systems`` package; must stay in
# sync with the imports at the top of this module.
__all__ = [
    "PhysicalSystem",
    "SystemTier",
    "SYSTEM_REGISTRY",
    "SUPPORTED_SYSTEMS",
    "get_system",
    "list_systems",
    "list_systems_by_tier",
    "list_supported_systems",
]
|
physix-live/physix/systems/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (907 Bytes). View file
|
|
|
physix-live/physix/systems/__pycache__/base.cpython-311.pyc
ADDED
|
Binary file (10.5 kB). View file
|
|
|
physix-live/physix/systems/__pycache__/registry.cpython-311.pyc
ADDED
|
Binary file (2.86 kB). View file
|
|
|
physix-live/physix/systems/__pycache__/tier1.cpython-311.pyc
ADDED
|
Binary file (7.26 kB). View file
|
|
|
physix-live/physix/systems/__pycache__/tier2.cpython-311.pyc
ADDED
|
Binary file (6.69 kB). View file
|
|
|
physix-live/physix/systems/__pycache__/tier3.cpython-311.pyc
ADDED
|
Binary file (7.12 kB). View file
|
|
|
physix-live/physix/systems/base.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Abstract base class and shared types for physical systems.
|
| 2 |
+
|
| 3 |
+
A physical system is responsible for three things and three things only:
|
| 4 |
+
|
| 5 |
+
1. Exposing **stable metadata** (id, tier, state-variable names, NL hint).
|
| 6 |
+
2. Generating a **noisy trajectory** for one episode given an RNG seed.
|
| 7 |
+
3. Reporting its **ground-truth equation** as a canonical SymPy expression
|
| 8 |
+
for logging and verification.
|
| 9 |
+
|
| 10 |
+
Reward computation, simulation of the *agent's* hypotheses, residual analysis,
|
| 11 |
+
and natural-language mismatch summarisation all live in :mod:`physix.verifier`.
|
| 12 |
+
Systems do not import anything from the verifier; the dependency runs in one
|
| 13 |
+
direction.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
|
| 18 |
+
from abc import ABC, ABCMeta, abstractmethod
|
| 19 |
+
from enum import Enum
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 23 |
+
from pydantic._internal._model_construction import ModelMetaclass
|
| 24 |
+
from scipy.integrate import odeint
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class SystemTier(str, Enum):
    """Curriculum tier of a physical system.

    Tier 3 is withheld from training so the project can make a
    generalisation claim ("converges on systems it never trained on").
    Inherits ``str`` so members serialise/compare as plain strings.
    """

    TIER_1 = "tier1"
    TIER_2 = "tier2"
    TIER_3 = "tier3"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class TrajectoryData(BaseModel):
    """Numerical trajectory plus its initial conditions.

    ``initial_conditions`` is carried along so the verifier can re-simulate
    the agent's hypothesis from the *same* starting point, which makes the
    residuals directly comparable.
    """

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    timestamps: np.ndarray
    states: dict[str, np.ndarray]
    initial_conditions: dict[str, float]
    state_variables: tuple[str, ...]

    def to_observation_samples(self, decimals: int = 5) -> list[dict[str, float]]:
        """Render as a JSON-friendly list of timestep dicts for the agent."""

        def _row(i: int, t: float) -> dict[str, float]:
            # "t" first, then state variables in declaration order.
            row: dict[str, float] = {"t": round(float(t), decimals)}
            row.update(
                (var, round(float(self.states[var][i]), decimals))
                for var in self.state_variables
            )
            return row

        return [_row(i, t) for i, t in enumerate(self.timestamps)]

    def stats(self) -> dict[str, float]:
        """Aggregate statistics for the agent's stats panel."""
        ts = self.timestamps
        summary: dict[str, float] = {
            "duration": float(ts[-1] - ts[0]),
            "n_timesteps": float(len(ts)),
            "dt": float(ts[1] - ts[0]) if len(ts) > 1 else 0.0,
        }
        for var in self.state_variables:
            series = self.states[var]
            summary[f"{var}_min"] = float(np.min(series))
            summary[f"{var}_max"] = float(np.max(series))
            summary[f"{var}_std"] = float(np.std(series))
        return summary
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
# Combining pydantic's ModelMetaclass with ABCMeta lets PhysicalSystem keep
# pydantic validation while still enforcing @abstractmethod at instantiation.
class _AbstractModelMeta(ModelMetaclass, ABCMeta):
    """Metaclass union so :class:`PhysicalSystem` is both a pydantic model and
    a true ABC (i.e. instantiating the base or a subclass that fails to
    implement an abstract method raises ``TypeError``)."""
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class PhysicalSystem(BaseModel, ABC, metaclass=_AbstractModelMeta):
    """Abstract physical system.

    Concrete subclasses must:

    - Override :attr:`system_id`, :attr:`tier`, :attr:`state_variables`, and
      :attr:`hint_template`.
    - Implement :meth:`sample_parameters` to draw random episode parameters.
    - Implement :meth:`sample_initial_conditions` to draw random initial state.
    - Implement :meth:`rhs` returning the time derivatives.
    - Implement :meth:`ground_truth_equation` returning a canonical SymPy
      string of the system's equation of motion.

    The base class implements :meth:`simulate` once for all subclasses by
    delegating to ``scipy.integrate.odeint`` against :meth:`rhs`.

    NOTE: :meth:`simulate` mutates ``self`` (caches the sampled parameters
    and initial conditions), so one instance should serve one episode at a
    time.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Stable metadata — overridden by every concrete subclass.
    system_id: str = ""
    tier: SystemTier = SystemTier.TIER_1
    state_variables: tuple[str, ...] = ()
    hint_template: str = ""

    # Episode shape: simulated time span and number of observed samples.
    duration: float = 10.0
    n_timesteps: int = 100
    #: Gaussian noise applied to each observed sample, expressed as a fraction
    #: of the per-variable *standard deviation* of the clean trajectory. Using
    #: std (rather than range) avoids the pathology where a system with a
    #: large total excursion (e.g. free fall) produces overwhelming noise.
    noise_std: float = 0.02

    # Populated by simulate() after sampling; empty until then.
    parameters: dict[str, float] = Field(default_factory=dict)
    initial_conditions: dict[str, float] = Field(default_factory=dict)

    # ------------------------------------------------------------------ API

    @abstractmethod
    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Draw a fresh set of physical parameters for one episode."""

    @abstractmethod
    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Draw fresh initial conditions for one episode."""

    @abstractmethod
    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """Time derivatives at time ``t``.

        ``state`` is laid out in the order of :attr:`state_variables`. The
        return value must be a 1-D array of the same length.
        """

    @abstractmethod
    def ground_truth_equation(self) -> str:
        """Canonical SymPy-grammar string of the equation of motion.

        Used for logging only. Never surfaced to the agent during training
        or inference.
        """

    def hint(self, parameters: dict[str, float]) -> str:
        """Render the natural-language hint for the agent.

        Default behaviour formats :attr:`hint_template` against
        ``parameters``. Subclasses may override for more flexibility.
        """
        try:
            return self.hint_template.format(**parameters)
        except (KeyError, IndexError):
            # Graceful degradation: an un-fillable template is returned raw
            # rather than crashing episode setup.
            return self.hint_template

    # -------------------------------------------------------------- behaviour

    def simulate(self, rng: np.random.Generator) -> TrajectoryData:
        """Generate one noisy trajectory for an episode."""
        # Draw order matters for same-seed reproducibility: params first,
        # then initial conditions, then the per-variable noise below.
        params = self.sample_parameters(rng)
        ic = self.sample_initial_conditions(rng)

        timestamps = np.linspace(0.0, self.duration, self.n_timesteps)
        initial_state = np.array([ic[var] for var in self.state_variables], dtype=float)

        # scipy.integrate.odeint expects f(state, t, *args); we wrap to put
        # `t` second and pass params via closure.
        def _rhs_wrapper(state: np.ndarray, t: float) -> np.ndarray:
            return self.rhs(t, state, params)

        clean = odeint(_rhs_wrapper, initial_state, timestamps, full_output=False)

        states: dict[str, np.ndarray] = {}
        for col, var in enumerate(self.state_variables):
            clean_col = clean[:, col]
            # 1e-6 floor keeps the noise scale finite for near-constant
            # variables whose std would otherwise be ~0.
            scale = max(float(np.std(clean_col)), 1e-6)
            noise = rng.normal(0.0, self.noise_std * scale, size=clean_col.shape)
            states[var] = clean_col + noise

        # Cache for downstream access (parameters surfaced to the env).
        self.parameters = params
        self.initial_conditions = ic

        return TrajectoryData(
            timestamps=timestamps,
            states=states,
            initial_conditions=ic,
            state_variables=self.state_variables,
        )
|
physix-live/physix/systems/registry.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Callable
|
| 4 |
+
|
| 5 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 6 |
+
from physix.systems.tier1 import FreeFall, FreeFallWithDrag, SimplePendulum
|
| 7 |
+
from physix.systems.tier2 import DampedPendulum, DampedSpring, SpringMass
|
| 8 |
+
from physix.systems.tier3 import ChargedInBField, ProjectileWithDrag
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# A factory is any zero-argument callable producing a fresh system instance.
SystemFactory = Callable[[], PhysicalSystem]


# Every known system, keyed by its stable id. The pydantic model classes are
# themselves zero-argument callables, so the classes act as factories.
SYSTEM_REGISTRY: dict[str, SystemFactory] = {
    "free_fall": FreeFall,
    "free_fall_drag": FreeFallWithDrag,
    "simple_pendulum": SimplePendulum,
    "damped_pendulum": DampedPendulum,
    "spring_mass": SpringMass,
    "damped_spring": DampedSpring,
    "projectile_drag": ProjectileWithDrag,
    "charged_b_field": ChargedInBField,
}

# Subset surfaced by list_supported_systems(); its order here is preserved
# by that helper.
SUPPORTED_SYSTEMS: tuple[str, ...] = (
    "free_fall",
    "damped_spring",
    "simple_pendulum",
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def get_system(system_id: str) -> PhysicalSystem:
    """Instantiate a fresh system for ``system_id``.

    Raises ``KeyError`` listing all valid ids when the id is unknown.
    """
    factory = SYSTEM_REGISTRY.get(system_id)
    if factory is None:
        valid = ", ".join(sorted(SYSTEM_REGISTRY))
        raise KeyError(f"Unknown system_id={system_id!r}. Valid: {valid}")
    return factory()
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def list_systems() -> list[str]:
    """All registered system ids, in registration order."""
    return [sid for sid in SYSTEM_REGISTRY]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def list_systems_by_tier(tier: SystemTier) -> list[str]:
    """Ids of registered systems whose instances report the given tier."""
    matching: list[str] = []
    for sid, factory in SYSTEM_REGISTRY.items():
        # Instantiate to read the tier, mirroring the registry's factory API.
        if factory().tier == tier:
            matching.append(sid)
    return matching
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def list_supported_systems() -> list[str]:
    """Supported ids, filtered to those actually present in the registry."""
    registered = SYSTEM_REGISTRY
    return [sid for sid in SUPPORTED_SYSTEMS if sid in registered]
|
physix-live/physix/systems/tier1.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tier-1 physical systems: simple, single-variable, no damping."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class FreeFall(PhysicalSystem):
    """Simple free fall under constant gravity.

    Equation of motion: ``d2y/dt2 = -g``.
    State variables: ``y`` (vertical position), ``vy`` (vertical velocity).
    """

    system_id: str = "free_fall"
    tier: SystemTier = SystemTier.TIER_1
    state_variables: tuple[str, ...] = ("y", "vy")
    duration: float = 3.0  # short enough that the object does not pass y=0
    hint_template: str = (
        "Object dropped near Earth's surface in vacuum. "
        "Mass {mass:.1f} kg, released from rest at altitude {y0:.1f} m."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Draw episode parameters; gravity is fixed, mass is decorative."""
        return {
            "g": 9.81,
            "mass": float(rng.uniform(0.5, 5.0)),
        }

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Release from rest at a random altitude."""
        return {
            "y": float(rng.uniform(20.0, 60.0)),
            "vy": 0.0,
        }

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """Constant downward acceleration; position integrates velocity."""
        _y, vy = state
        return np.array([vy, -params["g"]], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2y/dt2 = -g"

    def hint(self, parameters: dict[str, float]) -> str:
        """Render the NL hint with the episode's mass and release altitude.

        Fix: degrade gracefully (return the raw template) when a key is
        missing, matching the base-class ``hint`` contract instead of
        raising ``KeyError``.
        """
        ic = self.initial_conditions or {"y": 30.0}
        try:
            return self.hint_template.format(mass=parameters["mass"], y0=ic["y"])
        except (KeyError, IndexError):
            return self.hint_template
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class FreeFallWithDrag(PhysicalSystem):
    """Free fall with quadratic air drag — the demo system.

    Equation of motion: ``d2y/dt2 = -g + k * vy**2`` (drag opposes motion;
    when ``vy < 0`` the ``vy**2`` term provides a positive deceleration).

    NOTE(review): the ``+ k*vy**2`` form is only valid while ``vy <= 0``,
    which holds for these drop-from-rest episodes; an ascending object
    would be wrongly accelerated upward — confirm before reusing elsewhere.
    """

    system_id: str = "free_fall_drag"
    tier: SystemTier = SystemTier.TIER_1
    state_variables: tuple[str, ...] = ("y", "vy")
    duration: float = 6.0  # long enough to clearly see terminal-velocity onset
    hint_template: str = (
        "Object dropped from altitude {y0:.1f} m, mass {mass:.1f} kg, "
        "in air. Air resistance may be non-negligible."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Draw mass and drag coefficient for one episode."""
        return {
            "g": 9.81,
            "mass": float(rng.uniform(1.0, 3.0)),
            # Drag coefficient tuned so terminal velocity is reached within ~5s
            # for the altitudes we sample.
            "k": float(rng.uniform(0.02, 0.10)),
        }

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Release from rest at a random altitude."""
        return {
            "y": float(rng.uniform(40.0, 80.0)),
            "vy": 0.0,
        }

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        _y, vy = state
        # vy is negative on descent; vy**2 keeps the magnitude correct.
        return np.array([vy, -params["g"] + params["k"] * vy * vy], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2y/dt2 = -g + k * vy**2"

    def hint(self, parameters: dict[str, float]) -> str:
        """Render the NL hint with the episode's mass and release altitude.

        Fix: degrade gracefully to the raw template on missing keys,
        matching the base-class ``hint`` contract instead of raising
        ``KeyError``.
        """
        ic = self.initial_conditions or {"y": 50.0}
        try:
            return self.hint_template.format(mass=parameters["mass"], y0=ic["y"])
        except (KeyError, IndexError):
            return self.hint_template
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class SimplePendulum(PhysicalSystem):
    """Frictionless pendulum, valid at small or large amplitude.

    Equation of motion: ``d2theta/dt2 = -(g / L) * sin(theta)``.
    """

    system_id: str = "simple_pendulum"
    tier: SystemTier = SystemTier.TIER_1
    state_variables: tuple[str, ...] = ("theta", "dtheta")
    hint_template: str = (
        "Simple pendulum of length {L:.2f} m swinging in vacuum. "
        "No friction, no air resistance."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        length = float(rng.uniform(0.5, 2.0))
        return {"g": 9.81, "L": length}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        # Release from rest somewhere between ~17 and ~57 degrees.
        return {"theta": float(rng.uniform(0.3, 1.0)), "dtheta": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        angle, angular_velocity = state
        angular_accel = -(params["g"] / params["L"]) * np.sin(angle)
        return np.array([angular_velocity, angular_accel], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2theta/dt2 = -(g / L) * sin(theta)"
|
physix-live/physix/systems/tier2.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tier-2 physical systems: damped or with a second active force term."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class DampedPendulum(PhysicalSystem):
    """Pendulum whose swing decays through linear angular damping.

    Equation of motion: ``d2theta/dt2 = -(g/L)*sin(theta) - b*dtheta``.
    """

    system_id: str = "damped_pendulum"
    tier: SystemTier = SystemTier.TIER_2
    state_variables: tuple[str, ...] = ("theta", "dtheta")
    hint_template: str = (
        "Pendulum of length {L:.2f} m. Oscillation amplitude visibly decreases "
        "over time, suggesting linear angular damping."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        # Keep draw order (L then b) stable for same-seed reproducibility.
        params: dict[str, float] = {"g": 9.81}
        params["L"] = float(rng.uniform(0.5, 2.0))
        params["b"] = float(rng.uniform(0.05, 0.30))
        return params

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        return {"theta": float(rng.uniform(0.3, 1.0)), "dtheta": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        angle, omega = state
        restoring = -(params["g"] / params["L"]) * np.sin(angle)
        damping = params["b"] * omega
        return np.array([omega, restoring - damping], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2theta/dt2 = -(g/L)*sin(theta) - b*dtheta"
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class SpringMass(PhysicalSystem):
    """Undamped harmonic oscillator: mass on an ideal spring.

    Equation of motion: ``d2x/dt2 = -(k/m) * x``.
    """

    system_id: str = "spring_mass"
    tier: SystemTier = SystemTier.TIER_2
    state_variables: tuple[str, ...] = ("x", "vx")
    hint_template: str = (
        "Mass {m:.2f} kg attached to a spring of stiffness {k:.2f} N/m, "
        "frictionless surface."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        # Draw order (k then m) preserved for same-seed reproducibility.
        stiffness = float(rng.uniform(2.0, 20.0))
        mass = float(rng.uniform(0.5, 2.0))
        return {"k": stiffness, "m": mass}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        return {"x": float(rng.uniform(0.5, 2.0)), "vx": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        position, velocity = state
        accel = -(params["k"] / params["m"]) * position
        return np.array([velocity, accel], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2x/dt2 = -(k/m) * x"
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class DampedSpring(PhysicalSystem):
    """Damped harmonic oscillator.

    Equation of motion: ``d2x/dt2 = -(k/m)*x - (c/m)*vx``.
    """

    system_id: str = "damped_spring"
    tier: SystemTier = SystemTier.TIER_2
    state_variables: tuple[str, ...] = ("x", "vx")
    hint_template: str = (
        "Mass {m:.2f} kg on a spring of stiffness {k:.2f} N/m with viscous "
        "damping coefficient {c:.2f}. Oscillation amplitude decays over time."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        # Draw order (k, m, c) preserved for same-seed reproducibility.
        stiffness = float(rng.uniform(2.0, 20.0))
        mass = float(rng.uniform(0.5, 2.0))
        damping = float(rng.uniform(0.1, 1.0))
        return {"k": stiffness, "m": mass, "c": damping}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        return {"x": float(rng.uniform(0.5, 2.0)), "vx": 0.0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        position, velocity = state
        spring_term = -(params["k"] / params["m"]) * position
        damping_term = (params["c"] / params["m"]) * velocity
        return np.array([velocity, spring_term - damping_term], dtype=float)

    def ground_truth_equation(self) -> str:
        return "d2x/dt2 = -(k/m)*x - (c/m)*vx"
|
physix-live/physix/systems/tier3.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tier-3 physical systems: held out of training to support a generalisation
|
| 2 |
+
claim ("converges on systems it never trained on")."""
|
| 3 |
+
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from physix.systems.base import PhysicalSystem, SystemTier
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ProjectileWithDrag(PhysicalSystem):
    """2-D projectile subject to quadratic air drag.

    Equations of motion::

        d2x/dt2 = -k * |v| * vx
        d2y/dt2 = -g - k * |v| * vy

    with ``|v| = sqrt(vx**2 + vy**2)``.
    """

    system_id: str = "projectile_drag"
    tier: SystemTier = SystemTier.TIER_3
    state_variables: tuple[str, ...] = ("x", "y", "vx", "vy")
    duration: float = 5.0  # typical flight time for the parameter ranges below
    hint_template: str = (
        "Projectile launched at angle {angle_deg:.0f} degrees with initial "
        "speed {v0:.1f} m/s. Air drag is non-negligible."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        drag = float(rng.uniform(0.005, 0.02))
        return {"g": 9.81, "k": drag}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        # Speed is drawn before angle to keep the RNG stream order stable.
        launch_speed = float(rng.uniform(15.0, 30.0))
        launch_angle = float(rng.uniform(np.deg2rad(30.0), np.deg2rad(70.0)))
        return {
            "x": 0.0,
            "y": 0.0,
            "vx": float(launch_speed * np.cos(launch_angle)),
            "vy": float(launch_speed * np.sin(launch_angle)),
        }

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        _x, _y, vx, vy = state
        magnitude = float(np.sqrt(vx * vx + vy * vy))
        drag = params["k"] * magnitude
        return np.array(
            [vx, vy, -drag * vx, -params["g"] - drag * vy],
            dtype=float,
        )

    def ground_truth_equation(self) -> str:
        # Two-equation system rendered in a single string so the parser can
        # split on ';' or '\n'. Verifier handles both delimiters.
        return (
            "d2x/dt2 = -k*sqrt(vx**2 + vy**2)*vx; "
            "d2y/dt2 = -g - k*sqrt(vx**2 + vy**2)*vy"
        )

    def hint(self, parameters: dict[str, float]) -> str:
        """Format the hint from the sampled launch conditions; fall back to
        the raw template before any episode has been sampled."""
        ic = self.initial_conditions
        if not ic:
            return self.hint_template
        v0 = float(np.sqrt(ic["vx"] ** 2 + ic["vy"] ** 2))
        angle_deg = float(np.rad2deg(np.arctan2(ic["vy"], ic["vx"])))
        return self.hint_template.format(angle_deg=angle_deg, v0=v0)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class ChargedInBField(PhysicalSystem):
    """Charged particle in a uniform magnetic field along z (circular motion).

    Equations of motion (assuming B = B_z * ẑ and v in xy-plane)::

        d2x/dt2 = (q*B/m) * vy
        d2y/dt2 = -(q*B/m) * vx
    """

    system_id: str = "charged_b_field"
    tier: SystemTier = SystemTier.TIER_3
    state_variables: tuple[str, ...] = ("x", "y", "vx", "vy")
    hint_template: str = (
        "Charged particle in a uniform magnetic field. Charge-to-mass ratio "
        "q/m = {qm:.2f}, field strength {B:.2f} T."
    )

    def sample_parameters(self, rng: np.random.Generator) -> dict[str, float]:
        """Draw a random charge sign, mass, and field strength."""
        charge = float(rng.choice([-1.0, 1.0]))
        mass = float(rng.uniform(0.5, 2.0))
        field = float(rng.uniform(0.5, 2.0))
        return {"q": charge, "m": mass, "B": field}

    def sample_initial_conditions(self, rng: np.random.Generator) -> dict[str, float]:
        """Start at the origin with a random in-plane velocity."""
        vx0 = float(rng.uniform(0.5, 2.0))
        vy0 = float(rng.uniform(-2.0, 2.0))
        return {"x": 0.0, "y": 0.0, "vx": vx0, "vy": vy0}

    def rhs(
        self,
        t: float,
        state: np.ndarray,
        params: dict[str, float],
    ) -> np.ndarray:
        """Magnetic-force dynamics in the xy-plane: d/dt of (x, y, vx, vy)."""
        _, _, vx, vy = state
        # omega = q*B/m is the turning rate of the circular motion.
        omega = params["q"] * params["B"] / params["m"]
        return np.array([vx, vy, omega * vy, -omega * vx], dtype=float)

    def ground_truth_equation(self) -> str:
        """Return the reference ODE system as a single ';'-joined string."""
        return "d2x/dt2 = (q*B/m)*vy; d2y/dt2 = -(q*B/m)*vx"

    def hint(self, parameters: dict[str, float]) -> str:
        """Fill the hint template with the charge-to-mass ratio and |B|."""
        ratio = parameters["q"] / parameters["m"]
        return self.hint_template.format(qm=ratio, B=parameters["B"])
|
physix-live/physix/training/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Training utilities for PhysiX-Live.
|
| 2 |
+
|
| 3 |
+
The ``loop`` submodule pulls in heavy ML deps (torch, unsloth, trl) and is
|
| 4 |
+
imported lazily on demand. The lighter prompt + scorer surface is exposed
|
| 5 |
+
here so callers without CUDA can still build datasets and score completions.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from physix.training.prompt import (
|
| 9 |
+
build_prompt,
|
| 10 |
+
parse_completion,
|
| 11 |
+
render_observation_for_prompt,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
"build_prompt",
|
| 16 |
+
"parse_completion",
|
| 17 |
+
"render_observation_for_prompt",
|
| 18 |
+
]
|
physix-live/physix/training/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (672 Bytes). View file
|
|
|
physix-live/physix/training/__pycache__/dataset.cpython-311.pyc
ADDED
|
Binary file (7.39 kB). View file
|
|
|
physix-live/physix/training/__pycache__/prompt.cpython-311.pyc
ADDED
|
Binary file (12.4 kB). View file
|
|
|
physix-live/physix/training/__pycache__/scorer.cpython-311.pyc
ADDED
|
Binary file (8.64 kB). View file
|
|
|
physix-live/physix/training/__pycache__/sft.cpython-311.pyc
ADDED
|
Binary file (11.8 kB). View file
|
|
|
physix-live/physix/training/dataset.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Build the prompt dataset for GRPO training.
|
| 2 |
+
|
| 3 |
+
Responsibility: enumerate the curriculum of physical systems, simulate each
|
| 4 |
+
one a configurable number of times, and emit a :class:`datasets.Dataset`
|
| 5 |
+
whose rows contain everything the training loop needs:
|
| 6 |
+
|
| 7 |
+
- ``prompt``: the chat-format string passed to the model
|
| 8 |
+
- ``system_id``, ``state_variables``, ``parameters``, ``initial_conditions``,
|
| 9 |
+
``timestamps``, ``observed``: the system context the scorer needs
|
| 10 |
+
- ``previous_r_match``: 0.0 at turn-0 (we train on first-turn prompts; the
|
| 11 |
+
iterative refinement skill emerges from the model's general ability to
|
| 12 |
+
read history at inference time)
|
| 13 |
+
|
| 14 |
+
Multi-turn prompts can be added later by extending this builder; the
|
| 15 |
+
hackathon scope deliberately keeps it to turn-0 prompts.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
from __future__ import annotations
|
| 19 |
+
|
| 20 |
+
from collections.abc import Iterable
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
from datasets import Dataset
|
| 24 |
+
from pydantic import BaseModel, ConfigDict
|
| 25 |
+
|
| 26 |
+
from physix.models import DEFAULT_MAX_TURNS, PhysiXObservation
|
| 27 |
+
from physix.systems import (
|
| 28 |
+
SYSTEM_REGISTRY,
|
| 29 |
+
SUPPORTED_SYSTEMS,
|
| 30 |
+
SystemTier,
|
| 31 |
+
get_system,
|
| 32 |
+
list_systems_by_tier,
|
| 33 |
+
)
|
| 34 |
+
from physix.systems.base import PhysicalSystem, TrajectoryData
|
| 35 |
+
from physix.training.prompt import build_prompt
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class DatasetSpec(BaseModel):
    """Configuration for :func:`build_training_dataset`."""

    # Frozen: specs are immutable value objects, safe to share across calls.
    model_config = ConfigDict(frozen=True)

    #: Registered system ids to enumerate; validated by _validate_system_ids.
    system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS
    #: Number of sampled (parameters, ICs) instances generated per system.
    instances_per_system: int = 32
    #: Seed for the single np.random.Generator used for all sampling.
    seed: int = 0
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class EvalDatasetSpec(BaseModel):
    """Held-out evaluation set, drawn separately so seeds do not overlap."""

    # Frozen: specs are immutable value objects, safe to share across calls.
    model_config = ConfigDict(frozen=True)

    #: Tiers that also appear in training; rows from these are in-distribution.
    train_tiers: tuple[SystemTier, ...] = (SystemTier.TIER_1, SystemTier.TIER_2)
    #: Tiers never trained on; their rows are flagged ``is_held_out=True``.
    held_out_tiers: tuple[SystemTier, ...] = (SystemTier.TIER_3,)
    #: Number of sampled instances generated per system.
    instances_per_system: int = 8
    seed: int = 1_000_000  # large to avoid overlap with training seeds
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def build_training_dataset(spec: DatasetSpec | None = None) -> Dataset:
    """Build the GRPO training dataset.

    Each row contains one (system, instance) prompt at turn 0.
    """
    spec = spec or DatasetSpec()
    _validate_system_ids(spec.system_ids)
    rng = np.random.default_rng(spec.seed)

    rows = [
        _build_row(system_id, rng)
        for system_id in spec.system_ids
        for _ in range(spec.instances_per_system)
    ]
    return Dataset.from_list(rows)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _validate_system_ids(system_ids: tuple[str, ...]) -> None:
    """Fail fast if the spec references an unregistered system."""
    if not system_ids:
        raise ValueError(
            "DatasetSpec.system_ids must be non-empty. "
            f"Available: {sorted(SYSTEM_REGISTRY)!r}."
        )
    unknown = [name for name in system_ids if name not in SYSTEM_REGISTRY]
    if not unknown:
        return
    raise ValueError(
        f"Unknown system_ids in DatasetSpec: {unknown!r}. "
        f"Registered: {sorted(SYSTEM_REGISTRY)!r}."
    )
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def build_eval_dataset(spec: EvalDatasetSpec | None = None) -> Dataset:
    """Build a held-out evaluation dataset spanning held-out tiers too.

    Rows carry an ``is_held_out`` flag marking systems drawn from tiers the
    model never trains on.
    """
    spec = spec or EvalDatasetSpec()
    rng = np.random.default_rng(spec.seed)

    # Hoist the held-out membership test out of the loop: the previous code
    # recomputed _list_systems(spec.held_out_tiers) once per generated row
    # (and did a linear scan); a set makes it a single O(1) lookup.
    held_out_ids = set(_list_systems(spec.held_out_tiers))

    rows: list[dict[str, object]] = []
    for system_id in _list_systems(spec.train_tiers + spec.held_out_tiers):
        for _ in range(spec.instances_per_system):
            row = _build_row(system_id, rng)
            row["is_held_out"] = system_id in held_out_ids
            rows.append(row)
    return Dataset.from_list(rows)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _list_systems(tiers: Iterable[SystemTier]) -> list[str]:
    """Flatten the registered system ids across *tiers*, preserving order."""
    return [sid for tier in tiers for sid in list_systems_by_tier(tier)]
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _build_row(system_id: str, rng: np.random.Generator) -> dict[str, object]:
    """Generate one (prompt + system context) row for a given system."""
    system = get_system(system_id)
    traj = system.simulate(rng)
    prompt = build_prompt(_build_observation(system, traj))

    observed = {
        name: traj.states[name].tolist() for name in system.state_variables
    }
    return {
        "prompt": prompt,  # chat list of {"role", "content"} dicts
        "system_id": system_id,
        "state_variables": list(system.state_variables),
        "parameters": dict(system.parameters),
        "initial_conditions": dict(system.initial_conditions),
        "timestamps": traj.timestamps.tolist(),
        "observed": observed,
        "previous_r_match": 0.0,  # turn-0 prompt: no earlier attempt to beat
    }
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def _build_observation(
    system: PhysicalSystem,
    trajectory: TrajectoryData,
) -> PhysiXObservation:
    """Construct a turn-0 :class:`PhysiXObservation` for a fresh system.

    We bypass :class:`PhysiXEnvironment` here because its lifecycle (history,
    convergence flag, episode budget) is irrelevant for dataset construction.
    """
    fields: dict[str, object] = {
        "done": False,
        "reward": None,
        "trajectory": trajectory.to_observation_samples(),
        "state_variables": list(system.state_variables),
        "hint": system.hint(system.parameters),
        "history": [],
        "mismatch_summary": "",
        "turn": 0,
        "turn_remaining": DEFAULT_MAX_TURNS,
        "system_id": system.system_id,
        "stats": trajectory.stats(),
        "reward_breakdown": {},
    }
    return PhysiXObservation(**fields)
|
physix-live/physix/training/loop.py
ADDED
|
@@ -0,0 +1,759 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""GRPO training loop using Unsloth + TRL + W&B.
|
| 2 |
+
|
| 3 |
+
Requires the ``[train]`` optional dependency group. Importing this module on
|
| 4 |
+
a machine without the heavy ML deps installed will fail at module load,
|
| 5 |
+
which is the documented contract — local development tools (env server,
|
| 6 |
+
verifier, demo UI) live in lighter modules and remain usable.
|
| 7 |
+
|
| 8 |
+
Run via::
|
| 9 |
+
|
| 10 |
+
python -m physix.training.loop \
|
| 11 |
+
--model Qwen/Qwen2.5-1.5B-Instruct \
|
| 12 |
+
--output-dir runs/physix-1.5b-rl \
|
| 13 |
+
--num-steps 300
|
| 14 |
+
|
| 15 |
+
Environment variables:
|
| 16 |
+
|
| 17 |
+
- ``WANDB_PROJECT`` (default ``physix-live``)
|
| 18 |
+
- ``HUGGINGFACE_HUB_TOKEN`` if pushing the adapter to the Hub
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
from __future__ import annotations
|
| 22 |
+
|
| 23 |
+
import argparse
|
| 24 |
+
import logging
|
| 25 |
+
import os
|
| 26 |
+
from pathlib import Path
|
| 27 |
+
from typing import Literal, Optional
|
| 28 |
+
|
| 29 |
+
import torch
|
| 30 |
+
from datasets import Dataset
|
| 31 |
+
from pydantic import BaseModel, ConfigDict
|
| 32 |
+
from transformers import AutoTokenizer, TrainerCallback, TrainerControl, TrainerState
|
| 33 |
+
from transformers import TrainingArguments as HFTrainingArguments
|
| 34 |
+
|
| 35 |
+
from physix.systems import SUPPORTED_SYSTEMS
|
| 36 |
+
from physix.training.dataset import (
|
| 37 |
+
DatasetSpec,
|
| 38 |
+
build_training_dataset,
|
| 39 |
+
)
|
| 40 |
+
from physix.training.reward_fns import make_reward_funcs
|
| 41 |
+
from physix.training.scorer import Scorer
|
| 42 |
+
|
| 43 |
+
# IMPORTANT: Unsloth's GRPO patches must be applied *before* importing
|
| 44 |
+
# ``GRPOTrainer`` so its kernels are swapped in. Without this, the trainer
|
| 45 |
+
# falls back to the stock TRL path and Unsloth's optimisations are bypassed
|
| 46 |
+
# (and on recent versions the import will hard-fail). Keep this block
|
| 47 |
+
# directly above the ``trl`` import — order matters.
|
| 48 |
+
#
|
| 49 |
+
# Version note: this requires ``trl<=0.24.0``. Newer TRL versions ship
|
| 50 |
+
# ``trl.experimental.openenv`` which Unsloth's ``patch_trl_openenv``
|
| 51 |
+
# hook tries to ``inspect.getsource()`` on; that fails with ``OSError:
|
| 52 |
+
# could not get source code`` and crashes ``PatchFastRL``. ``trl==0.24.0``
|
| 53 |
+
# is the pinned upper bound declared in unsloth's pyproject.toml.
|
| 54 |
+
from unsloth import FastLanguageModel, PatchFastRL # noqa: E402
|
| 55 |
+
|
| 56 |
+
PatchFastRL("GRPO", FastLanguageModel)
|
| 57 |
+
|
| 58 |
+
from trl import GRPOConfig, GRPOTrainer # noqa: E402 (must come after PatchFastRL)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
_log = logging.getLogger(__name__)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
#: Ablation switch names: each drops one reward signal from the default set.
Ablation = Literal["no_progress", "no_simplicity", "no_format"]
#: Final-artifact persistence strategies (see ``TrainingConfig.save_method``).
SaveMethod = Literal["lora", "merged_16bit", "merged_4bit"]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class TrainingConfig(BaseModel):
    """All hyperparameters in one place; the CLI populates this."""

    # Frozen: a run's config is an immutable record (also what gets logged
    # to W&B via model_dump()).
    model_config = ConfigDict(frozen=True)

    #: Base model HF repo id loaded via Unsloth.
    model_name: str = "Qwen/Qwen2.5-1.5B-Instruct"
    #: Optional path to a LoRA adapter produced by the SFT warm-start step.
    #: When set, the base model is loaded and the adapter weights are applied
    #: before GRPO begins. Without this the cold base model rarely produces
    #: any reward signal in early steps.
    sft_checkpoint: Optional[str] = None
    #: Optional Hub repo id (or local path) of an existing LoRA adapter to
    #: warm-start GRPO from — e.g. a previous GRPO run that was interrupted
    #: and pushed checkpoints to ``hub_checkpoint_repo_id``. When set, the
    #: base ``model_name`` is loaded and this adapter is applied as the
    #: starting trainable LoRA (skipping the fresh ``get_peft_model`` call).
    #: SFT is unnecessary in this case (the adapter is already downstream
    #: of an SFT warm-start), so leave ``sft_checkpoint`` unset when using
    #: this flag.
    lora_adapter_repo: Optional[str] = None
    #: Local directory for Trainer checkpoints and the final artifact.
    output_dir: str = "runs/physix-1.5b-rl"
    max_seq_length: int = 2048
    #: LoRA rank and scaling, passed to ``FastLanguageModel.get_peft_model``.
    lora_r: int = 16
    lora_alpha: int = 32
    # NOTE(review): learning_rate/temperature/max_completion_length/beta/
    # num_generations/batch sizes are presumably forwarded to GRPOConfig by
    # _build_grpo_config (not shown in this file chunk) — confirm there.
    # ``beta`` is the KL coefficient reported by _log_reward_summary.
    learning_rate: float = 5.0e-6
    temperature: float = 0.9
    max_completion_length: int = 256
    beta: float = 0.04
    num_generations: int = 4
    per_device_train_batch_size: int = 1
    gradient_accumulation_steps: int = 8
    #: Total GRPO optimisation steps (also used in the default run name).
    num_steps: int = 300
    #: Seed shared by dataset sampling and LoRA initialisation.
    seed: int = 0
    #: Forwarded to DatasetSpec.instances_per_system.
    instances_per_system: int = 32
    #: Optional reward-set ablation (see ``_select_reward_funcs``).
    ablation: Optional[Ablation] = None
    wandb_project: str = "physix-live"
    wandb_run_name: Optional[str] = None
    push_to_hub: bool = False
    hub_repo_id: Optional[str] = None
    #: HF repo to push LoRA checkpoints to every save_steps during GRPO.
    #: Separate from hub_repo_id (which receives the final merged model).
    #: Set this to enable mid-run checkpoint persistence and W&B artifact logging.
    hub_checkpoint_repo_id: Optional[str] = None
    #: Path to a Trainer checkpoint dir to resume GRPO from (e.g. from a
    #: previous run killed mid-training). Set automatically by train.sh.
    resume_from_checkpoint: Optional[str] = None
    #: How to persist the final adapter. ``"lora"`` saves only the adapter
    #: weights (small, requires the base model at load time). ``"merged_16bit"``
    #: merges the adapter into the base and saves a deployable bf16/fp16
    #: checkpoint (large, but loadable as a normal HF model — what you want
    #: for Hub pushes and Ollama exports).
    save_method: SaveMethod = "merged_16bit"
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def train(config: TrainingConfig) -> None:
    """Run a full GRPO training loop with the given configuration.

    Sequence: init W&B → load model/tokenizer (possibly warm-started) →
    build + chat-format the prompt dataset → select reward functions →
    run GRPOTrainer → log a reward summary → save the final artifact.
    """
    _configure_logging()

    # Deferred import: wandb is only needed when actually training.
    import wandb

    run_name = config.wandb_run_name or f"physix-grpo-{config.num_steps}steps"
    wandb.init(
        project=config.wandb_project,
        name=run_name,
        config=config.model_dump(),
        tags=["grpo", "physix", config.model_name.split("/")[-1]],
        resume="allow",
    )

    # Pin a few high-signal pointers into the run summary right away so the
    # W&B "Overview" tab shows them prominently (no scrolling, no hunting).
    if config.hub_checkpoint_repo_id:
        ckpt_url = f"https://huggingface.co/{config.hub_checkpoint_repo_id}"
        wandb.run.summary["checkpoint/repo"] = config.hub_checkpoint_repo_id
        wandb.run.summary["checkpoint/repo_url"] = ckpt_url
    if config.hub_repo_id:
        wandb.run.summary["model/final_repo"] = config.hub_repo_id
        wandb.run.summary["model/final_url"] = (
            f"https://huggingface.co/{config.hub_repo_id}"
        )
    if config.lora_adapter_repo:
        wandb.run.summary["resume/from_adapter"] = config.lora_adapter_repo
        wandb.run.summary["resume/from_url"] = (
            f"https://huggingface.co/{config.lora_adapter_repo}"
        )
        # If a parent W&B run is named (set by the orchestrator script),
        # surface it prominently so the lineage is one click away.
        parent_run = os.environ.get("WANDB_RESUMED_FROM")
        if parent_run:
            wandb.run.summary["resume/parent_wandb_run"] = parent_run
            wandb.run.summary["resume/parent_wandb_url"] = (
                f"https://wandb.ai/{wandb.run.entity}/{wandb.run.project}/runs/{parent_run}"
            )
        print(
            f"\n[wandb] WARM-STARTED run — adapter "
            f"https://huggingface.co/{config.lora_adapter_repo}\n",
            flush=True,
        )

    _log.info("Loading model %s with Unsloth (4-bit, LoRA-%d)", config.model_name, config.lora_r)
    model, tokenizer = _load_model_and_tokenizer(config)
    train_dataset = _build_and_format_dataset(config, tokenizer)

    reward_funcs = _select_reward_funcs(config.ablation)

    grpo_config = _build_grpo_config(config)

    callbacks = []
    if config.hub_checkpoint_repo_id:
        callbacks.append(_WandbCheckpointCallback(config.hub_checkpoint_repo_id))
        _log.info(
            "Checkpoint hub push enabled → %s (every %d steps)",
            config.hub_checkpoint_repo_id,
            grpo_config.save_steps,
        )

    trainer = GRPOTrainer(
        model=model,
        processing_class=tokenizer,
        args=grpo_config,
        train_dataset=train_dataset,
        reward_funcs=reward_funcs,
        callbacks=callbacks or None,
    )

    if config.resume_from_checkpoint:
        _log.info("Resuming from checkpoint: %s", config.resume_from_checkpoint)

    _log.info("Starting GRPO training for %d steps", config.num_steps)
    # resume_from_checkpoint=None is a normal fresh start for trainer.train().
    trainer.train(resume_from_checkpoint=config.resume_from_checkpoint)

    # Raises if no reward metric was ever logged (silent-failure guard).
    _log_reward_summary(trainer)

    _log.info("Saving adapter (%s) to %s", config.save_method, config.output_dir)
    _save_artifacts(model, tokenizer, config)
    wandb.finish()
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def _log_reward_summary(trainer: "GRPOTrainer") -> None:
    """Emit a final reward-signal summary so log readers don't misinterpret
    GRPO's near-zero ``train/loss`` as a broken run. ``train/loss`` is just
    the KL term; what matters is whether reward components moved.

    Prints first→last values for every ``rewards/*`` (and ``reward``) metric
    found in ``log_history``, plus an explicit interpretation hint. If *no*
    reward keys are present we hard-fail — that means the reward functions
    never produced a non-NaN value, which is a real bug worth surfacing.
    """
    def _is_reward_key(key: str) -> bool:
        return key == "reward" or key.startswith("rewards/")

    history = getattr(trainer.state, "log_history", []) or []
    with_rewards = [entry for entry in history if any(map(_is_reward_key, entry))]
    if not with_rewards:
        _log.error(
            "No reward metrics logged during training. This usually means "
            "every rollout failed to parse. Check `train/reward` in W&B and "
            "the most recent completion samples."
        )
        raise RuntimeError(
            "GRPO produced no reward metrics — training silently failed."
        )

    earliest = with_rewards[0]
    latest = with_rewards[-1]
    _log.info("=" * 60)
    _log.info("GRPO reward summary (first → last logged step):")
    for key in sorted(latest):
        if not _is_reward_key(key):
            continue
        start = earliest.get(key)
        end = latest.get(key)
        if isinstance(start, (int, float)) and isinstance(end, (int, float)):
            _log.info(" %-40s %.4f → %.4f (Δ=%+.4f)", key, start, end, end - start)
    _log.info("-" * 60)
    _log.info("NOTE: train/loss near zero is EXPECTED for GRPO — it is only")
    _log.info("the KL-term contribution (beta=%.3f). The model learns via the",
              trainer.args.beta)
    _log.info("advantage-weighted policy gradient, which doesn't appear in")
    _log.info("the displayed loss scalar. Trust `train/reward` and `rewards/*`.")
    _log.info("=" * 60)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def _load_model_and_tokenizer(
    config: TrainingConfig,
) -> tuple[FastLanguageModel, AutoTokenizer]:
    """Load Qwen via Unsloth in 4-bit and attach a LoRA adapter.

    Three mutually exclusive paths, in priority order:
    1. ``config.lora_adapter_repo`` — resume from an existing adapter.
    2. ``config.sft_checkpoint`` — start from the SFT-warmed checkpoint.
    3. Cold base model (warned against; early reward signal is near-zero).

    If ``config.sft_checkpoint`` is set, the SFT adapter weights are merged
    on top of the base model before GRPO starts. This gives GRPO a warm base
    policy that already knows the JSON format and equation grammar, so early
    rollouts produce meaningful reward signal instead of all scoring zero.
    """
    if config.lora_adapter_repo:
        # Resume path: load the base model and attach the existing LoRA
        # adapter via PEFT. We deliberately do NOT call
        # ``FastLanguageModel.from_pretrained(model_name=adapter_repo)``
        # because the adapter's ``adapter_config.json`` may carry a stale
        # ``base_model_name_or_path`` pointing at a path that only existed
        # inside the previous training container (e.g. ``/tmp/physix-sft/merged``).
        # PEFT's ``load_adapter`` ignores that field — it adapts onto whatever
        # base we hand it.
        _log.info(
            "Resuming from existing LoRA adapter %s on top of %s",
            config.lora_adapter_repo,
            config.model_name,
        )
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=config.model_name,
            max_seq_length=config.max_seq_length,
            load_in_4bit=True,
            dtype=None,
        )
        # Wrap the base in a fresh trainable LoRA, then overwrite its
        # weights with the saved adapter. We use the adapter's own r/alpha
        # by relying on PEFT's ``load_adapter`` resolving from the repo's
        # adapter_config.json. The dummy ``get_peft_model`` call is just to
        # turn the model into a ``PeftModel`` instance whose ``load_adapter``
        # method accepts a hub repo id.
        model = FastLanguageModel.get_peft_model(
            model,
            r=config.lora_r,
            lora_alpha=config.lora_alpha,
            target_modules=[
                "q_proj", "k_proj", "v_proj", "o_proj",
                "gate_proj", "up_proj", "down_proj",
            ],
            bias="none",
            use_gradient_checkpointing="unsloth",
            random_state=config.seed,
        )
        # Overwrite the freshly-initialised LoRA weights with the saved ones.
        # ``adapter_name='default'`` matches what ``get_peft_model`` creates.
        model.load_adapter(
            config.lora_adapter_repo,
            adapter_name="default",
            is_trainable=True,
        )
        _log.info("Adapter loaded; LoRA is trainable and ready for GRPO.")
        return model, tokenizer

    if config.sft_checkpoint:
        _log.info(
            "Loading SFT-warmed model from %s (GRPO will refine from here)",
            config.sft_checkpoint,
        )
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=config.sft_checkpoint,
            max_seq_length=config.max_seq_length,
            load_in_4bit=True,
            dtype=None,
        )
    else:
        _log.warning(
            "No --sft-checkpoint supplied. Starting GRPO from cold base model. "
            "Early reward signal will be near-zero; consider running sft.py first."
        )
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=config.model_name,
            max_seq_length=config.max_seq_length,
            load_in_4bit=True,
            dtype=None,
        )
    # Shared tail for the SFT-warmed and cold paths: attach a fresh trainable
    # LoRA on all attention + MLP projection matrices.
    model = FastLanguageModel.get_peft_model(
        model,
        r=config.lora_r,
        lora_alpha=config.lora_alpha,
        target_modules=[
            "q_proj",
            "k_proj",
            "v_proj",
            "o_proj",
            "gate_proj",
            "up_proj",
            "down_proj",
        ],
        bias="none",
        use_gradient_checkpointing="unsloth",
        random_state=config.seed,
    )
    return model, tokenizer
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def _build_and_format_dataset(
    config: TrainingConfig,
    tokenizer: AutoTokenizer,
) -> Dataset:
    """Build the prompt dataset and render each chat prompt to a flat string."""
    dataset = build_training_dataset(
        DatasetSpec(
            system_ids=SUPPORTED_SYSTEMS,
            instances_per_system=config.instances_per_system,
            seed=config.seed,
        )
    )
    _log.info(
        "Built training dataset: %d rows across %d systems (%s)",
        len(dataset),
        len(SUPPORTED_SYSTEMS),
        ", ".join(SUPPORTED_SYSTEMS),
    )

    def _apply_chat_template(example: dict[str, object]) -> dict[str, object]:
        # Replace the chat-message list with the tokenizer-rendered string.
        rendered = tokenizer.apply_chat_template(
            example["prompt"],
            tokenize=False,
            add_generation_prompt=True,
        )
        return {"prompt": rendered}

    return dataset.map(_apply_chat_template)
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def _select_reward_funcs(ablation: Optional[Ablation]) -> list[object]:
    """Return the GRPO reward function set.

    Default set (5 functions, summed by GRPOTrainer into the advantage):

    - ``reward_match`` — raw R² (linear).
    - ``reward_match_dense`` — sqrt(R²); dense low-value gradient.
    - ``reward_correctness`` — binary cliff at R² ≥ 0.70.
    - ``reward_simplicity`` — gated on R² ≥ 0.10 (anti-hack).
    - ``reward_format`` — 1.0 only if parsed AND simulated.

    Rationale: the earlier ``{match, progress, simplicity, format}`` mix
    duplicated match via progress (single-turn ``previous_r_match=0``) and
    let the model farm format+simplicity with trivial parseable equations.
    This set removes the duplicate and triple-weights correctness through
    three correctness-shaped signals so physical accuracy dominates the
    advantage.

    Ablations drop one signal at a time (experiment matrix only). The
    legacy ``no_progress`` ablation maps to the full set, since progress
    no longer exists to be removed.
    """
    funcs = make_reward_funcs(Scorer())
    order = ("match", "match_dense", "correctness", "simplicity", "format")
    # Which component each ablation removes; None means "remove nothing".
    removed_by_ablation = {
        None: None,
        "no_progress": None,  # backward-compat alias for old job configs
        "no_simplicity": "simplicity",
        "no_format": "format",
    }
    if ablation not in removed_by_ablation:
        raise ValueError(
            f"Unknown ablation {ablation!r}. Choose from "
            "no_progress | no_simplicity | no_format | None."
        )
    excluded = removed_by_ablation[ablation]
    return [funcs[name] for name in order if name != excluded]
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
class _WandbCheckpointCallback(TrainerCallback):
    """Make checkpoints first-class in W&B.

    After every Trainer save, this callback:

    1. Resolves the latest commit hash on the Hub repo (best-effort — the
       trainer's own ``PushToHubCallback`` runs ``git push`` asynchronously
       so we may briefly see an older commit; that is fine, it self-corrects
       on the next save).
    2. Updates the W&B run summary with persistent, prominent keys
       (visible in the "Overview" tab of the run):
       - ``checkpoint/last_step``
       - ``checkpoint/last_commit``
       - ``checkpoint/repo_url``
       - ``checkpoint/last_url``
    3. Logs a step-indexed scalar ``checkpoint/step`` so a chart appears
       on the W&B run page (one tick per save).
    4. Maintains a running ``checkpoint_history`` ``wandb.Table`` so every
       saved checkpoint is browsable as a sortable table directly on the
       run page (Tables tab).
    5. Prints a banner to stdout (visible in ``hf jobs logs``) with the
       direct URL — so the checkpoint is also impossible to miss in the
       job logs.

    No model bytes are uploaded to W&B; the actual weights live on the HF
    Hub checkpoint repo. We never crash training if any of this fails.
    """

    def __init__(self, hub_checkpoint_repo_id: str) -> None:
        self._repo = hub_checkpoint_repo_id
        self._repo_url = f"https://huggingface.co/{hub_checkpoint_repo_id}"
        self._table = None  # lazy: wandb may not be initialised at __init__

    def on_train_begin(
        self,
        args: HFTrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ) -> None:
        # Pin the repo URL into the run config + summary at the very start
        # so the link is visible on the W&B "Overview" panel from step 0.
        try:
            import wandb

            if wandb.run is None:
                return
            wandb.run.summary["checkpoint/repo_url"] = self._repo_url
            wandb.run.summary["checkpoint/repo"] = self._repo
            wandb.config.update(
                {"checkpoint_repo_url": self._repo_url, "checkpoint_repo": self._repo},
                allow_val_change=True,
            )
            print(
                f"\n[wandb] Checkpoint repo pinned in run summary: {self._repo_url}\n",
                flush=True,
            )
        except Exception as exc:  # noqa: BLE001
            _log.warning("Could not pin checkpoint repo to W&B summary: %s", exc)

    def on_save(
        self,
        args: HFTrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ) -> None:
        try:
            import wandb

            if wandb.run is None:
                return
            step = state.global_step
            commit_sha = self._latest_commit_sha()
            short = (commit_sha or "pending")[:8]
            tree_url = (
                f"{self._repo_url}/tree/{commit_sha}"
                if commit_sha
                else f"{self._repo_url}/tree/main"
            )

            # 1. Persistent summary keys (top-of-run, always visible).
            wandb.run.summary["checkpoint/last_step"] = step
            wandb.run.summary["checkpoint/last_commit"] = commit_sha or "pending"
            wandb.run.summary["checkpoint/last_url"] = tree_url

            # 2. Step-indexed scalar so a small chart appears on the run page.
            wandb.log({"checkpoint/step": step}, step=step)

            # 3. Running history table.
            if self._table is None:
                self._table = wandb.Table(
                    columns=["step", "commit", "url", "repo"]
                )
            self._table.add_data(step, commit_sha or "pending", tree_url, self._repo)
            # Re-log the entire table each time so the latest version shows.
            wandb.log({"checkpoint_history": self._table}, step=step)

            # 4. Stdout banner — also visible in `hf jobs logs`.
            print(
                "\n"
                "================ CHECKPOINT SAVED ================\n"
                f"  step  : {step}\n"
                f"  commit: {short}\n"
                f"  url   : {tree_url}\n"
                f"  repo  : {self._repo_url}\n"
                "==================================================\n",
                flush=True,
            )
            _log.info(
                "W&B checkpoint metadata logged: step=%d commit=%s",
                step,
                short,
            )
        except Exception as exc:  # noqa: BLE001
            _log.warning(
                "W&B checkpoint callback skipped at step %d: %s. "
                "Training continues; the actual checkpoint is still pushed "
                "to the HF Hub by the trainer's PushToHubCallback.",
                state.global_step,
                exc,
            )

    def _latest_commit_sha(self) -> Optional[str]:
        """Best-effort fetch of the most recent commit on the checkpoint repo.

        Uses ``HfApi.list_repo_commits`` if available; returns ``None`` on
        any failure. The async ``git push`` may not be done at the instant
        ``on_save`` fires, so we may see the *previous* checkpoint's commit;
        that's acceptable — it self-corrects on the next save.
        """
        try:
            from huggingface_hub import HfApi

            # FIX: accept either token env var. The trainer's hub push
            # (see the hub_token fallback in _build_grpo_config) works with
            # HF_TOKEN alone, but this lookup previously required
            # HUGGINGFACE_HUB_TOKEN — so jobs exporting only HF_TOKEN pushed
            # fine yet always showed "pending" commits on private repos.
            token = os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN")
            api = HfApi(token=token)
            commits = api.list_repo_commits(repo_id=self._repo, repo_type="model")
            if commits:
                return commits[0].commit_id
        except Exception as exc:  # noqa: BLE001
            _log.debug("Could not fetch latest commit sha: %s", exc)
        return None
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
def _build_grpo_config(config: TrainingConfig) -> GRPOConfig:
    """Translate our TrainingConfig into a TRL ``GRPOConfig``.

    Validates the batch geometry (GRPO groups completions per prompt, so
    the effective batch must divide evenly into generation groups) and
    wires optional checkpoint pushing to the HF Hub.

    Raises:
        ValueError: if ``per_device_train_batch_size *
            gradient_accumulation_steps`` is not divisible by
            ``num_generations``.
    """
    # NOTE on "train/loss → 0" — this is expected GRPO behaviour, not a bug.
    # The scalar TRL logs as `train/loss` is *only* the KL-divergence term
    # weighted by beta; the advantage-weighted policy-gradient term that
    # actually drives learning contributes gradients but is not in the
    # displayed loss. At step 0, policy == reference → KL = 0 → loss = 0.
    # As the policy drifts, loss rises slightly (with beta=0.04 typically
    # to ~0.001–0.05). The signal you care about is `train/rewards/*` and
    # `train/reward`, not `train/loss`. See:
    #   https://github.com/huggingface/trl/issues/2703
    #   https://github.com/huggingface/open-r1/issues/239
    effective_batch = (
        config.per_device_train_batch_size * config.gradient_accumulation_steps
    )
    if effective_batch % config.num_generations != 0:
        raise ValueError(
            f"effective_batch_size ({effective_batch}) must be divisible by "
            f"num_generations ({config.num_generations}). Adjust "
            "per_device_train_batch_size, gradient_accumulation_steps, or "
            "num_generations."
        )
    # Only enable hub pushing when a checkpoint repo was supplied; otherwise
    # leave the kwargs empty so GRPOConfig keeps its push-disabled defaults.
    hub_kwargs: dict = {}
    if config.hub_checkpoint_repo_id:
        hub_kwargs = dict(
            push_to_hub=True,
            hub_model_id=config.hub_checkpoint_repo_id,
            # "checkpoint" strategy pushes every save_steps, not just at end.
            hub_strategy="checkpoint",
            hub_token=os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN"),
        )

    return GRPOConfig(
        output_dir=config.output_dir,
        learning_rate=config.learning_rate,
        per_device_train_batch_size=config.per_device_train_batch_size,
        gradient_accumulation_steps=config.gradient_accumulation_steps,
        num_train_epochs=1,
        # max_steps takes precedence over epochs; it is the real stop signal.
        max_steps=config.num_steps,
        num_generations=config.num_generations,
        max_completion_length=config.max_completion_length,
        # Prompt budget is whatever the context window leaves after rollouts.
        max_prompt_length=config.max_seq_length - config.max_completion_length,
        temperature=config.temperature,
        beta=config.beta,
        logging_steps=1,
        save_strategy="steps",
        # At most ~6 checkpoints per run, but never more often than every 50 steps.
        save_steps=max(50, config.num_steps // 6),
        report_to=["wandb"],
        run_name=config.wandb_run_name,
        seed=config.seed,
        # Prefer bf16 where the GPU supports it, else fp16; neither on CPU.
        bf16=torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
        fp16=not torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
        **hub_kwargs,
    )
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
def _save_artifacts(
    model: FastLanguageModel,
    tokenizer: AutoTokenizer,
    config: TrainingConfig,
) -> None:
    """Persist the trained adapter via Unsloth's save path.

    ``save_pretrained_merged`` dispatches on ``save_method``:

    - ``"lora"``: writes only the adapter weights (small; requires the base
      model at load time).
    - ``"merged_16bit"``: merges LoRA into base and writes a standard HF
      checkpoint in bf16/fp16 (large; loadable without Unsloth, exportable to
      GGUF for Ollama).
    - ``"merged_4bit"``: same merge but quantised back to 4-bit.

    Hub pushes use the same ``save_method`` so the on-disk artifact and the
    Hub artifact are byte-identical.
    """
    out_path = Path(config.output_dir)
    out_path.mkdir(parents=True, exist_ok=True)

    save_dir = out_path / config.save_method
    model.save_pretrained_merged(
        save_directory=str(save_dir),
        tokenizer=tokenizer,
        save_method=config.save_method,
    )

    if config.push_to_hub and config.hub_repo_id:
        _log.info("Pushing %s artifact to Hugging Face Hub: %s", config.save_method, config.hub_repo_id)
        model.push_to_hub_merged(
            config.hub_repo_id,
            tokenizer,
            save_method=config.save_method,
            # FIX: fall back to HF_TOKEN for consistency with the hub_token
            # lookup in _build_grpo_config — previously a job exporting only
            # HF_TOKEN could push mid-run checkpoints but fail the final
            # artifact push with an auth error.
            token=os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN"),
        )
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
def _configure_logging() -> None:
    """Initialise root logging; level comes from ``PHYSIX_LOG_LEVEL`` (default INFO)."""
    level = os.environ.get("PHYSIX_LOG_LEVEL", "INFO")
    logging.basicConfig(
        format="[%(asctime)s] %(levelname)s %(name)s | %(message)s",
        level=level,
    )
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
def _parse_args() -> TrainingConfig:
    """Parse CLI flags into a :class:`TrainingConfig`.

    ``--sft-checkpoint`` and ``--lora-adapter-repo`` are mutually
    exclusive warm-start paths; supplying both is a usage error.
    """
    cli = argparse.ArgumentParser(description="Train PhysiX-Live with GRPO.")
    cli.add_argument("--model", default="Qwen/Qwen2.5-1.5B-Instruct")
    cli.add_argument("--output-dir", default="runs/physix-1.5b-rl")
    cli.add_argument("--num-steps", type=int, default=300)
    cli.add_argument("--learning-rate", type=float, default=5.0e-6)
    cli.add_argument("--num-generations", type=int, default=4)
    cli.add_argument(
        "--max-completion-length",
        type=int,
        default=256,
        help="Max tokens per rollout completion. Shorter = faster generation.",
    )
    cli.add_argument("--lora-r", type=int, default=16)
    cli.add_argument("--instances-per-system", type=int, default=32)
    cli.add_argument(
        "--ablation",
        choices=("no_progress", "no_simplicity", "no_format"),
        default=None,
    )
    cli.add_argument(
        "--save-method",
        choices=("lora", "merged_16bit", "merged_4bit"),
        default="merged_16bit",
        help="How to persist the final adapter (merged_16bit is deployable).",
    )
    cli.add_argument(
        "--sft-checkpoint",
        default=None,
        help="Path to a merged SFT model from sft.py to warm-start from.",
    )
    cli.add_argument(
        "--lora-adapter-repo",
        default=None,
        help=(
            "Hub repo id (or local path) of an existing LoRA adapter to warm-start "
            "GRPO from — e.g. a previous run's checkpoint at "
            "user/physix-1.5b-rl-ckpt. Mutually exclusive with --sft-checkpoint."
        ),
    )
    cli.add_argument("--wandb-project", default="physix-live")
    cli.add_argument("--wandb-run-name", default=None)
    cli.add_argument("--push-to-hub", action="store_true")
    cli.add_argument("--hub-repo-id", default=None)
    cli.add_argument(
        "--hub-checkpoint-repo-id",
        default=None,
        help="HF repo to push LoRA checkpoints to every save_steps (e.g. user/physix-ckpt).",
    )
    cli.add_argument(
        "--resume-from-checkpoint",
        default=None,
        help="Path to a Trainer checkpoint directory to resume GRPO from.",
    )
    cli.add_argument("--seed", type=int, default=0)

    opts = cli.parse_args()

    # Two warm-start mechanisms cannot be combined; fail fast with usage help.
    if opts.sft_checkpoint and opts.lora_adapter_repo:
        cli.error(
            "--sft-checkpoint and --lora-adapter-repo are mutually exclusive. "
            "Use --lora-adapter-repo to resume from a prior GRPO run, or "
            "--sft-checkpoint for a fresh GRPO from a merged SFT model."
        )

    return TrainingConfig(
        model_name=opts.model,
        sft_checkpoint=opts.sft_checkpoint,
        lora_adapter_repo=opts.lora_adapter_repo,
        output_dir=opts.output_dir,
        num_steps=opts.num_steps,
        learning_rate=opts.learning_rate,
        num_generations=opts.num_generations,
        max_completion_length=opts.max_completion_length,
        lora_r=opts.lora_r,
        instances_per_system=opts.instances_per_system,
        ablation=opts.ablation,
        save_method=opts.save_method,
        wandb_project=opts.wandb_project,
        wandb_run_name=opts.wandb_run_name,
        push_to_hub=opts.push_to_hub,
        hub_repo_id=opts.hub_repo_id,
        hub_checkpoint_repo_id=opts.hub_checkpoint_repo_id,
        resume_from_checkpoint=opts.resume_from_checkpoint,
        seed=opts.seed,
    )
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
def main() -> None:
    """CLI entry point: parse flags, pin the W&B project, run training."""
    cfg = _parse_args()
    # Respect an externally exported WANDB_PROJECT; fill it in only if unset.
    os.environ.setdefault("WANDB_PROJECT", cfg.wandb_project)
    train(cfg)
|
| 756 |
+
|
| 757 |
+
|
| 758 |
+
# Script entry point — keeps the module importable without side effects.
if __name__ == "__main__":
    main()
|
physix-live/physix/training/prompt.py
ADDED
|
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Prompt rendering and completion parsing for PhysiX-Live.
|
| 2 |
+
|
| 3 |
+
Responsibility:
|
| 4 |
+
|
| 5 |
+
- :func:`render_observation_for_prompt`: serialise a :class:`PhysiXObservation`
|
| 6 |
+
into a compact, token-efficient string the agent can read.
|
| 7 |
+
- :func:`build_prompt`: combine the system message, grammar hint, and the
|
| 8 |
+
current observation into a single chat-formatted prompt.
|
| 9 |
+
- :func:`parse_completion`: parse a raw model completion (which may contain a
|
| 10 |
+
JSON object inside arbitrary text) into a :class:`PhysiXAction`.
|
| 11 |
+
|
| 12 |
+
This module imports nothing from :mod:`torch`, :mod:`unsloth`, or :mod:`trl`
|
| 13 |
+
so it can be tested on any machine.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
from __future__ import annotations
|
| 17 |
+
|
| 18 |
+
import json
|
| 19 |
+
import re
|
| 20 |
+
from typing import Any
|
| 21 |
+
|
| 22 |
+
from physix.models import (
|
| 23 |
+
DEFAULT_MAX_TURNS,
|
| 24 |
+
PhysiXAction,
|
| 25 |
+
PhysiXObservation,
|
| 26 |
+
)
|
| 27 |
+
from physix.verifier.parser import GRAMMAR_HINT
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# System prompt: role framing + the equation grammar + a strict JSON output
# contract. The key names spelled out here ("equation"/"params"/"rationale")
# must match what parse_completion() reads; drift would score r_format=0.
SYSTEM_MESSAGE: str = (
    "You are an expert physicist. Your task is to discover the equation of "
    "motion that produced an observed trajectory. Each turn you propose a "
    "candidate equation; the environment simulates it and tells you how well "
    "the prediction matches observation. Refine your guess across turns based "
    "on the residual feedback. Keep equations as simple as possible.\n\n"
    + GRAMMAR_HINT
    + "\n\n"
    "Output a single JSON object with exactly these keys: "
    '"equation" (string, required), "params" (object of name->number, '
    'optional), "rationale" (short string, optional). Do not rename the '
    'keys: always emit "equation", never "eqn"/"ode"/"formula"/"expr". '
    'Example: {"equation": "d2y/dt2 = -9.81", "params": {}, '
    '"rationale": "free fall"}'
)


# Maximum number of trajectory samples shipped to the agent. We downsample
# from 100 to 12 to keep prompt size bounded; statistics carry the rest.
_TRAJECTORY_DOWNSAMPLE_COUNT: int = 12

# Maximum number of prior history entries surfaced. With 8 turns max budget,
# 7 prior turns is the upper bound; we cap at 5 to stay token-efficient.
_HISTORY_CAP: int = 5
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def render_observation_for_prompt(obs: PhysiXObservation) -> str:
    """Serialise *obs* into the compact text block the agent reads each turn.

    Sections, joined by blank lines: metadata (system id, state variables,
    hint, stats), the downsampled trajectory, the recent-turn HISTORY
    (only when any history exists), and the turn-budget footer.

    HISTORY deliberately spells the field as ``equation=`` — never a
    shorthand like ``eqn=``. Mid-strength chat models mimic the field
    name they see in HISTORY when emitting the next turn's JSON, so the
    in-prompt name must match the key the parser reads; drift silently
    yields ``{"eqn": ...}`` replies that score ``r_format=0`` on every
    post-first turn even when the equation itself is perfect.
    """
    blocks = [_render_metadata_block(obs), _render_trajectory_block(obs)]
    if obs.history:
        blocks.append(_render_history_block(obs))
    blocks.append(_render_turn_footer(obs))
    return "\n\n".join(blocks)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def build_prompt(obs: PhysiXObservation) -> list[dict[str, str]]:
    """Assemble the two-message chat prompt (system + user) for the model.

    Returns the standard ``[{"role": ..., "content": ...}, ...]`` list
    that Hugging Face chat-template tokenisers expect.
    """
    system_turn = {"role": "system", "content": SYSTEM_MESSAGE}
    user_turn = {"role": "user", "content": render_observation_for_prompt(obs)}
    return [system_turn, user_turn]
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
#: Field names we accept for the equation payload, in priority order. The
#: canonical key is ``equation`` and the system prompt asks for it
#: explicitly, but mid-strength chat models routinely substitute one of
#: these synonyms — especially after the first turn, where the model has
#: latched onto a different naming convention from its own pretraining
#: corpus. Treating these as missing produced silent ``r_format=0`` runs
#: even when the underlying equation was perfect; matching them
#: explicitly closes that hole without weakening the verifier (the
#: equation grammar itself remains strict).
_EQUATION_KEYS: tuple[str, ...] = (
    "equation",
    "eqn",
    "ode",
    "formula",
    "expression",
    "expr",
)

#: Same idea for the optional rationale payload. We never gate on this so
#: the cost of being permissive is zero.
_RATIONALE_KEYS: tuple[str, ...] = (
    "rationale",
    "reasoning",
    "explanation",
    "thought",
    "thoughts",
)

#: And for the params dict. Some models emit ``parameters`` instead.
_PARAMS_KEYS: tuple[str, ...] = ("params", "parameters", "constants")
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def parse_completion(completion: str) -> PhysiXAction:
    """Turn a raw model completion into a :class:`PhysiXAction`.

    Extracts the first JSON object embedded in the completion (markdown
    fences and scratchpad prose around it are tolerated) and copies its
    fields verbatim into the action. The ``equation`` string is never
    rewritten or normalised here: the grammar lives in
    :mod:`physix.verifier.parser`, and any deviation must surface there
    as a parse error so the env scores ``r_format=0`` and feeds the
    failure back to the agent on the next turn.

    Key aliases (``eqn``/``ode``/``formula``/...) are accepted alongside
    the canonical names as cheap defense-in-depth: rejecting them once
    caused every post-first turn to score ``r_format=0`` because the
    model mirrored a shorthand key it had seen in the HISTORY block.

    When no JSON object can be extracted at all, the returned action has
    an **empty** ``equation`` — so the verifier reports a clean ``Empty
    equation payload`` error — and the raw model text is kept in
    ``rationale`` for the UI / training logs. The raw text is never fed
    to the equation parser as if it were an equation; doing so produced
    misleading errors like ``Equation has no '=' sign: '{'`` that made
    the verifier look broken when the real fault was upstream.
    """
    payload = _extract_json_payload(completion)
    if payload is None:
        # No JSON at all: empty equation triggers the clean parse error
        # downstream while the raw text stays visible in the logs.
        return PhysiXAction(equation="", rationale=completion.strip()[:500])

    fields = _lowercase_keys(payload)
    raw_params = _first_value(fields, _PARAMS_KEYS) or {}
    return PhysiXAction(
        equation=_first_string_value(fields, _EQUATION_KEYS),
        params=_coerce_params(raw_params),
        rationale=_first_string_value(fields, _RATIONALE_KEYS),
    )
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _lowercase_keys(payload: dict[str, Any]) -> dict[str, Any]:
|
| 192 |
+
"""Return ``payload`` with top-level keys lowercased.
|
| 193 |
+
|
| 194 |
+
Some models emit ``"Equation"`` / ``"EQN"``; lowercasing once means
|
| 195 |
+
the lookup tables above stay declarative.
|
| 196 |
+
"""
|
| 197 |
+
return {str(k).lower(): v for k, v in payload.items()}
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _first_value(payload: dict[str, Any], keys: tuple[str, ...]) -> Any:
|
| 201 |
+
for key in keys:
|
| 202 |
+
if key in payload:
|
| 203 |
+
return payload[key]
|
| 204 |
+
return None
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def _first_string_value(payload: dict[str, Any], keys: tuple[str, ...]) -> str:
|
| 208 |
+
value = _first_value(payload, keys)
|
| 209 |
+
if value is None:
|
| 210 |
+
return ""
|
| 211 |
+
return str(value).strip()
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def _render_metadata_block(obs: PhysiXObservation) -> str:
|
| 215 |
+
state_vars = ", ".join(obs.state_variables) or "(none)"
|
| 216 |
+
stats_text = " ".join(f"{k}={v:.3g}" for k, v in obs.stats.items())
|
| 217 |
+
return (
|
| 218 |
+
f"SYSTEM_ID: {obs.system_id or 'unknown'}\n"
|
| 219 |
+
f"STATE_VARIABLES: {state_vars}\n"
|
| 220 |
+
f"HINT: {obs.hint}\n"
|
| 221 |
+
f"STATS: {stats_text}"
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def _render_trajectory_block(obs: PhysiXObservation) -> str:
    """TRAJECTORY section: header plus one indented line per downsampled sample."""
    picked = _downsample(obs.trajectory, _TRAJECTORY_DOWNSAMPLE_COUNT)
    out = [f"TRAJECTORY ({len(picked)} samples downsampled from {len(obs.trajectory)}):"]
    for row in picked:
        # Only variables actually present in the sample are rendered, in
        # the observation's declared state-variable order.
        fields = [f"t={row['t']:.3f}"]
        fields.extend(f"{name}={row[name]:.3f}" for name in obs.state_variables if name in row)
        out.append("  " + " ".join(fields))
    return "\n".join(out)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
#: Order in which reward components are surfaced to the model. Match
#: matters most (it's the headline accuracy signal); format is last
#: because once it stabilises the others dominate the gradient. Stable
#: order also matters for the model's in-context retrieval: a fixed
#: column position is a reliable cue across turns.
_REWARD_COMPONENT_ORDER: tuple[str, ...] = (
    "match",
    "progress",
    "simplicity",
    "format",
)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def _render_history_block(obs: PhysiXObservation) -> str:
    """Render the most recent ``_HISTORY_CAP`` turns with dense rewards.

    Field name is ``equation=`` rather than ``eqn=`` deliberately:
    chat-tuned models tend to mimic the most-recent token spelling when
    emitting their own JSON, so we must use the same key here that the
    parser expects in the model's reply.

    Each turn surfaces the scalar ``reward=`` total plus the same dense
    components the GRPO trainer optimises (``match``/``progress``/
    ``simplicity``/``format``). Showing them in-context lets the model
    attribute its own gains and losses turn-over-turn instead of
    inferring them from residual prose — e.g. it can prioritise grammar
    fixes after seeing ``format=0.0``, or try a structurally different
    equation after ``match=0.62, progress=0.0``.
    """
    rendered = ["HISTORY:"]
    for entry in obs.history[-_HISTORY_CAP:]:
        equation = entry.get("equation", "")
        total = float(entry.get("reward_total", 0.0))
        components = _format_reward_components(entry.get("reward_components"))
        rendered.append(
            f" turn={entry.get('turn')} reward={total:.3f} "
            f"[{components}] equation=`{equation}`"
        )
        mismatch = entry.get("mismatch_summary", "")
        if mismatch:
            rendered.append(f" mismatch: {mismatch}")
    return "\n".join(rendered)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def _format_reward_components(components: Any) -> str:
    """Render ``{match, progress, simplicity, format}`` as a compact line.

    Always emits all four fields in :data:`_REWARD_COMPONENT_ORDER`,
    defaulting to ``0.00`` when absent so the model never has to guess
    why a column is missing. Values use two-decimal formatting
    (``:.2f``), matching the literal ``0.00`` fallback below.

    NOTE(review): an earlier docstring claimed three-decimal formatting
    to match the server's history serialisation — the code has always
    used ``:.2f``; confirm which precision the server actually emits.
    """
    if not isinstance(components, dict):
        # No breakdown at all (e.g. missing/zero-length payload): emit an
        # all-zero row so the column layout stays stable across turns.
        return " ".join(f"{name}=0.00" for name in _REWARD_COMPONENT_ORDER)
    parts: list[str] = []
    for name in _REWARD_COMPONENT_ORDER:
        try:
            value = float(components.get(name, 0.0))
        except (TypeError, ValueError):
            # Non-numeric garbage degrades to zero rather than crashing
            # prompt rendering mid-episode.
            value = 0.0
        parts.append(f"{name}={value:.2f}")
    return " ".join(parts)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _render_turn_footer(obs: PhysiXObservation) -> str:
|
| 306 |
+
total = obs.turn + obs.turn_remaining or DEFAULT_MAX_TURNS
|
| 307 |
+
return (
|
| 308 |
+
f"TURN: {obs.turn + 1} / {total} ({obs.turn_remaining} remaining)\n"
|
| 309 |
+
"Emit the next hypothesis as JSON."
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def _downsample(samples: list[dict[str, float]], target: int) -> list[dict[str, float]]:
|
| 314 |
+
if len(samples) <= target:
|
| 315 |
+
return samples
|
| 316 |
+
step = max(1, len(samples) // target)
|
| 317 |
+
indices = list(range(0, len(samples), step))[:target]
|
| 318 |
+
if indices[-1] != len(samples) - 1:
|
| 319 |
+
indices[-1] = len(samples) - 1
|
| 320 |
+
return [samples[i] for i in indices]
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
# Module-level decoder reused by _extract_json_payload: raw_decode needs a
# decoder instance, and one shared instance avoids per-call construction.
_JSON_DECODER = json.JSONDecoder()
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def _extract_json_payload(text: str) -> dict[str, Any] | None:
    """Return the first ``{...}`` block in ``text`` that parses as a JSON object.

    Uses :meth:`json.JSONDecoder.raw_decode` so that braces appearing inside
    JSON *string* values (e.g. LaTeX like ``"\\frac{d vy}{dt}"``) do not
    confuse the scanner — a regex-based brace matcher would mis-balance
    here and return the whole completion as a malformed equation.
    """
    candidate = _strip_code_fences(text)

    pos = candidate.find("{")
    while pos != -1:
        try:
            decoded, _ = _JSON_DECODER.raw_decode(candidate[pos:])
        except json.JSONDecodeError:
            pass
        else:
            # raw_decode can also yield arrays/scalars; only objects count.
            if isinstance(decoded, dict):
                return decoded
        pos = candidate.find("{", pos + 1)
    return None
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def _strip_code_fences(text: str) -> str:
|
| 349 |
+
"""Remove Markdown code-fence wrappers (```json``` / ```python``` / ```).
|
| 350 |
+
|
| 351 |
+
This is *not* equation rewriting — it strips the outer fence syntax
|
| 352 |
+
only, so the JSON-aware extractor below can find the object payload.
|
| 353 |
+
"""
|
| 354 |
+
text = re.sub(r"```(?:json|python)?\s*", "", text)
|
| 355 |
+
text = text.replace("```", "")
|
| 356 |
+
return text
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def _coerce_params(params_raw: Any) -> dict[str, float]:
|
| 360 |
+
"""Best-effort coercion of a raw params payload into ``dict[str, float]``."""
|
| 361 |
+
if not isinstance(params_raw, dict):
|
| 362 |
+
return {}
|
| 363 |
+
out: dict[str, float] = {}
|
| 364 |
+
for key, value in params_raw.items():
|
| 365 |
+
try:
|
| 366 |
+
out[str(key)] = float(value)
|
| 367 |
+
except (TypeError, ValueError):
|
| 368 |
+
continue
|
| 369 |
+
return out
|
physix-live/physix/training/reward_fns.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""TRL-compatible reward functions for GRPO training.
|
| 2 |
+
|
| 3 |
+
Responsibility: expose a stateless reward function for each independent
|
| 4 |
+
reward signal. Internally each component delegates to a shared
|
| 5 |
+
:class:`Scorer` so a single completion is parsed and simulated exactly
|
| 6 |
+
once per training step regardless of how many reward functions query it.
|
| 7 |
+
|
| 8 |
+
The TRL signature for a reward function is::
|
| 9 |
+
|
| 10 |
+
def reward_func(*, prompts, completions, **kwargs) -> list[float]: ...
|
| 11 |
+
|
| 12 |
+
where ``prompts`` and ``completions`` are batched lists. Extra columns from
|
| 13 |
+
the training dataset arrive as keyword arguments — we expect the columns
|
| 14 |
+
listed in :class:`SystemContext` to be present.
|
| 15 |
+
|
| 16 |
+
Reward set design (anti-hack, RCA from W&B run 5kuqns9x):
|
| 17 |
+
|
| 18 |
+
- ``reward_match`` — raw R² on the trajectory (linear).
|
| 19 |
+
- ``reward_match_dense`` — sqrt(R²); denser gradient at low values.
|
| 20 |
+
- ``reward_correctness`` — binary cliff at R² ≥ 0.70; pushes past plateau.
|
| 21 |
+
- ``reward_simplicity`` — gated on R² ≥ 0.10 (no free reward for trivial
|
| 22 |
+
equations).
|
| 23 |
+
- ``reward_format`` — 1.0 only if the equation parsed *and*
|
| 24 |
+
simulated. No partial credit for parseable
|
| 25 |
+
but uncomputable garbage.
|
| 26 |
+
|
| 27 |
+
The legacy ``reward_progress`` is intentionally absent. In single-turn
|
| 28 |
+
GRPO every dataset row carries ``previous_r_match=0``, which made
|
| 29 |
+
``progress = max(0, match - 0) = match`` for every rollout — a perfect
|
| 30 |
+
duplicate of ``reward_match`` that diluted advantage estimation.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
from __future__ import annotations
|
| 34 |
+
|
| 35 |
+
from collections.abc import Callable, Sequence
|
| 36 |
+
from typing import Any
|
| 37 |
+
|
| 38 |
+
from physix.training.scorer import Scorer, SystemContext
|
| 39 |
+
from physix.verifier.reward import correctness_bonus, match_dense
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
#: TRL reward-function shape: (prompts, completions, **dataset_cols) -> list[float].
RewardFunction = Callable[..., list[float]]


#: Components that read directly from the :class:`RewardBreakdown` produced
#: by :class:`Scorer.score`. ``progress`` is omitted (see module docstring).
_BREAKDOWN_COMPONENTS: tuple[str, ...] = ("match", "simplicity", "format")
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def make_reward_funcs(
    scorer: Scorer | None = None,
) -> dict[str, RewardFunction]:
    """Build a fresh dict of reward functions wired to a shared scorer.

    Each function is named ``reward_<component>`` so TRL's GRPO trainer
    logs them individually to W&B under
    ``train/rewards/reward_<component>/mean``.

    The scorer is shared across all functions; calling ``scorer.reset()``
    between steps avoids unbounded cache growth and ensures each
    completion is parsed + simulated exactly once per step regardless of
    how many reward functions query it.

    Returns a dict whose keys are:

    - ``match`` / ``simplicity`` / ``format`` — direct reads from the
      :class:`RewardBreakdown`. ``simplicity`` is internally gated on
      match ≥ 0.10 and ``format`` on simulation success.
    - ``match_dense`` — ``sqrt(match)`` for denser low-value gradient.
    - ``correctness`` — binary 1.0 above an R² threshold (``0.70``).

    All functions share the scorer cache, so they cost one parse +
    simulate per completion combined, not five. (The reset/hydrate/score
    loop previously existed as three near-identical copies; it now lives
    once in ``_score_batch``.)
    """
    shared = scorer if scorer is not None else Scorer()

    def _score_batch(
        completions: Sequence[str], kwargs: dict[str, Any]
    ) -> list[Any]:
        """Score every completion in the batch once via the shared cache."""
        shared.reset()
        contexts = _hydrate_contexts(len(completions), kwargs)
        return [
            shared.score(completion=completion, context=contexts[i], cache_key=i)
            for i, completion in enumerate(completions)
        ]

    def _make_reader(
        component: str, extract: Callable[[Any], float]
    ) -> RewardFunction:
        """Wrap ``extract(breakdown)`` in a TRL-shaped reward function."""

        def _reward_fn(
            prompts: Sequence[Any],
            completions: Sequence[str],
            **kwargs: Any,
        ) -> list[float]:
            del prompts  # kept for TRL API conformance; unused here.
            return [extract(b) for b in _score_batch(completions, kwargs)]

        _reward_fn.__name__ = f"reward_{component}"
        return _reward_fn

    funcs: dict[str, RewardFunction] = {
        # Bind ``name`` via a default argument: a plain closure would see the
        # comprehension variable's final value for every lambda.
        name: _make_reader(name, lambda b, _n=name: getattr(b, _n))
        for name in _BREAKDOWN_COMPONENTS
    }
    funcs["match_dense"] = _make_reader("match_dense", lambda b: match_dense(b.match))
    funcs["correctness"] = _make_reader(
        "correctness", lambda b: correctness_bonus(b.match)
    )
    return funcs
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def _hydrate_contexts(batch_size: int, kwargs: dict[str, Any]) -> list[SystemContext]:
    """Project per-row kwargs into :class:`SystemContext` records.

    TRL passes dataset columns as kwargs where each value is a list of
    length ``batch_size``. Each row is assembled by picking index ``i``
    from every column (non-list or too-short columns are passed through
    as-is) and handed to :func:`SystemContext.from_row`.
    """
    expected_keys = (
        "system_id",
        "state_variables",
        "parameters",
        "initial_conditions",
        "timestamps",
        "observed",
        "previous_r_match",
    )

    def _cell(column: Any, index: int) -> Any:
        # Per-row lists are indexed; anything else broadcasts unchanged.
        if isinstance(column, list) and len(column) > index:
            return column[index]
        return column

    return [
        SystemContext.from_row({key: _cell(kwargs.get(key), i) for key in expected_keys})
        for i in range(batch_size)
    ]
|
physix-live/physix/training/scorer.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Single-completion scorer used by both training and evaluation.
|
| 2 |
+
|
| 3 |
+
Responsibility: given the agent's raw completion text plus the system context
|
| 4 |
+
(state variables, parameters, IC, observed trajectory), compute the same
|
| 5 |
+
4-component :class:`RewardBreakdown` the env produces during a normal
|
| 6 |
+
``step()`` call. This is the bridge between TRL's "reward function over a
|
| 7 |
+
batch of completions" interface and our env's verifier pipeline.
|
| 8 |
+
|
| 9 |
+
Caching: a :class:`Scorer` instance memoises by ``(dataset_index, completion)``
|
| 10 |
+
so per-component reward functions can each ask the scorer for the *same*
|
| 11 |
+
completion without re-running parse + simulate four times.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 18 |
+
|
| 19 |
+
from physix.models import RewardBreakdown
|
| 20 |
+
from physix.training.prompt import parse_completion
|
| 21 |
+
from physix.verifier import (
|
| 22 |
+
ParseError,
|
| 23 |
+
SimulationError,
|
| 24 |
+
compute_match,
|
| 25 |
+
compute_reward,
|
| 26 |
+
parse_equation,
|
| 27 |
+
simulate_hypothesis,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _drop_none(mapping: object) -> dict[str, float]:
|
| 32 |
+
"""Return ``{k: float(v)}`` for keys whose value is not ``None``.
|
| 33 |
+
|
| 34 |
+
HuggingFace ``Dataset`` columns are schema-unified across rows, so a
|
| 35 |
+
row that lacks a key gets ``None`` for it. We drop those at ingest
|
| 36 |
+
so per-row dicts only contain the keys that actually apply to the
|
| 37 |
+
row's system.
|
| 38 |
+
"""
|
| 39 |
+
if not isinstance(mapping, dict):
|
| 40 |
+
return {}
|
| 41 |
+
return {str(k): float(v) for k, v in mapping.items() if v is not None}
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class SystemContext(BaseModel):
    """Per-prompt context the scorer needs to evaluate completions.

    These fields correspond 1:1 with dataset columns at training time.
    """

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    system_id: str
    state_variables: tuple[str, ...]
    parameters: dict[str, float] = Field(default_factory=dict)
    initial_conditions: dict[str, float] = Field(default_factory=dict)
    timestamps: np.ndarray
    observed: dict[str, np.ndarray] = Field(default_factory=dict)
    previous_r_match: float = 0.0

    @classmethod
    def from_row(cls, row: dict[str, object]) -> "SystemContext":
        """Hydrate from a HuggingFace dataset row.

        Two non-obvious transforms happen here:

        1. **Lists -> arrays.** The dataset stores trajectories as plain
           Python lists for JSON serialisability; we lift them back into
           ``np.ndarray`` so the verifier's NumPy code path works.
        2. **Strip ``None`` fillers.** ``Dataset.from_list`` schema-unifies
           rows across all systems: a ``free_fall`` row ends up with
           ``parameters={'g': 9.81, 'mass': 3.4, 'k': None, 'L': None, ...}``
           because *other* systems define those keys. Left as-is, ``None``
           values would (a) inflate the verifier's allowed-symbol set, so
           the model could "validly" reference parameters that don't
           exist for this system, and (b) crash the simulator on
           substitution. We drop them at ingest, restoring per-system
           parameter sets.

        Fix over the previous version: schema unification can leave scalar
        columns present-but-``None`` (not merely absent), so ``dict.get``
        defaults never fired and ``float(None)`` raised ``TypeError`` for
        ``previous_r_match``; ``tuple(None)`` / ``np.asarray(None)`` had the
        analogous problem for ``state_variables`` / ``timestamps``. ``None``
        is now treated the same as missing for all three.
        """
        state_variables = tuple(row.get("state_variables") or ())
        observed: dict[str, np.ndarray] = {}
        observed_raw = row.get("observed", {})
        if isinstance(observed_raw, dict):
            for key, values in observed_raw.items():
                # Ignore schema-unification keys for other systems' variables.
                if key not in state_variables:
                    continue
                observed[str(key)] = np.asarray(values, dtype=float)

        timestamps_raw = row.get("timestamps")
        if timestamps_raw is None:
            timestamps_raw = []

        previous_raw = row.get("previous_r_match")
        if previous_raw is None:
            previous_raw = row.get("previous_total")
        previous = float(previous_raw) if previous_raw is not None else 0.0

        return cls(
            system_id=str(row.get("system_id", "")),
            state_variables=state_variables,
            parameters=_drop_none(row.get("parameters", {})),
            initial_conditions=_drop_none(row.get("initial_conditions", {})),
            timestamps=np.asarray(timestamps_raw, dtype=float),
            observed=observed,
            previous_r_match=previous,
        )
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class Scorer:
    """Completion scorer with optional per-batch memoisation.

    The only state is the cache, which callers clear via :meth:`reset`
    once per training step.
    """

    def __init__(self) -> None:
        self._cache: dict[int, RewardBreakdown] = {}

    def reset(self) -> None:
        """Clear the memoisation cache (call once per training step)."""
        self._cache.clear()

    def score(
        self,
        completion: str,
        context: SystemContext,
        *,
        cache_key: int | None = None,
    ) -> RewardBreakdown:
        """Score one completion. Optionally memoise by ``cache_key``."""
        if cache_key is not None:
            cached = self._cache.get(cache_key)
            if cached is not None:
                return cached

        result = self._score_uncached(completion, context)
        if cache_key is not None:
            self._cache[cache_key] = result
        return result

    # --------------------------------------------------------------- internals

    def _score_uncached(
        self,
        completion: str,
        context: SystemContext,
    ) -> RewardBreakdown:
        """Run the full parse -> simulate -> match -> reward pipeline once."""
        action = parse_completion(completion)
        # Symbols the parser may accept: whatever the agent declared in its
        # ``params`` payload plus the system's own parameter names.
        allowed_params = frozenset(action.params or {}) | frozenset(context.parameters)

        try:
            parsed = parse_equation(
                action.equation,
                state_variables=context.state_variables,
                parameter_names=allowed_params,
            )
        except ParseError:
            return compute_reward(
                parse_succeeded=False,
                r_match=0.0,
                operator_count=0,
                previous_r_match=context.previous_r_match,
            )

        # The agent's params take precedence over the system's; the agent may
        # reuse system parameter names like ``g`` with its own values, but if
        # it omits them we fall back to ground-truth values (fine — the
        # agent's structural correctness is what we primarily score).
        merged = {**context.parameters, **(action.params or {})}

        try:
            predicted = simulate_hypothesis(
                parsed,
                state_variables=context.state_variables,
                parameters=merged,
                initial_conditions=context.initial_conditions,
                timestamps=context.timestamps,
            )
        except SimulationError:
            # Parsed but unsimulatable (NaN/inf, stiff blow-up, dimension
            # mismatch, ...). ``simulation_succeeded=False`` makes
            # ``compute_reward`` zero *every* component including ``format``
            # — no credit for "looks valid but doesn't work".
            return compute_reward(
                parse_succeeded=True,
                simulation_succeeded=False,
                r_match=0.0,
                operator_count=parsed.operator_count,
                previous_r_match=context.previous_r_match,
            )

        r_match = compute_match(
            observed=context.observed,
            predicted=predicted,
            state_variables=context.state_variables,
        )
        return compute_reward(
            parse_succeeded=True,
            simulation_succeeded=True,
            r_match=r_match,
            operator_count=parsed.operator_count,
            previous_r_match=context.previous_r_match,
        )
|
physix-live/physix/training/sft.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""SFT warm-start before GRPO training.
|
| 2 |
+
|
| 3 |
+
Trains Qwen2.5-1.5B-Instruct for 2 epochs on supervised (prompt, completion)
|
| 4 |
+
pairs where the completion is the ground-truth equation in the action JSON
|
| 5 |
+
format the env expects. This is the essential bootstrap step: without it a
|
| 6 |
+
cold 1.5B model outputs LaTeX / incoherent text on ~80% of turns, yielding
|
| 7 |
+
near-zero GRPO advantages and a flat loss curve that wastes GPU credits.
|
| 8 |
+
|
| 9 |
+
After SFT the model:
|
| 10 |
+
- Emits valid JSON with ``equation``, ``params``, ``rationale`` on >90% turns.
|
| 11 |
+
- Writes equations in the ASCII grammar (``d2y/dt2 = ...``), not LaTeX.
|
| 12 |
+
- Knows the per-system equation family (gravity, drag, pendulum, spring).
|
| 13 |
+
|
| 14 |
+
Then GRPO refines physics accuracy via the verifiable R² reward.
|
| 15 |
+
|
| 16 |
+
Run::
|
| 17 |
+
|
| 18 |
+
python -m physix.training.sft \
|
| 19 |
+
--model Qwen/Qwen2.5-1.5B-Instruct \
|
| 20 |
+
--output-dir runs/physix-1.5b-sft \
|
| 21 |
+
--epochs 2 \
|
| 22 |
+
--instances-per-system 32
|
| 23 |
+
|
| 24 |
+
Typical runtime: 5-8 min on an A10G, 3-4 min on an A100.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
from __future__ import annotations
|
| 28 |
+
|
| 29 |
+
import argparse
|
| 30 |
+
import json
|
| 31 |
+
import logging
|
| 32 |
+
import os
|
| 33 |
+
from pathlib import Path
|
| 34 |
+
|
| 35 |
+
import numpy as np
|
| 36 |
+
from datasets import Dataset
|
| 37 |
+
|
| 38 |
+
from physix.systems import (
|
| 39 |
+
SUPPORTED_SYSTEMS,
|
| 40 |
+
SYSTEM_REGISTRY,
|
| 41 |
+
get_system,
|
| 42 |
+
)
|
| 43 |
+
from physix.systems.base import PhysicalSystem, TrajectoryData
|
| 44 |
+
from physix.training.prompt import build_prompt
|
| 45 |
+
from physix.models import DEFAULT_MAX_TURNS, PhysiXObservation
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# Module logger; handlers/levels are set up by the training entry point.
_log = logging.getLogger(__name__)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# ─── Dataset ──────────────────────────────────────────────────────────────────
|
| 52 |
+
|
| 53 |
+
def _gt_completion(system: PhysicalSystem) -> str:
|
| 54 |
+
"""Build the ground-truth completion JSON for one system.
|
| 55 |
+
|
| 56 |
+
We include the system's sampled parameters so the model learns that the
|
| 57 |
+
``params`` field must contain the symbols it references in the equation.
|
| 58 |
+
The SFT target is the *exact* JSON string the env's verifier accepts;
|
| 59 |
+
GRPO will later teach the model to refine parameter values per trajectory.
|
| 60 |
+
"""
|
| 61 |
+
import re as _re
|
| 62 |
+
eq = system.ground_truth_equation()
|
| 63 |
+
# Extract all identifier tokens that appear in the equation, then keep
|
| 64 |
+
# only those that are declared as system parameters. We use a proper
|
| 65 |
+
# identifier regex (not split-on-whitespace) so symbols inside function
|
| 66 |
+
# calls like sin(theta) and fractions like -(g/L) are caught.
|
| 67 |
+
reserved = set(system.state_variables) | {"dt", "d", "t", "sin", "cos",
|
| 68 |
+
"tan", "exp", "log", "sqrt", "abs"}
|
| 69 |
+
eq_tokens = set(_re.findall(r'\b([A-Za-z_][A-Za-z0-9_]*)\b', eq))
|
| 70 |
+
relevant_keys = eq_tokens & set(system.parameters) - reserved
|
| 71 |
+
relevant = {k: round(system.parameters[k], 4) for k in sorted(relevant_keys)}
|
| 72 |
+
return json.dumps({
|
| 73 |
+
"equation": eq,
|
| 74 |
+
"params": relevant,
|
| 75 |
+
"rationale": (
|
| 76 |
+
f"Ground-truth equation for {system.system_id.replace('_', ' ')}."
|
| 77 |
+
),
|
| 78 |
+
})
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def build_sft_dataset(
    system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS,
    instances_per_system: int = 32,
    seed: int = 0,
) -> Dataset:
    """Generate supervised (prompt, completion) rows across the given systems.

    Raises ``ValueError`` when ``system_ids`` is empty or names a system
    that is not registered.
    """
    if not system_ids:
        raise ValueError("system_ids must be non-empty.")
    unknown = [sid for sid in system_ids if sid not in SYSTEM_REGISTRY]
    if unknown:
        raise ValueError(
            f"Unknown system_ids in build_sft_dataset: {unknown!r}. "
            f"Registered: {sorted(SYSTEM_REGISTRY)!r}."
        )

    rng = np.random.default_rng(seed)
    rows: list[dict] = []
    for sid in system_ids:
        system = get_system(sid)
        for _ in range(instances_per_system):
            trajectory = system.simulate(rng)
            observation = _build_obs(system, trajectory)
            rows.append(
                {
                    "prompt": build_prompt(observation),
                    "completion": _gt_completion(system),
                }
            )

    _log.info(
        "Built SFT dataset: %d rows across %d systems (%s)",
        len(rows),
        len(system_ids),
        ", ".join(system_ids),
    )
    return Dataset.from_list(rows)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _build_obs(system: PhysicalSystem, trajectory: TrajectoryData) -> PhysiXObservation:
    """Wrap a freshly simulated trajectory in a turn-0 observation.

    NOTE(review): presumably mirrors the observation the live env emits on
    reset so SFT prompts match the GRPO-time prompt distribution — confirm
    against the server environment's reset path.
    """
    return PhysiXObservation(
        done=False,
        reward=None,  # no action has been scored yet at turn 0
        trajectory=trajectory.to_observation_samples(),
        state_variables=list(system.state_variables),
        hint=system.hint(system.parameters),
        history=[],  # fresh episode: nothing to show in the HISTORY block
        mismatch_summary="",
        turn=0,
        turn_remaining=DEFAULT_MAX_TURNS,
        system_id=system.system_id,
        stats=trajectory.stats(),
        reward_breakdown={},
    )
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
# ─── Training ─────────────────────────────────────────────────────────────────
|
| 134 |
+
|
| 135 |
+
def train_sft(
    model_name: str = "Qwen/Qwen2.5-1.5B-Instruct",
    output_dir: str = "runs/physix-1.5b-sft",
    epochs: int = 2,
    max_seq_length: int = 2048,
    lora_r: int = 16,
    lora_alpha: int = 32,
    per_device_batch_size: int = 2,
    gradient_accumulation_steps: int = 4,
    learning_rate: float = 2e-5,
    instances_per_system: int = 32,
    seed: int = 0,
    wandb_run_name: str | None = None,
) -> None:
    """LoRA SFT warm-start for the PhysiX RLVR pipeline.

    Loads ``model_name`` in 4-bit via Unsloth, attaches a LoRA adapter of
    rank ``lora_r``, fine-tunes on ``build_sft_dataset(...)`` rows rendered
    through the tokenizer's chat template, then saves a merged 16-bit
    checkpoint to ``<output_dir>/merged`` for the downstream GRPO stage.
    Metrics are reported to Weights & Biases; a fresh W&B run is always
    created (see the env-var scrub below).
    """
    _configure_logging()

    # Heavy imports: only available in [train] env.
    import wandb
    from unsloth import FastLanguageModel, PatchFastRL  # noqa: F401
    from trl import SFTTrainer, SFTConfig

    # Force a fresh W&B run for SFT regardless of any inherited WANDB_RUN_ID
    # / WANDB_RESUME env vars (those are intended for the GRPO stage). If we
    # let wandb.init() try to resume a foreign run id it will block for ~90s
    # fetching that run's history before giving up.
    for stale in ("WANDB_RUN_ID", "WANDB_RESUME"):
        os.environ.pop(stale, None)

    wandb.init(
        project=os.environ.get("WANDB_PROJECT", "physix-live"),
        name=wandb_run_name or f"physix-sft-{epochs}ep",
        config={
            "stage": "sft",
            "model_name": model_name,
            "epochs": epochs,
            "lora_r": lora_r,
            "lora_alpha": lora_alpha,
            "learning_rate": learning_rate,
            "per_device_batch_size": per_device_batch_size,
            "gradient_accumulation_steps": gradient_accumulation_steps,
            "instances_per_system": instances_per_system,
            "seed": seed,
        },
        tags=["sft", "physix", model_name.split("/")[-1]],
    )

    _log.info("Loading model %s (4-bit, LoRA-%d)", model_name, lora_r)
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=model_name,
        max_seq_length=max_seq_length,
        load_in_4bit=True,
        dtype=None,
    )
    model = FastLanguageModel.get_peft_model(
        model,
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                        "gate_proj", "up_proj", "down_proj"],
        bias="none",
        use_gradient_checkpointing="unsloth",
        random_state=seed,
    )

    dataset = build_sft_dataset(instances_per_system=instances_per_system, seed=seed)

    def _format_row(row: dict) -> dict:
        """Combine prompt + completion into a single training string."""
        messages = row["prompt"] + [{"role": "assistant", "content": row["completion"]}]
        text = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=False
        )
        return {"text": text}

    formatted = dataset.map(_format_row, remove_columns=["prompt", "completion"])
    _log.info("SFT dataset ready: %d rows", len(formatted))

    # torch is imported lazily, like the other heavy [train]-only deps above.
    import torch
    sft_config = SFTConfig(
        output_dir=output_dir,
        num_train_epochs=epochs,
        per_device_train_batch_size=per_device_batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        learning_rate=learning_rate,
        max_seq_length=max_seq_length,
        dataset_text_field="text",
        packing=True,
        logging_steps=1,
        save_strategy="epoch",
        report_to=["wandb"],
        seed=seed,
        # Prefer bf16 when the GPU supports it, else fp16; CPU gets neither.
        bf16=torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
        fp16=not torch.cuda.is_bf16_supported() if torch.cuda.is_available() else False,
    )

    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        args=sft_config,
        train_dataset=formatted,
    )

    _log.info("Starting SFT for %d epochs on %d examples", epochs, len(formatted))
    trainer.train()

    # We save as merged_16bit (full model + config + tokenizer) rather than
    # "lora" (adapter weights only). GRPO's downstream
    # ``FastLanguageModel.from_pretrained(sft_checkpoint)`` needs a complete
    # model directory — config.json + tokenizer + weights — to load. A bare
    # adapter shard makes Unsloth raise "No config file found". The merged
    # checkpoint is ~3 GB (1.5B params × 2 bytes) which is fine on /tmp.
    out_path = Path(output_dir) / "merged"
    out_path.mkdir(parents=True, exist_ok=True)
    model.save_pretrained_merged(
        save_directory=str(out_path),
        tokenizer=tokenizer,
        save_method="merged_16bit",
    )
    _log.info("SFT model (merged 16-bit) saved → %s", out_path)
    wandb.finish()
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
# ─── CLI ──────────────────────────────────────────────────────────────────────
|
| 258 |
+
|
| 259 |
+
def _configure_logging() -> None:
    """Initialise root logging; level comes from PHYSIX_LOG_LEVEL (default INFO)."""
    log_level = os.environ.get("PHYSIX_LOG_LEVEL", "INFO")
    logging.basicConfig(
        level=log_level,
        format="[%(asctime)s] %(levelname)s %(name)s | %(message)s",
    )
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def main() -> None:
    """Command-line entry point: parse CLI flags and launch SFT training."""
    parser = argparse.ArgumentParser(description="SFT warm-start for PhysiX RLVR.")
    # Flag table keeps the declarations compact; values mirror train_sft's
    # keyword arguments (CLI --lora-r default intentionally stands on its own).
    cli_options: list[tuple[str, dict]] = [
        ("--model", {"default": "Qwen/Qwen2.5-1.5B-Instruct"}),
        ("--output-dir", {"default": "runs/physix-1.5b-sft"}),
        ("--epochs", {"type": int, "default": 2}),
        ("--instances-per-system", {"type": int, "default": 32}),
        ("--lora-r", {"type": int, "default": 32}),
        ("--learning-rate", {"type": float, "default": 2e-5}),
        ("--seed", {"type": int, "default": 0}),
        ("--wandb-run-name", {
            "default": None,
            "help": "Override W&B run name. Defaults to physix-sft-{epochs}ep.",
        }),
    ]
    for flag, options in cli_options:
        parser.add_argument(flag, **options)
    args = parser.parse_args()

    os.environ.setdefault("WANDB_PROJECT", "physix-live")
    train_sft(
        model_name=args.model,
        output_dir=args.output_dir,
        epochs=args.epochs,
        lora_r=args.lora_r,
        learning_rate=args.learning_rate,
        instances_per_system=args.instances_per_system,
        seed=args.seed,
        wandb_run_name=args.wandb_run_name,
    )
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# Script entry point: run the SFT CLI when executed directly.
if __name__ == "__main__":
    main()
|
physix-live/physix/verifier/__init__.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Verifier layer: parse hypothesis, simulate, score, summarise mismatch.
|
| 2 |
+
|
| 3 |
+
Public API:
|
| 4 |
+
|
| 5 |
+
- :func:`parse_equation` (``parser``): convert a SymPy-grammar string into a
|
| 6 |
+
callable ODE right-hand-side, validated against a strict whitelist.
|
| 7 |
+
- :func:`simulate_hypothesis` (``simulator``): run the parsed RHS forward in
|
| 8 |
+
time via ``scipy.integrate.odeint``.
|
| 9 |
+
- :func:`compute_match` (``metrics``): R-squared between observed and
|
| 10 |
+
predicted trajectories.
|
| 11 |
+
- :func:`summarize_mismatch` (``mismatch``): generate a one-sentence English
|
| 12 |
+
description of where prediction diverges from observation.
|
| 13 |
+
- :func:`compute_reward` (``reward``): combine all components into a
|
| 14 |
+
:class:`RewardBreakdown`.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
from physix.verifier.metrics import compute_match, residual_summary
|
| 18 |
+
from physix.verifier.mismatch import summarize_mismatch
|
| 19 |
+
from physix.verifier.parser import ParseError, ParsedEquation, parse_equation
|
| 20 |
+
from physix.verifier.reward import compute_reward
|
| 21 |
+
from physix.verifier.simulator import SimulationError, simulate_hypothesis
|
| 22 |
+
|
| 23 |
+
# Explicit public API of the verifier package; keep in sync with the
# re-exports imported above.
__all__ = [
    "compute_match",
    "residual_summary",
    "summarize_mismatch",
    "ParseError",
    "ParsedEquation",
    "parse_equation",
    "compute_reward",
    "SimulationError",
    "simulate_hypothesis",
]
|
physix-live/physix/verifier/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (1.49 kB). View file
|
|
|
physix-live/physix/verifier/__pycache__/metrics.cpython-311.pyc
ADDED
|
Binary file (5.43 kB). View file
|
|
|
physix-live/physix/verifier/__pycache__/mismatch.cpython-311.pyc
ADDED
|
Binary file (6.56 kB). View file
|
|
|
physix-live/physix/verifier/__pycache__/parser.cpython-311.pyc
ADDED
|
Binary file (12.4 kB). View file
|
|
|
physix-live/physix/verifier/__pycache__/reward.cpython-311.pyc
ADDED
|
Binary file (3.87 kB). View file
|
|
|
physix-live/physix/verifier/__pycache__/simulator.cpython-311.pyc
ADDED
|
Binary file (10.4 kB). View file
|
|
|
physix-live/physix/verifier/metrics.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Numerical metrics over (observed, predicted) trajectory pairs.
|
| 2 |
+
|
| 3 |
+
Responsibility: compute scalar fit quality (R-squared), per-variable
|
| 4 |
+
residuals, and lightweight diagnostic statistics. Does no parsing, no
|
| 5 |
+
simulation, no English-text generation.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from collections.abc import Iterable
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ResidualSummary(BaseModel):
    """Diagnostic statistics derived from per-variable residuals.

    Consumed exclusively by :mod:`physix.verifier.mismatch` to render the
    English residual summary surfaced to the agent.
    """

    # Immutable once built: summaries are pure diagnostic snapshots.
    model_config = ConfigDict(frozen=True)

    # Largest |predicted - observed| observed for each state variable.
    per_variable_max_abs_residual: dict[str, float] = Field(default_factory=dict)
    # Timestamp at which that largest residual occurred.
    per_variable_t_of_max_residual: dict[str, float] = Field(default_factory=dict)
    # Mean |residual| over the last 25% of the trajectory (drift signal).
    per_variable_late_residual_mean: dict[str, float] = Field(default_factory=dict)
    # Average per-variable R-squared, clipped to [0, 1].
    overall_r2: float = 0.0
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def compute_match(
    observed: dict[str, np.ndarray],
    predicted: dict[str, np.ndarray],
    state_variables: Iterable[str],
) -> float:
    """Compute the per-step R-squared used as the primary reward signal.

    Averages the per-variable R-squared scores and clips the result to
    ``[0, 1]``, so a model that nails a subset of variables still earns
    partial credit even when the remaining ones diverge.
    """
    scores: list[float] = []
    for var in state_variables:
        if var in observed and var in predicted:
            scores.append(_r_squared(observed[var], predicted[var]))
    if not scores:
        return 0.0
    return _clip01(float(np.mean(scores)))
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def residual_summary(
    timestamps: np.ndarray,
    observed: dict[str, np.ndarray],
    predicted: dict[str, np.ndarray],
    state_variables: Iterable[str],
) -> ResidualSummary:
    """Build a structured residual summary used downstream by mismatch.py."""
    max_abs: dict[str, float] = {}
    t_at_max: dict[str, float] = {}
    late_mean: dict[str, float] = {}
    r2s: list[float] = []

    # Index where the "late window" (final quarter of the trajectory) starts;
    # the mismatch summariser uses the late-window mean to detect drift /
    # plateau-mismatch.
    late_start = int(0.75 * len(timestamps))

    for var in state_variables:
        if var not in observed or var not in predicted:
            continue
        obs_series = observed[var]
        pred_series = predicted[var]
        diff = pred_series - obs_series

        r2s.append(_r_squared(obs_series, pred_series))

        peak_idx = int(np.argmax(np.abs(diff)))
        max_abs[var] = float(np.abs(diff[peak_idx]))
        t_at_max[var] = float(timestamps[peak_idx])
        late_mean[var] = float(np.mean(np.abs(diff[late_start:])))

    overall = float(np.mean(r2s)) if r2s else 0.0

    return ResidualSummary(
        per_variable_max_abs_residual=max_abs,
        per_variable_t_of_max_residual=t_at_max,
        per_variable_late_residual_mean=late_mean,
        overall_r2=_clip01(overall),
    )
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _r_squared(observed: np.ndarray, predicted: np.ndarray) -> float:
    """Coefficient of determination with a zero floor.

    Returns 0.0 for a shape mismatch, for a constant (degenerate) observed
    series, and whenever the model fits worse than the observed mean.
    """
    if observed.shape != predicted.shape:
        return 0.0
    centred = observed - float(np.mean(observed))
    ss_tot = float(np.sum(centred ** 2))
    # A constant series has zero total variance: R² is undefined, score 0.
    if ss_tot <= 0.0:
        return 0.0
    ss_res = float(np.sum((observed - predicted) ** 2))
    return _clip01(1.0 - ss_res / ss_tot)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _clip01(value: float) -> float:
|
| 110 |
+
if value < 0.0:
|
| 111 |
+
return 0.0
|
| 112 |
+
if value > 1.0:
|
| 113 |
+
return 1.0
|
| 114 |
+
return value
|
physix-live/physix/verifier/mismatch.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generate a one-sentence English summary of where prediction disagrees
|
| 2 |
+
with observation.
|
| 3 |
+
|
| 4 |
+
Responsibility: convert a :class:`ResidualSummary` plus the two trajectories
|
| 5 |
+
into a deterministic English string the agent can use as feedback. No LLM
|
| 6 |
+
involved; this is templated text driven by simple rules over the numerical
|
| 7 |
+
residuals.
|
| 8 |
+
|
| 9 |
+
The output is the only place in the env where structured numerical state is
|
| 10 |
+
translated into natural language for the agent. We invest in this carefully
|
| 11 |
+
because a 1.5B model reasons better over short English sentences than over
|
| 12 |
+
100-row residual tables.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
from collections.abc import Iterable
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
|
| 21 |
+
from physix.verifier.metrics import ResidualSummary
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def summarize_mismatch(
    observed: dict[str, np.ndarray],
    predicted: dict[str, np.ndarray],
    state_variables: Iterable[str],
    timestamps: np.ndarray,
    summary: ResidualSummary,
) -> str:
    """Return a one-sentence English description of the residual.

    Short-circuits to a "close agreement" sentence when the overall R²
    is at least 0.93. Otherwise the variable with the largest late-window
    residual mean is selected, its residual is classified into a coarse
    pattern (late divergence, early offset, phase/amplitude error, or
    uniformly small), and one templated sentence is rendered for it.

    Returns ``""`` if no residuals could be computed.
    """
    if summary.overall_r2 >= 0.93:
        return "Predicted and observed trajectories agree closely."

    focus = _pick_dominant_variable(summary, state_variables)
    if focus is None:
        return ""

    residual = predicted[focus] - observed[focus]
    pattern = _classify_pattern(residual, timestamps)
    return _render_sentence(focus, pattern, summary, timestamps)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _pick_dominant_variable(
|
| 60 |
+
summary: ResidualSummary,
|
| 61 |
+
state_variables: Iterable[str],
|
| 62 |
+
) -> str | None:
|
| 63 |
+
"""Pick the variable with the largest late-window residual mean."""
|
| 64 |
+
candidates: list[tuple[str, float]] = []
|
| 65 |
+
for var in state_variables:
|
| 66 |
+
if var in summary.per_variable_late_residual_mean:
|
| 67 |
+
candidates.append((var, summary.per_variable_late_residual_mean[var]))
|
| 68 |
+
if not candidates:
|
| 69 |
+
return None
|
| 70 |
+
return max(candidates, key=lambda kv: kv[1])[0]
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _classify_pattern(residual: np.ndarray, timestamps: np.ndarray) -> str:
|
| 74 |
+
"""Classify the dominant pattern of the residual into one of:
|
| 75 |
+
|
| 76 |
+
- ``"diverges_late"``: residual magnitude grows monotonically.
|
| 77 |
+
- ``"early_offset"``: large residual near t=0 then shrinks.
|
| 78 |
+
- ``"phase_or_amplitude"``: residual oscillates around zero with non-trivial amplitude.
|
| 79 |
+
- ``"uniform_small"``: residual is small everywhere.
|
| 80 |
+
"""
|
| 81 |
+
n = len(residual)
|
| 82 |
+
if n == 0:
|
| 83 |
+
return "uniform_small"
|
| 84 |
+
|
| 85 |
+
early_window = residual[: max(1, n // 4)]
|
| 86 |
+
late_window = residual[3 * n // 4 :]
|
| 87 |
+
abs_residual = np.abs(residual)
|
| 88 |
+
|
| 89 |
+
early_mag = float(np.mean(np.abs(early_window)))
|
| 90 |
+
late_mag = float(np.mean(np.abs(late_window)))
|
| 91 |
+
overall_mag = float(np.mean(abs_residual)) or 1e-9
|
| 92 |
+
|
| 93 |
+
# Sign-flip count: rough proxy for oscillation around zero.
|
| 94 |
+
sign_flips = int(np.sum(np.diff(np.sign(residual)) != 0))
|
| 95 |
+
|
| 96 |
+
if late_mag > 2.0 * early_mag and late_mag > 0.05 * float(np.ptp(residual) + 1e-9):
|
| 97 |
+
return "diverges_late"
|
| 98 |
+
if early_mag > 2.0 * late_mag:
|
| 99 |
+
return "early_offset"
|
| 100 |
+
if sign_flips > n // 5 and overall_mag > 0.05 * float(np.ptp(residual) + 1e-9):
|
| 101 |
+
return "phase_or_amplitude"
|
| 102 |
+
return "uniform_small"
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _render_sentence(
|
| 106 |
+
variable: str,
|
| 107 |
+
pattern: str,
|
| 108 |
+
summary: ResidualSummary,
|
| 109 |
+
timestamps: np.ndarray,
|
| 110 |
+
) -> str:
|
| 111 |
+
"""Render the chosen pattern into a single English sentence."""
|
| 112 |
+
t_max = summary.per_variable_t_of_max_residual.get(variable, 0.0)
|
| 113 |
+
max_abs = summary.per_variable_max_abs_residual.get(variable, 0.0)
|
| 114 |
+
|
| 115 |
+
if pattern == "diverges_late":
|
| 116 |
+
return (
|
| 117 |
+
f"Predicted {variable!s} diverges from observed past t={t_max:.1f}s "
|
| 118 |
+
f"(peak residual {max_abs:.2f}); the late-time behaviour is "
|
| 119 |
+
"structurally wrong (consider a missing damping, drag, or "
|
| 120 |
+
"saturation term)."
|
| 121 |
+
)
|
| 122 |
+
if pattern == "early_offset":
|
| 123 |
+
return (
|
| 124 |
+
f"Predicted {variable!s} is offset near t=0 (peak residual "
|
| 125 |
+
f"{max_abs:.2f} at t={t_max:.1f}s); the dynamics align later, "
|
| 126 |
+
"suggesting an initial-condition or constant-term mismatch."
|
| 127 |
+
)
|
| 128 |
+
if pattern == "phase_or_amplitude":
|
| 129 |
+
return (
|
| 130 |
+
f"Predicted {variable!s} oscillates around the observed but is "
|
| 131 |
+
f"out of phase or amplitude (peak residual {max_abs:.2f}); "
|
| 132 |
+
"consider tuning the natural frequency or adding light damping."
|
| 133 |
+
)
|
| 134 |
+
return (
|
| 135 |
+
f"Predicted {variable!s} matches observed broadly but residual is "
|
| 136 |
+
f"non-trivial (peak {max_abs:.2f} at t={t_max:.1f}s); fine-tune the "
|
| 137 |
+
"parameters."
|
| 138 |
+
)
|
physix-live/physix/verifier/parser.py
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Parser for agent-emitted equations of motion.
|
| 2 |
+
|
| 3 |
+
RHS is parsed via Python's ``ast`` module, then walked by a whitelist visitor
|
| 4 |
+
that only permits Constant / Name / UnaryOp (+/-) / BinOp (+ - * / **) /
|
| 5 |
+
Call (bare allowed-function name, no kwargs). Anything else — Attribute,
|
| 6 |
+
Subscript, Lambda, IfExp, keyword args, etc. — raises ParseError by
|
| 7 |
+
construction. We never call sympify on raw text, so there is no eval stage
|
| 8 |
+
that can crash the trainer with an AttributeError.
|
| 9 |
+
|
| 10 |
+
Pre-transforms before AST parse:
|
| 11 |
+
- ``^`` → ``**`` (physics power notation)
|
| 12 |
+
- ``dx/dt`` / bare ``dx`` → ``vx`` when the system pairs x with vx
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
import ast
|
| 18 |
+
import re
|
| 19 |
+
|
| 20 |
+
import sympy as sp
|
| 21 |
+
from pydantic import BaseModel, ConfigDict
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class ParseError(ValueError):
    """Raised when the agent's text payload violates the equation grammar.

    The parser guarantees this is the only exception type that escapes
    :func:`parse_equation`; callers convert it to a zero format reward.
    """
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Whitelist of callable names the agent may use on an equation RHS, mapped to
# their SymPy implementations. Both 'abs' and 'Abs' alias sp.Abs so either
# spelling parses.
ALLOWED_FUNCTIONS: dict[str, sp.Function] = {
    "sin": sp.sin,
    "cos": sp.cos,
    "tan": sp.tan,
    "exp": sp.exp,
    "log": sp.log,
    "sqrt": sp.sqrt,
    "abs": sp.Abs,
    "Abs": sp.Abs,
}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _build_grammar_hint() -> str:
    """Build the static English description of the equation grammar.

    The lowercase set-comprehension collapses case aliases in
    ALLOWED_FUNCTIONS (e.g. 'abs'/'Abs') into one listed name.
    """
    funcs = sorted({name.lower() for name in ALLOWED_FUNCTIONS})
    return (
        "The 'equation' field is an infix ODE in plain ASCII. "
        "LHS form: 'dN<var>/dtN' where N is 1 or 2 (omit N for first "
        "order, e.g. 'dy/dt' or 'd2y/dt2'). "
        "RHS uses operators + - * / ** (or ^ for power), parentheses, "
        "the state variables listed under STATE_VARIABLES, and any "
        "names you declare in 'params'. "
        f"Allowed functions: {' '.join(funcs)}. "
        "Velocity convention: when STATE_VARIABLES lists both 'x' and 'vx' "
        "(or 'y'/'vy', etc.), use the 'vx' name on the RHS to refer to the "
        "first time-derivative of x. The aliases 'dx/dt' and bare 'dx' are "
        "also accepted for that case. The system is autonomous: time 't' is "
        "not a valid RHS symbol. "
        "No LaTeX, no \\frac, no array indexing, no library prefixes "
        "(write 'sqrt(x)', not 'np.sqrt(x)'), no keyword arguments. "
        "Working examples appear in the HISTORY block of each subsequent turn."
    )
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# Prompt-facing description of the accepted equation grammar, built once at
# import time from ALLOWED_FUNCTIONS.
GRAMMAR_HINT: str = _build_grammar_hint()


# Matches an equation LHS such as 'dx/dt' or 'd2y/dt2'. Under re.VERBOSE the
# whitespace inside the pattern is ignored, so 'd t' matches the literal
# 'dt'. Numerator and denominator order digits are captured separately so
# the caller can reject mismatched orders.
_LHS_PATTERN = re.compile(
    r"""
    ^\s*
    d(?P<order>\d*)
    (?P<var>[A-Za-z_][A-Za-z0-9_]*)
    /
    d t
    (?P<order2>\d*)
    \s*$
    """,
    re.VERBOSE,
)

# Maps whitelisted ast.BinOp operator node types to SymPy-compatible
# arithmetic; any operator type absent from this table is rejected by the
# AST walker with a ParseError.
_BIN_OP_TO_SYMPY: dict[type, "callable"] = {
    ast.Add: lambda a, b: a + b,
    ast.Sub: lambda a, b: a - b,
    ast.Mult: lambda a, b: a * b,
    ast.Div: lambda a, b: a / b,
    ast.Pow: lambda a, b: a**b,
}
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class Equation(BaseModel):
    """One parsed ODE of the form ``d<order><var>/dt<order> = rhs``."""

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    # State variable being differentiated on the LHS.
    var: str
    # Derivative order; the LHS parser only admits 1 or 2.
    order: int
    # Validated SymPy expression for the right-hand side.
    rhs: sp.Expr
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class ParsedEquation(BaseModel):
    """Validated result of parsing a full equation payload."""

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    # All equations found in the payload, in source order.
    equations: tuple[Equation, ...]
    # Union of symbol names appearing on any RHS.
    free_symbols: frozenset[str]
    # Total operator count summed over all RHS expressions
    # (presumably a complexity signal for the reward — confirm with callers).
    operator_count: int
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def parse_equation(
    text: str,
    state_variables: tuple[str, ...],
    parameter_names: frozenset[str],
) -> ParsedEquation:
    """Parse and validate the agent's equation payload.

    Only ParseError ever escapes — callers convert it to r_format=0.
    """
    if not text or not text.strip():
        raise ParseError("Empty equation payload.")

    raw_equations = _split_equations(text)
    if not raw_equations:
        raise ParseError("No equations found in payload.")

    allowed_symbols = frozenset(state_variables) | parameter_names

    equations = [
        _parse_one(raw, allowed_symbols, state_variables) for raw in raw_equations
    ]
    symbol_names = {
        sym.name for eq in equations for sym in eq.rhs.free_symbols
    }
    total_operators = sum(_count_operators(eq.rhs) for eq in equations)

    return ParsedEquation(
        equations=tuple(equations),
        free_symbols=frozenset(symbol_names),
        operator_count=total_operators,
    )
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _split_equations(text: str) -> list[str]:
|
| 138 |
+
parts = re.split(r"[;\n]+", text)
|
| 139 |
+
return [p.strip() for p in parts if p.strip()]
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def _parse_one(
    raw: str,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> Equation:
    """Parse a single 'LHS = RHS' equation string into an Equation."""
    if "=" not in raw:
        raise ParseError(f"Equation has no '=' sign: {raw!r}")
    left_text, right_text = raw.split("=", 1)
    var, order = _parse_lhs(left_text)
    rhs_expr = _parse_rhs(right_text, allowed_symbols, state_variables)
    return Equation(var=var, order=order, rhs=rhs_expr)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _parse_lhs(lhs: str) -> tuple[str, int]:
    """Extract (variable, derivative order) from a 'dN<var>/dtN' LHS."""
    match = _LHS_PATTERN.match(lhs)
    if match is None:
        raise ParseError(
            f"Cannot parse LHS {lhs!r}. Expected 'dN<var>/dtN' where N is "
            "1 or 2 (or empty for first order)."
        )
    order_top = match.group("order")
    order_bot = match.group("order2")
    if order_top != order_bot:
        raise ParseError(
            f"LHS order mismatch in {lhs!r}: top order {order_top!r} vs "
            f"bottom order {order_bot!r}."
        )
    # Empty order digits mean first order ('dy/dt'); only 1 and 2 are legal.
    if order_top == "":
        return match.group("var"), 1
    if order_top in {"1", "2"}:
        return match.group("var"), int(order_top)
    raise ParseError(f"Only orders 1 and 2 are supported. Got {order_top!r}.")
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def _parse_rhs(
    rhs: str,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> sp.Expr:
    """Normalise the RHS text and convert it to a validated SymPy expression."""
    cleaned = rhs.strip()
    if not cleaned:
        raise ParseError("Empty RHS.")
    # Physics power notation and velocity aliases are rewritten before the
    # AST parse so the whitelist walker only ever sees canonical Python.
    cleaned = cleaned.replace("^", "**")
    cleaned = _apply_velocity_alias(cleaned, state_variables)
    try:
        tree = ast.parse(cleaned, mode="eval")
    except SyntaxError as exc:
        raise ParseError(
            f"Syntax error in RHS {cleaned!r}: {exc.msg}. "
            "Expected an infix expression like '-k*x + c*vx'."
        ) from exc
    return _ast_to_sympy(tree.body, allowed_symbols, state_variables)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def _ast_to_sympy(
    node: ast.AST,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> sp.Expr:
    """Recursively convert a parsed AST expression node into a SymPy expression.

    Only a small whitelist is accepted: int/float literals, identifiers in
    ``allowed_symbols``, unary ``+``/``-``, the binary operators registered
    in ``_BIN_OP_TO_SYMPY``, parentheses, and calls to ``ALLOWED_FUNCTIONS``.
    Every other node type raises ``ParseError`` with a construct-specific
    message explaining what is disallowed.
    """
    if isinstance(node, ast.Constant):
        # bool is a subclass of int, so True/False must be rejected explicitly.
        if isinstance(node.value, bool) or not isinstance(node.value, (int, float)):
            raise ParseError(
                f"Only numeric literals allowed on RHS; got "
                f"{node.value!r} ({type(node.value).__name__})."
            )
        return sp.Number(node.value)

    if isinstance(node, ast.Name):
        return _name_to_sympy(node.id, allowed_symbols, state_variables)

    if isinstance(node, ast.UnaryOp):
        operand = _ast_to_sympy(node.operand, allowed_symbols, state_variables)
        if isinstance(node.op, ast.UAdd):
            return +operand
        if isinstance(node.op, ast.USub):
            return -operand
        # e.g. '~x' (ast.Invert) falls through to here.
        raise ParseError(
            f"Unsupported unary operator {type(node.op).__name__}. "
            "Allowed: + (positive), - (negation)."
        )

    if isinstance(node, ast.BinOp):
        op_fn = _BIN_OP_TO_SYMPY.get(type(node.op))
        if op_fn is None:
            raise ParseError(
                f"Unsupported binary operator {type(node.op).__name__}. "
                "Allowed: + - * / ** (also '^' as a power synonym)."
            )
        left = _ast_to_sympy(node.left, allowed_symbols, state_variables)
        right = _ast_to_sympy(node.right, allowed_symbols, state_variables)
        return op_fn(left, right)

    if isinstance(node, ast.Call):
        return _call_to_sympy(node, allowed_symbols, state_variables)

    # The remaining branches exist purely to give targeted, helpful error
    # messages for common disallowed constructs instead of the generic one.
    if isinstance(node, ast.Attribute):
        raise ParseError(
            "Attribute access is not allowed in equation RHS "
            f"(saw '.{node.attr}'). Use bare function names like "
            "'sqrt(x)' or 'sin(theta)', not 'np.sqrt(x)'."
        )

    if isinstance(node, ast.Subscript):
        raise ParseError(
            "Array indexing is not allowed in equation RHS. "
            "Use named scalars declared in 'params'."
        )

    if isinstance(node, ast.Compare):
        raise ParseError(
            "Comparisons (==, <, >, etc.) are not allowed in equation RHS."
        )

    if isinstance(node, ast.BoolOp):
        raise ParseError(
            "Boolean operators ('and', 'or') are not allowed in equation RHS."
        )

    if isinstance(node, ast.IfExp):
        raise ParseError(
            "Conditional expressions ('a if cond else b') are not allowed in "
            "equation RHS."
        )

    if isinstance(node, ast.Lambda):
        raise ParseError("Lambda expressions are not allowed in equation RHS.")

    if isinstance(node, (ast.Tuple, ast.List, ast.Set, ast.Dict)):
        raise ParseError(
            f"Collection literal ({type(node).__name__}) is not allowed in "
            "equation RHS."
        )

    # Catch-all for any node type not explicitly handled above.
    raise ParseError(
        f"Unsupported expression construct {type(node).__name__}. "
        "The grammar accepts: numeric literals, allowed identifiers, "
        f"+ - * / **, parentheses, and {sorted(ALLOWED_FUNCTIONS)}."
    )
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def _name_to_sympy(
    name: str,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> sp.Symbol:
    """Map a bare identifier to a SymPy symbol, rejecting anything unknown.

    Raises:
        ParseError: if ``name`` is a known function (must be called) or is
            not in ``allowed_symbols`` (with a hint where one applies).
    """
    if name in ALLOWED_FUNCTIONS:
        raise ParseError(
            f"{name!r} is a function and must be called with parentheses, "
            f"e.g. {name}(x)."
        )
    if name in allowed_symbols:
        return sp.Symbol(name)

    # Unknown identifier: attach a human-readable hint when we can guess
    # what the author meant (e.g. 'dx' instead of 'vx').
    hint = _explain_unknown_symbol(name, state_variables)
    message = f"Unknown symbol {name!r}; allowed {sorted(allowed_symbols)!r}."
    if hint:
        message = f"{message} {hint}"
    raise ParseError(message)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _call_to_sympy(
    node: ast.Call,
    allowed_symbols: frozenset[str],
    state_variables: tuple[str, ...],
) -> sp.Expr:
    """Translate a whitelisted function call node into the SymPy equivalent.

    Only positional calls to bare names listed in ``ALLOWED_FUNCTIONS`` are
    accepted; keyword arguments, star-unpacking, attribute calls
    ('np.sqrt'), and computed-name calls all raise ``ParseError``.
    """
    if node.keywords:
        raise ParseError(
            "Keyword arguments are not allowed in function calls "
            "(e.g. 'sin(theta=0.1)'). Pass positional arguments only."
        )
    if any(isinstance(arg, ast.Starred) for arg in node.args):
        raise ParseError("Star-arg / unpacking ('*args') is not allowed.")

    callee = node.func
    if isinstance(callee, ast.Attribute):
        raise ParseError(
            "Attribute access is not allowed in equation RHS "
            f"(saw '.{callee.attr}'). Use bare function names like "
            "'sqrt(x)' or 'sin(theta)', not 'np.sqrt(x)'."
        )
    if not isinstance(callee, ast.Name):
        # e.g. '(f)(x)' where f is itself an expression.
        raise ParseError(
            "Only direct calls to named functions are allowed. "
            f"Use one of {sorted(ALLOWED_FUNCTIONS)}, not a computed-name call."
        )
    if callee.id not in ALLOWED_FUNCTIONS:
        raise ParseError(
            f"Unknown function {callee.id!r}; "
            f"allowed: {sorted(ALLOWED_FUNCTIONS)}."
        )

    converted = [
        _ast_to_sympy(arg, allowed_symbols, state_variables) for arg in node.args
    ]
    return ALLOWED_FUNCTIONS[callee.id](*converted)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def _apply_velocity_alias(rhs: str, state_variables: tuple[str, ...]) -> str:
    """Rewrite derivative spellings ('dx/dt', bare 'dx') as the velocity name.

    Only variables that have a declared ``v``-prefixed twin (per
    ``_velocity_aliases``) are rewritten; anything else is left untouched.
    """
    text = rhs
    for var, velocity in _velocity_aliases(state_variables):
        escaped = re.escape(var)
        # Replace the full 'dx/dt' form first so the bare-'dx' pass below
        # doesn't leave a stray '/dt' behind.
        text = re.sub(rf"\bd{escaped}\s*/\s*dt\b", velocity, text)
        text = re.sub(rf"\bd{escaped}\b", velocity, text)
    return text
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def _velocity_aliases(state_variables: tuple[str, ...]) -> list[tuple[str, str]]:
|
| 357 |
+
state_set = set(state_variables)
|
| 358 |
+
out: list[tuple[str, str]] = []
|
| 359 |
+
for var in state_variables:
|
| 360 |
+
if not var or var.startswith(("d", "v")):
|
| 361 |
+
continue
|
| 362 |
+
velocity = f"v{var}"
|
| 363 |
+
if velocity in state_set:
|
| 364 |
+
out.append((var, velocity))
|
| 365 |
+
return out
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def _explain_unknown_symbol(name: str, state_variables: tuple[str, ...]) -> str:
|
| 369 |
+
state_set = set(state_variables)
|
| 370 |
+
if name == "t":
|
| 371 |
+
return (
|
| 372 |
+
"'t' is not allowed — the equation must be autonomous "
|
| 373 |
+
"(express forces via state variables only, no explicit time)."
|
| 374 |
+
)
|
| 375 |
+
if name.startswith("d") and len(name) > 1:
|
| 376 |
+
base = name[1:]
|
| 377 |
+
velocity = f"v{base}"
|
| 378 |
+
if velocity in state_set:
|
| 379 |
+
return (
|
| 380 |
+
f"Did you mean '{velocity}'? "
|
| 381 |
+
f"Use '{velocity}' for the velocity of '{base}'."
|
| 382 |
+
)
|
| 383 |
+
if base in state_set:
|
| 384 |
+
return (
|
| 385 |
+
f"'{name}' looks like a derivative; this system has no "
|
| 386 |
+
f"separate velocity name, write '{base}' on the RHS."
|
| 387 |
+
)
|
| 388 |
+
return ""
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def _count_operators(expr: sp.Expr) -> int:
    """Count non-leaf nodes in *expr* — anything that is not a symbol or number.

    Serves as a simple complexity measure of the expression tree.
    """
    leaf_types = (sp.Symbol, sp.Number)
    return sum(
        1
        for subexpr in sp.preorder_traversal(expr)
        if not isinstance(subexpr, leaf_types)
    )
|