Pratyush-01 committed
Commit 88a3c69 · verified · 1 Parent(s): 906b21d

Sync physix/ to merged tree (post train/ merge, pre 4ep/500step run)
physix/__pycache__/__init__.cpython-311.pyc CHANGED
Binary files a/physix/__pycache__/__init__.cpython-311.pyc and b/physix/__pycache__/__init__.cpython-311.pyc differ
 
physix/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (975 Bytes).
 
physix/__pycache__/__init__.cpython-314.pyc ADDED
Binary file (972 Bytes).
 
physix/__pycache__/client.cpython-313.pyc ADDED
Binary file (2.3 kB).
 
physix/__pycache__/client.cpython-314.pyc ADDED
Binary file (2.77 kB).
 
physix/__pycache__/models.cpython-311.pyc CHANGED
Binary files a/physix/__pycache__/models.cpython-311.pyc and b/physix/__pycache__/models.cpython-311.pyc differ
 
physix/__pycache__/models.cpython-314.pyc ADDED
Binary file (6.54 kB).
 
physix/client.py CHANGED
@@ -41,3 +41,11 @@ class PhysiXEnv(EnvClient[PhysiXAction, PhysiXObservation, PhysiXState]):
 
     def _parse_state(self, payload: dict[str, Any]) -> PhysiXState:
         return PhysiXState(**payload)
+
+
+# Alias for OpenEnv auto-discovery: the convention Pascal-cases the
+# manifest `name` field ("physix" -> "Physix"), so AutoEnv looks up
+# `physix.client.PhysixEnv`. The actual class is `PhysiXEnv` (capital
+# X in the brand). This alias makes both lookups succeed without
+# duplicating the implementation.
+PhysixEnv = PhysiXEnv
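
A minimal sanity check of the alias (illustrative snippet, not part of the commit): assuming `physix.client` is importable, both the branded class name and the Pascal-cased name that OpenEnv's auto-discovery derives resolve to the same object.

# Illustrative only (not in the commit): both spellings point at one class.
from physix.client import PhysiXEnv, PhysixEnv

assert PhysixEnv is PhysiXEnv  # alias, not a second implementation
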
physix/server/interactive.py CHANGED
@@ -6,7 +6,6 @@ import logging
 import threading
 import time
 import uuid
-from collections.abc import Callable
 from typing import Optional
 
 import numpy as np
@@ -19,12 +18,30 @@ from physix.models import (
     PhysiXObservation,
 )
 from physix.server.environment import PhysiXEnvironment
+from physix.server.providers import (
+    LlmModelInfo,
+    LlmModelsLister,
+    LlmModelsResponse,
+    LlmPolicyFactory,
+    LlmStepRequest,
+    default_ollama_models_lister,
+    default_openai_compat_policy_factory,
+)
 from physix.systems import list_supported_systems, list_systems
 from physix.systems.base import PhysicalSystem, TrajectoryData
 from physix.training.prompt import build_prompt, parse_completion
 from physix.verifier.parser import parse_equation
 from physix.verifier.simulator import simulate_hypothesis
 
+__all__ = [
+    "InteractiveSessionStore",
+    "LlmModelInfo",
+    "LlmModelsResponse",
+    "LlmStepRequest",
+    "LlmStepResponse",
+    "build_interactive_router",
+]
+
 
 _log = logging.getLogger(__name__)
 
@@ -54,33 +71,6 @@ class InteractiveStartResponse(BaseModel):
     max_turns: int
 
 
-class LlmStepRequest(BaseModel):
-    """Server-side LLM call. Browser names a model tag; server hits Ollama."""
-
-    model_config = ConfigDict(extra="forbid")
-
-    model: str = "qwen2.5:1.5b-instruct"
-    temperature: float = Field(default=0.4, ge=0.0, le=2.0)
-    max_tokens: int = Field(default=2048, ge=64, le=8192)
-    host: Optional[str] = None
-
-
-class LlmModelInfo(BaseModel):
-    """A single locally-pulled Ollama model tag."""
-
-    model_config = ConfigDict(frozen=True)
-
-    name: str
-    size_bytes: Optional[int] = None
-    parameter_size: Optional[str] = None
-    family: Optional[str] = None
-
-
-class LlmModelsResponse(BaseModel):
-    models: list[LlmModelInfo] = Field(default_factory=list)
-    error: Optional[str] = None
-
-
 class LlmStepResponse(BaseModel):
     observation: PhysiXObservation
     predicted_trajectory: list[dict[str, float]] = Field(default_factory=list)
@@ -147,118 +137,10 @@ class InteractiveSessionStore:
         return len(self._sessions)
 
 
-LlmPolicy = Callable[[list[dict[str, str]]], str]
-LlmPolicyFactory = Callable[[LlmStepRequest], LlmPolicy]
-LlmModelsLister = Callable[[], LlmModelsResponse]
-
-
-def default_ollama_models_lister() -> LlmModelsResponse:
-    try:
-        import ollama  # type: ignore[import-not-found]
-    except ImportError:
-        return LlmModelsResponse(
-            models=[],
-            error=(
-                "The 'ollama' Python package is not installed on the server. "
-                "Install with: pip install -e '.[demo]'"
-            ),
-        )
-
-    try:
-        response = ollama.Client().list()
-    except Exception as exc:  # noqa: BLE001 — surfaced in the response body
-        return LlmModelsResponse(
-            models=[],
-            error=(
-                f"Could not reach the local Ollama daemon ({exc}). "
-                "Is 'ollama serve' running?"
-            ),
-        )
-
-    raw_models = getattr(response, "models", None)
-    if raw_models is None and isinstance(response, dict):
-        raw_models = response.get("models", [])
-    raw_models = raw_models or []
-
-    out: list[LlmModelInfo] = []
-    for entry in raw_models:
-        name = _model_attr(entry, "model") or _model_attr(entry, "name")
-        if not isinstance(name, str) or not name:
-            continue
-        details = _model_attr(entry, "details")
-        out.append(
-            LlmModelInfo(
-                name=name,
-                size_bytes=_coerce_int(_model_attr(entry, "size")),
-                parameter_size=_model_attr(details, "parameter_size"),
-                family=_model_attr(details, "family"),
-            )
-        )
-
-    out.sort(key=lambda m: m.name)
-    return LlmModelsResponse(models=out)
-
-
-def _model_attr(obj: object, key: str) -> object:
-    if obj is None:
-        return None
-    if isinstance(obj, dict):
-        return obj.get(key)
-    return getattr(obj, key, None)
-
-
-def _coerce_int(value: object) -> Optional[int]:
-    if value is None:
-        return None
-    try:
-        return int(value)
-    except (TypeError, ValueError):
-        return None
-
-
-def default_ollama_policy_factory(request: LlmStepRequest) -> LlmPolicy:
-    try:
-        import ollama  # type: ignore[import-not-found]
-    except ImportError as exc:  # pragma: no cover
-        raise HTTPException(
-            status_code=503,
-            detail=(
-                "The 'ollama' Python package is not installed on the server. "
-                "Install with: pip install -e '.[demo]'"
-            ),
-        ) from exc
-
-    client = ollama.Client(host=request.host) if request.host else ollama.Client()
-
-    def _policy(prompt: list[dict[str, str]]) -> str:
-        try:
-            response = client.chat(
-                model=request.model,
-                messages=prompt,
-                format="json",
-                options={
-                    "temperature": request.temperature,
-                    "num_predict": request.max_tokens,
-                },
-            )
-        except Exception as exc:  # noqa: BLE001 — surfaced as 502
-            raise HTTPException(
-                status_code=502,
-                detail=(
-                    f"Ollama call failed for model {request.model!r}: {exc}. "
-                    "Is 'ollama serve' running and the model pulled "
-                    f"('ollama pull {request.model}')?"
-                ),
-            ) from exc
-        return str(response["message"]["content"])
-
-    return _policy
-
-
 def build_interactive_router(
     store: Optional[InteractiveSessionStore] = None,
     *,
-    policy_factory: LlmPolicyFactory = default_ollama_policy_factory,
+    policy_factory: LlmPolicyFactory = default_openai_compat_policy_factory,
     models_lister: LlmModelsLister = default_ollama_models_lister,
 ) -> APIRouter:
     sessions = store if store is not None else InteractiveSessionStore()
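
A minimal wiring sketch (not in the commit; the FastAPI calls are standard, the router construction follows the signature in the hunk above) showing that the OpenAI-compatible policy factory is now the default whenever a server mounts the interactive router without overrides.

# Hypothetical app wiring: the router defaults to the OpenAI-compatible
# policy factory and the Ollama model lister imported from providers.py.
from fastapi import FastAPI

from physix.server.interactive import build_interactive_router

app = FastAPI()
app.include_router(build_interactive_router())
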
physix/server/providers.py ADDED
@@ -0,0 +1,280 @@
+"""LLM provider abstraction for the interactive demo.
+
+The demo points at any OpenAI-compatible ``/v1/chat/completions`` endpoint:
+local Ollama, Hugging Face's Inference Providers router, OpenAI itself,
+vLLM, OpenRouter, etc. Everything funnels through one factory so the UI
+only has to learn one shape.
+
+The browser passes ``base_url``, ``model``, and (optionally) ``api_key``
+on every request. If ``api_key`` is missing we fall back to a per-provider
+env var so a Hugging Face Space can ship a default working config without
+hard-coding secrets in client bundles.
+"""
+
+from __future__ import annotations
+
+import logging
+import os
+from collections.abc import Callable
+from typing import Optional
+
+from fastapi import HTTPException
+from pydantic import BaseModel, ConfigDict, Field
+
+_log = logging.getLogger(__name__)
+
+
+# Public preset URLs. Mirror these in the frontend connection panel so the
+# two stay in sync; the values themselves only matter when the browser
+# hands one back to us in `LlmStepRequest.base_url`.
+HF_ROUTER_BASE_URL = "https://router.huggingface.co/v1"
+OPENAI_BASE_URL = "https://api.openai.com/v1"
+OLLAMA_OPENAI_BASE_URL = "http://localhost:11434/v1"
+
+
+class LlmStepRequest(BaseModel):
+    """Provider-agnostic step payload.
+
+    The browser names a base URL + model + (optional) key. The server
+    fans these into an ``openai.OpenAI`` client. ``base_url`` is required
+    so we never silently default to the wrong endpoint when the user
+    swaps providers mid-session.
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    base_url: str = Field(
+        description=(
+            "OpenAI-compatible /v1 base URL. E.g. http://localhost:11434/v1, "
+            "https://router.huggingface.co/v1, https://api.openai.com/v1."
+        ),
+    )
+    model: str = Field(
+        description=(
+            "Model id understood by the chosen base URL. For HF this is the "
+            "repo id (optionally suffixed with :provider, e.g. ':fastest'); "
+            "for Ollama it's the local tag; for OpenAI it's the model name."
+        ),
+    )
+    api_key: Optional[str] = Field(
+        default=None,
+        description=(
+            "Bearer token forwarded as Authorization header. Falls back to "
+            "HF_TOKEN / OPENAI_API_KEY / OLLAMA_API_KEY env vars on the "
+            "server based on `base_url` if omitted."
+        ),
+    )
+    temperature: float = Field(default=0.4, ge=0.0, le=2.0)
+    max_tokens: int = Field(default=2048, ge=64, le=8192)
+    request_timeout_s: float = Field(default=120.0, ge=5.0, le=600.0)
+
+
+# A policy is "give me prompt messages, get back the assistant content".
+LlmPolicy = Callable[[list[dict[str, str]]], str]
+LlmPolicyFactory = Callable[[LlmStepRequest], LlmPolicy]
+
+
+def resolve_api_key(request: LlmStepRequest) -> Optional[str]:
+    """Pick the bearer token to use for this request.
+
+    Browser-supplied keys win. When the browser sends nothing we fall
+    back to a server-side env var picked from the URL — this lets a
+    public Hugging Face Space ship a usable default by setting
+    ``HF_TOKEN`` as a Space secret while still letting power users
+    bring their own.
+    """
+
+    if request.api_key:
+        return request.api_key
+
+    base_url = (request.base_url or "").lower()
+    if "huggingface" in base_url:
+        return os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_API_KEY")
+    if "openai.com" in base_url:
+        return os.environ.get("OPENAI_API_KEY")
+    if "openrouter" in base_url:
+        return os.environ.get("OPENROUTER_API_KEY")
+    if "localhost" in base_url or "127.0.0.1" in base_url:
+        # Ollama doesn't require a key; the SDK still wants something
+        # truthy in some versions, so we hand it a placeholder.
+        return os.environ.get("OLLAMA_API_KEY", "ollama")
+    return None
+
+
+def default_openai_compat_policy_factory(request: LlmStepRequest) -> LlmPolicy:
+    """Build a chat policy backed by any OpenAI-compatible endpoint.
+
+    Used by the interactive router for every demo turn. Failures bubble
+    up as ``HTTPException(502)`` so the UI can surface a clear "your
+    provider is unhappy" banner instead of a stack trace.
+    """
+
+    try:
+        from openai import OpenAI  # type: ignore[import-not-found]
+    except ImportError as exc:  # pragma: no cover
+        raise HTTPException(
+            status_code=503,
+            detail=(
+                "The 'openai' Python package is not installed on the server. "
+                "Install with: pip install -e '.[demo]'"
+            ),
+        ) from exc
+
+    api_key = resolve_api_key(request)
+    client = OpenAI(
+        base_url=request.base_url,
+        api_key=api_key or "missing",
+        timeout=request.request_timeout_s,
+    )
+
+    def _policy(prompt: list[dict[str, str]]) -> str:
+        try:
+            response = client.chat.completions.create(
+                model=request.model,
+                messages=prompt,  # type: ignore[arg-type]
+                temperature=request.temperature,
+                max_tokens=request.max_tokens,
+                # Encourages JSON output where supported (OpenAI, vLLM,
+                # Ollama-OpenAI). HF router silently ignores this on
+                # providers that don't support it, which is fine — our
+                # parser tolerates Markdown-fenced JSON too.
+                response_format={"type": "json_object"},
+            )
+        except TypeError:
+            # Some providers reject `response_format`; retry without it.
+            response = client.chat.completions.create(
+                model=request.model,
+                messages=prompt,  # type: ignore[arg-type]
+                temperature=request.temperature,
+                max_tokens=request.max_tokens,
+            )
+        except Exception as exc:  # noqa: BLE001 — surfaced to the UI
+            raise HTTPException(
+                status_code=502,
+                detail=_format_provider_error(request, exc),
+            ) from exc
+
+        choice = response.choices[0] if response.choices else None
+        content = (choice.message.content if choice and choice.message else "") or ""
+        return str(content)
+
+    return _policy
+
+
+def _format_provider_error(request: LlmStepRequest, exc: Exception) -> str:
+    """Make the most common failure modes self-diagnosing in the UI."""
+
+    base_msg = f"Chat completion failed via {request.base_url} for model {request.model!r}: {exc}"
+    text = str(exc).lower()
+    if "401" in text or "unauthorized" in text or "invalid api key" in text:
+        return (
+            f"{base_msg}\n\n"
+            "Hint: the API key is missing or rejected. Open the connection "
+            "panel and paste a valid token, or set the matching env var on "
+            "the server (HF_TOKEN, OPENAI_API_KEY, etc.)."
+        )
+    if "404" in text or "not found" in text or "no such model" in text:
+        return (
+            f"{base_msg}\n\n"
+            "Hint: the chosen model isn't reachable through this endpoint. "
+            "For Hugging Face, verify the repo id is public and that "
+            "Inference Providers is enabled for it. For Ollama, run "
+            f"'ollama pull {request.model}'."
+        )
+    if "connection" in text or "refused" in text or "timeout" in text:
+        return (
+            f"{base_msg}\n\n"
+            "Hint: the endpoint isn't reachable. For Ollama, make sure "
+            "'ollama serve' is running on the host you pointed at."
+        )
+    return base_msg
+
+
+# -----------------------------------------------------------------------
+# Ollama-only model lister (kept for the local-dev convenience dropdown).
+# -----------------------------------------------------------------------
+
+
+class LlmModelInfo(BaseModel):
+    """A single locally-pulled Ollama model tag."""
+
+    model_config = ConfigDict(frozen=True)
+
+    name: str
+    size_bytes: Optional[int] = None
+    parameter_size: Optional[str] = None
+    family: Optional[str] = None
+
+
+class LlmModelsResponse(BaseModel):
+    models: list[LlmModelInfo] = Field(default_factory=list)
+    error: Optional[str] = None
+
+
+LlmModelsLister = Callable[[], LlmModelsResponse]
+
+
+def default_ollama_models_lister() -> LlmModelsResponse:
+    """Enumerate locally-pulled Ollama tags. Best-effort."""
+
+    try:
+        import ollama  # type: ignore[import-not-found]
+    except ImportError:
+        return LlmModelsResponse(
+            models=[],
+            error=(
+                "The 'ollama' Python package is not installed on the server. "
+                "Install with: pip install -e '.[demo]'"
+            ),
+        )
+
+    try:
+        response = ollama.Client().list()
+    except Exception as exc:  # noqa: BLE001 — surfaced in the response body
+        return LlmModelsResponse(
+            models=[],
+            error=(
+                f"Could not reach the local Ollama daemon ({exc}). "
+                "Is 'ollama serve' running?"
+            ),
+        )
+
+    raw_models = getattr(response, "models", None)
+    if raw_models is None and isinstance(response, dict):
+        raw_models = response.get("models", [])
+    raw_models = raw_models or []
+
+    out: list[LlmModelInfo] = []
+    for entry in raw_models:
+        name = _model_attr(entry, "model") or _model_attr(entry, "name")
+        if not isinstance(name, str) or not name:
+            continue
+        details = _model_attr(entry, "details")
+        out.append(
+            LlmModelInfo(
+                name=name,
+                size_bytes=_coerce_int(_model_attr(entry, "size")),
+                parameter_size=_model_attr(details, "parameter_size"),
+                family=_model_attr(details, "family"),
+            )
+        )
+
+    out.sort(key=lambda m: m.name)
+    return LlmModelsResponse(models=out)
+
+
+def _model_attr(obj: object, key: str) -> object:
+    if obj is None:
+        return None
+    if isinstance(obj, dict):
+        return obj.get(key)
+    return getattr(obj, key, None)
+
+
+def _coerce_int(value: object) -> Optional[int]:
+    if value is None:
+        return None
+    try:
+        return int(value)
+    except (TypeError, ValueError):
+        return None
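
A short usage sketch of the provider layer (hypothetical call site; the base URL, model tag, and messages are placeholders): one demo turn routed through Ollama's OpenAI-compatible endpoint.

# Illustrative only. LlmStepRequest and the factory come from the file above;
# the model tag and prompt content are placeholders.
from physix.server.providers import LlmStepRequest, default_openai_compat_policy_factory

request = LlmStepRequest(
    base_url="http://localhost:11434/v1",  # local Ollama, OpenAI-compatible route
    model="qwen2.5:1.5b-instruct",         # placeholder local tag
)
policy = default_openai_compat_policy_factory(request)
reply = policy([
    {"role": "system", "content": "Reply with a single JSON object."},
    {"role": "user", "content": "Propose an equation for the observed trajectory."},
])
print(reply)
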
physix/training/dataset.py CHANGED
@@ -7,7 +7,7 @@ whose rows contain everything the training loop needs:
 - ``prompt``: the chat-format string passed to the model
 - ``system_id``, ``state_variables``, ``parameters``, ``initial_conditions``,
   ``timestamps``, ``observed``: the system context the scorer needs
-- ``previous_total``: 0.0 at turn-0 (we train on first-turn prompts; the
+- ``previous_r_match``: 0.0 at turn-0 (we train on first-turn prompts; the
   iterative refinement skill emerges from the model's general ability to
   read history at inference time)
 
physix/training/reward_fns.py CHANGED
@@ -42,11 +42,6 @@ from physix.verifier.reward import correctness_bonus, match_dense
 RewardFunction = Callable[..., list[float]]
 
 
-#: Components that read directly from the :class:`RewardBreakdown` produced
-#: by :class:`Scorer.score`. ``progress`` is omitted (see module docstring).
-_BREAKDOWN_COMPONENTS: tuple[str, ...] = ("match", "simplicity", "format")
-
-
 def make_reward_funcs(
     scorer: Scorer | None = None,
 ) -> dict[str, RewardFunction]:
@@ -56,10 +51,13 @@ def make_reward_funcs(
     logs them individually to W&B under
     ``train/rewards/reward_<component>/mean``.
 
-    The scorer is shared across all functions; calling ``scorer.reset()``
-    between steps avoids unbounded cache growth and ensures each
-    completion is parsed + simulated exactly once per step regardless of
-    how many reward functions query it.
+    The scorer is shared across all functions. TRL calls reward functions
+    one-by-one for the same batch (same ``completions`` list, same indices).
+    The ``match`` function resets the cache and populates it; the
+    remaining functions (``match_dense``, ``correctness``, ``simplicity``,
+    ``format``) reuse the cached results via ``cache_key=i``. This means
+    each completion is parsed + simulated exactly once per step regardless
+    of how many reward functions query it.
 
     Returns a dict whose keys are:
 
@@ -74,14 +72,15 @@ def make_reward_funcs(
     """
     shared = scorer if scorer is not None else Scorer()
 
-    def _make_breakdown_reader(component: str) -> RewardFunction:
+    def _make_breakdown_reader(component: str, *, reset_cache: bool) -> RewardFunction:
         def _reward_fn(
             prompts: Sequence[Any],
             completions: Sequence[str],
             **kwargs: Any,
         ) -> list[float]:
            del prompts  # kept for TRL API conformance; unused here.
-            shared.reset()
+            if reset_cache:
+                shared.reset()
             contexts = _hydrate_contexts(len(completions), kwargs)
             out: list[float] = []
             for i, completion in enumerate(completions):
@@ -102,7 +101,6 @@
         **kwargs: Any,
     ) -> list[float]:
         del prompts
-        shared.reset()
         contexts = _hydrate_contexts(len(completions), kwargs)
         out: list[float] = []
         for i, completion in enumerate(completions):
@@ -118,7 +116,6 @@
         **kwargs: Any,
     ) -> list[float]:
         del prompts
-        shared.reset()
         contexts = _hydrate_contexts(len(completions), kwargs)
         out: list[float] = []
         for i, completion in enumerate(completions):
@@ -128,8 +125,12 @@
 
     _reward_correctness.__name__ = "reward_correctness"
 
+    # ``match`` is always the first function TRL calls; it resets the cache
+    # so subsequent functions get fresh results for this step's completions.
     funcs: dict[str, RewardFunction] = {
-        name: _make_breakdown_reader(name) for name in _BREAKDOWN_COMPONENTS
+        "match": _make_breakdown_reader("match", reset_cache=True),
+        "simplicity": _make_breakdown_reader("simplicity", reset_cache=False),
+        "format": _make_breakdown_reader("format", reset_cache=False),
     }
     funcs["match_dense"] = _reward_match_dense
    funcs["correctness"] = _reward_correctness
physix/training/scorer.py CHANGED
@@ -92,7 +92,7 @@ class SystemContext(BaseModel):
             initial_conditions=_drop_none(row.get("initial_conditions", {})),
             timestamps=np.asarray(row.get("timestamps", []), dtype=float),
             observed=observed,
-            previous_r_match=float(row.get("previous_r_match", row.get("previous_total", 0.0))),
+            previous_r_match=float(row.get("previous_r_match", 0.0)),
         )
 
 
physix/training/sft.py CHANGED
@@ -153,7 +153,7 @@ def train_sft(
 
     # Heavy imports: only available in [train] env.
     import wandb
-    from unsloth import FastLanguageModel, PatchFastRL  # noqa: F401
+    from unsloth import FastLanguageModel
     from trl import SFTTrainer, SFTConfig
 
     # Force a fresh W&B run for SFT regardless of any inherited WANDB_RUN_ID