"""End-to-end tests for the ``/interactive/*`` router."""
from __future__ import annotations
import json
from collections.abc import Iterable
import pytest
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.testclient import TestClient
from openenv.core.env_server import create_fastapi_app
from physix.models import PhysiXAction, PhysiXObservation
from physix.server.app import build_app
from physix.server.environment import PhysiXEnvironment
from physix.server.interactive import (
LlmModelInfo,
LlmModelsResponse,
LlmStepRequest,
build_interactive_router,
)
@pytest.fixture
def client() -> TestClient:
return TestClient(build_app())
def _build_app_with_stubbed_llm(
completions: Iterable[str],
*,
models_response: LlmModelsResponse | None = None,
) -> FastAPI:
"""Build a clone of the production app whose LLM policy returns
pre-canned completion strings in order.
Each call to the policy pops the next completion off the deque, so a
test that wants three turns supplies three strings. Optionally
overrides the model lister so the ``/interactive/models`` route can
be exercised without touching the real Ollama daemon.
"""
queue = list(completions)
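# The factory receives the full LlmStepRequest but this stub ignores it;
# the returned policy simply replays the canned completions in order.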
def _stub_policy(_payload: LlmStepRequest):
def _policy(_prompt: list[dict[str, str]]) -> str:
if not queue:
raise AssertionError("Stubbed LLM ran out of canned completions.")
return queue.pop(0)
return _policy
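# Stand-in for the real Ollama model lister; defaults to an empty
# catalogue unless the test supplies a canned response.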
def _stub_lister() -> LlmModelsResponse:
return models_response or LlmModelsResponse(models=[])
app = create_fastapi_app(
env=PhysiXEnvironment,
action_cls=PhysiXAction,
observation_cls=PhysiXObservation,
)
app.add_middleware(
CORSMiddleware,
allow_origins=["http://localhost:5173"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(
build_interactive_router(
policy_factory=_stub_policy,
models_lister=_stub_lister,
)
)
return app
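# Example usage (illustrative; mirrors the llm-step tests below):
#     app = _build_app_with_stubbed_llm(
#         [json.dumps({"equation": "d2y/dt2 = -9.81", "rationale": "gravity"})]
#     )
#     with TestClient(app) as client:
#         ...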
# --- Catalogue ---
def test_systems_endpoint_returns_supported_systems_in_order(
client: TestClient,
) -> None:
from physix.systems.registry import SUPPORTED_SYSTEMS
response = client.get("/interactive/systems")
assert response.status_code == 200
catalogue = response.json()
returned_ids = [row["system_id"] for row in catalogue]
assert returned_ids == list(SUPPORTED_SYSTEMS)
system_ids = set(returned_ids)
assert "projectile_drag" not in system_ids
assert "charged_b_field" not in system_ids
# Sanity check: the systems we pruned for plateauing on 7B must not
# leak through either, since the whole point of the curriculum is to
# hide them from the demo dropdown.
assert "free_fall_drag" not in system_ids
assert "damped_pendulum" not in system_ids
# --- Local model catalogue ---
def test_models_endpoint_returns_injected_list() -> None:
"""Frontend reads installed model tags from the server, not a hardcoded
list. The route must surface whatever the lister reports."""
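# Expected wire shape (inferred from the assertions below):
#     {"models": [{"name": ..., "parameter_size": ..., ...}], "error": null}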
canned = LlmModelsResponse(
models=[
LlmModelInfo(name="qwen2.5:7b", size_bytes=4_700_000_000, parameter_size="7.6B"),
LlmModelInfo(name="qwen2.5:1.5b-instruct", size_bytes=986_000_000),
]
)
app = _build_app_with_stubbed_llm([], models_response=canned)
with TestClient(app) as client:
response = client.get("/interactive/models")
assert response.status_code == 200, response.text
body = response.json()
assert body["error"] is None
assert [m["name"] for m in body["models"]] == [
"qwen2.5:7b",
"qwen2.5:1.5b-instruct",
]
assert body["models"][0]["parameter_size"] == "7.6B"
def test_models_endpoint_returns_empty_with_error_when_daemon_unavailable() -> None:
"""When Ollama is unreachable the route degrades to an empty list and
surfaces a human-readable hint, instead of 5xx-ing the page."""
canned = LlmModelsResponse(
models=[],
error="Could not reach the local Ollama daemon (test). Is 'ollama serve' running?",
)
app = _build_app_with_stubbed_llm([], models_response=canned)
with TestClient(app) as client:
response = client.get("/interactive/models")
assert response.status_code == 200
body = response.json()
assert body["models"] == []
assert "Ollama" in body["error"]
# --- Session lifecycle ---
def test_session_lifecycle_create_summary_delete(client: TestClient) -> None:
"""Create → reset observation → summary → delete → 404. The actual
advancing of turn counter / format scoring / predicted overlay
lives in the ``/llm-step`` tests below; this is the lifecycle
skeleton (the only flow the UI actually exercises now that the
manual ``/step`` route is gone)."""
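# Flow: POST /interactive/sessions → GET .../{id} → DELETE .../{id} →
# a second GET must return 404.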
create = client.post(
"/interactive/sessions",
json={"system_id": "free_fall", "seed": 42, "max_turns": 4},
)
assert create.status_code == 200, create.text
body = create.json()
session_id = body["session_id"]
assert isinstance(session_id, str) and session_id
assert body["system"]["system_id"] == "free_fall"
assert "tier" not in body["system"] # tier is dropped from the public schema
assert body["max_turns"] == 4
assert body["observation"]["turn"] == 0
assert body["observation"]["done"] is False
assert len(body["observation"]["trajectory"]) == 100
summary = client.get(f"/interactive/sessions/{session_id}").json()
assert summary["turn"] == 0
assert summary["max_turns"] == 4
assert summary["done"] is False
end = client.delete(f"/interactive/sessions/{session_id}")
assert end.status_code == 204
assert client.get(f"/interactive/sessions/{session_id}").status_code == 404
def test_unknown_system_id_returns_400(client: TestClient) -> None:
response = client.post(
"/interactive/sessions",
json={"system_id": "no_such_system"},
)
assert response.status_code == 400
def test_unknown_session_id_returns_404() -> None:
"""Session-scoped routes return 404 for unknown ids, not 500."""
app = _build_app_with_stubbed_llm([])
with TestClient(app) as client:
response = client.post(
"/interactive/sessions/does-not-exist/llm-step",
json={"model": "stub"},
)
assert response.status_code == 404
# --- LLM-step endpoint (with stubbed policy) ---
def test_llm_step_drives_a_turn_using_injected_policy() -> None:
"""The endpoint must call the policy, parse, step, and surface the raw."""
app = _build_app_with_stubbed_llm(
[json.dumps({"equation": "d2y/dt2 = -9.81", "rationale": "gravity"})]
)
with TestClient(app) as client:
create = client.post(
"/interactive/sessions",
json={"system_id": "free_fall", "seed": 0, "max_turns": 4},
).json()
session_id = create["session_id"]
response = client.post(
f"/interactive/sessions/{session_id}/llm-step",
json={"model": "stub:1.5b", "temperature": 0.1, "max_tokens": 64},
)
assert response.status_code == 200, response.text
body = response.json()
assert body["model"] == "stub:1.5b"
assert body["action"]["equation"] == "d2y/dt2 = -9.81"
assert body["action"]["rationale"] == "gravity"
assert body["observation"]["turn"] == 1
assert body["observation"]["reward_breakdown"]["match"] >= 0.9
assert body["predicted_trajectory"]
assert body["latency_s"] >= 0.0
assert "d2y/dt2" in body["raw_completion"]
def test_llm_step_runs_full_episode_with_three_canned_turns() -> None:
"""Multi-turn drive: each call pops the next completion, history grows."""
completions = [
json.dumps({"equation": "d2y/dt2 = -9.81", "rationale": "pure gravity"}),
json.dumps({
"equation": "d2y/dt2 = -9.81 + 0.1 * vy",
"rationale": "linear drag",
}),
json.dumps({
"equation": "d2y/dt2 = -9.81 + 0.05 * vy**2",
"rationale": "quadratic drag",
}),
]
app = _build_app_with_stubbed_llm(completions)
with TestClient(app) as client:
session_id = client.post(
"/interactive/sessions",
json={"system_id": "free_fall_drag", "seed": 42, "max_turns": 8},
).json()["session_id"]
bodies = []
for _ in range(3):
response = client.post(
f"/interactive/sessions/{session_id}/llm-step",
json={"model": "stub"},
)
assert response.status_code == 200, response.text
bodies.append(response.json())
assert [b["action"]["equation"] for b in bodies] == [
"d2y/dt2 = -9.81",
"d2y/dt2 = -9.81 + 0.1 * vy",
"d2y/dt2 = -9.81 + 0.05 * vy**2",
]
assert [b["observation"]["turn"] for b in bodies] == [1, 2, 3]
# History accumulates across turns.
assert len(bodies[-1]["observation"]["history"]) == 3
def test_llm_step_handles_unparseable_completion_as_format_zero() -> None:
"""If the model emits junk, the env scores it format=0, no 500."""
app = _build_app_with_stubbed_llm(["I refuse to answer."])
with TestClient(app) as client:
session_id = client.post(
"/interactive/sessions",
json={"system_id": "simple_pendulum", "seed": 0, "max_turns": 4},
).json()["session_id"]
response = client.post(
f"/interactive/sessions/{session_id}/llm-step",
json={"model": "stub"},
)
assert response.status_code == 200, response.text
body = response.json()
assert body["observation"]["reward_breakdown"]["format"] == 0.0
assert body["predicted_trajectory"] == []
assert body["raw_completion"] == "I refuse to answer."
def test_llm_step_after_budget_exhaustion_returns_409() -> None:
"""Once the env has consumed its budget, llm-step is rejected too."""
canned = [
json.dumps({"equation": "d2theta/dt2 = 0"}),
json.dumps({"equation": "d2theta/dt2 = 0"}),
]
app = _build_app_with_stubbed_llm(canned)
with TestClient(app) as client:
session_id = client.post(
"/interactive/sessions",
json={"system_id": "simple_pendulum", "seed": 1, "max_turns": 2},
).json()["session_id"]
for _ in range(2):
assert client.post(
f"/interactive/sessions/{session_id}/llm-step",
json={"model": "stub"},
).status_code == 200
overflow = client.post(
f"/interactive/sessions/{session_id}/llm-step",
json={"model": "stub"},
)
assert overflow.status_code == 409
# --- CORS ---
def test_cors_preflight_for_dev_origin(client: TestClient) -> None:
"""OPTIONS preflight from the Vite dev server is allowed."""
response = client.options(
"/interactive/sessions",
headers={
"Origin": "http://localhost:5173",
"Access-Control-Request-Method": "POST",
"Access-Control-Request-Headers": "content-type",
},
)
assert response.status_code in (200, 204), response.text
assert response.headers["access-control-allow-origin"] == "http://localhost:5173"