feat(api): POST /explain/eeg + /explain/mri — full-stack Track-1 coverage
Browse files- EEGExplainRequest carries pipeline metrics (rows / columns /
duration_sec / mlflow_run_id). MRIExplainRequest carries ComBat KPIs
(site_gap_pre / site_gap_post / reduction_factor / n_subjects).
- Both routes mounted on explain_router (prefix /explain). Use the
Day-7 explainer with modality='eeg' or 'mri' — same hybrid LLM /
template / kill-switch contract.
- 2 new tests set NEUROBRIDGE_DISABLE_LLM=1 to force the deterministic template path.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
- src/api/routes.py +40 -0
- src/api/schemas.py +32 -0
- tests/api/test_routes.py +41 -0
src/api/routes.py
CHANGED
|
@@ -24,12 +24,16 @@ from src.api.schemas import (
|
|
| 24 |
BBBPredictResponse,
|
| 25 |
BBBRequest,
|
| 26 |
CalibrationContext,
|
|
|
|
|
|
|
| 27 |
EEGRequest,
|
| 28 |
FeatureAttribution,
|
| 29 |
HarmonizationRow,
|
| 30 |
ModelProvenance,
|
| 31 |
MRIDiagnosticsRequest,
|
| 32 |
MRIDiagnosticsResponse,
|
|
|
|
|
|
|
| 33 |
MRIRequest,
|
| 34 |
PipelineResponse,
|
| 35 |
)
|
|
@@ -362,3 +366,39 @@ def explain_bbb(req: BBBExplainRequest) -> BBBExplainResponse:
|
|
| 362 |
source=result["source"],
|
| 363 |
model=result["model"],
|
| 364 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
BBBPredictResponse,
|
| 25 |
BBBRequest,
|
| 26 |
CalibrationContext,
|
| 27 |
+
EEGExplainRequest,
|
| 28 |
+
EEGExplainResponse,
|
| 29 |
EEGRequest,
|
| 30 |
FeatureAttribution,
|
| 31 |
HarmonizationRow,
|
| 32 |
ModelProvenance,
|
| 33 |
MRIDiagnosticsRequest,
|
| 34 |
MRIDiagnosticsResponse,
|
| 35 |
+
MRIExplainRequest,
|
| 36 |
+
MRIExplainResponse,
|
| 37 |
MRIRequest,
|
| 38 |
PipelineResponse,
|
| 39 |
)
|
|
|
|
| 366 |
source=result["source"],
|
| 367 |
model=result["model"],
|
| 368 |
)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
@explain_router.post("/eeg", response_model=EEGExplainResponse)
def explain_eeg(req: EEGExplainRequest) -> EEGExplainResponse:
    """Natural-language rationale for an EEG pipeline run.

    Forwards the pipeline metrics to the hybrid explainer, which returns
    either an LLM-generated or template-generated rationale (same
    contract as the BBB route).
    """
    # Empty string (not None) keeps the prompt payload fully stringly-typed.
    result = llm_explainer.explain(
        {
            "rows": req.rows,
            "columns": req.columns,
            "duration_sec": req.duration_sec,
            "mlflow_run_id": req.mlflow_run_id,
            "user_question": req.user_question or "",
        },
        modality="eeg",
    )
    return EEGExplainResponse(
        rationale=result["rationale"],
        source=result["source"],
        model=result["model"],
    )
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
@explain_router.post("/mri", response_model=MRIExplainResponse)
def explain_mri(req: MRIExplainRequest) -> MRIExplainResponse:
    """Natural-language rationale for an MRI ComBat diagnostic run.

    Hands the ComBat KPIs to the hybrid explainer; the response carries
    the rationale plus its provenance (LLM vs. template).
    """
    metrics = {
        "site_gap_pre": req.site_gap_pre,
        "site_gap_post": req.site_gap_post,
        "reduction_factor": req.reduction_factor,
        "n_subjects": req.n_subjects,
        # Normalize a missing question to "" for the prompt builder.
        "user_question": req.user_question or "",
    }
    out = llm_explainer.explain(metrics, modality="mri")
    return MRIExplainResponse(
        rationale=out["rationale"],
        source=out["source"],
        model=out["model"],
    )
|
src/api/schemas.py
CHANGED
|
@@ -161,3 +161,35 @@ class BBBExplainResponse(BaseModel):
|
|
| 161 |
None,
|
| 162 |
description="LLM model name when source='llm'; None when source='template'",
|
| 163 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 161 |
None,
|
| 162 |
description="LLM model name when source='llm'; None when source='template'",
|
| 163 |
)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class EEGExplainRequest(BaseModel):
    """Day-8 T1B: payload for POST /explain/eeg."""

    # Throughput metrics reported by the EEG preprocessing pipeline;
    # all are validated non-negative at the API boundary.
    rows: int = Field(..., ge=0, description="Number of epochs produced")
    columns: int = Field(..., ge=0, description="Number of features per epoch")
    duration_sec: float = Field(..., ge=0.0, description="Pipeline wall-clock seconds")
    # Optional provenance / prompt inputs — both may be omitted by callers.
    mlflow_run_id: str | None = Field(None, description="MLflow run id, if available")
    user_question: str | None = Field(None, description="Optional user question for the LLM prompt")
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
class EEGExplainResponse(BaseModel):
    """Day-8 T1B: response from POST /explain/eeg."""

    # Human-readable explanation produced by the explainer.
    rationale: str
    # Provenance of the rationale — presumably "llm" or "template",
    # mirroring the BBB explain contract; confirm against the explainer.
    source: str
    # LLM model name when an LLM produced the text; None for template output.
    model: str | None = None
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class MRIExplainRequest(BaseModel):
    """Day-8 T1B: payload for POST /explain/mri."""

    # Descriptions added for parity with EEGExplainRequest so the OpenAPI
    # schema documents every ComBat KPI (additive, backward-compatible).
    site_gap_pre: float = Field(..., ge=0.0, description="Site gap before ComBat harmonization")
    site_gap_post: float = Field(..., ge=0.0, description="Site gap after ComBat harmonization")
    reduction_factor: float = Field(..., ge=0.0, description="Factor by which the site gap was reduced")
    n_subjects: int = Field(..., ge=0, description="Number of subjects in the diagnostic run")
    user_question: str | None = Field(None, description="Optional user question for the LLM prompt")
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class MRIExplainResponse(BaseModel):
    """Day-8 T1B: response from POST /explain/mri."""

    # Human-readable explanation produced by the explainer.
    rationale: str
    # Provenance of the rationale — presumably "llm" or "template",
    # mirroring the BBB explain contract; confirm against the explainer.
    source: str
    # LLM model name when an LLM produced the text; None for template output.
    model: str | None = None
|
tests/api/test_routes.py
CHANGED
|
@@ -259,3 +259,44 @@ class TestExplainBBBRoute:
|
|
| 259 |
for feat in ("fp_341", "fp_902", "fp_77"):
|
| 260 |
assert feat in out["rationale"]
|
| 261 |
assert "permeable" in out["rationale"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 259 |
for feat in ("fp_341", "fp_902", "fp_77"):
|
| 260 |
assert feat in out["rationale"]
|
| 261 |
assert "permeable" in out["rationale"]
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
class TestExplainEEGRoute:
    """Day-8 T1B: POST /explain/eeg."""

    def test_returns_200_with_template_source(self, monkeypatch):
        # Kill-switch forces the deterministic template path.
        monkeypatch.setenv("NEUROBRIDGE_DISABLE_LLM", "1")
        payload = {
            "rows": 30,
            "columns": 95,
            "duration_sec": 4.32,
            "mlflow_run_id": "abc12345",
            "user_question": "Why were epochs dropped?",
        }
        response = client.post("/explain/eeg", json=payload)
        assert response.status_code == 200, response.text
        data = response.json()
        assert data["source"] == "template"
        assert data["model"] is None
        # Template output must echo the key pipeline metrics.
        for expected in ("30", "95"):
            assert expected in data["rationale"]
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
class TestExplainMRIRoute:
    """Day-8 T1B: POST /explain/mri."""

    def test_returns_200_with_template_source(self, monkeypatch):
        # Kill-switch forces the deterministic template path.
        monkeypatch.setenv("NEUROBRIDGE_DISABLE_LLM", "1")
        payload = {
            "site_gap_pre": 5.0004,
            "site_gap_post": 0.0015,
            "reduction_factor": 3290.0,
            "n_subjects": 6,
            "user_question": "Why does ComBat matter?",
        }
        response = client.post("/explain/mri", json=payload)
        assert response.status_code == 200, response.text
        data = response.json()
        assert data["source"] == "template"
        # Template output must echo the headline ComBat KPIs.
        for expected in ("3290", "6"):
            assert expected in data["rationale"]
|