fix(demo): wire every showcase path end-to-end with auto-resolved artifacts
Goal: every endpoint and every Streamlit tab must work in a 2-minute
hackathon demo without requiring the demoer to fetch external data.
Code fixes:
- src/api/routes.py + src/agents/tools.py: fix two stale references —
bbb_model.load (not load_model) and bbb_model.predict_with_proba (not
predict_one); also corrects the agent-side default BBB artifact path
from .pkl to .joblib so the auto-resolve in adjust_drug_dose works.
- src/api/routes.py: replace hard-coded _AGENT_DEFAULT_MODEL = google/
gemini-2.0-flash-exp:free (now 404 on OpenRouter) with a fallback
chain probed at orchestrator-build time. Picks the first model that
returns a non-404/429 ping. Override via NEUROBRIDGE_AGENT_MODEL
(single id) or NEUROBRIDGE_AGENT_MODEL_CHAIN (csv).
- src/llm/explainer.py: prepend openai/gpt-oss-20b:free (verified 2026-
05-02) to the explainer fallback chain so /explain/* doesn't fall
back to the deterministic template when free-tier ids churn.
- src/frontend/app.py: drop the env-var/'stub'/'demo' captions from the
EEG, MRI, and Researcher sections — feature copy only, no internals
exposed in the UI.
Demo smoke (TestClient, every showcase path):
18/18 passes — /health · /predict/{bbb,mri[volumetric+resnet18_2d],
eeg,bbb_permeability_map} · /fusion/predict ·
/research/drug_dose_adjustment · /pipeline/{bbb,eeg,mri,mri/diagnostics}
· /explain/{bbb,eeg,mri} · /experiments/runs · /diag/agent (7 tools)
· /agent/run live (auto-picked openai/gpt-oss-20b:free, BBB->RAG
trace, Turkish synthesis).
Test suite: 362 passed (orchestrator_live skipped — flaky on free tier).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
- .gitignore +4 -0
- src/agents/tools.py +3 -3
- src/api/routes.py +47 -4
- src/frontend/app.py +4 -13
- src/llm/explainer.py +1 -0
- tests/fixtures/mri_sample/subject_0_axial.png +0 -0
|
@@ -45,3 +45,7 @@ data/processed/faiss_index/
|
|
| 45 |
|
| 46 |
# Plan: external-assets-integration — clinical RAG corpus and pre-built TF-IDF index
|
| 47 |
data/external_rag/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
|
| 46 |
# Plan: external-assets-integration — clinical RAG corpus and pre-built TF-IDF index
|
| 47 |
data/external_rag/
|
| 48 |
+
|
| 49 |
+
# User-local files (presentations, Office lock files)
|
| 50 |
+
*.pptx
|
| 51 |
+
~$*
|
|
@@ -233,10 +233,10 @@ def _make_dose_adjuster_executor() -> Callable[[DrugDoseAdjustmentInput], DrugDo
|
|
| 233 |
try:
|
| 234 |
from src.models import bbb_model
|
| 235 |
import os as _os
|
| 236 |
-
artifact = Path(_os.environ.get("BBB_MODEL_PATH", "data/processed/bbb_model.pkl"))
|
| 237 |
if artifact.exists():
|
| 238 |
-
model = bbb_model.load_model(artifact)
|
| 239 |
-
pred = bbb_model.predict_one(model, inp.smiles)
|
| 240 |
drug_permeable = bool(pred["label"] == 1)
|
| 241 |
except (FileNotFoundError, ValueError, KeyError) as e:
|
| 242 |
logger.warning(
|
|
|
|
| 233 |
try:
|
| 234 |
from src.models import bbb_model
|
| 235 |
import os as _os
|
| 236 |
+
artifact = Path(_os.environ.get("BBB_MODEL_PATH", "data/processed/bbb_model.joblib"))
|
| 237 |
if artifact.exists():
|
| 238 |
+
model = bbb_model.load(artifact)
|
| 239 |
+
pred = bbb_model.predict_with_proba(model, inp.smiles)
|
| 240 |
drug_permeable = bool(pred["label"] == 1)
|
| 241 |
except (FileNotFoundError, ValueError, KeyError) as e:
|
| 242 |
logger.warning(
|
|
@@ -374,8 +374,8 @@ def research_drug_dose_adjustment(req: DrugDoseAdjustmentRequest) -> DrugDoseAdj
|
|
| 374 |
try:
|
| 375 |
artifact = _bbb_model_path()
|
| 376 |
if artifact.exists():
|
| 377 |
-
model = bbb_model.load_model(artifact)
|
| 378 |
-
bbb_pred = bbb_model.predict_one(model, req.smiles)
|
| 379 |
drug_permeable = bool(bbb_pred["label"] == 1)
|
| 380 |
except (FileNotFoundError, ValueError, KeyError) as e:
|
| 381 |
logger.warning("could not auto-resolve BBB permeability for smiles=%s: %s", req.smiles, e)
|
|
@@ -710,7 +710,37 @@ agent_router = APIRouter(prefix="/agent")
|
|
| 710 |
|
| 711 |
_DEFAULT_RAG_INDEX_DIR = Path("data/processed/faiss_index")
|
| 712 |
_AGENT_MODEL_ENV = "NEUROBRIDGE_AGENT_MODEL"
|
| 713 |
-
_AGENT_DEFAULT_MODEL = "google/gemini-2.0-flash-exp:free"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 714 |
|
| 715 |
|
| 716 |
def _build_orchestrator():
|
|
@@ -742,7 +772,20 @@ def _build_orchestrator():
|
|
| 742 |
rag_index_dir=rag_dir,
|
| 743 |
clinical_rag_index_path=clinical_idx if clinical_idx.exists() else None,
|
| 744 |
)
|
| 745 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 746 |
return Orchestrator(
|
| 747 |
llm_client=client,
|
| 748 |
tools=tools,
|
|
|
|
| 374 |
try:
|
| 375 |
artifact = _bbb_model_path()
|
| 376 |
if artifact.exists():
|
| 377 |
+
model = bbb_model.load(artifact)
|
| 378 |
+
bbb_pred = bbb_model.predict_with_proba(model, req.smiles)
|
| 379 |
drug_permeable = bool(bbb_pred["label"] == 1)
|
| 380 |
except (FileNotFoundError, ValueError, KeyError) as e:
|
| 381 |
logger.warning("could not auto-resolve BBB permeability for smiles=%s: %s", req.smiles, e)
|
|
|
|
| 710 |
|
| 711 |
_DEFAULT_RAG_INDEX_DIR = Path("data/processed/faiss_index")
|
| 712 |
_AGENT_MODEL_ENV = "NEUROBRIDGE_AGENT_MODEL"
|
| 713 |
+
_AGENT_DEFAULT_MODEL = "openai/gpt-oss-20b:free"
|
| 714 |
+
# Fallback chain probed at orchestrator-build time. First model returning a
|
| 715 |
+
# non-404/429 ping wins. Override via NEUROBRIDGE_AGENT_MODEL env (single id)
|
| 716 |
+
# or NEUROBRIDGE_AGENT_MODEL_CHAIN (comma-separated).
|
| 717 |
+
_AGENT_FALLBACK_CHAIN: tuple[str, ...] = (
|
| 718 |
+
"openai/gpt-oss-20b:free",
|
| 719 |
+
"minimax/minimax-m2.5:free",
|
| 720 |
+
"tencent/hy3-preview:free",
|
| 721 |
+
"inclusionai/ling-2.6-1t:free",
|
| 722 |
+
"nvidia/nemotron-3-super-120b-a12b:free",
|
| 723 |
+
"qwen/qwen3-next-80b-a3b-instruct:free",
|
| 724 |
+
"google/gemma-4-31b-it:free",
|
| 725 |
+
"meta-llama/llama-3.3-70b-instruct:free",
|
| 726 |
+
)
|
| 727 |
+
|
| 728 |
+
|
| 729 |
+
def _pick_working_agent_model(client: Any, candidates: tuple[str, ...]) -> str:
|
| 730 |
+
"""Return the first candidate that responds to a tiny ping; else last one."""
|
| 731 |
+
for m in candidates:
|
| 732 |
+
try:
|
| 733 |
+
client.chat.completions.create(
|
| 734 |
+
model=m,
|
| 735 |
+
messages=[{"role": "user", "content": "OK"}],
|
| 736 |
+
max_tokens=4, temperature=0,
|
| 737 |
+
)
|
| 738 |
+
logger.info("agent model selected: %s", m)
|
| 739 |
+
return m
|
| 740 |
+
except Exception as e:
|
| 741 |
+
logger.info("agent model unavailable: %s (%s)", m, type(e).__name__)
|
| 742 |
+
logger.warning("no agent model responded; falling back to %s", candidates[-1])
|
| 743 |
+
return candidates[-1]
|
| 744 |
|
| 745 |
|
| 746 |
def _build_orchestrator():
|
|
|
|
| 772 |
rag_index_dir=rag_dir,
|
| 773 |
clinical_rag_index_path=clinical_idx if clinical_idx.exists() else None,
|
| 774 |
)
|
| 775 |
+
# Resolve agent model. NEUROBRIDGE_AGENT_MODEL overrides; otherwise probe
|
| 776 |
+
# the fallback chain (NEUROBRIDGE_AGENT_MODEL_CHAIN env to override the
|
| 777 |
+
# candidate list) and pick the first one that responds. Demo robustness:
|
| 778 |
+
# OpenRouter free-tier IDs churn; this avoids hard-coding a stale id.
|
| 779 |
+
explicit = os.environ.get(_AGENT_MODEL_ENV)
|
| 780 |
+
if explicit:
|
| 781 |
+
model = explicit
|
| 782 |
+
else:
|
| 783 |
+
chain_raw = os.environ.get("NEUROBRIDGE_AGENT_MODEL_CHAIN")
|
| 784 |
+
chain = (
|
| 785 |
+
tuple(s.strip() for s in chain_raw.split(",") if s.strip())
|
| 786 |
+
if chain_raw else _AGENT_FALLBACK_CHAIN
|
| 787 |
+
)
|
| 788 |
+
model = _pick_working_agent_model(client, chain)
|
| 789 |
return Orchestrator(
|
| 790 |
llm_client=client,
|
| 791 |
tools=tools,
|
|
@@ -1208,10 +1208,7 @@ def _render_eeg_tab() -> None:
|
|
| 1208 |
"Input FIF/EDF path",
|
| 1209 |
"tests/fixtures/eeg_sample.fif",
|
| 1210 |
key="eeg_in",
|
| 1211 |
-
help=(
|
| 1212 |
-
"Defaults to the bundled EEG fixture so the demo runs out of "
|
| 1213 |
-
"the box. Replace with your own .fif/.edf path on a real run."
|
| 1214 |
-
),
|
| 1215 |
)
|
| 1216 |
eeg_out = st.text_input(
|
| 1217 |
"Output Parquet path",
|
|
@@ -1320,10 +1317,6 @@ def _render_mri_tab() -> None:
|
|
| 1320 |
|
| 1321 |
st.markdown("#### MRI Image Model")
|
| 1322 |
mri_kind = os.environ.get("MRI_MODEL_KIND", "volumetric_onnx")
|
| 1323 |
-
st.caption(
|
| 1324 |
-
f"Active backend: `{mri_kind}` — set `MRI_MODEL_KIND=resnet18_2d` "
|
| 1325 |
-
"to switch to the 2D 4-class Alzheimer's classifier."
|
| 1326 |
-
)
|
| 1327 |
|
| 1328 |
if mri_kind == "resnet18_2d":
|
| 1329 |
mri_image = st.text_input(
|
|
@@ -1380,8 +1373,7 @@ def _render_mri_tab() -> None:
|
|
| 1380 |
"Resize W", min_value=1, max_value=256, value=64, step=1, key="mri_predict_w"
|
| 1381 |
)
|
| 1382 |
st.caption(
|
| 1383 |
-
"
|
| 1384 |
-
"dummy ONNX fixture from `tests/fixtures/build_dummy_mri_onnx.py`."
|
| 1385 |
)
|
| 1386 |
if st.button("Predict MRI image", key="mri_predict"):
|
| 1387 |
labels = [x.strip() for x in mri_labels.split(",") if x.strip()]
|
|
@@ -1416,9 +1408,8 @@ def _render_mri_tab() -> None:
|
|
| 1416 |
|
| 1417 |
st.markdown("#### EEG Pretrained Classifier")
|
| 1418 |
st.caption(
|
| 1419 |
-
"
|
| 1420 |
-
"
|
| 1421 |
-
"labels are `(control, alzheimers)` — override via `EEG_CLF_LABELS`."
|
| 1422 |
)
|
| 1423 |
eeg_csv = st.text_area(
|
| 1424 |
"EEG features (comma-separated)",
|
|
|
|
| 1208 |
"Input FIF/EDF path",
|
| 1209 |
"tests/fixtures/eeg_sample.fif",
|
| 1210 |
key="eeg_in",
|
| 1211 |
+
help="Path to a .fif/.edf EEG recording on the server filesystem.",
|
|
|
|
|
|
|
|
|
|
| 1212 |
)
|
| 1213 |
eeg_out = st.text_input(
|
| 1214 |
"Output Parquet path",
|
|
|
|
| 1317 |
|
| 1318 |
st.markdown("#### MRI Image Model")
|
| 1319 |
mri_kind = os.environ.get("MRI_MODEL_KIND", "volumetric_onnx")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1320 |
|
| 1321 |
if mri_kind == "resnet18_2d":
|
| 1322 |
mri_image = st.text_input(
|
|
|
|
| 1373 |
"Resize W", min_value=1, max_value=256, value=64, step=1, key="mri_predict_w"
|
| 1374 |
)
|
| 1375 |
st.caption(
|
| 1376 |
+
"Resize target as (D, H, W). Default 64³ matches typical model exports."
|
|
|
|
| 1377 |
)
|
| 1378 |
if st.button("Predict MRI image", key="mri_predict"):
|
| 1379 |
labels = [x.strip() for x in mri_labels.split(",") if x.strip()]
|
|
|
|
| 1408 |
|
| 1409 |
st.markdown("#### EEG Pretrained Classifier")
|
| 1410 |
st.caption(
|
| 1411 |
+
"Pretrained sklearn classifier on EEG band-power features. "
|
| 1412 |
+
"Output: per-class probabilities for `(control, alzheimers)`."
|
|
|
|
| 1413 |
)
|
| 1414 |
eeg_csv = st.text_area(
|
| 1415 |
"EEG features (comma-separated)",
|
|
@@ -63,6 +63,7 @@ _LLM_TEMPERATURE = 0.3
|
|
| 63 |
# Entries marked "currently 429" have valid IDs but were quota-exhausted at
|
| 64 |
# probe time; kept because OpenRouter rate-limits are per-window and recover.
|
| 65 |
_DEFAULT_FREE_MODEL_CHAIN: tuple[str, ...] = (
|
|
|
|
| 66 |
"inclusionai/ling-2.6-1t:free", # ~1T flagship — verified OK, returns content
|
| 67 |
"nvidia/nemotron-3-super-120b-a12b:free", # 120B — verified OK, returns content
|
| 68 |
"minimax/minimax-m2.5:free", # MoE — verified OK, returns content
|
|
|
|
| 63 |
# Entries marked "currently 429" have valid IDs but were quota-exhausted at
|
| 64 |
# probe time; kept because OpenRouter rate-limits are per-window and recover.
|
| 65 |
_DEFAULT_FREE_MODEL_CHAIN: tuple[str, ...] = (
|
| 66 |
+
"openai/gpt-oss-20b:free", # 20B — verified OK 2026-05-02
|
| 67 |
"inclusionai/ling-2.6-1t:free", # ~1T flagship — verified OK, returns content
|
| 68 |
"nvidia/nemotron-3-super-120b-a12b:free", # 120B — verified OK, returns content
|
| 69 |
"minimax/minimax-m2.5:free", # MoE — verified OK, returns content
|
|