mekosotto Claude Opus 4.7 (1M context) committed on
Commit
fc4e33b
·
1 Parent(s): 5e9f487

feat(frontend): AI Assistant tab — natural-language explainer

Browse files

- New 4th tab in main(): BBB / EEG / MRI / AI Assistant.
- _render_ai_assistant_tab pulls last_bbb_prediction from session
state, shows a snapshot caption, lets the user pick from 3 preset
questions or type a custom one, POSTs to /explain/bbb, and renders
a reverse-chronological history (capped at 10).
- Each history entry shows source (llm | template) and model so
jurors can audit which path served each rationale.
- Empty state when no prediction yet: explicit prompt to run BBB tab
first.
- No new tests; covered by 2 existing import-smoke tests.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>

Files changed (1) hide show
  1. src/frontend/app.py +90 -1
src/frontend/app.py CHANGED
@@ -639,6 +639,92 @@ def _render_combat_diagnostics(result: dict) -> None:
639
  )
640
 
641
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
642
  def main() -> None:
643
  """Streamlit entrypoint. Idempotent — Streamlit re-runs on every interaction."""
644
  st.set_page_config(
@@ -660,10 +746,11 @@ def main() -> None:
660
  "Run `uvicorn src.api.main:app --port 8000` or `docker compose up`."
661
  )
662
 
663
- bbb_tab, eeg_tab, mri_tab = st.tabs([
664
  "Molecule (BBB)",
665
  "Signal (EEG)",
666
  "Image (MRI)",
 
667
  ])
668
 
669
  with bbb_tab:
@@ -672,6 +759,8 @@ def main() -> None:
672
  _render_eeg_tab()
673
  with mri_tab:
674
  _render_mri_tab()
 
 
675
 
676
 
677
  if __name__ == "__main__":
 
639
  )
640
 
641
 
642
def _render_ai_assistant_tab() -> None:
    """Day-7 T3C: chat-style explainer for the most recent BBB prediction.

    Reads ``last_bbb_prediction`` from ``st.session_state`` (written by the
    BBB tab), lets the user pick a preset question or type a custom one,
    POSTs the prediction context to ``/explain/bbb``, and renders a
    reverse-chronological Q/A history capped at the 10 most recent entries.
    Each answer is annotated with its source (``llm`` | ``template``) and
    model so the serving path can be audited.
    """
    _render_section(
        "AI Assistant",
        "Natural-language rationale (LLM or deterministic template)",
        "Pulls the most recent BBB prediction from this session and asks "
        "the explainer to justify it. Falls back to a deterministic, "
        "auditable template when no LLM is configured."
    )

    last = st.session_state.get("last_bbb_prediction")
    if last is None:
        # Empty state: the explainer has nothing to talk about until the
        # BBB tab has produced a prediction.
        st.info(
            "Run a BBB prediction first (BBB tab → Predict button), "
            "then come back here to ask the assistant about it."
        )
        return

    # Snapshot card so the user knows which prediction is being explained
    st.caption(
        f"Latest prediction: **{last['label_text']}** "
        f"({float(last['confidence']) * 100:.0f}% confident) · "
        f"Top SHAP: {', '.join(f['feature'] for f in last.get('top_features', [])[:3])}"
    )

    PRESETS = [
        "Why was this molecule predicted as permeable?",
        "Which features pushed the verdict the most?",
        "Is this prediction trustworthy given the drift signal?",
    ]
    preset = st.selectbox("Preset question", options=PRESETS, key="ai_preset")
    custom = st.text_input(
        "Or type your own question (optional)",
        value="",
        key="ai_custom",
        help="Custom questions only affect the LLM path; the template gives a generic SHAP-driven rationale either way.",
    )
    # Custom text wins when non-blank; otherwise fall back to the preset.
    question = custom.strip() or preset

    if st.button("Ask the AI Assistant", type="primary", key="ai_ask"):
        with st.spinner("Composing rationale…"):
            try:
                body = {
                    "smiles": last.get("smiles", ""),
                    "label": last["label"],
                    "label_text": last["label_text"],
                    "confidence": last["confidence"],
                    "top_features": last.get("top_features", []),
                    "calibration": last.get("calibration"),
                    "drift_z": last.get("drift_z"),
                    "user_question": question,
                }
                # The /predict/bbb response payload doesn't include the
                # user-supplied SMILES (only label/confidence/etc.), so
                # pull it from the input widget for paper-trail accuracy.
                # Streamlit text inputs persist via st.session_state.
                if not body["smiles"]:
                    body["smiles"] = st.session_state.get("bbb_smiles", "")
                resp = _post("/explain/bbb", body)
            except httpx.HTTPStatusError as e:
                st.error(
                    f"Explainer failed (HTTP {e.response.status_code}): "
                    f"{e.response.text}"
                )
                return
            except httpx.RequestError as e:
                st.error(f"Cannot reach FastAPI at {_API_URL}: {e!r}")
                return

        history = st.session_state.setdefault("explain_history", [])
        history.insert(0, (question, resp))
        # Bug fix: cap the STORED history at 10, not just the rendered
        # slice below — otherwise session state grows without bound
        # across interactions, contradicting the documented cap.
        del history[10:]

    # Render history (most recent first)
    history = st.session_state.get("explain_history", [])
    if history:
        st.markdown("### Conversation")
        for q, r in history[:10]:  # cap at 10 most recent
            with st.container():
                st.markdown(f"**Q:** {q}")
                st.markdown(f"**A:** {r['rationale']}")
                source = r.get("source", "?")
                model = r.get("model") or "—"
                st.caption(f"Source: `{source}` · Model: `{model}`")
                st.divider()
728
  def main() -> None:
729
  """Streamlit entrypoint. Idempotent — Streamlit re-runs on every interaction."""
730
  st.set_page_config(
 
746
  "Run `uvicorn src.api.main:app --port 8000` or `docker compose up`."
747
  )
748
 
749
+ bbb_tab, eeg_tab, mri_tab, assistant_tab = st.tabs([
750
  "Molecule (BBB)",
751
  "Signal (EEG)",
752
  "Image (MRI)",
753
+ "AI Assistant",
754
  ])
755
 
756
  with bbb_tab:
 
759
  _render_eeg_tab()
760
  with mri_tab:
761
  _render_mri_tab()
762
+ with assistant_tab:
763
+ _render_ai_assistant_tab()
764
 
765
 
766
  if __name__ == "__main__":