deploy: sync from fe7cce1
Browse files- .gitignore +45 -0
- Dockerfile +25 -0
- README.md +125 -3
- app.py +143 -0
- backend/__init__.py +0 -0
- backend/requirements.txt +13 -0
- backend/serve.py +111 -0
- backend/server.py +79 -0
- pyproject.toml +13 -0
- requirements.txt +12 -0
- scripts/build_hf_readme.sh +15 -0
- scripts/deploy_hf_space.py +75 -0
- scripts/deploy_hf_space.sh +30 -0
- scripts/generate_synthea_case.sh +49 -0
- space/header.md +11 -0
- src/recap/__init__.py +1 -0
- src/recap/cases.py +93 -0
- src/recap/config.py +23 -0
- src/recap/demo_patient.py +107 -0
- src/recap/inference/__init__.py +3 -0
- src/recap/inference/gateway.py +63 -0
- src/recap/inference/mi300x_client.py +48 -0
- src/recap/inference/mock.py +22 -0
- src/recap/ingestion/__init__.py +3 -0
- src/recap/ingestion/fhir.py +225 -0
- src/recap/ingestion/image.py +38 -0
- src/recap/ingestion/pdf.py +22 -0
- src/recap/models.py +47 -0
- src/recap/prompts.py +25 -0
- src/recap/reasoner.py +45 -0
- src/recap/retrieval.py +53 -0
- src/recap/timeline.py +17 -0
- src/recap/ui/__init__.py +3 -0
- src/recap/ui/timeline_view.py +77 -0
- static/app.jsx +859 -0
- static/index.html +40 -0
- tests/__init__.py +0 -0
- tests/fixtures/_make_tiny_pdf.py +19 -0
- tests/fixtures/tiny_fhir.json +79 -0
- tests/fixtures/tiny_lab.pdf +68 -0
- tests/test_cases.py +129 -0
- tests/test_inference_gateway.py +63 -0
- tests/test_ingestion_fhir.py +75 -0
- tests/test_ingestion_image.py +35 -0
- tests/test_ingestion_pdf.py +19 -0
- tests/test_mi300x_client.py +85 -0
- tests/test_models.py +34 -0
- tests/test_reasoner.py +63 -0
- tests/test_retrieval.py +41 -0
- tests/test_timeline.py +33 -0
.gitignore
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
docs/
|
| 2 |
+
|
| 3 |
+
# Python
|
| 4 |
+
__pycache__/
|
| 5 |
+
*.py[cod]
|
| 6 |
+
*$py.class
|
| 7 |
+
*.so
|
| 8 |
+
.Python
|
| 9 |
+
.venv/
|
| 10 |
+
venv/
|
| 11 |
+
env/
|
| 12 |
+
.env
|
| 13 |
+
.env.local
|
| 14 |
+
|
| 15 |
+
# Jupyter
|
| 16 |
+
.ipynb_checkpoints/
|
| 17 |
+
|
| 18 |
+
# Hugging Face / model caches
|
| 19 |
+
.cache/
|
| 20 |
+
*.pt
|
| 21 |
+
*.bin
|
| 22 |
+
*.safetensors
|
| 23 |
+
hf_cache/
|
| 24 |
+
|
| 25 |
+
# HF Space deploy artifact — generated by scripts/build_hf_readme.sh
|
| 26 |
+
space/README.md
|
| 27 |
+
|
| 28 |
+
# Data — generated bundles and Synthea outputs are not tracked.
|
| 29 |
+
# Only manifest.json files and curated images are kept in the repo.
|
| 30 |
+
data/cases/*/fhir.json
|
| 31 |
+
data/cases/*/synthea-output/
|
| 32 |
+
data/cases/*/docs/
|
| 33 |
+
data/raw/
|
| 34 |
+
data/cache/
|
| 35 |
+
*.dcm
|
| 36 |
+
*.svs
|
| 37 |
+
|
| 38 |
+
# OS
|
| 39 |
+
.DS_Store
|
| 40 |
+
Thumbs.db
|
| 41 |
+
|
| 42 |
+
# IDE
|
| 43 |
+
.vscode/
|
| 44 |
+
.idea/
|
| 45 |
+
*.swp
|
Dockerfile
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.11-slim
|
| 2 |
+
|
| 3 |
+
# System deps for pdf/image parsing.
|
| 4 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 5 |
+
libxml2-dev libxslt1-dev \
|
| 6 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 7 |
+
|
| 8 |
+
WORKDIR /app
|
| 9 |
+
|
| 10 |
+
# HF Spaces runs as a non-root user — make sure caches go somewhere writable.
|
| 11 |
+
ENV HF_HOME=/app/.cache/huggingface \
|
| 12 |
+
TRANSFORMERS_CACHE=/app/.cache/huggingface \
|
| 13 |
+
PYTHONUNBUFFERED=1 \
|
| 14 |
+
PYTHONPATH=/app/src
|
| 15 |
+
|
| 16 |
+
COPY requirements.txt ./
|
| 17 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 18 |
+
|
| 19 |
+
COPY . .
|
| 20 |
+
|
| 21 |
+
# Default to mock backend on HF until MI300X tunnel is configured via env var.
|
| 22 |
+
ENV RECAP_BACKEND=mock
|
| 23 |
+
|
| 24 |
+
EXPOSE 7860
|
| 25 |
+
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
|
@@ -1,10 +1,132 @@
|
|
| 1 |
---
|
| 2 |
title: Recap
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
colorTo: purple
|
| 6 |
sdk: docker
|
|
|
|
| 7 |
pinned: false
|
|
|
|
|
|
|
| 8 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
|
| 10 |
-
|
|
|
|
| 1 |
---
|
| 2 |
title: Recap
|
| 3 |
+
emoji: 🩺
|
| 4 |
+
colorFrom: blue
|
| 5 |
colorTo: purple
|
| 6 |
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
pinned: false
|
| 9 |
+
license: mit
|
| 10 |
+
short_description: Recap reads the whole chart so you don't have to.
|
| 11 |
---
|
| 12 |
+
---
|
| 13 |
+
title: Recap
|
| 14 |
+
emoji: 🩺
|
| 15 |
+
colorFrom: blue
|
| 16 |
+
colorTo: purple
|
| 17 |
+
sdk: docker
|
| 18 |
+
app_port: 7860
|
| 19 |
+
pinned: false
|
| 20 |
+
license: mit
|
| 21 |
+
short_description: Recap reads the whole chart so you don't have to.
|
| 22 |
+
---
|
| 23 |
+
# Recap
|
| 24 |
+
|
| 25 |
+
> *Reads the whole chart so you don't have to.*
|
| 26 |
+
|
| 27 |
+
Drop in a patient's scattered medical records — lab PDFs, scans, photos, discharge summaries — and Recap gives you back two things:
|
| 28 |
+
|
| 29 |
+
1. **A chronological timeline** of every event, color-coded by type
|
| 30 |
+
2. **A chat box** where you can ask plain-language questions, with every answer cited to the exact source page or lab row
|
| 31 |
+
|
| 32 |
+
No diagnosis. No treatment. Just *"read everything and answer questions about what's been read."*
|
| 33 |
+
|
| 34 |
+
## The hackathon angle
|
| 35 |
+
|
| 36 |
+
Recap is built for the [AMD x LabLab.ai Developer Hackathon](https://lablab.ai/ai-hackathons/amd-developer) (May 2026). The technical headline:
|
| 37 |
+
|
| 38 |
+
> **The only GPU with enough memory to keep a patient's whole record co-resident with the reasoner.**
|
| 39 |
+
|
| 40 |
+
The premium-mode backend runs **MedGemma-27B-MM** (medical multimodal specialist) and **Qwen-32B** (reasoning + multilingual orchestrator) **co-resident on a single AMD MI300X (192 GB HBM3)** along with cached imaging-foundation embeddings and a 128 K-token KV cache. Impossible on H100/A100 80 GB cards.
|
| 41 |
+
|
| 42 |
+
The public Hugging Face Space runs a lite version (MedGemma-4B-MM on ZeroGPU H200) so anyone can try it.
|
| 43 |
+
|
| 44 |
+
## Architecture
|
| 45 |
+
|
| 46 |
+
```
|
| 47 |
+
┌────────────── HF Space (Gradio) ──────────────┐
|
| 48 |
+
│ 3 preloaded showcase patients │
|
| 49 |
+
│ Plotly timeline + chat with citations │
|
| 50 |
+
└────────────────┬─────────────────┬────────────┘
|
| 51 |
+
│ │
|
| 52 |
+
┌──────────┴──────┐ ┌────────┴───────────┐
|
| 53 |
+
│ ZeroGPU (H200) │ │ AMD MI300X (192GB) │
|
| 54 |
+
│ MedGemma-4B-MM │ │ MedGemma-27B-MM │
|
| 55 |
+
│ Always-on lite │ │ + Qwen-32B reasoner│
|
| 56 |
+
│ │ │ + foundation cache │
|
| 57 |
+
└─────────────────┘ └────────────────────┘
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
## Project structure
|
| 61 |
+
|
| 62 |
+
```
|
| 63 |
+
src/recap/
|
| 64 |
+
├── config.py # env-driven config
|
| 65 |
+
├── models.py # Event, Citation, Patient, Answer
|
| 66 |
+
├── ingestion/
|
| 67 |
+
│ ├── fhir.py # Synthea bundles → events
|
| 68 |
+
│ ├── pdf.py # lab PDFs → page records
|
| 69 |
+
│ └── image.py # medical images → events
|
| 70 |
+
├── timeline.py # chronological event view (TBD)
|
| 71 |
+
├── retrieval.py # BM25 over events (TBD)
|
| 72 |
+
├── inference/ # gateway routing zerogpu vs mi300x (TBD)
|
| 73 |
+
├── reasoner.py # two-stage MedGemma → Qwen (TBD)
|
| 74 |
+
└── ui/ # Gradio components (TBD)
|
| 75 |
+
|
| 76 |
+
backend/ # FastAPI on MI300X (TBD)
|
| 77 |
+
data/cases/ # showcase patients (Synthea + curated images)
|
| 78 |
+
scripts/ # generators + smoke tests
|
| 79 |
+
space/ # HF Space deploy artifacts
|
| 80 |
+
tests/ # 13 passing unit tests
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
## Showcase cases
|
| 84 |
+
|
| 85 |
+
Built from [Synthea](https://github.com/synthetichealth/synthea) (Apache 2.0 synthetic patient generator) paired with condition-matched public imaging:
|
| 86 |
+
|
| 87 |
+
- **Sarah, 67** — kidney decline over 8 years (tests time-axis questions)
|
| 88 |
+
- **Marcus, 54** — suspicious lump → cancer journey (tests multimodal grounding)
|
| 89 |
+
- **Aisha, 29** — immigrant patient with foreign-language records (tests Qwen multilingual)
|
| 90 |
+
|
| 91 |
+
## Running locally
|
| 92 |
+
|
| 93 |
+
```bash
|
| 94 |
+
uv venv .venv --python 3.11
|
| 95 |
+
uv pip install --python .venv/bin/python -r requirements.txt
|
| 96 |
+
.venv/bin/python -m pytest tests/ -v # 13 passing
|
| 97 |
+
.venv/bin/python app.py # local Gradio at :7860
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
Environment variables (all prefixed `RECAP_*`):
|
| 101 |
+
|
| 102 |
+
| Var | Default | Meaning |
|
| 103 |
+
|---|---|---|
|
| 104 |
+
| `RECAP_BACKEND` | `zerogpu` | One of `zerogpu`, `mi300x`, `mock` |
|
| 105 |
+
| `RECAP_MI300X_URL` | — | Premium-mode backend URL (set when the MI300X box is up) |
|
| 106 |
+
| `RECAP_MEDGEMMA_LITE` | `google/medgemma-1.5-4b-it` | Public-Space model |
|
| 107 |
+
| `RECAP_MEDGEMMA_PREMIUM` | `google/medgemma-27b-it` | MI300X model |
|
| 108 |
+
| `RECAP_QWEN` | `Qwen/Qwen3.6-27B` | Reasoner model — latest dense Qwen (Apr 2026), matched 27B class to MedGemma. Fallbacks: `Qwen/Qwen3-32B`, `Qwen/Qwen3-14B`, `Qwen/Qwen3.6-35B-A3B` |
|
| 109 |
+
|
| 110 |
+
## Hugging Face Space deployment
|
| 111 |
+
|
| 112 |
+
The HF Space requires YAML frontmatter at the top of its README, which GitHub renders as an ugly metadata table. To keep the GitHub README clean and the HF README correct, the frontmatter lives in `space/header.md` and the deploy script assembles a combined `space/README.md` before pushing to the HF Space remote:
|
| 113 |
+
|
| 114 |
+
```bash
|
| 115 |
+
./scripts/build_hf_readme.sh # writes space/README.md
|
| 116 |
+
# then push space/README.md to the HF Space repo
|
| 117 |
+
```
|
| 118 |
+
|
| 119 |
+
## Tech stack
|
| 120 |
+
|
| 121 |
+
- **Models:** Google MedGemma 1.5 (4B-MM lite, 27B-MM premium), Alibaba **Qwen 3.6-27B** (latest, released 2026-04-22)
|
| 122 |
+
- **Serving:** vLLM-on-ROCm on MI300X, HF Transformers + ZeroGPU `@spaces.GPU` on the Space
|
| 123 |
+
- **Frontend:** Gradio 4.44, Plotly
|
| 124 |
+
- **Data:** Synthea synthetic FHIR + public CC0 imaging, packaged as an HF Dataset
|
| 125 |
+
|
| 126 |
+
## Disclaimer
|
| 127 |
+
|
| 128 |
+
**Not for clinical use.** Demo only. All patients are synthetic — no real PHI is touched, stored, or processed. The model card for MedGemma explicitly forbids unmodified clinical deployment.
|
| 129 |
+
|
| 130 |
+
## License
|
| 131 |
|
| 132 |
+
MIT (this repo). Upstream models retain their respective licenses (MedGemma → Google's Health AI Developer Foundations terms; Qwen → Tongyi Qianwen License).
|
app.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Recap — FastAPI app entry point. Serves the React UI + JSON inference API.
|
| 2 |
+
|
| 3 |
+
GET / → static index.html (React via CDN, Babel-compiled JSX in browser)
|
| 4 |
+
GET /static/* → static assets (app.jsx, css)
|
| 5 |
+
GET /api/patients → list of patients with full event timelines
|
| 6 |
+
POST /api/answer → run the inference gateway and return a cited answer
|
| 7 |
+
GET /api/health → liveness + backend selection
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import zlib
from pathlib import Path

from fastapi import FastAPI
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel

from recap.cases import load_case
from recap.config import load as load_config
from recap.demo_patient import build_demo_patient
from recap.inference import answer as answer_question
from recap.models import Patient
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
CFG = load_config()
|
| 25 |
+
ROOT = Path(__file__).parent
|
| 26 |
+
STATIC_DIR = ROOT / "static"
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _discover_cases() -> dict[str, Patient]:
    """Scan the configured cases directory for patient folders with a manifest.

    A case that fails to load is logged and skipped so one bad folder cannot
    break the whole API. When nothing loadable is found, a single built-in
    demo patient is returned so the UI always has something to show.
    """
    found: dict[str, Patient] = {}
    cases_dir = Path(CFG.cases_dir)
    if cases_dir.exists():
        for case_dir in sorted(cases_dir.iterdir()):
            if not (case_dir / "manifest.json").exists():
                continue
            try:
                found[case_dir.name] = load_case(CFG.cases_dir, case_dir.name)
            except Exception as e:  # noqa: BLE001 — keep one bad case from breaking the whole API
                print(f"[recap] failed to load case {case_dir.name}: {e}")
    if not found:
        found["demo"] = build_demo_patient()
    return found
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
PATIENTS: dict[str, Patient] = _discover_cases()
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
app = FastAPI(title="Recap", version="0.1.0")
|
| 48 |
+
|
| 49 |
+
app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class AnswerRequest(BaseModel):
    """JSON body for POST /api/answer: which patient, and what to ask."""

    patient_id: str
    question: str
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@app.get("/")
def index() -> FileResponse:
    """Serve the single-page React UI shell (JSX compiled in the browser)."""
    return FileResponse(STATIC_DIR / "index.html")
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@app.get("/api/patients")
def list_patients() -> JSONResponse:
    """Serialize all loaded patients in a shape the React app expects.

    Each entry carries identity fields, derived summary/hook/tags, and the
    full event timeline so the frontend needs no follow-up requests.
    """
    out = []
    for pid, p in PATIENTS.items():
        # Fallback MRN must be stable across restarts. Built-in hash() is
        # randomized per interpreter process (PYTHONHASHSEED), so it would
        # show a different MRN after every redeploy; zlib.crc32 is
        # deterministic for the same patient id.
        fallback_mrn = f"MRN-{zlib.crc32(p.id.encode('utf-8')) % 9999999:07d}"
        out.append({
            "id": p.id,
            "display_name": p.display_name,
            "age": p.age,
            "gender": p.gender,
            "mrn": getattr(p, "mrn", None) or fallback_mrn,
            "summary": _patient_summary(p),
            "hook": _patient_hook(p),
            "tags": _patient_tags(p),
            "events": [_event_to_dict(e) for e in p.events],
        })
    return JSONResponse(out)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@app.post("/api/answer")
def answer(req: AnswerRequest) -> JSONResponse:
    """Run the inference gateway for one question and return a cited answer.

    Unknown patient ids get a 404 JSON error rather than an exception.
    """
    patient = PATIENTS.get(req.patient_id)
    if patient is None:
        return JSONResponse({"error": f"unknown patient {req.patient_id}"}, status_code=404)
    result = answer_question(req.question, patient.events)
    citations = [
        {"source_id": c.source_id, "page": c.page, "snippet": c.snippet}
        for c in result.citations
    ]
    return JSONResponse({"text": result.text, "citations": citations})
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@app.get("/api/health")
def health() -> JSONResponse:
    """Liveness probe: reports the selected backend and which patients loaded."""
    payload = {
        "ok": True,
        "backend": CFG.backend,
        "patient_count": len(PATIENTS),
        "patient_ids": list(PATIENTS.keys()),
    }
    return JSONResponse(payload)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# ─── Helpers ───────────────────────────────────────────────────────────
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _event_to_dict(e) -> dict:
|
| 110 |
+
return {
|
| 111 |
+
"id": e.id,
|
| 112 |
+
"date": e.date.date().isoformat(),
|
| 113 |
+
"category": e.category,
|
| 114 |
+
"title": e.title,
|
| 115 |
+
"source": e.source,
|
| 116 |
+
"body": e.body,
|
| 117 |
+
"page": e.metadata.get("page"),
|
| 118 |
+
"snippet": e.metadata.get("snippet"),
|
| 119 |
+
"flag": e.metadata.get("flag"),
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _patient_summary(p: Patient) -> str:
    """One-sentence dossier summary. Real cases override via manifest.summary later."""
    count = len(p.events)
    years_seen = sorted({e.date.year for e in p.events})
    if years_seen:
        span = f"{years_seen[0]}–{years_seen[-1]}"
    else:
        span = "no record"
    return f"{count} clinical events on file from {span}."
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def _patient_hook(p: Patient) -> str:
    """Teaser line for the patient card; empty until manifests supply one."""
    return ""
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _patient_tags(p: Patient) -> list[str]:
    """Surface the most recent diagnosis titles as tag chips (at most three)."""
    newest_first = sorted(p.events, key=lambda ev: ev.date, reverse=True)
    diagnoses = [ev.title for ev in newest_first if ev.category == "diagnosis"]
    return diagnoses[:3]
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
if __name__ == "__main__":
|
| 142 |
+
import uvicorn
|
| 143 |
+
uvicorn.run(app, host="0.0.0.0", port=7860)
|
backend/__init__.py
ADDED
|
File without changes
|
backend/requirements.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Backend deps for the MI300X premium-mode FastAPI server.
|
| 2 |
+
# torch is installed separately on the droplet via the ROCm-flavored wheel:
|
| 3 |
+
# pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/rocm6.2
|
| 4 |
+
#
|
| 5 |
+
# Everything below is ROCm-agnostic.
|
| 6 |
+
|
| 7 |
+
fastapi==0.115.5
|
| 8 |
+
uvicorn[standard]==0.32.0
|
| 9 |
+
transformers==4.46.0
|
| 10 |
+
accelerate==1.1.1
|
| 11 |
+
pydantic==2.9.2
|
| 12 |
+
sentencepiece==0.2.0
|
| 13 |
+
protobuf==5.28.3
|
backend/serve.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Loads MedGemma-27B-MM + Qwen-32B co-resident on a single AMD MI300X.
|
| 2 |
+
|
| 3 |
+
Designed to run inside the FastAPI server on the droplet. Models are loaded
|
| 4 |
+
lazily (first request triggers load) so the health endpoint is responsive
|
| 5 |
+
even before the heavy weights touch GPU memory.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import os
|
| 11 |
+
import time
|
| 12 |
+
from threading import Lock
|
| 13 |
+
from typing import Any
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from transformers import (
|
| 17 |
+
AutoModelForCausalLM,
|
| 18 |
+
AutoModelForImageTextToText,
|
| 19 |
+
AutoProcessor,
|
| 20 |
+
AutoTokenizer,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
MEDGEMMA_ID = os.getenv("MEDGEMMA_ID", "google/medgemma-27b-it")
|
| 24 |
+
QWEN_ID = os.getenv("QWEN_ID", "Qwen/Qwen3.6-27B")
|
| 25 |
+
|
| 26 |
+
DEVICE = "cuda:0"
|
| 27 |
+
DTYPE = torch.bfloat16
|
| 28 |
+
|
| 29 |
+
_state: dict[str, Any] = {"loaded": False}
|
| 30 |
+
_lock = Lock()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _ensure_loaded() -> None:
    """Load both models into GPU memory once. Idempotent + thread-safe."""
    # Fast path: skip the lock entirely once loading has completed.
    if _state["loaded"]:
        return
    with _lock:
        if _state["loaded"]:  # double-checked
            return

        # Stage 1: MedGemma (multimodal extractor). Peak memory is measured
        # after a synchronize so the number reflects completed allocations.
        t0 = time.time()
        print(f"[serve] loading MedGemma: {MEDGEMMA_ID}", flush=True)
        _state["medgemma_proc"] = AutoProcessor.from_pretrained(MEDGEMMA_ID)
        _state["medgemma"] = AutoModelForImageTextToText.from_pretrained(
            MEDGEMMA_ID, torch_dtype=DTYPE, device_map=DEVICE,
        )
        torch.cuda.synchronize()
        peak_after_mg = torch.cuda.max_memory_allocated() / 1e9
        print(f"[serve] medgemma loaded in {time.time() - t0:.1f}s, peak {peak_after_mg:.1f} GB", flush=True)

        # Stage 2: Qwen (text reasoner), co-resident on the same device.
        t1 = time.time()
        print(f"[serve] loading Qwen: {QWEN_ID}", flush=True)
        _state["qwen_tok"] = AutoTokenizer.from_pretrained(QWEN_ID)
        _state["qwen"] = AutoModelForCausalLM.from_pretrained(
            QWEN_ID, torch_dtype=DTYPE, device_map=DEVICE,
        )
        torch.cuda.synchronize()
        peak = torch.cuda.max_memory_allocated() / 1e9
        print(f"[serve] qwen loaded in {time.time() - t1:.1f}s, total peak {peak:.1f} GB", flush=True)

        # `loaded` flips only after both models are resident, so a concurrent
        # caller blocked on the lock never sees a half-initialized state.
        _state["loaded"] = True
        _state["peak_after_load_gb"] = peak
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def memory_stats() -> dict[str, float]:
    """Snapshot GPU memory usage for the /health endpoint.

    Returns ``{"available": False}`` on CUDA/ROCm-less hosts so health
    checks stay cheap and safe everywhere.
    """
    if not torch.cuda.is_available():
        return {"available": False}
    props = torch.cuda.get_device_properties(0)
    return {
        "available": True,
        "allocated_gb": torch.cuda.memory_allocated() / 1e9,
        "reserved_gb": torch.cuda.memory_reserved() / 1e9,
        "total_gb": props.total_memory / 1e9,
        "peak_after_load_gb": _state.get("peak_after_load_gb"),
        "device_name": torch.cuda.get_device_name(0),
    }
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def medgemma_extract(system: str, user: str, max_new_tokens: int = 384) -> str:
    """First stage of the two-stage reasoner: read records, surface relevant findings."""
    _ensure_loaded()
    proc = _state["medgemma_proc"]
    model = _state["medgemma"]
    conversation = [
        {"role": "system", "content": [{"type": "text", "text": system}]},
        {"role": "user", "content": [{"type": "text", "text": user}]},
    ]
    inputs = proc.apply_chat_template(
        conversation, add_generation_prompt=True, tokenize=True,
        return_dict=True, return_tensors="pt",
    ).to(DEVICE)
    generated = model.generate(
        **inputs, max_new_tokens=max_new_tokens, do_sample=False,
    )
    # Decode only the continuation, not the echoed prompt tokens.
    prompt_len = inputs["input_ids"].shape[-1]
    return proc.decode(generated[0][prompt_len:], skip_special_tokens=True)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def qwen_synthesize(system: str, user: str, max_new_tokens: int = 512) -> str:
    """Second stage: synthesize MedGemma's findings into the final cited answer."""
    _ensure_loaded()
    tok = _state["qwen_tok"]
    model = _state["qwen"]
    conversation = [
        {"role": "system", "content": system},
        {"role": "user", "content": user},
    ]
    prompt = tok.apply_chat_template(
        conversation, add_generation_prompt=True, tokenize=False,
    )
    inputs = tok(prompt, return_tensors="pt").to(DEVICE)
    generated = model.generate(
        **inputs, max_new_tokens=max_new_tokens, do_sample=False,
    )
    # Decode only the continuation, not the echoed prompt tokens.
    prompt_len = inputs["input_ids"].shape[-1]
    return tok.decode(generated[0][prompt_len:], skip_special_tokens=True)
|
backend/server.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Recap MI300X premium-mode backend. Runs on the AMD Developer Cloud droplet.
|
| 2 |
+
|
| 3 |
+
Deploy:
|
| 4 |
+
cd backend
|
| 5 |
+
pip install -r requirements.txt
|
| 6 |
+
# ROCm torch installed separately on the droplet image.
|
| 7 |
+
uvicorn backend.server:app --host 0.0.0.0 --port 8080
|
| 8 |
+
|
| 9 |
+
Then expose to the public Space via ngrok / cloudflared and set
|
| 10 |
+
RECAP_MI300X_URL in the Space's env to the public URL.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from contextlib import asynccontextmanager
|
| 17 |
+
|
| 18 |
+
from fastapi import FastAPI, HTTPException
|
| 19 |
+
from pydantic import BaseModel
|
| 20 |
+
|
| 21 |
+
from backend import serve
|
| 22 |
+
|
| 23 |
+
EAGER_LOAD = os.getenv("RECAP_EAGER_LOAD", "1") == "1"
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Optionally pre-load both models before the server accepts traffic.

    Set RECAP_EAGER_LOAD=0 for a fast boot while debugging. A failed eager
    load is only logged — the first real request will retry the load and
    surface the error there.
    """
    if EAGER_LOAD:
        try:
            serve._ensure_loaded()
        except Exception as exc:  # noqa: BLE001 — defer the failure to first request
            print(f"[server] eager load failed: {exc}", flush=True)
    yield
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
app = FastAPI(title="Recap Premium Backend", version="0.1.0", lifespan=lifespan)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class GenRequest(BaseModel):
    """Generation request shared by the /medgemma and /qwen endpoints."""

    system: str
    user: str
    max_new_tokens: int = 384
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class GenResponse(BaseModel):
    """Generated text returned by either model endpoint."""

    text: str
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@app.post("/medgemma", response_model=GenResponse)
def medgemma(req: GenRequest) -> GenResponse:
    """Run the MedGemma extraction stage; load/generate failures become 500s."""
    try:
        result = serve.medgemma_extract(req.system, req.user, req.max_new_tokens)
    except Exception as e:  # noqa: BLE001
        raise HTTPException(status_code=500, detail=str(e)) from e
    return GenResponse(text=result)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@app.post("/qwen", response_model=GenResponse)
def qwen(req: GenRequest) -> GenResponse:
    """Run the Qwen synthesis stage; load/generate failures become 500s."""
    try:
        result = serve.qwen_synthesize(req.system, req.user, req.max_new_tokens)
    except Exception as e:  # noqa: BLE001
        raise HTTPException(status_code=500, detail=str(e)) from e
    return GenResponse(text=result)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@app.get("/health")
def health() -> dict:
    """Liveness plus model/memory status for the tunnel and the Space gateway."""
    return {
        "ok": True,
        "loaded": serve._state.get("loaded", False),
        "memory": serve.memory_stats(),
        "models": {
            "medgemma_id": serve.MEDGEMMA_ID,
            "qwen_id": serve.QWEN_ID,
        },
    }
|
pyproject.toml
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "recap"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Longitudinal patient-records copilot — recaps a patient's whole chart in seconds. Built for the AMD x LabLab.ai hackathon."
|
| 5 |
+
requires-python = ">=3.11"
|
| 6 |
+
license = "MIT"
|
| 7 |
+
|
| 8 |
+
[tool.setuptools.packages.find]
|
| 9 |
+
where = ["src"]
|
| 10 |
+
|
| 11 |
+
[tool.pytest.ini_options]
|
| 12 |
+
testpaths = ["tests"]
|
| 13 |
+
pythonpath = ["src"]
|
requirements.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.115.5
|
| 2 |
+
uvicorn[standard]==0.32.0
|
| 3 |
+
transformers==4.46.0
|
| 4 |
+
torch==2.5.1
|
| 5 |
+
accelerate==1.1.1
|
| 6 |
+
spaces==0.30.4
|
| 7 |
+
pypdf==5.1.0
|
| 8 |
+
Pillow==11.0.0
|
| 9 |
+
pydantic==2.9.2
|
| 10 |
+
fhir.resources==7.1.0
|
| 11 |
+
httpx==0.27.2
|
| 12 |
+
python-dateutil==2.9.0
|
scripts/build_hf_readme.sh
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Assemble the HF-Space-bound README: prepend the HF YAML frontmatter in
# space/header.md to the GitHub README.md, writing space/README.md.
#
# Run before pushing to the HF Space remote.

set -euo pipefail

repo_root="$(cd "$(dirname "$0")/.." && pwd)"
header="${repo_root}/space/header.md"
body="${repo_root}/README.md"
out="${repo_root}/space/README.md"

cat "${header}" "${body}" > "${out}"
echo "Wrote ${out} ($(wc -l < "${out}") lines)"
|
scripts/deploy_hf_space.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Deploy current working tree to the HF Space via HfApi.upload_folder.
|
| 2 |
+
|
| 3 |
+
Bypasses git push (and macOS keychain credential issues). Uses the locally
|
| 4 |
+
configured HF token. Run after committing locally if you want git history
|
| 5 |
+
on GitHub to match what's on the Space.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import shutil
|
| 11 |
+
import subprocess
|
| 12 |
+
import tempfile
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
|
| 15 |
+
from huggingface_hub import HfApi
|
| 16 |
+
|
| 17 |
+
REPO_ID = "lablab-ai-amd-developer-hackathon/recap"
|
| 18 |
+
ROOT = Path(__file__).resolve().parent.parent
|
| 19 |
+
|
| 20 |
+
IGNORE = {
|
| 21 |
+
".git", ".venv", "venv", "env",
|
| 22 |
+
"__pycache__", ".pytest_cache",
|
| 23 |
+
"docs", "node_modules",
|
| 24 |
+
".DS_Store",
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _build_hf_readme(staging: Path) -> None:
    """Concatenate space/header.md + README.md into staging/README.md."""
    header = (ROOT / "space" / "header.md").read_text()
    body = (ROOT / "README.md").read_text()
    # Strip the GitHub README's own leading YAML frontmatter so the Space
    # README doesn't end up with two frontmatter blocks.
    if body.startswith("---\n"):
        closing = body.find("\n---\n", 4)
        if closing != -1:
            body = body[closing + 5:].lstrip()
    (staging / "README.md").write_text(header + body)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _copy_to_staging(staging: Path) -> None:
    """Mirror the repo's top level into *staging*, skipping IGNORE entries.

    IGNORE names are filtered at every depth for directories (via
    ``ignore_patterns``), matching the original behavior.
    """
    for item in ROOT.iterdir():
        if item.name in IGNORE:
            continue
        target = staging / item.name
        if item.is_dir():
            shutil.copytree(item, target, ignore=shutil.ignore_patterns(*IGNORE))
        else:
            shutil.copy2(item, target)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def main() -> None:
    """Stage the working tree, build the Space README, and upload via HfApi."""
    api = HfApi()

    with tempfile.TemporaryDirectory() as tmpdir:
        staging = Path(tmpdir)
        _copy_to_staging(staging)
        _build_hf_readme(staging)

        # Best-effort short SHA for the commit message; "local" when git
        # isn't available or we're not in a repo (check=False swallows that).
        proc = subprocess.run(
            ["git", "rev-parse", "--short", "HEAD"],
            cwd=ROOT, capture_output=True, text=True, check=False,
        )
        short_sha = proc.stdout.strip()
        message = f"deploy: sync from {short_sha or 'local'}"

        entry_count = len(list(staging.rglob("*")))
        print(f"Uploading {entry_count} entries to {REPO_ID}…")
        api.upload_folder(
            folder_path=str(staging),
            repo_id=REPO_ID,
            repo_type="space",
            commit_message=message,
        )

        print(f"Done. https://huggingface.co/spaces/{REPO_ID}")


if __name__ == "__main__":
    main()
|
scripts/deploy_hf_space.sh
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
set -euo pipefail

# Deploy current master to the HF Space, swapping README.md for the
# frontmatter-prefixed version. master stays clean for GitHub.
#
# Usage: ./scripts/deploy_hf_space.sh
#
# NOTE(review): if any step after `git checkout -B hf-deploy` fails, set -e
# exits while still on hf-deploy with a modified README.md — recover by
# checking out the original branch and restoring README.md by hand.
# NOTE(review): `git symbolic-ref --short HEAD` fails on a detached HEAD;
# this script assumes you deploy from a named branch.

# Always run from the repository root, wherever the script is invoked from.
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
cd "${REPO_ROOT}"

SPACE_REMOTE="space"
SPACE_URL="https://huggingface.co/spaces/lablab-ai-amd-developer-hackathon/recap"

# Register the Space as a git remote on first run (no-op afterwards).
git remote get-url "${SPACE_REMOTE}" >/dev/null 2>&1 || git remote add "${SPACE_REMOTE}" "${SPACE_URL}"

# Produce space/README.md = SDK frontmatter + repo README body.
./scripts/build_hf_readme.sh

# Commit the swapped README on a throwaway branch, force-push it to the
# Space's master, then restore the original branch and README.
CURRENT_BRANCH="$(git symbolic-ref --short HEAD)"
git checkout -B hf-deploy
cp space/README.md README.md
git add README.md
git commit -q -m "deploy: hf space readme with sdk frontmatter" || echo "no readme change"
git push -f "${SPACE_REMOTE}" hf-deploy:master
git checkout "${CURRENT_BRANCH}"
git checkout -- README.md

echo
echo "Pushed to ${SPACE_URL}"
echo "Watch the build at: ${SPACE_URL}?logs=build"
|
scripts/generate_synthea_case.sh
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Generate a Synthea synthetic patient FHIR bundle for a showcase case.
#
# Usage:
#   ./scripts/generate_synthea_case.sh <case_name> [seed]
#
# Examples:
#   ./scripts/generate_synthea_case.sh sarah 3923
#   ./scripts/generate_synthea_case.sh marcus 7711
#
# Requires: java 11+, internet access for the first run.

set -euo pipefail

CASE_NAME="${1:?Usage: $0 <case_name> [seed]}"
SEED="${2:-3923}"

# Always work relative to the repository root.
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
OUT_DIR="${REPO_ROOT}/data/cases/${CASE_NAME}"
WORK_DIR="${OUT_DIR}/synthea-output"
JAR_URL="https://github.com/synthetichealth/synthea/releases/download/v3.3.0/synthea-with-dependencies.jar"

mkdir -p "${WORK_DIR}"
cd "${WORK_DIR}"

# Download the jar once; subsequent runs reuse it.
if [ ! -f synthea-with-dependencies.jar ]; then
  echo "Downloading Synthea jar..."
  curl -fL -o synthea-with-dependencies.jar "${JAR_URL}"
fi

echo "Generating patient with seed ${SEED}..."
java -jar synthea-with-dependencies.jar \
  -p 1 -s "${SEED}" \
  --exporter.fhir.export=true \
  --exporter.csv.export=false \
  --generate.only_alive_patients=true \
  --exporter.years_of_history=10 \
  Massachusetts \
  -a 60-75 -g F

# The first FHIR bundle file is the patient bundle (others are hospital/practitioner).
# FIX: under `set -euo pipefail` a failing `ls | grep | head` inside the
# command substitution aborted the script before the empty-check below could
# run; `|| true` makes PATIENT_BUNDLE simply end up empty instead.
PATIENT_BUNDLE="$(ls output/fhir/*.json 2>/dev/null | grep -v 'hospitalInformation\|practitionerInformation' | head -1 || true)"
if [ -z "${PATIENT_BUNDLE}" ]; then
  echo "ERROR: no patient bundle produced. Check Synthea output above." >&2
  exit 1
fi

cp "${PATIENT_BUNDLE}" "${OUT_DIR}/fhir.json"
echo "Wrote ${OUT_DIR}/fhir.json"
|
space/header.md
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Recap
|
| 3 |
+
emoji: 🩺
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
pinned: false
|
| 9 |
+
license: mit
|
| 10 |
+
short_description: Recap reads the whole chart so you don't have to.
|
| 11 |
+
---
|
src/recap/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
__version__ = "0.1.0"
|
src/recap/cases.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Load a Patient from a case directory (manifest.json + bundles + docs + images).
|
| 2 |
+
|
| 3 |
+
A case directory looks like:
|
| 4 |
+
|
| 5 |
+
data/cases/<case_id>/
|
| 6 |
+
├── manifest.json # required
|
| 7 |
+
├── fhir.json # optional Synthea bundle
|
| 8 |
+
├── docs/ # optional PDF lab/discharge docs
|
| 9 |
+
│ └── lab_2022.pdf
|
| 10 |
+
└── images/ # optional scans/photos
|
| 11 |
+
└── fundus.png
|
| 12 |
+
|
| 13 |
+
If a FHIR bundle is present, the patient's display name, age, and gender
|
| 14 |
+
are pulled from it automatically — manifest can omit `display_name`.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
from datetime import datetime
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
|
| 21 |
+
from recap.ingestion.fhir import load_bundle, load_demographics
|
| 22 |
+
from recap.ingestion.image import load_image_event
|
| 23 |
+
from recap.ingestion.pdf import load_pdf
|
| 24 |
+
from recap.models import Event, Patient
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _normalize_date(s: str) -> str:
|
| 28 |
+
if "T" not in s:
|
| 29 |
+
s = f"{s}T00:00:00+00:00"
|
| 30 |
+
if s.endswith("Z"):
|
| 31 |
+
s = s[:-1] + "+00:00"
|
| 32 |
+
return s
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _events_from_pdf(case_dir: Path, doc: dict) -> list[Event]:
    """Turn one manifest `docs` entry into per-page Events for its PDF.

    Every page of the PDF becomes its own Event sharing the doc's date,
    category, and title; the page number is kept in metadata.
    """
    rel_path = doc["file"]
    source_name = Path(rel_path).name
    page_list = load_pdf(str(case_dir / rel_path), source_id=source_name)
    event_date = datetime.fromisoformat(_normalize_date(doc["date"]))
    events: list[Event] = []
    for page in page_list:
        events.append(Event(
            id=f"pdf-{source_name}-p{page.page_number}",
            date=event_date,
            category=doc.get("category", "note"),
            title=doc.get("title", source_name),
            source=source_name,
            body=page.text,
            metadata={"page": page.page_number},
        ))
    return events
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def load_case(cases_dir: str, case_id: str) -> Patient:
    """Assemble a Patient from a case directory's manifest, bundle, docs, and images.

    Demographics precedence: explicit manifest values win, then values
    derived from the FHIR Patient resource, then the case id as a last
    resort for the display name.
    """
    case_path = Path(cases_dir) / case_id
    manifest = json.loads((case_path / "manifest.json").read_text())
    all_events: list[Event] = []

    # Manifest-provided demographics act as overrides over FHIR-derived ones.
    name = manifest.get("display_name")
    age: int | None = manifest.get("age")
    gender: str | None = manifest.get("gender")

    bundle_rel = manifest.get("fhir_bundle")
    if bundle_rel:
        bundle_path = str(case_path / bundle_rel)
        all_events.extend(load_bundle(bundle_path, source_id=bundle_rel))

        demo = load_demographics(bundle_path)
        if demo is not None:
            name = name or demo.display_name
            if age is None:
                age = demo.age
            gender = gender or demo.gender

    for doc in manifest.get("docs", []):
        all_events.extend(_events_from_pdf(case_path, doc))

    for img in manifest.get("images", []):
        all_events.append(load_image_event(
            str(case_path / img["file"]),
            category=img.get("category", "scan"),
            title=img.get("title", img["file"]),
            date_iso=img["date"],
            source_id=Path(img["file"]).name,
        ))

    return Patient(
        id=manifest["id"],
        display_name=name or manifest["id"],  # final fallback: case_id
        age=age,
        gender=gender,
        events=all_events,
    )
|
src/recap/config.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@dataclass(frozen=True)
class Config:
    """Immutable runtime configuration, resolved once from the environment."""

    backend: str  # "zerogpu" | "mi300x" | "mock"
    mi300x_url: str  # base URL of the MI300X HTTP backend; empty string = unset
    medgemma_lite_id: str  # HF model id for the lightweight MedGemma
    medgemma_premium_id: str  # HF model id for the large MedGemma
    qwen_id: str  # HF model id for the Qwen synthesizer
    cases_dir: str  # directory containing per-case subdirectories


def load() -> Config:
    """Build a Config from RECAP_* environment variables, with defaults.

    NOTE(review): the default model ids are only used when the env vars are
    unset — confirm they correspond to real Hub repos before relying on them.
    """
    return Config(
        backend=os.getenv("RECAP_BACKEND", "zerogpu"),
        mi300x_url=os.getenv("RECAP_MI300X_URL", ""),
        medgemma_lite_id=os.getenv("RECAP_MEDGEMMA_LITE", "google/medgemma-1.5-4b-it"),
        medgemma_premium_id=os.getenv("RECAP_MEDGEMMA_PREMIUM", "google/medgemma-27b-it"),
        qwen_id=os.getenv("RECAP_QWEN", "Qwen/Qwen3.6-27B"),
        cases_dir=os.getenv("RECAP_CASES_DIR", "data/cases"),
    )
|
src/recap/demo_patient.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Synthetic Sarah Johnson — CKD progression over 8 years, generated in memory.
|
| 2 |
+
|
| 3 |
+
Used so the UI is functional before real Synthea data is curated. Once
|
| 4 |
+
`data/cases/sarah/` exists with a manifest, the case loader takes over
|
| 5 |
+
and this is no longer used.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from datetime import datetime, timedelta, timezone
|
| 9 |
+
|
| 10 |
+
from recap.models import Event, Patient
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _ev(eid: str, date: datetime, category: str, title: str, source: str, body: str = "") -> Event:
    """Shorthand Event constructor; an empty body defaults to the title."""
    return Event(
        id=eid,
        date=date,
        category=category,
        title=title,
        source=source,
        body=body if body else title,
    )
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def build_demo_patient() -> Patient:
    """Build a richly-populated synthetic patient for UI demo purposes.

    Returns a fixed, deterministic 8-year CKD-progression history for
    "Sarah Johnson": T2DM diagnosis (2017), first abnormal creatinine
    (2021), stage-3 CKD work-up, retinopathy screening (2023), and
    stable follow-up labs through 2024-2025. All dates are UTC-aware.
    """
    base = datetime(2017, 1, 15, tzinfo=timezone.utc)
    events: list[Event] = []

    # Initial diagnosis (2017): T2DM
    events.append(_ev("dx-1", base, "diagnosis", "Type 2 diabetes mellitus",
                      "fhir.json", "Diagnosis: Type 2 diabetes mellitus, newly identified"))
    events.append(_ev("v-1", base, "visit", "Annual physical exam", "fhir.json"))
    events.append(_ev("med-1", base + timedelta(days=2), "med", "Metformin 500mg BID",
                      "fhir.json", "Prescribed: Metformin 500mg twice daily"))

    # Year 1-3: stable, occasional labs (one draw every ~180 days)
    cr_values = [0.9, 0.95, 1.0, 1.0, 1.05, 1.1]  # creatinine slowly rising
    a1c_values = [7.4, 7.2, 7.0, 7.1, 6.9, 7.3]
    for i, (cr, a1c) in enumerate(zip(cr_values, a1c_values)):
        d = base + timedelta(days=180 * (i + 1))
        events.append(_ev(f"lab-cr-{i}", d, "lab", f"Creatinine: {cr} mg/dL",
                          f"lab_{d.date()}.pdf",
                          f"Creatinine value: {cr} mg/dL (Reference: 0.6-1.2)"))
        events.append(_ev(f"lab-a1c-{i}", d, "lab", f"HbA1c: {a1c}%",
                          f"lab_{d.date()}.pdf",
                          f"HbA1c value: {a1c}% (Target: <7.0)"))

    # Year 4 (2021): first abnormal Cr — kidney decline begins
    decline_start = datetime(2021, 3, 14, tzinfo=timezone.utc)
    events.append(_ev("lab-cr-abnormal", decline_start, "lab", "Creatinine: 1.4 mg/dL (high)",
                      f"lab_{decline_start.date()}.pdf",
                      "Creatinine value: 1.4 mg/dL (FIRST abnormal — reference 0.6-1.2)"))
    events.append(_ev("lab-egfr-abnormal", decline_start, "lab", "eGFR: 52 mL/min/1.73m²",
                      f"lab_{decline_start.date()}.pdf",
                      "eGFR value: 52 (low — stage 3 CKD threshold)"))
    events.append(_ev("rep-cmp-1", decline_start, "report",
                      "Comprehensive metabolic panel",
                      f"lab_{decline_start.date()}.pdf",
                      "Mildly elevated creatinine consistent with stage 3 CKD."))

    # Nephrology referral
    nephro = decline_start + timedelta(days=45)
    events.append(_ev("v-nephro-1", nephro, "visit", "Nephrology consultation",
                      "fhir.json", "Referred for evaluation of declining renal function."))
    events.append(_ev("dx-ckd", nephro, "diagnosis", "Chronic kidney disease, stage 3",
                      "fhir.json", "Diagnosis: CKD stage 3, likely diabetic nephropathy."))
    events.append(_ev("med-ace", nephro + timedelta(days=2), "med", "Lisinopril 10mg daily",
                      "fhir.json", "Prescribed: Lisinopril 10mg for renal protection."))

    # Renal ultrasound
    us = nephro + timedelta(days=10)
    events.append(_ev("proc-us", us, "procedure", "Renal ultrasound",
                      "fhir.json", "Bilateral kidneys imaged."))
    events.append(_ev("scan-us", us, "scan", "Renal ultrasound (bilateral)",
                      "kidney_us_2021.png",
                      "Imaging: bilateral renal cortices mildly thinned, no obstruction."))

    # Year 5 (2022): continued decline
    cr_2022 = [1.5, 1.6, 1.55]
    for i, cr in enumerate(cr_2022):
        d = datetime(2022, 3 + i * 4, 1, tzinfo=timezone.utc)  # Mar / Jul / Nov draws
        events.append(_ev(f"lab-cr-22-{i}", d, "lab", f"Creatinine: {cr} mg/dL",
                          f"lab_{d.date()}.pdf",
                          f"Creatinine value: {cr} mg/dL (continued elevation)"))

    # Diabetic retinopathy screening (2023)
    eye = datetime(2023, 4, 1, tzinfo=timezone.utc)
    events.append(_ev("v-ophth-1", eye, "visit", "Diabetic retinopathy screening", "fhir.json"))
    events.append(_ev("scan-fundus", eye, "scan", "Right fundus photograph",
                      "fundus_2023.png",
                      "Mild non-proliferative diabetic retinopathy in right eye."))
    events.append(_ev("dx-dr", eye, "diagnosis", "Mild non-proliferative diabetic retinopathy",
                      "fhir.json", "Diagnosis: NPDR, mild — annual follow-up."))

    # Recent (2024-2025): stable on lisinopril, quarterly draws
    for i, cr in enumerate([1.6, 1.55, 1.6, 1.7]):
        d = datetime(2024, 3 + i * 3, 1, tzinfo=timezone.utc)
        events.append(_ev(f"lab-cr-24-{i}", d, "lab", f"Creatinine: {cr} mg/dL",
                          f"lab_{d.date()}.pdf"))

    return Patient(
        id="demo",
        display_name="Sarah Johnson, 67 (demo)",
        age=67,
        gender="female",
        events=events,
    )
|
src/recap/inference/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from recap.inference.gateway import answer
|
| 2 |
+
|
| 3 |
+
__all__ = ["answer"]
|
src/recap/inference/gateway.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Gateway: question in, cited Answer out.
|
| 2 |
+
|
| 3 |
+
This is the only place the rest of the codebase talks to. The UI calls
|
| 4 |
+
`answer(...)`; the gateway handles retrieval, prompt assembly, backend
|
| 5 |
+
routing, and citation parsing. Backends (mock/zerogpu/mi300x) are imported
|
| 6 |
+
lazily so importing this module doesn't drag in torch on a CPU laptop.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import re
|
| 10 |
+
|
| 11 |
+
from recap.config import load
|
| 12 |
+
from recap.models import Answer, Citation, Event
|
| 13 |
+
from recap.prompts import PATIENT_QA_SYSTEM, build_user_prompt
|
| 14 |
+
from recap.retrieval import retrieve
|
| 15 |
+
|
| 16 |
+
_CITATION_RE = re.compile(r"\[src:([^\]#]+)(?:#p(\d+))?\]")
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def answer(question: str, events: list[Event], top_k: int = 6) -> Answer:
    """Run the full question→retrieved→generated→cited pipeline.

    Retrieves the top_k most relevant events, assembles the prompt, routes
    generation to the configured backend, then resolves citation markers
    found in the generated text against the retrieved events.
    """
    config = load()
    context_events = retrieve(question, events, top_k=top_k)
    prompt = build_user_prompt(question, context_events)
    generated = _generate(config.backend, PATIENT_QA_SYSTEM, prompt)
    return Answer(
        text=generated,
        citations=_parse_citations(generated, context_events),
    )
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _generate(backend: str, system: str, user: str) -> str:
    """Dispatch one generation call to the chosen backend.

    Backend modules are imported inside each branch on purpose: importing
    this module must stay cheap on machines without the heavy inference
    dependencies installed.
    """
    if backend == "mock":
        from recap.inference.mock import generate_mock
        return generate_mock(system=system, user=user)

    if backend == "mi300x":
        from recap.inference.mi300x_client import generate_premium
        return generate_premium(system=system, user=user)

    # Any other value falls through to the default zerogpu backend.
    from recap.inference.zerogpu import generate_lite
    return generate_lite(system=system, user=user)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _parse_citations(text: str, retrieved: list[Event]) -> list[Citation]:
    """Extract `[src:foo#p2]` markers and resolve each to a Citation.

    Drops citations to sources that weren't in the retrieved set (defensive
    against the model hallucinating a source name it never saw). Duplicate
    (source, page) pairs are emitted once, in first-seen order.
    """
    events_by_source = {ev.source: ev for ev in retrieved}
    citations: list[Citation] = []
    emitted: set[tuple[str, int | None]] = set()

    for match in _CITATION_RE.finditer(text):
        source_id = match.group(1)
        event = events_by_source.get(source_id)
        if event is None:
            continue
        page_str = match.group(2)
        page = int(page_str) if page_str else None
        if (source_id, page) in emitted:
            continue
        emitted.add((source_id, page))
        citations.append(Citation(source_id=source_id, page=page, snippet=event.title))
    return citations
|
src/recap/inference/mi300x_client.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
import httpx
|
| 6 |
+
|
| 7 |
+
from recap.config import load
|
| 8 |
+
from recap.reasoner import two_stage
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _post(endpoint: str, system: str, user: str, *, timeout: float = 180.0) -> str:
    """POST a system/user prompt pair to the MI300X backend and return its text.

    Retries HTTP-status and transport failures up to 3 times with a short
    exponential backoff, then raises RuntimeError carrying the last error.
    Raises RuntimeError immediately when RECAP_MI300X_URL is unset.
    """
    cfg = load()
    if not cfg.mi300x_url:
        raise RuntimeError(
            "RECAP_MI300X_URL is not set. Point it at the backend, e.g. "
            "RECAP_MI300X_URL=https://abc-123.ngrok-free.app"
        )
    url = f"{cfg.mi300x_url.rstrip('/')}/{endpoint}"
    payload = {"system": system, "user": user}

    attempts = 3
    failure: Exception | None = None
    for attempt in range(attempts):
        try:
            response = httpx.post(url, json=payload, timeout=timeout)
            response.raise_for_status()
            return response.json()["text"]
        except (httpx.HTTPStatusError, httpx.TransportError) as exc:
            failure = exc
            if attempt < attempts - 1:
                time.sleep(1.5 ** attempt)
    raise RuntimeError(f"MI300X backend call failed after 3 attempts: {failure}")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def generate_premium(system: str, user: str) -> str:
    """Answer via the two-stage MedGemma-extract → Qwen-synthesize pipeline.

    The assembled user prompt is split back into its records block and the
    trailing question at the last "Question:" marker; when no marker is
    present the whole prompt is treated as records and a generic summary
    question is substituted. Each stage calls its MI300X HTTP endpoint.
    """
    marker = "Question:"
    if marker in user:
        records_part, question_part = user.rsplit(marker, 1)
        retrieved_block = records_part.strip()
        question = question_part.strip()
    else:
        retrieved_block = user
        question = "Summarize what's in these records."

    return two_stage(
        question,
        retrieved_block,
        extract_fn=lambda s, u: _post("medgemma", s, u),
        synthesize_fn=lambda s, u: _post("qwen", s, u),
    )
|
src/recap/inference/mock.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Mock backend for CPU-only local dev. Returns canned text without loading a model."""
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def generate_mock(system: str, user: str) -> str:
    """Pretend-answer that always cites the first source it sees in the user prompt."""
    source_match = re.search(r"\[src:([^\]]+)\]", user)
    date_match = re.search(r"(\d{4}-\d{2}-\d{2})", user)
    # Grab the text after the em-dash on the first cited line, if any.
    snippet_match = re.search(r"\[src:[^\]]+\][^\n]*?— (.+?)(?:\n|$)", user)

    source = source_match.group(1) if source_match else "unknown.pdf"
    when = date_match.group(1) if date_match else "an unknown date"
    what = snippet_match.group(1).strip() if snippet_match else "a record"

    return (
        f"[mock answer] Based on the records, the earliest relevant signal "
        f"appears on {when}: {what} [src:{source}]. "
        f"Set RECAP_BACKEND=zerogpu or mi300x for real inference."
    )
|
src/recap/ingestion/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from recap.ingestion.fhir import load_bundle as load_fhir_bundle
|
| 2 |
+
|
| 3 |
+
__all__ = ["load_fhir_bundle"]
|
src/recap/ingestion/fhir.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Parse Synthea-style FHIR bundles into demographics + chronological Events.
|
| 2 |
+
|
| 3 |
+
Handles these FHIR resource types:
|
| 4 |
+
- Patient → demographics (name, age, gender)
|
| 5 |
+
- Observation → "lab" events
|
| 6 |
+
- Encounter → "visit" events
|
| 7 |
+
- MedicationRequest → "med" events
|
| 8 |
+
- Condition → "diagnosis" events
|
| 9 |
+
- Procedure → "procedure" events
|
| 10 |
+
- DiagnosticReport → "report" events
|
| 11 |
+
|
| 12 |
+
Other Synthea-emitted types (Claim, ExplanationOfBenefit, CarePlan, Goal,
|
| 13 |
+
Immunization, AllergyIntolerance) are ignored for now — they're either
|
| 14 |
+
financial (no clinical value to the demo) or low-signal compared to the
|
| 15 |
+
above. We can add them if a showcase question needs them.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import json
|
| 19 |
+
import re
|
| 20 |
+
from dataclasses import dataclass
|
| 21 |
+
from datetime import datetime, timezone
|
| 22 |
+
from pathlib import Path
|
| 23 |
+
|
| 24 |
+
from recap.models import Event
|
| 25 |
+
|
| 26 |
+
_TRAILING_DIGITS_RE = re.compile(r"\d+$")
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
class Demographics:
    """Patient-level identity fields extracted from a FHIR Patient resource."""

    display_name: str  # e.g. "Sarah Johnson, 67" — name plus age when known
    age: int | None  # years at load time; None if birthDate missing/unparseable
    gender: str | None  # "male" | "female" | "other"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def _parse_date(s: str) -> datetime:
|
| 37 |
+
if "T" not in s:
|
| 38 |
+
s = f"{s}T00:00:00+00:00"
|
| 39 |
+
if s.endswith("Z"):
|
| 40 |
+
s = s[:-1] + "+00:00"
|
| 41 |
+
return datetime.fromisoformat(s)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _strip_synthea_digits(s: str) -> str:
|
| 45 |
+
"""Synthea suffixes names with digits (e.g. 'Sarah123 Smith45') so they look fake."""
|
| 46 |
+
return _TRAILING_DIGITS_RE.sub("", s)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _compute_age(birth_date: str, as_of: datetime | None = None) -> int | None:
    """Whole years between *birth_date* and *as_of* (default: now, UTC).

    Returns None when the birth date can't be parsed; clamps at zero for
    birth dates in the future.
    """
    try:
        born = _parse_date(birth_date)
    except Exception:
        return None
    reference = as_of if as_of is not None else datetime.now(timezone.utc)
    age = reference.year - born.year
    if (reference.month, reference.day) < (born.month, born.day):
        age -= 1
    return age if age > 0 else 0
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _patient_to_demographics(r: dict) -> Demographics:
    """Map a FHIR Patient resource to Demographics, stripping Synthea's name digits."""
    name_entries = r.get("name") or []
    given = family = ""
    if name_entries:
        first_entry = name_entries[0]
        family = _strip_synthea_digits(first_entry.get("family", ""))
        given_list = first_entry.get("given") or []
        if given_list:
            given = _strip_synthea_digits(given_list[0])
    full_name = f"{given} {family}".strip() or "Patient"

    birth = r.get("birthDate")
    age = _compute_age(birth) if birth else None
    display = full_name if age is None else f"{full_name}, {age}"
    return Demographics(display_name=display, age=age, gender=r.get("gender"))
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _observation_to_event(r: dict, source_id: str) -> Event | None:
    """Map a FHIR Observation to a "lab" Event; None when it carries no date."""
    when = r.get("effectiveDateTime") or r.get("issued")
    if not when:
        return None

    code = r.get("code", {}).get("text") or ""
    qty = r.get("valueQuantity", {})
    value_text = f"{qty.get('value')} {qty.get('unit', '')}".strip() if qty else ""

    rid = r.get("id", "")
    if value_text:
        title = f"{code}: {value_text}".strip(": ")
        body = f"{code} value: {value_text}"
    else:
        title = code or "Observation"
        body = code
    return Event(
        id=f"obs-{rid}",
        date=_parse_date(when),
        category="lab",
        title=title,
        source=source_id,
        body=body,
        metadata={"resource_id": rid},
    )
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _encounter_to_event(r: dict, source_id: str) -> Event | None:
    """Map a FHIR Encounter to a "visit" Event; None without a period start."""
    start = r.get("period", {}).get("start")
    if not start:
        return None
    reason_codes = r.get("reasonCode") or []
    label = reason_codes[0].get("text", "Encounter") if reason_codes else "Encounter"
    rid = r.get("id", "")
    return Event(
        id=f"enc-{rid}",
        date=_parse_date(start),
        category="visit",
        title=label,
        source=source_id,
        body=label,
        metadata={"resource_id": rid},
    )
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def _medication_to_event(r: dict, source_id: str) -> Event | None:
    """Map a FHIR MedicationRequest to a "med" Event; None without authoredOn."""
    authored = r.get("authoredOn")
    if not authored:
        return None
    med_name = r.get("medicationCodeableConcept", {}).get("text", "Medication")
    rid = r.get("id", "")
    return Event(
        id=f"med-{rid}",
        date=_parse_date(authored),
        category="med",
        title=med_name,
        source=source_id,
        body=f"Prescribed: {med_name}",
        metadata={"resource_id": rid},
    )
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _condition_to_event(r: dict, source_id: str) -> Event | None:
    """Map a FHIR Condition to a "diagnosis" Event; None without an onset/recorded date.

    The clinical status code (e.g. "active", "resolved") is appended to the
    body and stored in metadata when present.
    """
    name = r.get("code", {}).get("text", "Condition")
    date_str = r.get("onsetDateTime") or r.get("recordedDate")
    if not date_str:
        return None
    rid = r.get("id", "")
    # FIX: `coding` may be present but empty — `.get("coding", [{}])[0]`
    # raised IndexError on `"coding": []`; `or [{}]` covers that case too.
    codings = r.get("clinicalStatus", {}).get("coding") or [{}]
    clinical = codings[0].get("code", "")
    return Event(
        id=f"cond-{rid}",
        date=_parse_date(date_str),
        category="diagnosis",
        title=name,
        source=source_id,
        body=f"Diagnosis: {name}" + (f" (status: {clinical})" if clinical else ""),
        metadata={"resource_id": rid, "clinical_status": clinical},
    )
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def _procedure_to_event(r: dict, source_id: str) -> Event | None:
    """Map a FHIR Procedure to a "procedure" Event; None without a performed date."""
    performed = r.get("performedDateTime") or r.get("performedPeriod", {}).get("start")
    if not performed:
        return None
    proc_name = r.get("code", {}).get("text", "Procedure")
    rid = r.get("id", "")
    return Event(
        id=f"proc-{rid}",
        date=_parse_date(performed),
        category="procedure",
        title=proc_name,
        source=source_id,
        body=f"Procedure: {proc_name}",
        metadata={"resource_id": rid},
    )
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def _diagnostic_report_to_event(r: dict, source_id: str) -> Event | None:
    """Map a FHIR DiagnosticReport to a "report" Event; None without a date.

    Body is "<name>. <conclusion>" with the trailing period trimmed; with no
    conclusion the body is just the name.
    """
    name = r.get("code", {}).get("text", "Report")
    date_str = r.get("effectiveDateTime") or r.get("issued")
    if not date_str:
        return None
    rid = r.get("id", "")
    conclusion = r.get("conclusion", "")
    # FIX: the old unconditional f"{name}. {conclusion}".strip(".") left a
    # dangling ". " in the body when the report had no conclusion.
    body = f"{name}. {conclusion}".strip(".") if conclusion else name
    return Event(
        id=f"rep-{rid}",
        date=_parse_date(date_str),
        category="report",
        title=name,
        source=source_id,
        body=body,
        metadata={"resource_id": rid},
    )
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
# resourceType → converter function. Resource types absent from this table
# are deliberately skipped by load_bundle (see module docstring).
_DISPATCH = {
    "Observation": _observation_to_event,
    "Encounter": _encounter_to_event,
    "MedicationRequest": _medication_to_event,
    "Condition": _condition_to_event,
    "Procedure": _procedure_to_event,
    "DiagnosticReport": _diagnostic_report_to_event,
}
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def _iter_resources(bundle: dict):
|
| 196 |
+
for entry in bundle.get("entry", []):
|
| 197 |
+
r = entry.get("resource", {})
|
| 198 |
+
yield r.get("resourceType"), r
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def load_bundle(path: str, source_id: str) -> list[Event]:
    """Parse a FHIR Bundle and return Events for known clinical resource types.

    Resource types not present in ``_DISPATCH`` are ignored, as are resources
    whose converter returns None (e.g. undated records).
    """
    bundle = json.loads(Path(path).read_text())

    results: list[Event] = []
    for rtype, resource in _iter_resources(bundle):
        converter = _DISPATCH.get(rtype)
        if converter is None:
            continue
        event = converter(resource, source_id)
        if event is not None:
            results.append(event)
    return results
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def load_demographics(path: str) -> Demographics | None:
    """Extract Patient demographics from a FHIR Bundle.

    Returns the first Patient resource converted to Demographics, or None
    when the bundle contains no Patient resource.
    """
    bundle = json.loads(Path(path).read_text())
    return next(
        (
            _patient_to_demographics(resource)
            for rtype, resource in _iter_resources(bundle)
            if rtype == "Patient"
        ),
        None,
    )
|
src/recap/ingestion/image.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Wrap medical images as Events with caller-provided date and category.
|
| 2 |
+
|
| 3 |
+
We do not auto-extract dates from EXIF — clinical workflow requires curation.
|
| 4 |
+
The caller (case manifest, upload handler) provides the date explicitly.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
from recap.models import Event, EventCategory
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _parse_date(s: str) -> datetime:
|
| 14 |
+
if "T" not in s:
|
| 15 |
+
s = f"{s}T00:00:00+00:00"
|
| 16 |
+
if s.endswith("Z"):
|
| 17 |
+
s = s[:-1] + "+00:00"
|
| 18 |
+
return datetime.fromisoformat(s)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def load_image_event(
    path: str,
    *,
    category: EventCategory,
    title: str,
    date_iso: str,
    source_id: str | None = None,
) -> Event:
    """Wrap a single image file as an Event.

    The caller supplies the clinical date and category explicitly — no
    metadata is auto-extracted from the file. When no source_id is given,
    the file name stands in as the citation source.
    """
    src = source_id if source_id else Path(path).name
    return Event(
        id=f"img-{src}",
        date=_parse_date(date_iso),
        category=category,
        title=title,
        source=src,
        body=f"Image: {title}",
        metadata={"path": path},
    )
|
src/recap/ingestion/pdf.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Parse PDFs into per-page records with source metadata for citation grounding."""
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
from pypdf import PdfReader
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class PdfPage:
    """One page of text extracted from a PDF, tagged for citation grounding."""

    source_id: str  # identifier used in [src:...] citations (defaults to the file name)
    page_number: int  # 1-indexed page number within the source document
    text: str  # extracted page text; empty string when extraction yields nothing
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def load_pdf(path: str, source_id: str | None = None) -> list[PdfPage]:
    """Read every page of a PDF into PdfPage records.

    Pages with no extractable text (e.g. scanned images) yield an empty
    string rather than None, so downstream code can treat text uniformly.
    """
    src = source_id if source_id else Path(path).name
    pages: list[PdfPage] = []
    for number, page in enumerate(PdfReader(path).pages, start=1):
        extracted = page.extract_text() or ""
        pages.append(PdfPage(source_id=src, page_number=number, text=extracted))
    return pages
|
src/recap/models.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
from typing import Literal
|
| 3 |
+
|
| 4 |
+
from pydantic import BaseModel, Field
|
| 5 |
+
|
| 6 |
+
# Closed set of timeline event types. The UI keys its lane ordering, colors,
# and icons off these exact strings, so additions must be mirrored there.
EventCategory = Literal[
    "lab",
    "visit",
    "scan",
    "med",
    "note",
    "photo",
    "diagnosis",
    "procedure",
    "report",
    "other",
]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class Citation(BaseModel):
    """Pointer from an answer back to the source record it was drawn from."""

    source_id: str  # matches Event.source / PdfPage.source_id
    page: int | None = None  # 1-indexed page, when the source is paginated
    snippet: str | None = None  # verbatim quote supporting the claim
    region: tuple[float, float, float, float] | None = None  # bounding box within a page/image — presumably (x0, y0, x1, y1); TODO confirm units
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class Event(BaseModel):
    """A single dated clinical record (lab, visit, scan, …) on a patient's timeline."""

    id: str  # unique within a patient, e.g. "proc-<resource_id>"
    date: datetime
    category: EventCategory
    title: str  # short human-readable label
    source: str  # source-document identifier used in [src:...] citations
    body: str = ""  # longer free text fed to retrieval and the LLM prompt
    metadata: dict = Field(default_factory=dict)  # per-source extras (e.g. FHIR resource_id)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class Patient(BaseModel):
    """A patient case: demographics plus every ingested timeline event."""

    id: str  # stable case identifier used by the API and UI
    display_name: str
    age: int | None = None
    gender: str | None = None  # "male" | "female" | "other"
    events: list[Event] = Field(default_factory=list)  # unordered; sorting is the timeline's job
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class Answer(BaseModel):
    """An LLM answer plus the citations that ground each of its claims."""

    text: str  # final answer text, with inline [src:...] markers
    citations: list[Citation] = Field(default_factory=list)
|
src/recap/prompts.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""System prompts and prompt builders. Centralized for easy tuning."""
|
| 2 |
+
|
| 3 |
+
PATIENT_QA_SYSTEM = """You are a careful medical reading assistant. You have access to a patient's records (labs, visits, medications, scans). When asked a question:
|
| 4 |
+
|
| 5 |
+
1. Cite the exact source for every claim using the format [src:<source_id>#p<page>] or [src:<source_id>] if no page.
|
| 6 |
+
2. If the answer is not in the provided records, say so explicitly.
|
| 7 |
+
3. Never speculate beyond what the records show.
|
| 8 |
+
4. Never give medical advice or recommend treatment changes.
|
| 9 |
+
|
| 10 |
+
Output format:
|
| 11 |
+
- A direct answer in 2-4 sentences with inline citations.
|
| 12 |
+
- Then a bullet list of the cited records you relied on.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def build_user_prompt(question: str, retrieved_events: list) -> str:
    """Render retrieved events into the user-turn prompt.

    Each event becomes a cited bullet line; a body line is added only when
    it carries information beyond the title. The question goes last so the
    model reads the evidence first.
    """
    parts = ["Patient records (most relevant first):", ""]
    for event in retrieved_events:
        stamp = event.date.date().isoformat()
        parts.append(f"- [src:{event.source}] {stamp} — {event.title}")
        body = event.body
        if body and body != event.title:
            parts.append(f"  {body}")
    parts.extend(["", f"Question: {question}"])
    return "\n".join(parts)
|
src/recap/reasoner.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Two-stage reasoner: MedGemma extracts evidence, Qwen writes the answer."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Callable, Protocol
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Stage 1 system prompt: verbatim evidence extraction with mandatory citations.
EXTRACT_SYSTEM = (
    "You are a medical evidence extractor. Given a patient's records and a "
    "question, identify the most relevant data points and quote them verbatim. "
    "Always include the source citation in [src:source_id] or "
    "[src:source_id#p<page>] format. Do not synthesize, interpret, or speculate "
    "— extract only."
)

# Stage 2 system prompt: synthesize an answer strictly from stage-1 evidence.
SYNTHESIZE_SYSTEM = (
    "You are a careful medical reading assistant. You will be given:\n"
    "1. A user question\n"
    "2. Evidence extracted from the patient's records, with citations\n\n"
    "Synthesize a 2-4 sentence answer using only the evidence. Preserve every "
    "[src:...] citation exactly as given. If the evidence is insufficient, say "
    "so. Never give medical advice or recommend treatment changes."
)


class GenerateFn(Protocol):
    """Callable contract for one LLM turn: (system prompt, user prompt) -> completion."""

    def __call__(self, system: str, user: str) -> str: ...


def two_stage(
    question: str,
    retrieved_block: str,
    *,
    extract_fn: GenerateFn,
    synthesize_fn: GenerateFn,
) -> str:
    """Run the extract-then-synthesize pipeline and return the final answer text.

    ``extract_fn`` (e.g. MedGemma) pulls cited evidence out of the retrieved
    records; ``synthesize_fn`` (e.g. Qwen) writes the answer from that
    evidence alone. Both outputs are stripped of surrounding whitespace.
    """
    extraction_prompt = "\n\n".join(
        [
            f"Patient records:\n{retrieved_block}",
            f"Question: {question}",
            "Extract the most relevant data points with citations:",
        ]
    )
    evidence = extract_fn(EXTRACT_SYSTEM, extraction_prompt).strip()

    synthesis_prompt = f"Question: {question}\n\nEvidence:\n{evidence}\n\nAnswer:"
    return synthesize_fn(SYNTHESIZE_SYSTEM, synthesis_prompt).strip()
|
src/recap/retrieval.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""BM25 retrieval over patient events. No external deps."""
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
from collections import Counter
|
| 5 |
+
from math import log
|
| 6 |
+
|
| 7 |
+
from recap.models import Event
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _tokenize(text: str) -> list[str]:
|
| 11 |
+
return re.findall(r"[A-Za-z0-9]+", text.lower())
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def retrieve(query: str, events: list[Event], top_k: int = 5) -> list[Event]:
    """Rank events by BM25 over title+body.

    On no-match, falls back to the first `top_k` events so the caller always
    gets something to send to the LLM rather than an empty context.
    """
    if not events:
        return []

    query_tokens = _tokenize(query)
    if not query_tokens:
        # Query had no alphanumeric content — nothing to score against.
        return events[:top_k]

    # One bag-of-words document per event.
    docs = [_tokenize(f"{e.title} {e.body}") for e in events]
    avgdl = sum(len(d) for d in docs) / len(docs)  # average document length

    # Document frequency: how many events contain each token at least once.
    df: Counter = Counter()
    for d in docs:
        for tok in set(d):
            df[tok] += 1

    n = len(docs)
    k1, b = 1.5, 0.75  # conventional BM25 free parameters

    scores: list[tuple[float, int]] = []
    for i, d in enumerate(docs):
        score = 0.0
        tf = Counter(d)
        for q in query_tokens:
            if q not in tf:
                continue
            # BM25 term: smoothed idf (the +1 keeps it positive even for very
            # common tokens) times a saturating, length-normalized tf.
            idf = log((n - df[q] + 0.5) / (df[q] + 0.5) + 1)
            num = tf[q] * (k1 + 1)
            # max(avgdl, 1) guards against division issues on all-empty docs.
            den = tf[q] + k1 * (1 - b + b * len(d) / max(avgdl, 1))
            score += idf * num / den
        scores.append((score, i))

    # Highest score first; equal scores break ties by higher event index
    # (tuple comparison under reverse=True).
    scores.sort(reverse=True)
    ranked = [events[i] for s, i in scores[:top_k] if s > 0]
    # Fallback documented above: never return an empty context when events exist.
    return ranked or events[:top_k]
|
src/recap/timeline.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Build a chronological timeline view of a patient's events."""
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
|
| 5 |
+
from recap.models import Event
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@dataclass
class Timeline:
    """Chronological view of a patient's events."""

    events: list[Event]  # sorted ascending by event date
    years_covered: list[int]  # distinct years present in events, ascending
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def build_timeline(events: list[Event]) -> Timeline:
    """Sort events chronologically and collect the distinct years they span."""
    ordered = sorted(events, key=lambda ev: ev.date)
    distinct_years = sorted({ev.date.year for ev in ordered})
    return Timeline(events=ordered, years_covered=distinct_years)
|
src/recap/ui/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from recap.ui.timeline_view import build_timeline_figure
|
| 2 |
+
|
| 3 |
+
__all__ = ["build_timeline_figure"]
|
src/recap/ui/timeline_view.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Render a Patient's events as an interactive Plotly timeline.
|
| 2 |
+
|
| 3 |
+
X-axis is time, Y-axis groups events by category (lab, visit, scan, …).
|
| 4 |
+
Hovering a marker shows the event title and source. Clicking is wired
|
| 5 |
+
up later to scroll the chat to the relevant citation.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import plotly.express as px
|
| 10 |
+
|
| 11 |
+
from recap.models import Patient
|
| 12 |
+
|
| 13 |
+
# Stable category order — controls the y-axis lane positions in the figure.
# Must use the exact EventCategory strings from recap.models.
CATEGORY_ORDER = [
    "diagnosis",
    "visit",
    "lab",
    "report",
    "scan",
    "procedure",
    "med",
    "note",
    "photo",
    "other",
]

# Color per category — chosen for legibility on dark theme + colorblind-friendly.
CATEGORY_COLORS = {
    "diagnosis": "#e63946",
    "visit": "#2a9d8f",
    "lab": "#457b9d",
    "report": "#264653",
    "scan": "#f4a261",
    "procedure": "#9b5de5",
    "med": "#e76f51",
    "note": "#6c757d",
    "photo": "#bdb2ff",
    "other": "#adb5bd",
}
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def build_timeline_figure(patient: Patient):
    """Return a Plotly scatter Figure of the patient's events, or None if empty.

    One row per event; y-lanes follow CATEGORY_ORDER and colors follow
    CATEGORY_COLORS so the figure stays visually stable across patients.
    """
    if not patient.events:
        return None

    rows = [
        {
            "date": ev.date,
            "category": ev.category,
            "title": ev.title,
            "source": ev.source,
            "year": ev.date.year,
        }
        for ev in patient.events
    ]
    frame = pd.DataFrame(rows)

    fig = px.scatter(
        frame,
        x="date",
        y="category",
        color="category",
        category_orders={"category": CATEGORY_ORDER},
        color_discrete_map=CATEGORY_COLORS,
        hover_data={"title": True, "source": True, "category": False, "date": "|%Y-%m-%d"},
        title=f"{patient.display_name} — {len(patient.events)} events",
    )
    marker_style = dict(size=11, opacity=0.85, line=dict(width=0.5, color="white"))
    fig.update_traces(marker=marker_style)
    fig.update_layout(
        height=340,
        margin=dict(t=50, b=40, l=40, r=20),
        showlegend=True,
        legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
        xaxis_title=None,
        yaxis_title=None,
    )
    return fig
|
static/app.jsx
ADDED
|
@@ -0,0 +1,859 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Recap — Bold Editorial UI
|
| 2 |
+
// Ported from the design bundle (direction-editorial.jsx) and wired to the
|
| 3 |
+
// real FastAPI backend at /api/patients and /api/answer. Single-instance
|
| 4 |
+
// app (no canvas), full-window, light + dark.
|
| 5 |
+
|
| 6 |
+
const { useState, useEffect, useMemo, useRef } = React;
|
| 7 |
+
|
| 8 |
+
// Theme palettes keyed by role name; components read colors via `c.<role>`.
// Light and dark share the same role set so toggling never changes markup.
const PALETTE = {
  // Light: warm paper tones, brick-red accent.
  light: {
    bg: '#f4ede2', paper: '#fbf7ef',
    ink: '#1a1410', inkSoft: '#3a2e25',
    muted: '#6b5c4a', faint: '#a8967f',
    rule: '#d6c8b4', ruleSoft: '#e8ddc9',
    accent: '#b8412e', accentSoft: '#f3dcd0',
    mark: '#d4af37',
  },
  // Dark: same roles with ink/paper values inverted and the accent lightened.
  dark: {
    bg: '#1a1410', paper: '#221a14',
    ink: '#f4ede2', inkSoft: '#d6c8b4',
    muted: '#a8967f', faint: '#6b5c4a',
    rule: '#3a2e25', ruleSoft: '#2a2017',
    accent: '#e8755e', accentSoft: '#2a1814',
    mark: '#e0c060',
  },
};
|
| 26 |
+
|
| 27 |
+
// Display label + tooltip hint for each backend EventCategory string.
// Keys must match the EventCategory literals emitted by the API.
const CAT = {
  diagnosis: { label: 'Diagnosis', hint: 'Clinical condition' },
  visit: { label: 'Visit', hint: 'Patient encounter' },
  lab: { label: 'Lab', hint: 'Laboratory result' },
  report: { label: 'Report', hint: 'Clinical report or summary' },
  scan: { label: 'Scan', hint: 'Medical imaging' },
  procedure: { label: 'Procedure', hint: 'Operation or intervention' },
  med: { label: 'Medication', hint: 'Prescribed drug' },
  note: { label: 'Note', hint: 'Free-text clinical note' },
  photo: { label: 'Photo', hint: 'Patient-supplied image' },
  other: { label: 'Other', hint: 'Uncategorized event' },
};
|
| 39 |
+
|
| 40 |
+
// Inline lucide-style icons (24x24 viewBox). Stroke inherits from the parent,
// so the icon takes whatever color the surrounding text has. One glyph per
// event category; these fragments are rendered inside EventIcon's <svg> shell.
const ICONS = {
  // alert-octagon — signals clinical importance for any diagnosis
  diagnosis: (
    <g>
      <path d="M7.86 2h8.28L22 7.86v8.28L16.14 22H7.86L2 16.14V7.86z" />
      <path d="M12 8v4" />
      <path d="M12 16h.01" />
    </g>
  ),
  // stethoscope — clinician encounter
  visit: (
    <g>
      <path d="M11 2v2" />
      <path d="M5 2v2" />
      <path d="M5 3H4a2 2 0 0 0-2 2v4a6 6 0 0 0 12 0V5a2 2 0 0 0-2-2h-1" />
      <path d="M8 15a6 6 0 0 0 12 0v-3" />
      <circle cx="20" cy="10" r="2" />
    </g>
  ),
  // flask-conical — laboratory result
  lab: (
    <g>
      <path d="M10 2v6.5L3.5 19a1 1 0 0 0 .9 1.5h15.2a1 1 0 0 0 .9-1.5L14 8.5V2" />
      <path d="M9 2h6" />
      <path d="M6.4 14.5h11.2" />
    </g>
  ),
  // file-text — clinical report or summary document
  report: (
    <g>
      <path d="M15 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V7Z" />
      <path d="M14 2v6h6" />
      <path d="M9 13h6" />
      <path d="M9 17h4" />
    </g>
  ),
  // image (frame + small sun + mountain) — universal "imaging" symbol
  scan: (
    <g>
      <rect x="3" y="3" width="18" height="18" rx="2" ry="2" />
      <circle cx="9" cy="9" r="2" />
      <path d="m21 15-3.086-3.086a2 2 0 0 0-2.828 0L6 21" />
    </g>
  ),
  // scissors — operation or intervention
  procedure: (
    <g>
      <circle cx="6" cy="6" r="3" />
      <path d="M8.12 8.12 12 12" />
      <path d="M20 4 8.12 15.88" />
      <circle cx="6" cy="18" r="3" />
      <path d="M14.8 14.8 20 20" />
    </g>
  ),
  // pill — prescribed medication
  med: (
    <g>
      <path d="m10.5 20.5 10-10a4.95 4.95 0 1 0-7-7l-10 10a4.95 4.95 0 1 0 7 7Z" />
      <path d="m8.5 8.5 7 7" />
    </g>
  ),
  // pen-line — free-text clinical note
  note: (
    <g>
      <path d="M12 20h9" />
      <path d="M16.5 3.5a2.121 2.121 0 0 1 3 3L7 19l-4 1 1-4Z" />
    </g>
  ),
  // camera — patient-supplied photo
  photo: (
    <g>
      <path d="M14.5 4h-5L7 7H4a2 2 0 0 0-2 2v9a2 2 0 0 0 2 2h16a2 2 0 0 0 2-2V9a2 2 0 0 0-2-2h-3l-2.5-3z" />
      <circle cx="12" cy="13" r="3" />
    </g>
  ),
  // plain dot — fallback for unknown categories
  other: (
    <circle cx="12" cy="12" r="3" />
  ),
};
|
| 122 |
+
|
| 123 |
+
function EventIcon({ category, size = 12 }) {
|
| 124 |
+
const paths = ICONS[category] || ICONS.other;
|
| 125 |
+
return (
|
| 126 |
+
<svg width={size} height={size} viewBox="0 0 24 24"
|
| 127 |
+
fill="none" stroke="currentColor" strokeWidth="2"
|
| 128 |
+
strokeLinecap="round" strokeLinejoin="round"
|
| 129 |
+
style={{ display: 'block' }}>
|
| 130 |
+
{paths}
|
| 131 |
+
</svg>
|
| 132 |
+
);
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
// Type stacks: serif carries the editorial voice (headlines, answers),
// sans is UI chrome, mono is for metadata like case numbers and dates.
const SERIF = '"Source Serif 4", "GT Sectra", "Tiempos Headline", Charter, Georgia, serif';
const SANS = '"Inter", -apple-system, BlinkMacSystemFont, "Segoe UI", system-ui, sans-serif';
const MONO = '"JetBrains Mono", "SF Mono", ui-monospace, monospace';
|
| 138 |
+
|
| 139 |
+
// Format an ISO date (or datetime) string for display.
//
// Date-only inputs ("YYYY-MM-DD") denote calendar dates, so they are both
// parsed AND rendered in UTC. The previous version parsed them as UTC
// midnight but rendered in the viewer's local timezone, which showed the
// previous day to anyone west of Greenwich.
//
// opts: { y: boolean }  include the year (default true)
//       { short: true } "Mar 5" form, no year
function fmtDate(iso, opts = { y: true }) {
  const dateOnly = iso.length === 10;
  const d = new Date(dateOnly ? `${iso}T00:00:00Z` : iso);
  // Pin rendering to UTC for calendar dates so the day never shifts.
  const tz = dateOnly ? { timeZone: 'UTC' } : {};
  if (opts.short) {
    return d.toLocaleDateString('en-US', { month: 'short', day: 'numeric', ...tz });
  }
  return d.toLocaleDateString('en-US', {
    year: opts.y ? 'numeric' : undefined,
    month: 'short',
    day: 'numeric',
    ...tz,
  });
}
|
| 148 |
+
|
| 149 |
+
// ─────────────────────────────────────────────────────────────────────
|
| 150 |
+
// Suggested questions per patient. Used as starter prompts only —
|
| 151 |
+
// actual answers come from /api/answer (real LLM via inference gateway).
|
| 152 |
+
// Suggested starter questions per patient id. These are prompts only —
// actual answers come from /api/answer (real LLM via the inference gateway).
// Patients whose id is missing here simply get no suggestions.
const SUGGESTED = {
  sarah: [
    'When did her kidney function start declining?',
    'What medications was she on when CKD was diagnosed?',
    'Summarize her trajectory in 3 sentences.',
  ],
  marcus: [
    'How long from first symptom to diagnosis?',
    'What was the response to R-CHOP?',
    'Summarize this patient\'s journey.',
  ],
  aisha: [
    'What records does she have in foreign languages?',
    'Is her current anemia recurrent or new?',
    'What is her current pregnancy status?',
  ],
  demo: [
    'When did her kidney function start declining?',
    'What was her first abnormal creatinine reading?',
    'What medications was she on when CKD was diagnosed?',
  ],
};
|
| 174 |
+
|
| 175 |
+
// ─────────────────────────────────────────────────────────────────────
|
| 176 |
+
// Root component. Fetches the patient roster once on mount, tracks the
// selected patient and theme, and renders masthead + document + chat.
function App() {
  const [patients, setPatients] = useState([]);
  const [patientId, setPatientId] = useState(null);   // id of the selected case
  const [dark, setDark] = useState(false);            // theme toggle
  const [loading, setLoading] = useState(true);       // roster fetch in flight
  const [error, setError] = useState(null);           // stringified fetch failure

  // Load the roster once; auto-select the first patient on success.
  useEffect(() => {
    fetch('/api/patients')
      .then((r) => r.json())
      .then((data) => {
        setPatients(data);
        if (data.length > 0) setPatientId(data[0].id);
        setLoading(false);
      })
      .catch((e) => {
        setError(String(e));
        setLoading(false);
      });
  }, []);

  const c = dark ? PALETTE.dark : PALETTE.light;
  // Resolve the selected patient object; recompute only when inputs change.
  const patient = useMemo(
    () => patients.find((p) => p.id === patientId),
    [patients, patientId],
  );

  if (loading) {
    return <Loading c={c} />;
  }
  if (error) {
    return <ErrorView c={c} message={error} />;
  }
  if (!patient) {
    return <ErrorView c={c} message="No patients available." />;
  }

  return (
    <div style={{
      position: 'absolute', inset: 0, display: 'flex', flexDirection: 'column',
      background: c.bg, color: c.ink, fontFamily: SANS, fontSize: 13,
    }}>
      <Masthead c={c} dark={dark} patient={patient} allPatients={patients}
        onPatientChange={setPatientId}
        onDarkToggle={() => setDark(!dark)} />
      <div style={{ flex: 1, display: 'flex', minHeight: 0 }}>
        <Document c={c} patient={patient} />
        <ChatColumn c={c} patient={patient} />
      </div>
    </div>
  );
}
|
| 228 |
+
|
| 229 |
+
function Loading({ c }) {
|
| 230 |
+
return (
|
| 231 |
+
<div style={{
|
| 232 |
+
position: 'absolute', inset: 0, background: c.bg, color: c.muted,
|
| 233 |
+
display: 'grid', placeItems: 'center', fontFamily: SERIF,
|
| 234 |
+
}}>
|
| 235 |
+
<div style={{ textAlign: 'center' }}>
|
| 236 |
+
<div style={{ fontSize: 42, color: c.ink, letterSpacing: '-.03em' }}>
|
| 237 |
+
Recap<span style={{ color: c.accent }}>.</span>
|
| 238 |
+
</div>
|
| 239 |
+
<div style={{ fontStyle: 'italic', marginTop: 8 }}>loading the chart…</div>
|
| 240 |
+
</div>
|
| 241 |
+
</div>
|
| 242 |
+
);
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
function ErrorView({ c, message }) {
|
| 246 |
+
return (
|
| 247 |
+
<div style={{
|
| 248 |
+
position: 'absolute', inset: 0, background: c.bg, color: c.ink,
|
| 249 |
+
display: 'grid', placeItems: 'center', fontFamily: SERIF, padding: 24,
|
| 250 |
+
}}>
|
| 251 |
+
<div style={{ textAlign: 'center', maxWidth: 480 }}>
|
| 252 |
+
<div style={{ fontSize: 32, color: c.accent, letterSpacing: '-.02em' }}>
|
| 253 |
+
Something is off.
|
| 254 |
+
</div>
|
| 255 |
+
<div style={{ marginTop: 12, color: c.muted, fontStyle: 'italic' }}>{message}</div>
|
| 256 |
+
</div>
|
| 257 |
+
</div>
|
| 258 |
+
);
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
// ─────────────────────────────────────────────────────────────────────
|
| 262 |
+
function Masthead({ c, dark, patient, allPatients, onPatientChange, onDarkToggle }) {
  // Top bar: wordmark, tagline, the patient-picker dropdown, the backend
  // badge, and the dark-mode toggle. `open` controls the dropdown only.
  const [open, setOpen] = useState(false);

  const bar = {
    padding: '14px 28px', borderBottom: `1px solid ${c.rule}`,
    background: c.bg, display: 'flex', alignItems: 'center', gap: 18,
  };
  const wordmark = {
    fontFamily: SERIF, fontSize: 26, fontWeight: 500,
    letterSpacing: '-0.025em', lineHeight: 1, color: c.ink,
  };
  const tagline = {
    paddingLeft: 16, borderLeft: `1px solid ${c.rule}`,
    fontFamily: SERIF, fontStyle: 'italic', fontSize: 13.5, color: c.muted,
    lineHeight: 1.3, maxWidth: 280,
  };
  const pickerButton = {
    display: 'flex', alignItems: 'center', gap: 10,
    padding: '6px 12px', border: `1px solid ${c.rule}`, borderRadius: 2,
    background: c.paper, color: c.ink, cursor: 'pointer',
    fontFamily: SANS, fontSize: 12,
  };
  const dropdown = {
    position: 'absolute', top: '110%', right: 0, width: 320, marginTop: 4,
    background: c.paper, border: `1px solid ${c.rule}`, borderRadius: 2,
    padding: 4, zIndex: 10,
    boxShadow: dark ? '0 8px 32px rgba(0,0,0,.5)' : '0 8px 32px rgba(0,0,0,.12)',
  };

  // Selecting a patient also closes the dropdown.
  const choosePatient = (id) => {
    onPatientChange(id);
    setOpen(false);
  };

  return (
    <div style={bar}>
      <div style={wordmark}>
        Recap<span style={{ color: c.accent }}>.</span>
      </div>
      <div style={tagline}>
        Reads the whole chart so you don't have to.
      </div>
      <div style={{ flex: 1 }} />
      <div style={{ position: 'relative' }}>
        <button onClick={() => setOpen(!open)} style={pickerButton}>
          <span style={{ fontFamily: MONO, fontSize: 10, color: c.faint, letterSpacing: '0.06em' }}>
            CASE №
          </span>
          <span style={{ fontFamily: SERIF, fontSize: 14, fontWeight: 500, color: c.ink }}>
            {patient.display_name}
          </span>
          <span style={{ color: c.faint }}>▾</span>
        </button>
        {open && (
          <div style={dropdown}>
            {allPatients.map((p, i) => (
              <button key={p.id}
                onClick={() => choosePatient(p.id)}
                style={{
                  width: '100%', textAlign: 'left', padding: '10px 12px',
                  borderRadius: 2, border: 'none', cursor: 'pointer',
                  background: p.id === patient.id ? c.accentSoft : 'transparent',
                  color: c.ink, fontFamily: 'inherit',
                }}>
                <div style={{ display: 'flex', alignItems: 'baseline', gap: 8 }}>
                  <span style={{
                    fontFamily: MONO, fontSize: 10, color: c.faint, letterSpacing: '0.06em',
                  }}>{String(i + 1).padStart(2, '0')}</span>
                  <span style={{ fontFamily: SERIF, fontSize: 16, fontWeight: 500 }}>
                    {p.display_name}
                  </span>
                  {p.age != null && (
                    <span style={{ fontSize: 11, color: c.muted }}>· {p.age}y</span>
                  )}
                </div>
                <div style={{
                  fontSize: 11.5, color: c.muted, marginTop: 4, lineHeight: 1.45,
                  fontStyle: 'italic', fontFamily: SERIF,
                }}>
                  {p.summary}
                </div>
              </button>
            ))}
          </div>
        )}
      </div>
      <BackendBadge c={c} />
      <button onClick={onDarkToggle} style={{
        width: 28, height: 28, borderRadius: 2,
        border: `1px solid ${c.rule}`, background: c.paper,
        color: c.muted, cursor: 'pointer', fontSize: 14,
      }}>{dark ? '☀' : '☾'}</button>
    </div>
  );
}
|
| 345 |
+
|
| 346 |
+
function BackendBadge({ c }) {
  // Small status chip showing which inference backend /api/health reports.
  // Starts with a '...' placeholder until the health probe resolves.
  const [info, setInfo] = useState({ backend: '...' });
  useEffect(() => {
    // Fix: the fetch can resolve after this component unmounts (e.g. fast
    // navigation), which would call setInfo on an unmounted component.
    // Gate the state update behind an `alive` flag cleared on cleanup.
    let alive = true;
    fetch('/api/health')
      .then((r) => r.json())
      .then((data) => { if (alive) setInfo(data); })
      .catch(() => {}); // best-effort: keep the placeholder on failure
    return () => { alive = false; };
  }, []);
  return (
    <div style={{
      display: 'flex', alignItems: 'center', gap: 6,
      padding: '4px 10px', border: `1px solid ${c.rule}`, borderRadius: 2,
      background: c.paper,
      fontFamily: MONO, fontSize: 10, color: c.muted, letterSpacing: '0.04em',
    }}>
      <span style={{ width: 5, height: 5, borderRadius: '50%', background: c.accent }} />
      AMD MI300X · 192 GB · {info.backend}
    </div>
  );
}
|
| 363 |
+
|
| 364 |
+
// ─────────────────────────────────────────────────────────────────────
|
| 365 |
+
function Document({ c, patient }) {
  // Left pane: dossier header (name, summary, stats, tags) followed by the
  // chronological event ledger grouped by year (lexicographic sort is fine
  // for 4-digit ISO years).
  const events = patient.events;
  const byYear = events.reduce((acc, ev) => {
    const y = ev.date.slice(0, 4);
    (acc[y] = acc[y] || []).push(ev);
    return acc;
  }, {});
  const years = Object.keys(byYear).sort();
  const sourceCount = new Set(events.map((ev) => ev.source)).size;

  return (
    <div style={{
      flex: 1.4, minWidth: 0, overflowY: 'auto',
      background: c.paper, borderRight: `1px solid ${c.rule}`,
    }}>
      <div style={{ padding: '32px 36px 24px', borderBottom: `1px solid ${c.rule}` }}>
        <div style={{
          fontFamily: MONO, fontSize: 10, color: c.faint, letterSpacing: '0.12em',
          textTransform: 'uppercase', marginBottom: 12,
        }}>
          Patient Dossier · {events.length} events · {years.length} year{years.length === 1 ? '' : 's'} on record
        </div>
        <h1 style={{
          fontFamily: SERIF, fontSize: 48, fontWeight: 500,
          letterSpacing: '-0.03em', lineHeight: 1.05, color: c.ink,
          margin: '0 0 12px',
        }}>
          {patient.display_name}<span style={{ color: c.accent }}>.</span>
        </h1>
        <div style={{
          fontFamily: SERIF, fontStyle: 'italic', fontSize: 18, color: c.muted,
          lineHeight: 1.45, maxWidth: 620, textWrap: 'pretty',
        }}>
          {patient.summary}
        </div>
        <div style={{ display: 'flex', gap: 24, marginTop: 22, alignItems: 'baseline' }}>
          {patient.age != null && <Stat c={c} value={patient.age} label="years old" />}
          {patient.gender && <Stat c={c} value={patient.gender} label="gender" />}
          {patient.mrn && <Stat c={c} value={patient.mrn} label="MRN" mono />}
          <Stat c={c} value={sourceCount} label="source docs" />
        </div>
        {patient.tags && patient.tags.length > 0 && (
          <div style={{ display: 'flex', gap: 8, marginTop: 20, flexWrap: 'wrap' }}>
            {patient.tags.map((t) => (
              <span key={t} style={{
                fontFamily: SANS, fontSize: 11, color: c.inkSoft,
                padding: '3px 10px', border: `1px solid ${c.rule}`, borderRadius: 2,
                background: c.bg,
              }}>{t}</span>
            ))}
          </div>
        )}
      </div>

      <div style={{ padding: '24px 36px 48px' }}>
        {years.map((y, yi) => (
          <YearSection key={y} c={c} year={y} events={byYear[y]} first={yi === 0} />
        ))}
      </div>
    </div>
  );
}
|
| 426 |
+
|
| 427 |
+
function Stat({ c, value, label, mono }) {
  // Tiny value-over-label metric; `mono` switches the value to the mono face
  // (used for identifiers like the MRN).
  const valueStyle = {
    fontFamily: mono ? MONO : SERIF,
    fontSize: mono ? 14 : 22, fontWeight: 500, color: c.ink, lineHeight: 1,
  };
  const labelStyle = {
    fontFamily: MONO, fontSize: 9.5, color: c.faint, letterSpacing: '0.1em',
    textTransform: 'uppercase', marginTop: 5,
  };
  return (
    <div>
      <div style={valueStyle}>{value}</div>
      <div style={labelStyle}>{label}</div>
    </div>
  );
}
|
| 441 |
+
|
| 442 |
+
function YearSection({ c, year, events, first }) {
  // One year of ledger rows: heading (year + event count), a vertical spine,
  // then a DocEvent per entry. At most one row is expanded at a time;
  // clicking the active row collapses it. (`first` is accepted for API
  // symmetry with the caller but unused here.)
  const [activeId, setActiveId] = useState(null);
  const toggle = (id) => setActiveId((cur) => (cur === id ? null : id));

  return (
    <div style={{ position: 'relative', marginBottom: 32 }}>
      <div style={{
        display: 'flex', alignItems: 'baseline', gap: 14,
        marginBottom: 8, paddingBottom: 8,
        borderBottom: `1px solid ${c.ruleSoft}`, marginLeft: 80,
      }}>
        <h2 style={{
          fontFamily: SERIF, fontSize: 32, fontWeight: 500, letterSpacing: '-0.02em',
          color: c.ink, margin: 0, lineHeight: 1,
        }}>{year}</h2>
        <div style={{
          fontFamily: MONO, fontSize: 10, color: c.faint, letterSpacing: '0.1em',
          textTransform: 'uppercase',
        }}>
          {events.length} {events.length === 1 ? 'event' : 'events'}
        </div>
      </div>

      {/* Spine the event icons sit on */}
      <div style={{
        position: 'absolute', left: 100, top: 56, bottom: 0,
        width: 1, background: c.rule,
      }} />

      {events.map((ev, ei) => (
        <DocEvent key={ev.id} c={c} e={ev} index={ei}
          active={activeId === ev.id}
          onClick={() => toggle(ev.id)} />
      ))}
    </div>
  );
}
|
| 476 |
+
|
| 477 |
+
function DocEvent({ c, e, index, active, onClick }) {
  // One ledger row: date gutter, category icon on the spine (with hover
  // tooltip), and a clickable body that expands to show source provenance.
  const cat = CAT[e.category] || CAT.other;
  const [hovered, setHovered] = useState(false);

  // Vertical center of the title line is roughly 26px from the top of the
  // content button (≈12px category label + 3px gap + half of 22px title line).
  // Center the icon there so it visually anchors to the title, not the date.
  const ICON_CENTER_Y = 26;
  const iconSize = active ? 30 : 26;
  const iconPadTop = Math.max(ICON_CENTER_Y - iconSize / 2, 0);

  const rowStyle = {
    display: 'flex', alignItems: 'flex-start',
    padding: '12px 0', position: 'relative',
  };
  const dateGutter = {
    width: 76, flexShrink: 0, paddingRight: 8,
    paddingTop: 16, textAlign: 'right',
  };
  const iconCell = {
    width: 32, flexShrink: 0, display: 'flex', justifyContent: 'center',
    paddingTop: iconPadTop, position: 'relative', zIndex: 1,
  };

  return (
    <div style={rowStyle}>
      <div style={dateGutter}>
        <div style={{
          fontFamily: SERIF, fontSize: 14, color: c.ink, fontWeight: 500,
          letterSpacing: '-0.01em',
        }}>
          {fmtDate(e.date, { y: false, short: true })}
        </div>
        <div style={{
          fontFamily: MONO, fontSize: 9.5, color: c.faint, letterSpacing: '0.06em',
          marginTop: 2,
        }}>
          {String(index + 1).padStart(2, '0')}
        </div>
      </div>

      <div style={iconCell}>
        {/* Hover wrapper sits exactly on the icon — tooltip uses bottom:100% relative to it */}
        <div style={{ position: 'relative', display: 'inline-block' }}
          onMouseEnter={() => setHovered(true)}
          onMouseLeave={() => setHovered(false)}>
          <div style={{
            width: iconSize, height: iconSize,
            borderRadius: e.category === 'diagnosis' ? 4 : '50%',
            background: active ? c.accent : c.paper,
            border: `1px solid ${active ? c.accent : c.rule}`,
            display: 'grid', placeItems: 'center',
            color: active ? c.paper : c.muted,
            transition: 'all .15s',
            boxShadow: hovered && !active ? `0 0 0 4px ${c.accentSoft}` : 'none',
          }}>
            <EventIcon category={e.category} size={active ? 16 : 14} />
          </div>
          {hovered && (
            <div role="tooltip" style={{
              position: 'absolute', bottom: '100%', left: '50%',
              transform: 'translateX(-50%)', marginBottom: 8,
              background: c.ink, color: c.bg,
              padding: '6px 10px', borderRadius: 2,
              fontFamily: MONO, fontSize: 10, letterSpacing: '0.06em',
              whiteSpace: 'nowrap', pointerEvents: 'none', zIndex: 50,
              boxShadow: '0 4px 14px rgba(0,0,0,.18)',
              display: 'flex', alignItems: 'baseline', gap: 6,
            }}>
              <span style={{ fontWeight: 600, textTransform: 'uppercase' }}>
                {cat.label}
              </span>
              <span style={{ opacity: 0.65 }}>· {cat.hint}</span>
              {/* Tooltip tail */}
              <span style={{
                position: 'absolute', top: '100%', left: '50%',
                transform: 'translateX(-50%)',
                width: 0, height: 0,
                borderLeft: '5px solid transparent',
                borderRight: '5px solid transparent',
                borderTop: `5px solid ${c.ink}`,
              }} />
            </div>
          )}
        </div>
      </div>

      <button onClick={onClick} style={{
        flex: 1, marginLeft: 18, textAlign: 'left',
        background: active ? c.accentSoft : 'transparent',
        padding: active ? '10px 14px' : '0',
        marginTop: active ? -4 : 0, marginBottom: active ? -4 : 0,
        border: 'none', cursor: 'pointer', color: c.ink, fontFamily: 'inherit',
        borderRadius: 2,
      }}>
        <div style={{ display: 'flex', alignItems: 'baseline', gap: 8, flexWrap: 'wrap' }}>
          <span style={{
            fontFamily: MONO, fontSize: 9.5, color: c.faint,
            textTransform: 'uppercase', letterSpacing: '0.1em',
          }}>{e.category}</span>
          {e.flag === 'critical' && (
            <span style={{
              fontFamily: MONO, fontSize: 9.5, color: c.accent,
              textTransform: 'uppercase', letterSpacing: '0.1em', fontWeight: 600,
            }}>· Critical</span>
          )}
          {(e.flag === 'high' || e.flag === 'low') && (
            <span style={{
              fontFamily: MONO, fontSize: 9.5, color: c.mark,
              textTransform: 'uppercase', letterSpacing: '0.1em', fontWeight: 600,
            }}>· {e.flag === 'high' ? 'High' : 'Low'}</span>
          )}
        </div>
        <div style={{
          fontFamily: SERIF, fontSize: 17, fontWeight: 500, color: c.ink,
          letterSpacing: '-0.012em', lineHeight: 1.3, marginTop: 3, textWrap: 'balance',
        }}>{e.title}</div>
        {/* Body text shows when expanded, or always for critical events. */}
        {e.body && e.body !== e.title && (active || e.flag === 'critical') && (
          <div style={{
            fontFamily: SERIF, fontSize: 14, color: c.inkSoft, lineHeight: 1.5,
            marginTop: 6, fontStyle: 'italic', maxWidth: 540, textWrap: 'pretty',
          }}>{e.body}</div>
        )}
        {/* Provenance footer only when expanded. */}
        {active && (
          <div style={{
            marginTop: 10, paddingTop: 8, borderTop: `1px solid ${c.rule}`,
            fontFamily: MONO, fontSize: 10.5, color: c.muted,
            display: 'flex', alignItems: 'center', gap: 12, flexWrap: 'wrap',
          }}>
            <span>Source: <span style={{ color: c.ink }}>{e.source}</span></span>
            {e.page && <span>Page {e.page}</span>}
            {e.snippet && (
              <span style={{
                fontStyle: 'italic', fontFamily: SERIF, fontSize: 12.5, color: c.inkSoft,
              }}>"{e.snippet}"</span>
            )}
          </div>
        )}
      </button>
    </div>
  );
}
|
| 613 |
+
|
| 614 |
+
// ─────────────────────────────────────────────────────────────────────
|
| 615 |
+
function ChatColumn({ c, patient }) {
  // Right pane: Q&A over the chart. Keeps a local message history per
  // patient and posts questions to /api/answer.
  const [history, setHistory] = useState([]);
  const [input, setInput] = useState('');
  const [thinking, setThinking] = useState(false);
  const scroller = useRef(null);

  // Reset the conversation when the selected patient changes.
  useEffect(() => {
    setHistory([]);
    setInput('');
  }, [patient.id]);

  // Keep the newest message (or the thinking indicator) in view.
  useEffect(() => {
    if (scroller.current) scroller.current.scrollTop = scroller.current.scrollHeight;
  }, [history, thinking]);

  // Send `text` (or the input box contents) and append the answer.
  // Fix: previously unguarded — a second Enter/click while a request was in
  // flight fired a duplicate concurrent request whose responses interleaved.
  const send = async (text) => {
    if (thinking) return;
    const q = (text || input).trim();
    if (!q) return;
    setInput('');
    setHistory((h) => [...h, { role: 'user', text: q }]);
    setThinking(true);
    try {
      const r = await fetch('/api/answer', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ patient_id: patient.id, question: q }),
      });
      const data = await r.json();
      if (data.error) {
        setHistory((h) => [...h, { role: 'assistant', text: `Error: ${data.error}`, citations: [] }]);
      } else {
        setHistory((h) => [...h, {
          role: 'assistant',
          text: data.text,
          citations: data.citations || [],
        }]);
      }
    } catch (err) {
      setHistory((h) => [...h, {
        role: 'assistant',
        text: `Network error: ${String(err)}`,
        citations: [],
      }]);
    } finally {
      setThinking(false);
    }
  };

  // Per-patient starter questions, falling back to the demo set.
  const examples = SUGGESTED[patient.id] || SUGGESTED.demo || [];

  return (
    <div style={{
      flex: 1, minWidth: 0, display: 'flex', flexDirection: 'column', background: c.bg,
    }}>
      <div style={{ padding: '20px 24px 14px', borderBottom: `1px solid ${c.rule}` }}>
        <div style={{
          fontFamily: MONO, fontSize: 10, color: c.faint, letterSpacing: '0.12em',
          textTransform: 'uppercase', marginBottom: 6,
        }}>
          The Reading Room
        </div>
        <div style={{
          fontFamily: SERIF, fontSize: 22, fontWeight: 500,
          letterSpacing: '-0.02em', color: c.ink, lineHeight: 1.15,
        }}>
          Ask a question.<br />
          <span style={{ fontStyle: 'italic', color: c.muted }}>Get a cited answer.</span>
        </div>
      </div>

      <div ref={scroller} style={{ flex: 1, overflowY: 'auto', padding: '18px 24px' }}>
        {history.length === 0 && (
          <div>
            <div style={{
              fontFamily: MONO, fontSize: 10, color: c.faint, letterSpacing: '0.1em',
              textTransform: 'uppercase', marginBottom: 12,
            }}>Suggested</div>
            {examples.map((ex, i) => (
              <button key={ex} onClick={() => send(ex)} style={{
                display: 'flex', gap: 12, alignItems: 'flex-start',
                width: '100%', textAlign: 'left',
                padding: '14px 0',
                borderTop: i === 0 ? `1px solid ${c.rule}` : 'none',
                borderBottom: `1px solid ${c.rule}`,
                background: 'transparent', border: 'none', cursor: 'pointer',
                borderRadius: 0, color: c.ink, fontFamily: 'inherit',
              }}>
                <span style={{
                  fontFamily: MONO, fontSize: 10, color: c.faint, letterSpacing: '0.1em',
                  width: 22, paddingTop: 4, flexShrink: 0,
                }}>0{i + 1}</span>
                <span style={{
                  flex: 1, fontFamily: SERIF, fontSize: 16, lineHeight: 1.4,
                  color: c.ink, fontWeight: 500, letterSpacing: '-0.01em',
                }}>{ex}</span>
                <span style={{ color: c.accent, fontSize: 16, marginTop: 1 }}>→</span>
              </button>
            ))}
          </div>
        )}

        {history.map((m, i) => (
          <div key={i} style={{ marginBottom: 22 }}>
            {m.role === 'user' ? (
              <div>
                <div style={{
                  fontFamily: MONO, fontSize: 9.5, color: c.faint, letterSpacing: '0.1em',
                  textTransform: 'uppercase', marginBottom: 6,
                }}>You asked</div>
                <div style={{
                  fontFamily: SERIF, fontSize: 19, lineHeight: 1.35, color: c.ink,
                  fontWeight: 500, letterSpacing: '-0.015em',
                }}>"{m.text}"</div>
              </div>
            ) : (
              <AssistantMessage c={c} m={m} patient={patient} />
            )}
          </div>
        ))}

        {thinking && (
          <div style={{
            fontFamily: MONO, fontSize: 11, color: c.muted, letterSpacing: '0.04em',
            padding: '8px 0',
          }}>
            <span style={{ animation: 'edit-blink 1s infinite' }}>▌</span>
            {' '}reading {patient.events.length} events…
          </div>
        )}
      </div>

      <div style={{ padding: '14px 24px 18px', borderTop: `1px solid ${c.rule}` }}>
        <div style={{
          display: 'flex', alignItems: 'center', gap: 10,
          border: `1px solid ${c.rule}`, borderRadius: 2,
          background: c.paper, padding: '10px 14px',
        }}>
          <span style={{
            fontFamily: SERIF, color: c.accent, fontSize: 18, fontStyle: 'italic',
          }}>?</span>
          <input value={input} onChange={(e) => setInput(e.target.value)}
            onKeyDown={(e) => e.key === 'Enter' && send()}
            placeholder="Ask anything about this chart…"
            style={{
              flex: 1, border: 'none', background: 'transparent',
              color: c.ink, fontSize: 14, outline: 'none',
              fontFamily: SERIF, padding: '2px 0',
            }} />
          <button onClick={() => send()} style={{
            padding: '6px 14px', borderRadius: 2,
            background: c.accent, border: 'none', color: c.paper,
            cursor: 'pointer', fontSize: 12, fontWeight: 500,
            fontFamily: SANS, letterSpacing: '0.02em',
          }}>Ask →</button>
        </div>
      </div>
    </div>
  );
}
|
| 774 |
+
|
| 775 |
+
// Render markdown-ish bold + inline citation markers like [src:foo.pdf#p2].
|
| 776 |
+
function AssistantMessage({ c, m, patient }) {
  // Renders an assistant turn: markdown-ish **bold** plus inline citation
  // markers like [src:foo.pdf#p2], which become numbered superscripts, with
  // a "Drawn from" footer listing the cited sources.
  const citeIndex = {};
  let nextN = 0;
  const text = (m.text || '').replace(/\[src:([^\]#]+)(?:#p(\d+))?\]/g, (_match, src, page) => {
    const key = `${src}|${page || ''}`;
    if (!(key in citeIndex)) {
      nextN += 1;
      citeIndex[key] = { n: nextN, src, page: page ? parseInt(page, 10) : null };
    }
    return `‹CITE:${citeIndex[key].n}›`;
  });

  // Split on the cite placeholders and bold runs so each piece renders on its own.
  const segments = text.split(/(‹CITE:\d+›|\*\*[^*]+\*\*)/g);

  const renderSegment = (seg, i) => {
    if (seg.startsWith('‹CITE:')) {
      const n = parseInt(seg.slice(6, -1), 10);
      return (
        <sup key={i} style={{
          color: c.accent, fontFamily: SERIF, fontStyle: 'italic',
          fontWeight: 700, fontSize: 11, padding: '0 2px',
        }}>{n}</sup>
      );
    }
    if (seg.startsWith('**') && seg.endsWith('**')) {
      return <strong key={i} style={{ fontWeight: 600 }}>{seg.slice(2, -2)}</strong>;
    }
    return <React.Fragment key={i}>{seg}</React.Fragment>;
  };

  return (
    <div>
      <div style={{
        fontFamily: MONO, fontSize: 9.5, color: c.faint, letterSpacing: '0.1em',
        textTransform: 'uppercase', marginBottom: 8,
      }}>The chart says</div>
      <div style={{
        fontFamily: SERIF, fontSize: 16.5, lineHeight: 1.55, color: c.ink,
        letterSpacing: '-0.005em', textWrap: 'pretty',
      }}>
        {segments.map(renderSegment)}
      </div>

      {(m.citations && m.citations.length > 0) && (
        <div style={{ marginTop: 16, paddingTop: 12, borderTop: `1px solid ${c.rule}` }}>
          <div style={{
            fontFamily: MONO, fontSize: 9.5, color: c.faint, letterSpacing: '0.1em',
            textTransform: 'uppercase', marginBottom: 8,
          }}>Drawn from</div>
          {m.citations.map((cit, i) => (
            <div key={i} style={{
              display: 'flex', alignItems: 'baseline', gap: 12, padding: '6px 0',
            }}>
              <span style={{
                fontFamily: SERIF, fontStyle: 'italic', color: c.accent,
                fontSize: 14, width: 18, flexShrink: 0,
              }}>{i + 1}.</span>
              <span style={{ flex: 1, fontSize: 12.5, lineHeight: 1.45 }}>
                {cit.snippet && (
                  <span style={{ fontFamily: SERIF, color: c.ink, fontWeight: 500 }}>
                    {cit.snippet}
                  </span>
                )}
                {cit.snippet && <span style={{ color: c.muted }}> · </span>}
                <span style={{ fontFamily: MONO, fontSize: 10.5, color: c.muted }}>
                  {cit.source_id}{cit.page ? ` p.${cit.page}` : ''}
                </span>
              </span>
            </div>
          ))}
        </div>
      )}
    </div>
  );
}
|
| 850 |
+
|
| 851 |
+
// Blinking cursor keyframes — injected once per page load, then mount the app.
(() => {
  if (document.getElementById('edit-keyframes')) return;
  const styleEl = document.createElement('style');
  styleEl.id = 'edit-keyframes';
  styleEl.textContent = `@keyframes edit-blink { 0%, 100% { opacity: 1; } 50% { opacity: 0; } }`;
  document.head.appendChild(styleEl);
})();

ReactDOM.createRoot(document.getElementById('root')).render(<App />);
|
static/index.html
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8" />
  <title>Recap — reads the whole chart</title>
  <meta name="viewport" content="width=device-width, initial-scale=1" />
  <meta name="description" content="Recap reads a patient's whole chart so you don't have to. Powered by MedGemma + Qwen on AMD MI300X." />

  <!-- Web fonts: serif for editorial text, Inter for UI, JetBrains Mono for data -->
  <link rel="preconnect" href="https://fonts.googleapis.com" />
  <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
  <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Source+Serif+4:ital,opsz,wght@0,8..60,400;0,8..60,500;0,8..60,600;1,8..60,400;1,8..60,500&family=Inter:wght@400;500;600&family=JetBrains+Mono:wght@400;500&display=swap" />

  <style>
    html, body, #root {
      margin: 0; padding: 0; height: 100%; min-height: 100%;
      background: #f4ede2;
      font-family: "Inter", -apple-system, BlinkMacSystemFont, "Segoe UI", system-ui, sans-serif;
      -webkit-font-smoothing: antialiased;
      -moz-osx-font-smoothing: grayscale;
    }
    *::-webkit-scrollbar { width: 8px; height: 8px; }
    *::-webkit-scrollbar-thumb { background: rgba(127,127,127,.28); border-radius: 4px; }
    *::-webkit-scrollbar-thumb:hover { background: rgba(127,127,127,.45); }
    *::-webkit-scrollbar-track { background: transparent; }
  </style>

  <!-- React UMD + in-browser Babel: app.jsx is transpiled at load time -->
  <script src="https://unpkg.com/react@18.3.1/umd/react.production.min.js" crossorigin="anonymous"></script>
  <script src="https://unpkg.com/react-dom@18.3.1/umd/react-dom.production.min.js" crossorigin="anonymous"></script>
  <script src="https://unpkg.com/@babel/standalone@7.29.0/babel.min.js" crossorigin="anonymous"></script>
</head>
<body>
  <!-- Static splash shown until React mounts and replaces #root's contents -->
  <div id="root">
    <div style="position:absolute;inset:0;display:grid;place-items:center;font-family:'Source Serif 4',Georgia,serif;color:#3a2e25;">
      <div style="text-align:center;">
        <div style="font-size:42px;letter-spacing:-.03em;">Recap<span style="color:#b8412e;">.</span></div>
        <div style="font-style:italic;color:#6b5c4a;margin-top:8px;">loading the chart…</div>
      </div>
    </div>
  </div>
  <script type="text/babel" data-presets="env,react" src="/static/app.jsx"></script>
</body>
</html>
|
tests/__init__.py
ADDED
|
File without changes
|
tests/fixtures/_make_tiny_pdf.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""One-shot fixture generator. Produces tests/fixtures/tiny_lab.pdf.
|
| 2 |
+
|
| 3 |
+
Run: python tests/fixtures/_make_tiny_pdf.py
|
| 4 |
+
|
| 5 |
+
We commit the resulting PDF so tests can run without reportlab in CI.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from reportlab.pdfgen import canvas
|
| 9 |
+
|
| 10 |
+
OUT = "tests/fixtures/tiny_lab.pdf"
|
| 11 |
+
|
| 12 |
+
c = canvas.Canvas(OUT)
|
| 13 |
+
c.drawString(72, 750, "LABORATORY REPORT")
|
| 14 |
+
c.drawString(72, 720, "Patient: Jane Doe Date: 2022-03-14")
|
| 15 |
+
c.drawString(72, 690, "Creatinine: 1.4 mg/dL (Reference: 0.6-1.2)")
|
| 16 |
+
c.drawString(72, 660, "eGFR: 52 mL/min/1.73m^2")
|
| 17 |
+
c.showPage()
|
| 18 |
+
c.save()
|
| 19 |
+
print(f"Wrote {OUT}")
|
tests/fixtures/tiny_fhir.json
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"resourceType": "Bundle",
|
| 3 |
+
"type": "transaction",
|
| 4 |
+
"entry": [
|
| 5 |
+
{
|
| 6 |
+
"resource": {
|
| 7 |
+
"resourceType": "Patient",
|
| 8 |
+
"id": "p1",
|
| 9 |
+
"name": [{"family": "Doe123", "given": ["Jane45"]}],
|
| 10 |
+
"birthDate": "1957-04-12",
|
| 11 |
+
"gender": "female"
|
| 12 |
+
}
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"resource": {
|
| 16 |
+
"resourceType": "Observation",
|
| 17 |
+
"id": "o1",
|
| 18 |
+
"status": "final",
|
| 19 |
+
"code": {"text": "Creatinine"},
|
| 20 |
+
"effectiveDateTime": "2022-03-14T10:00:00Z",
|
| 21 |
+
"valueQuantity": {"value": 1.4, "unit": "mg/dL"},
|
| 22 |
+
"subject": {"reference": "Patient/p1"}
|
| 23 |
+
}
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"resource": {
|
| 27 |
+
"resourceType": "Encounter",
|
| 28 |
+
"id": "e1",
|
| 29 |
+
"status": "finished",
|
| 30 |
+
"class": {"code": "AMB", "display": "Ambulatory"},
|
| 31 |
+
"period": {"start": "2022-03-14T09:30:00Z", "end": "2022-03-14T10:15:00Z"},
|
| 32 |
+
"reasonCode": [{"text": "Nephrology consult"}],
|
| 33 |
+
"subject": {"reference": "Patient/p1"}
|
| 34 |
+
}
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"resource": {
|
| 38 |
+
"resourceType": "MedicationRequest",
|
| 39 |
+
"id": "m1",
|
| 40 |
+
"status": "active",
|
| 41 |
+
"intent": "order",
|
| 42 |
+
"medicationCodeableConcept": {"text": "Lisinopril 10 mg"},
|
| 43 |
+
"authoredOn": "2022-03-14",
|
| 44 |
+
"subject": {"reference": "Patient/p1"}
|
| 45 |
+
}
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"resource": {
|
| 49 |
+
"resourceType": "Condition",
|
| 50 |
+
"id": "c1",
|
| 51 |
+
"clinicalStatus": {"coding": [{"code": "active"}]},
|
| 52 |
+
"code": {"text": "Chronic kidney disease, stage 3"},
|
| 53 |
+
"onsetDateTime": "2022-04-01T00:00:00Z",
|
| 54 |
+
"subject": {"reference": "Patient/p1"}
|
| 55 |
+
}
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"resource": {
|
| 59 |
+
"resourceType": "Procedure",
|
| 60 |
+
"id": "pr1",
|
| 61 |
+
"status": "completed",
|
| 62 |
+
"code": {"text": "Renal ultrasound"},
|
| 63 |
+
"performedDateTime": "2022-04-15T11:00:00Z",
|
| 64 |
+
"subject": {"reference": "Patient/p1"}
|
| 65 |
+
}
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"resource": {
|
| 69 |
+
"resourceType": "DiagnosticReport",
|
| 70 |
+
"id": "dr1",
|
| 71 |
+
"status": "final",
|
| 72 |
+
"code": {"text": "Comprehensive metabolic panel"},
|
| 73 |
+
"effectiveDateTime": "2022-03-14T10:30:00Z",
|
| 74 |
+
"conclusion": "Mildly elevated creatinine consistent with stage 3 CKD.",
|
| 75 |
+
"subject": {"reference": "Patient/p1"}
|
| 76 |
+
}
|
| 77 |
+
}
|
| 78 |
+
]
|
| 79 |
+
}
|
tests/fixtures/tiny_lab.pdf
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
%PDF-1.3
|
| 2 |
+
%���� ReportLab Generated PDF document (opensource)
|
| 3 |
+
1 0 obj
|
| 4 |
+
<<
|
| 5 |
+
/F1 2 0 R
|
| 6 |
+
>>
|
| 7 |
+
endobj
|
| 8 |
+
2 0 obj
|
| 9 |
+
<<
|
| 10 |
+
/BaseFont /Helvetica /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font
|
| 11 |
+
>>
|
| 12 |
+
endobj
|
| 13 |
+
3 0 obj
|
| 14 |
+
<<
|
| 15 |
+
/Contents 7 0 R /MediaBox [ 0 0 595.2756 841.8898 ] /Parent 6 0 R /Resources <<
|
| 16 |
+
/Font 1 0 R /ProcSet [ /PDF /Text /ImageB /ImageC /ImageI ]
|
| 17 |
+
>> /Rotate 0 /Trans <<
|
| 18 |
+
|
| 19 |
+
>>
|
| 20 |
+
/Type /Page
|
| 21 |
+
>>
|
| 22 |
+
endobj
|
| 23 |
+
4 0 obj
|
| 24 |
+
<<
|
| 25 |
+
/PageMode /UseNone /Pages 6 0 R /Type /Catalog
|
| 26 |
+
>>
|
| 27 |
+
endobj
|
| 28 |
+
5 0 obj
|
| 29 |
+
<<
|
| 30 |
+
/Author (anonymous) /CreationDate (D:20260504232000+05'00') /Creator (anonymous) /Keywords () /ModDate (D:20260504232000+05'00') /Producer (ReportLab PDF Library - \(opensource\))
|
| 31 |
+
/Subject (unspecified) /Title (untitled) /Trapped /False
|
| 32 |
+
>>
|
| 33 |
+
endobj
|
| 34 |
+
6 0 obj
|
| 35 |
+
<<
|
| 36 |
+
/Count 1 /Kids [ 3 0 R ] /Type /Pages
|
| 37 |
+
>>
|
| 38 |
+
endobj
|
| 39 |
+
7 0 obj
|
| 40 |
+
<<
|
| 41 |
+
/Filter [ /ASCII85Decode /FlateDecode ] /Length 232
|
| 42 |
+
>>
|
| 43 |
+
stream
|
| 44 |
+
Garo:4U]+l&4Ckp`KVht\Qr]sl/F']+ED9CL_+YQ&afeDs2W#Z===8U%/V)50Hp)&(C)Jpad,1#BgHI'67Qe^'RKVgk)=\*+:dG3>h6?Jg)aZ]LYBlREed><&3LMZVXN%/"nmpWX<.dWh=Om$%<H&l&Z't'fj^&ESf0H"o)YU?dG9t$"e<S5>CENdK\jXM6nt;\s)$Fse(rRjQnr!4\BZH9k;.>E0+O`n9f>Fn~>endstream
|
| 45 |
+
endobj
|
| 46 |
+
xref
|
| 47 |
+
0 8
|
| 48 |
+
0000000000 65535 f
|
| 49 |
+
0000000061 00000 n
|
| 50 |
+
0000000092 00000 n
|
| 51 |
+
0000000199 00000 n
|
| 52 |
+
0000000402 00000 n
|
| 53 |
+
0000000470 00000 n
|
| 54 |
+
0000000731 00000 n
|
| 55 |
+
0000000790 00000 n
|
| 56 |
+
trailer
|
| 57 |
+
<<
|
| 58 |
+
/ID
|
| 59 |
+
[<de94e39f5d88808ade2bdab9cb3e3993><de94e39f5d88808ade2bdab9cb3e3993>]
|
| 60 |
+
% ReportLab generated PDF document -- digest (opensource)
|
| 61 |
+
|
| 62 |
+
/Info 5 0 R
|
| 63 |
+
/Root 4 0 R
|
| 64 |
+
/Size 8
|
| 65 |
+
>>
|
| 66 |
+
startxref
|
| 67 |
+
1112
|
| 68 |
+
%%EOF
|
tests/test_cases.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for recap.cases.load_case: manifest parsing and multi-source merging."""

import json
import shutil

from recap.cases import load_case


def test_load_case_with_only_fhir(tmp_path):
    """A case with only a FHIR bundle loads events and honors the manifest name."""
    case = tmp_path / "tiny"
    case.mkdir()
    (case / "manifest.json").write_text(json.dumps({
        "id": "tiny",
        "display_name": "Tiny Test",
        "fhir_bundle": "fhir.json",
        "docs": [],
        "images": [],
        "demo_questions": [],
    }))
    shutil.copy("tests/fixtures/tiny_fhir.json", case / "fhir.json")

    p = load_case(str(tmp_path), "tiny")
    assert p.id == "tiny"
    assert p.display_name == "Tiny Test"  # manifest override wins
    assert len(p.events) > 0


def test_load_case_pulls_display_name_from_fhir_when_manifest_omits_it(tmp_path):
    """Minimal manifest — name, age, gender all come from the FHIR Patient resource."""
    case = tmp_path / "auto"
    case.mkdir()
    (case / "manifest.json").write_text(json.dumps({
        "id": "auto",
        "fhir_bundle": "fhir.json",
        "demo_questions": [],
    }))
    shutil.copy("tests/fixtures/tiny_fhir.json", case / "fhir.json")

    p = load_case(str(tmp_path), "auto")
    assert p.display_name.startswith("Jane Doe")  # from FHIR Patient.name
    assert p.age is not None and p.age >= 60
    assert p.gender == "female"


def test_load_case_with_pdf_docs(tmp_path):
    """A manifest-listed PDF becomes one event carrying the PDF's text."""
    case = tmp_path / "tiny"
    case.mkdir()
    (case / "docs").mkdir()
    shutil.copy("tests/fixtures/tiny_lab.pdf", case / "docs" / "lab.pdf")

    (case / "manifest.json").write_text(json.dumps({
        "id": "tiny",
        "display_name": "Tiny",
        "fhir_bundle": None,
        "docs": [{
            "file": "docs/lab.pdf",
            "date": "2022-03-14",
            "category": "lab",
            "title": "Renal panel",
        }],
        "images": [],
        "demo_questions": [],
    }))

    p = load_case(str(tmp_path), "tiny")
    pdf_events = [e for e in p.events if e.source == "lab.pdf"]
    assert len(pdf_events) == 1
    assert pdf_events[0].category == "lab"
    assert "Creatinine" in pdf_events[0].body


def test_load_case_with_images(tmp_path):
    """A manifest-listed image becomes one event sourced from its filename."""
    case = tmp_path / "tiny"
    case.mkdir()
    (case / "images").mkdir()
    from PIL import Image
    Image.new("RGB", (10, 10), "white").save(case / "images" / "fundus.png")

    (case / "manifest.json").write_text(json.dumps({
        "id": "tiny",
        "display_name": "Tiny",
        "fhir_bundle": None,
        "docs": [],
        "images": [{
            "file": "images/fundus.png",
            "date": "2023-04-01",
            "category": "scan",
            "title": "Fundus photo",
        }],
        "demo_questions": [],
    }))

    p = load_case(str(tmp_path), "tiny")
    assert len(p.events) == 1
    assert p.events[0].category == "scan"
    assert p.events[0].source == "fundus.png"


def test_load_case_events_chronologically_orderable(tmp_path):
    """Multi-source case (FHIR + PDF + image) yields a sortable timeline."""
    case = tmp_path / "tiny"
    case.mkdir()
    (case / "docs").mkdir()
    (case / "images").mkdir()
    shutil.copy("tests/fixtures/tiny_fhir.json", case / "fhir.json")
    shutil.copy("tests/fixtures/tiny_lab.pdf", case / "docs" / "lab.pdf")
    from PIL import Image
    Image.new("RGB", (10, 10), "white").save(case / "images" / "fundus.png")

    (case / "manifest.json").write_text(json.dumps({
        "id": "tiny",
        "display_name": "Tiny",
        "fhir_bundle": "fhir.json",
        "docs": [{
            "file": "docs/lab.pdf",
            "date": "2022-03-14",
            "category": "lab",
            "title": "Lab",
        }],
        "images": [{
            "file": "images/fundus.png",
            "date": "2023-04-01",
            "category": "scan",
            "title": "Fundus",
        }],
        "demo_questions": [],
    }))

    p = load_case(str(tmp_path), "tiny")
    # Sorting must not raise (e.g. TypeError from mixing naive and tz-aware
    # datetimes across sources) and must yield a non-decreasing sequence.
    # NOTE: the previous assertion compared two identically-sorted lists and
    # could never fail.
    ordered = sorted(p.events, key=lambda e: e.date)
    dates = [e.date for e in ordered]
    assert all(a <= b for a, b in zip(dates, dates[1:]))
    # Both non-FHIR sources contributed events to the merged timeline.
    assert {e.source for e in p.events} >= {"lab.pdf", "fundus.png"}
|
tests/test_inference_gateway.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for the inference gateway. These exercise citation parsing and
|
| 2 |
+
backend routing without loading any model — the mock backend is enough.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
|
| 8 |
+
import pytest
|
| 9 |
+
|
| 10 |
+
import recap.inference.gateway as gw
|
| 11 |
+
from recap.models import Event
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _ev(src, date_iso="2022-03-14"):
|
| 15 |
+
return Event(
|
| 16 |
+
id=src,
|
| 17 |
+
date=datetime.fromisoformat(date_iso),
|
| 18 |
+
category="lab",
|
| 19 |
+
title=f"Record from {src}",
|
| 20 |
+
source=src,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def test_parses_citations_from_model_output():
|
| 25 |
+
text = (
|
| 26 |
+
"Creatinine first crossed normal in March 2022 [src:lab_2022.pdf#p1]. "
|
| 27 |
+
"eGFR was 52 [src:lab_2022.pdf]."
|
| 28 |
+
)
|
| 29 |
+
cites = gw._parse_citations(text, [_ev("lab_2022.pdf")])
|
| 30 |
+
assert len(cites) == 2
|
| 31 |
+
assert cites[0].page == 1
|
| 32 |
+
assert cites[1].page is None
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def test_dedupes_repeated_citations():
|
| 36 |
+
text = "[src:a.pdf] said X [src:a.pdf]"
|
| 37 |
+
cites = gw._parse_citations(text, [_ev("a.pdf")])
|
| 38 |
+
assert len(cites) == 1
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_drops_citations_to_unknown_sources():
|
| 42 |
+
text = "[src:hallucinated.pdf]"
|
| 43 |
+
cites = gw._parse_citations(text, [])
|
| 44 |
+
assert cites == []
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def test_dedupe_treats_different_pages_as_different_citations():
|
| 48 |
+
text = "[src:a.pdf#p1] earlier [src:a.pdf#p2] later"
|
| 49 |
+
cites = gw._parse_citations(text, [_ev("a.pdf")])
|
| 50 |
+
assert len(cites) == 2
|
| 51 |
+
assert {c.page for c in cites} == {1, 2}
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def test_answer_end_to_end_with_mock_backend(monkeypatch):
|
| 55 |
+
"""Full pipeline: question -> retrieve -> mock -> cited answer."""
|
| 56 |
+
monkeypatch.setenv("RECAP_BACKEND", "mock")
|
| 57 |
+
events = [
|
| 58 |
+
_ev("lab_2022.pdf", "2022-03-14"),
|
| 59 |
+
_ev("visit_2023.pdf", "2023-01-01"),
|
| 60 |
+
]
|
| 61 |
+
a = gw.answer("when did the lab change", events)
|
| 62 |
+
assert "[mock answer]" in a.text
|
| 63 |
+
assert any(c.source_id in {"lab_2022.pdf", "visit_2023.pdf"} for c in a.citations)
|
tests/test_ingestion_fhir.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for recap.ingestion.fhir: per-resource event mapping and demographics."""

from datetime import datetime, timezone

from recap.ingestion.fhir import load_bundle, load_demographics


FIXTURE = "tests/fixtures/tiny_fhir.json"


def test_loads_observation_as_lab_event():
    """An Observation becomes a 'lab' event carrying value and effective time."""
    events = load_bundle(FIXTURE, source_id="tiny_fhir.json")
    labs = [e for e in events if e.category == "lab"]
    assert len(labs) == 1
    assert "Creatinine" in labs[0].title
    assert "1.4" in labs[0].title or "1.4" in labs[0].body
    assert labs[0].date == datetime.fromisoformat("2022-03-14T10:00:00+00:00")


def test_loads_encounter_as_visit_event():
    """An Encounter becomes a 'visit' event titled after its reason."""
    events = load_bundle(FIXTURE, source_id="tiny_fhir.json")
    visits = [e for e in events if e.category == "visit"]
    assert len(visits) == 1
    assert "Nephrology" in visits[0].title


def test_loads_medication_as_med_event():
    """A MedicationRequest becomes a 'med' event titled after the drug."""
    events = load_bundle(FIXTURE, source_id="tiny_fhir.json")
    meds = [e for e in events if e.category == "med"]
    assert len(meds) == 1
    assert "Lisinopril" in meds[0].title


def test_loads_condition_as_diagnosis_event():
    """A Condition becomes a 'diagnosis' event preserving clinical status."""
    events = load_bundle(FIXTURE, source_id="tiny_fhir.json")
    dx = [e for e in events if e.category == "diagnosis"]
    assert len(dx) == 1
    assert "Chronic kidney disease" in dx[0].title
    assert dx[0].metadata["clinical_status"] == "active"


def test_loads_procedure_as_procedure_event():
    """A Procedure becomes a 'procedure' event."""
    events = load_bundle(FIXTURE, source_id="tiny_fhir.json")
    procs = [e for e in events if e.category == "procedure"]
    assert len(procs) == 1
    assert "Renal ultrasound" in procs[0].title


def test_loads_diagnostic_report_as_report_event():
    """A DiagnosticReport becomes a 'report' event with its conclusion as body."""
    events = load_bundle(FIXTURE, source_id="tiny_fhir.json")
    reports = [e for e in events if e.category == "report"]
    assert len(reports) == 1
    assert "metabolic panel" in reports[0].title.lower()
    assert "stage 3 CKD" in reports[0].body


def test_events_are_chronologically_orderable():
    """Sorting by date must not raise and must yield a non-decreasing sequence.

    NOTE: the previous assertion compared two identically-sorted lists and
    could never fail; this version checks the sorted order itself.
    """
    events = load_bundle(FIXTURE, source_id="tiny_fhir.json")
    sorted_events = sorted(events, key=lambda e: e.date)
    dates = [e.date for e in sorted_events]
    assert all(a <= b for a, b in zip(dates, dates[1:]))
    assert len(sorted_events) == len(events)


def test_load_demographics_extracts_name_age_gender():
    demo = load_demographics(FIXTURE)
    assert demo is not None
    # Trailing digits ("Jane45 Doe123") stripped for display
    assert demo.display_name.startswith("Jane Doe")
    assert demo.gender == "female"
    # Born 1957 → age depends on current date but should be > 60
    assert demo.age is not None and demo.age >= 60


def test_load_demographics_returns_none_if_no_patient_resource(tmp_path):
    """A bundle lacking any Patient resource yields None, not an error."""
    import json
    p = tmp_path / "no_patient.json"
    p.write_text(json.dumps({"resourceType": "Bundle", "entry": []}))
    assert load_demographics(str(p)) is None
|
tests/test_ingestion_image.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for recap.ingestion.image.load_image_event."""

from datetime import datetime

from PIL import Image

from recap.ingestion.image import load_image_event


def _save_blank(path, color, size=(10, 10)):
    """Write a solid-color test image to *path*."""
    Image.new("RGB", size, color).save(path)


def test_loads_image_with_provided_date_and_category(tmp_path):
    """Explicit category/date/source_id are carried onto the event."""
    target = tmp_path / "fundus.png"
    _save_blank(target, "white", size=(100, 100))
    event = load_image_event(
        str(target),
        category="scan",
        title="Right fundus",
        date_iso="2023-04-01",
        source_id="fundus_2023.png",
    )
    assert event.category == "scan"
    assert event.date == datetime.fromisoformat("2023-04-01T00:00:00+00:00")
    assert event.source == "fundus_2023.png"
    assert "fundus" in event.title.lower()


def test_default_source_id_is_filename(tmp_path):
    """Omitting source_id falls back to the image's filename."""
    target = tmp_path / "ct_chest.png"
    _save_blank(target, "black")
    event = load_image_event(str(target), category="scan", title="CT chest", date_iso="2024-01-15")
    assert event.source == "ct_chest.png"


def test_image_path_preserved_in_metadata(tmp_path):
    """The original file path survives in event metadata for later display."""
    target = tmp_path / "wound.jpg"
    _save_blank(target, "red")
    event = load_image_event(str(target), category="photo", title="Wound day 7", date_iso="2024-06-20")
    assert event.metadata["path"] == str(target)
|
tests/test_ingestion_pdf.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for recap.ingestion.pdf.load_pdf against the one-page lab fixture."""

from recap.ingestion.pdf import load_pdf

FIXTURE = "tests/fixtures/tiny_lab.pdf"


def test_extracts_pages_with_text_and_metadata():
    """The fixture yields exactly one page carrying its lab text."""
    pages = load_pdf(FIXTURE)
    assert len(pages) == 1
    first = pages[0]
    assert first.page_number == 1
    assert "Creatinine" in first.text
    assert "1.4 mg/dL" in first.text


def test_pages_have_source_id():
    """An explicit source_id is attached to every extracted page."""
    pages = load_pdf(FIXTURE, source_id="lab_2022-03-14.pdf")
    assert pages[0].source_id == "lab_2022-03-14.pdf"


def test_default_source_id_is_filename():
    """Omitting source_id falls back to the PDF's filename."""
    pages = load_pdf(FIXTURE)
    assert pages[0].source_id == "tiny_lab.pdf"
|
tests/test_mi300x_client.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for recap.inference.mi300x_client._post: URL handling and retry logic."""

import httpx
import pytest

import recap.inference.mi300x_client as client


class _FakeResp:
    """Stand-in for an httpx.Response with just the methods _post uses."""

    def __init__(self, status_code=200, json_data=None):
        self.status_code = status_code
        self._json = json_data or {}

    def raise_for_status(self):
        # Mirror httpx semantics: raise only for 4xx/5xx.
        if self.status_code >= 400:
            req = httpx.Request("POST", "http://x")
            raise httpx.HTTPStatusError("err", request=req, response=httpx.Response(self.status_code))

    def json(self):
        return self._json


def test_raises_when_url_unset(monkeypatch):
    """Without RECAP_MI300X_URL set, _post fails fast with a clear error."""
    monkeypatch.delenv("RECAP_MI300X_URL", raising=False)
    with pytest.raises(RuntimeError, match="RECAP_MI300X_URL"):
        client._post("medgemma", "sys", "user")


def test_posts_to_correct_url(monkeypatch):
    """_post appends the model name to the base URL and sends system/user JSON."""
    monkeypatch.setenv("RECAP_MI300X_URL", "https://example.test")
    seen = {}

    def fake_post(url, json, timeout):
        seen["url"] = url
        seen["json"] = json
        return _FakeResp(200, {"text": "hello"})

    monkeypatch.setattr(client.httpx, "post", fake_post)
    out = client._post("qwen", "sys-prompt", "user-prompt")
    assert out == "hello"  # "text" field of the response body is returned
    assert seen["url"] == "https://example.test/qwen"
    assert seen["json"] == {"system": "sys-prompt", "user": "user-prompt"}


def test_retries_on_transport_errors(monkeypatch):
    """Transient connect errors are retried; the third attempt succeeds."""
    monkeypatch.setenv("RECAP_MI300X_URL", "https://example.test")
    # Neutralize backoff sleeps so the test stays fast.
    monkeypatch.setattr(client.time, "sleep", lambda *_: None)
    calls = {"n": 0}

    def flaky_post(url, json, timeout):
        calls["n"] += 1
        if calls["n"] < 3:
            raise httpx.ConnectError("boom")
        return _FakeResp(200, {"text": "ok"})

    monkeypatch.setattr(client.httpx, "post", flaky_post)
    out = client._post("medgemma", "s", "u")
    assert out == "ok"
    assert calls["n"] == 3


def test_gives_up_after_three_attempts(monkeypatch):
    """Persistent connect errors surface as RuntimeError after exactly 3 tries."""
    monkeypatch.setenv("RECAP_MI300X_URL", "https://example.test")
    monkeypatch.setattr(client.time, "sleep", lambda *_: None)
    calls = {"n": 0}

    def always_fail(url, json, timeout):
        calls["n"] += 1
        raise httpx.ConnectError("down")

    monkeypatch.setattr(client.httpx, "post", always_fail)
    with pytest.raises(RuntimeError, match="failed after 3 attempts"):
        client._post("medgemma", "s", "u")
    assert calls["n"] == 3


def test_strips_trailing_slash_from_url(monkeypatch):
    """A base URL with a trailing slash does not produce a double slash."""
    monkeypatch.setenv("RECAP_MI300X_URL", "https://example.test/")
    seen = {}

    def fake_post(url, json, timeout):
        seen["url"] = url
        return _FakeResp(200, {"text": ""})

    monkeypatch.setattr(client.httpx, "post", fake_post)
    client._post("qwen", "s", "u")
    assert seen["url"] == "https://example.test/qwen"
|
tests/test_models.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for the core recap.models data types."""

from datetime import datetime

from recap.models import Citation, Event, Patient


def test_citation_roundtrip():
    """A Citation serializes to a dict preserving its fields."""
    cite = Citation(source_id="lab_2022-03-14.pdf", page=2, snippet="Cr 1.4 mg/dL")
    dumped = cite.model_dump()
    assert dumped["source_id"] == "lab_2022-03-14.pdf"
    assert dumped["page"] == 2


def test_event_orderable_by_date():
    """Events sort chronologically when keyed on their date."""
    earlier = Event(
        id="a",
        date=datetime(2022, 3, 14),
        category="lab",
        title="Cr 1.4",
        source="lab_2022-03-14.pdf",
    )
    later = Event(
        id="b",
        date=datetime(2023, 1, 1),
        category="visit",
        title="Nephrology",
        source="visit_2023-01-01.pdf",
    )
    assert sorted([later, earlier], key=lambda e: e.date) == [earlier, later]


def test_patient_holds_events():
    """A Patient can be constructed with an empty event list."""
    patient = Patient(id="sarah", display_name="Sarah, 67", events=[])
    assert patient.id == "sarah"
    assert len(patient.events) == 0
|
tests/test_reasoner.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for recap.reasoner.two_stage: the extract -> synthesize pipeline."""

from recap.reasoner import EXTRACT_SYSTEM, SYNTHESIZE_SYSTEM, two_stage


def test_two_stage_pipes_extract_into_synthesize():
    """two_stage feeds records+question to extract, then evidence to synthesize."""
    calls: dict = {}

    def record_extract(system, user):
        calls["extract_system"], calls["extract_user"] = system, user
        return "Cr 1.4 mg/dL on 2022-03-14 [src:lab.pdf]"

    def record_synth(system, user):
        calls["synth_system"], calls["synth_user"] = system, user
        return "Creatinine first crossed normal in March 2022 [src:lab.pdf]."

    result = two_stage(
        "When did kidney function decline?",
        "Patient records block",
        extract_fn=record_extract,
        synthesize_fn=record_synth,
    )
    assert result == "Creatinine first crossed normal in March 2022 [src:lab.pdf]."

    # Extract stage receives both the record block and the question.
    assert "Patient records block" in calls["extract_user"]
    assert "When did kidney function decline?" in calls["extract_user"]
    assert calls["extract_system"] == EXTRACT_SYSTEM

    # Synthesize stage receives the extracted evidence plus the question.
    assert "Cr 1.4 mg/dL on 2022-03-14 [src:lab.pdf]" in calls["synth_user"]
    assert "When did kidney function decline?" in calls["synth_user"]
    assert calls["synth_system"] == SYNTHESIZE_SYSTEM


def test_citations_survive_the_pipeline():
    """The whole point of two-stage is that MedGemma's [src:...] markers
    flow through Qwen's synthesis intact, so the gateway can parse them."""
    extract_stub = lambda s, u: "Cr 1.4 [src:lab_2022.pdf#p1] eGFR 52 [src:lab_2022.pdf#p1]"
    synth_stub = lambda s, u: "She crossed the CKD threshold [src:lab_2022.pdf#p1]."

    result = two_stage("when?", "block", extract_fn=extract_stub, synthesize_fn=synth_stub)
    assert "[src:lab_2022.pdf#p1]" in result


def test_evidence_string_is_passed_verbatim_to_synth():
    """If MedGemma returns text with leading/trailing whitespace,
    we strip it before feeding to Qwen so no double-empty-lines slip through."""
    synth_inputs = []

    def extract_stub(s, u):
        return "  evidence text  \n\n"

    def synth_spy(s, u):
        synth_inputs.append(u)
        return "answer"

    two_stage("q", "b", extract_fn=extract_stub, synthesize_fn=synth_spy)
    assert "  evidence text  " not in synth_inputs[0]
    assert "evidence text" in synth_inputs[0]
|
tests/test_retrieval.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for recap.retrieval.retrieve: relevance ranking and fallbacks."""

from datetime import datetime

from recap.models import Event
from recap.retrieval import retrieve


def _lab_event(event_id, text, when="2022-01-01"):
    """Build a minimal lab Event whose title and body both equal *text*."""
    return Event(
        id=event_id,
        date=datetime.fromisoformat(when),
        category="lab",
        title=text,
        source="x",
        body=text,
    )


def test_retrieves_relevant_events_for_question():
    """The creatinine question ranks the creatinine event first."""
    corpus = [
        _lab_event("a", "Creatinine 1.4 mg/dL — first abnormal reading"),
        _lab_event("b", "Influenza vaccination administered"),
        _lab_event("c", "Hemoglobin A1c 8.2%"),
    ]
    top = retrieve("when did creatinine become abnormal", corpus, top_k=2)
    assert top[0].id == "a"


def test_retrieve_returns_at_most_top_k():
    """With 20 matching events and top_k=5, exactly 5 come back."""
    corpus = [_lab_event(str(n), f"event {n}") for n in range(20)]
    assert len(retrieve("event", corpus, top_k=5)) == 5


def test_retrieve_handles_empty_event_list():
    """An empty corpus yields an empty result, not an error."""
    assert retrieve("anything", [], top_k=5) == []


def test_retrieve_falls_back_to_first_k_when_no_match():
    """A query matching nothing still returns events rather than nothing."""
    corpus = [_lab_event("a", "alpha"), _lab_event("b", "beta"), _lab_event("c", "gamma")]
    assert len(retrieve("zzzzz", corpus, top_k=2)) == 2  # falls back to first k rather than empty
|
tests/test_timeline.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for recap.timeline.build_timeline: ordering and year grouping."""

from datetime import datetime

from recap.models import Event
from recap.timeline import build_timeline


def _make_event(iso, category="lab", label="t"):
    """Build a minimal Event dated at *iso*, with the date doubling as the id."""
    return Event(
        id=iso,
        date=datetime.fromisoformat(iso),
        category=category,
        title=label,
        source="x",
    )


def test_timeline_sorts_chronologically():
    """Events fed out of order come back date-sorted."""
    timeline = build_timeline([
        _make_event("2023-01-01"),
        _make_event("2020-05-15"),
        _make_event("2022-12-31"),
    ])
    observed = [e.date for e in timeline.events]
    assert observed == sorted(observed)


def test_timeline_groups_by_year():
    """years_covered holds each distinct year exactly once."""
    timeline = build_timeline([
        _make_event("2020-01-01"),
        _make_event("2020-06-01"),
        _make_event("2021-01-01"),
    ])
    assert sorted(timeline.years_covered) == [2020, 2021]


def test_empty_timeline_handles_zero_events():
    """An empty input yields an empty timeline, not an error."""
    timeline = build_timeline([])
    assert timeline.events == []
    assert timeline.years_covered == []
|