medi422 committed · verified
Commit 9a75c73 · 1 Parent(s): 89aee75

Upload 21 files
.env.example ADDED
@@ -0,0 +1,4 @@
+ # Local vLLM endpoint (no API key needed)
+ LLM_BASE_URL=http://localhost:8000/v1
+ LLM_MODEL=/model
+ APP_PORT=8090
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,9 @@
+ __pycache__/
+ *.pyc
+ *.pyo
+ .env
+ *.egg-info/
+ .DS_Store
+ model/
+ *.safetensors
+ *.bin
Dockerfile ADDED
@@ -0,0 +1,18 @@
+ FROM python:3.12-slim
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user . /app
+
+ ENV DEMO_MODE=true
+ ENV LLM_BASE_URL=http://localhost:8000/v1
+ ENV LLM_MODEL=/model
+
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,12 +1,421 @@
  ---
- title: MediAgent
- emoji: 🏆
- colorFrom: indigo
- colorTo: green
- sdk: docker
- pinned: false
- license: mit
- short_description: Autonomous 5-agent medical imaging pipeline
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ <div align="center">
+
+ <img src="https://img.shields.io/badge/AMD_Instinct-MI300X-ED1C24?style=for-the-badge&logo=amd&logoColor=white" />
+ <img src="https://img.shields.io/badge/ROCm-Stack-ED1C24?style=for-the-badge&logo=amd&logoColor=white" />
+ <img src="https://img.shields.io/badge/vLLM-Inference-6D28D9?style=for-the-badge" />
+ <img src="https://img.shields.io/badge/Qwen-Multimodal-0EA5E9?style=for-the-badge" />
+ <img src="https://img.shields.io/badge/FastAPI-0.115-009688?style=for-the-badge&logo=fastapi&logoColor=white" />
+ <img src="https://img.shields.io/badge/Python-3.12+-3776AB?style=for-the-badge&logo=python&logoColor=white" />
+
+ <br /><br />
+
+ # 🏥 MediAgent
+
+ ### Autonomous Multi-Agent Medical Imaging Analysis System
+
+ **Five specialized AI agents. One radiological verdict. Running entirely on AMD.**
+
+ *AMD Developer Hackathon 2026 · Track: Vision & Multimodal AI*
+
+ <br />
+
+ > Built by **Ramyar**, security researcher & full-stack developer, Sulaymaniyah, Iraq
+
+ </div>
+
+ ---
+
+ ## What Is MediAgent?
+
+ MediAgent is a production-grade autonomous AI system that analyzes medical images (X-rays, MRI scans, CT scans) through a five-agent pipeline and generates structured, peer-reviewed clinical radiology reports in real time.
+
+ Upload an image. Watch five AI agents execute live. Get a formal radiology report with differential diagnoses, ICD-10 codes, a quality score, and a FHIR R4 export ready for any EMR system.
+
+ **No cloud APIs. No OpenAI. No Nvidia.**
+ Pure AMD MI300X inference. Local. Private. Fast.
+
+ ---
+
+ ## The Pipeline
+
+ ```
+ ┌──────────────────────────────────────────────────────────┐
+ │                       IMAGE UPLOAD                        │
+ │          PNG / JPG / DICOM (.dcm) - up to 20 MB           │
+ └────────────────────────────┬─────────────────────────────┘
+                              │
+             ┌────────────────┴────────────────┐
+             │         PARALLEL STAGE          │
+             ▼                                 ▼
+    ┌─────────────────┐               ┌─────────────────┐
+    │  INTAKE AGENT   │               │  VISION AGENT   │
+    │                 │               │                 │
+    │ • Validates     │               │ • Multimodal    │
+    │   image payload │               │   Qwen analysis │
+    │ • Normalizes    │               │ • Anatomical    │
+    │   clinical text │               │   findings      │
+    │ • Extracts      │               │ • Severity per  │
+    │   demographics  │               │   region        │
+    │ • Safety triage │               │ • Confidence    │
+    │   (16 keywords) │               │   scoring       │
+    │ • Modality hint │               │ • Anomaly flags │
+    └────────┬────────┘               └────────┬────────┘
+             └────────────────┬────────────────┘
+                              │
+                              ▼
+                  ┌───────────────────────┐
+                  │    RESEARCH AGENT     │
+                  │                       │
+                  │ • KB cross-reference  │
+                  │   (15 conditions)     │
+                  │ • Demographic weight  │
+                  │ • Ranked differentials│
+                  │ • ICD-10 codes        │
+                  │ • Match probabilities │
+                  └───────────┬───────────┘
+                              │
+                              ▼
+                  ┌───────────────────────┐
+                  │     REPORT AGENT      │
+                  │                       │
+                  │ • ACR/NICE format     │
+                  │ • Clinical history    │
+                  │ • Technique section   │
+                  │ • Findings narrative  │
+                  │ • Impression + top Dx │
+                  │ • Recommendations     │
+                  └───────────┬───────────┘
+                              │
+                              ▼
+                  ┌───────────────────────┐
+                  │     CRITIC AGENT      │
+                  │                       │
+                  │ • Cross-validates     │
+                  │   report vs findings  │
+                  │ • Quality score 0-100 │
+                  │ • Uncertainty flags   │
+                  │ • Disclaimer enforce  │
+                  └───────────┬───────────┘
+                              │
+                              ▼
+ ┌──────────────────────────────────────────────────────────┐
+ │                       FINAL REPORT                        │
+ │  Structured JSON · PDF Export · FHIR R4 DiagnosticReport  │
+ └──────────────────────────────────────────────────────────┘
+ ```
+
+ INTAKE and VISION execute **concurrently**, cutting wall-clock latency by running the two most expensive operations in parallel. Everything downstream runs only after both complete.
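+
+ A minimal sketch of how such a parallel stage can be wired with the standard library is shown below. It is illustrative only: the project's real orchestrator lives in `core/pipeline.py`, and the Vision Agent call signature here is an assumption.
+
+ ```python
+ # Hypothetical sketch - not the project's core/pipeline.py.
+ from concurrent.futures import ThreadPoolExecutor
+
+ def run_parallel_stage(intake_agent, vision_agent, patient_input):
+     """Run INTAKE and VISION concurrently, then hand both results to the sequential agents."""
+     with ThreadPoolExecutor(max_workers=2) as pool:
+         intake_future = pool.submit(intake_agent.process, patient_input)
+         vision_future = pool.submit(vision_agent.process, patient_input)  # assumed signature
+         # Both .result() calls block until the corresponding agent finishes.
+         return intake_future.result(), vision_future.result()
+ ```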
+
+ ---
+
+ ## AMD Hardware Stack
+
+ | Component | Technology |
+ |---|---|
+ | **GPU** | AMD Instinct MI300X |
+ | **GPU Software** | ROCm, AMD's open-source GPU compute platform |
+ | **Inference Server** | vLLM (ROCm build) at `localhost:8000/v1` |
+ | **Model** | Qwen multimodal (native vision + text) |
+ | **Backend** | FastAPI 0.115 + Uvicorn |
+ | **Frontend** | Vanilla JS + Tailwind CSS + SSE streaming |
+
+ This project is a direct proof of concept that AMD's ROCm stack is **production-viable for real-world medical AI**. Every inference call (vision analysis, clinical normalization, report synthesis, peer review, post-report chat) runs on AMD MI300X. Zero CUDA dependency. Zero cloud API calls.
+
+ ---
+
+ ## Key Features
+
+ ### 🔴 Real-Time SSE Streaming
+ Watch the pipeline execute live, agent by agent. Every status transition (WAITING → RUNNING → DONE) streams to the dashboard as it happens via Server-Sent Events. Per-agent runtime counters track exactly how long each step takes.
+
+ ### 👁️ Multimodal Vision Analysis
+ Qwen processes the raw medical image natively. It returns structured JSON: detected modality, technical quality assessment, per-region findings with anatomical names, radiological descriptions, severity levels (NORMAL / INCIDENTAL / SIGNIFICANT / CRITICAL), confidence scores (0–100), and anomaly flags.
+
+ ### 🔬 Medical Knowledge Base + ICD-10 Mapping
+ The Research Agent cross-references vision findings against 15 curated clinical conditions spanning pulmonary, neurological, abdominal, musculoskeletal, and vascular pathology. Every differential diagnosis comes with an ICD-10 code, a match probability, and a sentence explaining exactly why the condition matches the findings.
+
+ ### 🛡️ Critic Agent QA
+ Every report goes through a peer-review pass before delivery. The Critic checks that all anomalies from the Vision Agent appear in the report, flags low-confidence findings, assigns a quality score (completeness 30% + accuracy 40% + safety 20% + compliance 10%), and hard-caps the score at 40/100 if a core agent failed.
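+
+ As a rough illustration of that weighting (the rubric itself lives in the Critic prompt), a report scoring 90 for completeness, 80 for accuracy, 100 for safety, and 100 for compliance lands at 89/100, and would still be capped if a core agent had failed:
+
+ ```python
+ # Illustrative arithmetic only - weights from the Critic rubric, cap from the deterministic QA rules.
+ completeness, accuracy, safety, compliance = 90, 80, 100, 100
+ score = 0.30 * completeness + 0.40 * accuracy + 0.20 * safety + 0.10 * compliance  # 89.0
+ core_agent_failed = False
+ final_score = min(score, 40) if core_agent_failed else score
+ ```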
+
+ ### 🏥 DICOM Support
+ Upload real `.dcm` files. MediAgent extracts 20+ metadata fields (patient name, study date, institution, modality, body part, KVP, slice thickness, pixel spacing, image dimensions) and pre-populates the intake form automatically. MONOCHROME1 inversion and multi-frame handling included.
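+
+ For reference, the kind of metadata pull described above looks roughly like the following with `pydicom`; the project's actual parser is `core/dicom.py` and covers more fields plus pixel normalization:
+
+ ```python
+ # Minimal pydicom illustration - not the project's core/dicom.py.
+ import pydicom
+
+ ds = pydicom.dcmread("study.dcm")
+ metadata = {
+     "patient_name": str(ds.get("PatientName", "")),
+     "study_date": ds.get("StudyDate", ""),
+     "institution": ds.get("InstitutionName", ""),
+     "modality": ds.get("Modality", ""),
+     "body_part": ds.get("BodyPartExamined", ""),
+     "kvp": ds.get("KVP", ""),
+     "slice_thickness": ds.get("SliceThickness", ""),
+     "pixel_spacing": ds.get("PixelSpacing", ""),
+     "dimensions": (ds.get("Rows", 0), ds.get("Columns", 0)),
+ }
+ pixels = ds.pixel_array  # needs numpy; invert if PhotometricInterpretation == "MONOCHROME1"
+ ```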
+
+ ### 📋 FHIR R4 Export
+ Every report can be exported as a fully conformant HL7 FHIR R4 DiagnosticReport resource. It includes an inline Patient resource, Observation resources, LOINC and SNOMED CT codes, severity mapping, the full report text in `presentedForm`, and custom extensions for the AI quality score and pipeline status. Ready to import into Epic, Cerner, or any FHIR-capable EMR.
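+
+ The exact resource MediAgent emits is built in `core/fhir.py`; the skeleton below only illustrates the general R4 shape (the LOINC code and identifiers are placeholders, not the app's actual values):
+
+ ```json
+ {
+   "resourceType": "DiagnosticReport",
+   "status": "final",
+   "code": {"coding": [{"system": "http://loinc.org", "code": "18748-4", "display": "Diagnostic imaging study"}]},
+   "subject": {"reference": "#patient-1"},
+   "contained": [{"resourceType": "Patient", "id": "patient-1"}],
+   "conclusion": "Impression text from the Report Agent",
+   "presentedForm": [{"contentType": "text/plain", "data": "<base64-encoded report text>"}]
+ }
+ ```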
+
+ ### 💬 Post-Report Clinical Chat
+ After the report is delivered, a ClinicalAdvisorAgent is available for follow-up questions. It answers in 2–4 sentences with direct reference to the report findings. Qwen's thinking/reasoning mode is explicitly disabled, so answers are fast, direct, and clinical.
+
+ ### 🔒 Hard Safety Enforcement
+ - **16 deterministic safety keywords** (chest pain, stroke symptoms, acute trauma, hemoptysis, sepsis, spinal trauma, and more) trigger urgent flags regardless of LLM output.
+ - **Age-based alerts**: pediatric (<18) and geriatric (>75) cases are automatically flagged for expert review.
+ - **Mandatory AI disclaimer**: enforced at two independent layers (Report Agent + Critic Agent) and cannot be bypassed or modified by the LLM.
+ - **Graceful degradation**: the pipeline produces a report even if individual agents fail, always marking what succeeded and what didn't.
+
+ ### 📄 Client-Side PDF Export
+ The full radiology report is exported as a formatted PDF directly in the browser using jsPDF, including the severity color banner, all six report sections, DICOM metadata, and the QA score. No server round-trip needed.
+
+ ---
+
+ ## Agent Architecture
+
+ ### IntakeAgent
+ Validates the image payload (minimum size, valid base64), applies deterministic safety triage, and normalizes clinical language. For simple inputs under 120 characters it skips the LLM entirely and uses a built-in layman-to-medical term map (22 entries: "can't breathe" → "dyspnea", "lump" → "mass/nodule", "dizzy" → "dizziness/vertigo", etc.). It only calls the LLM for complex clinical narratives with comorbidities or medical history, and falls back cleanly to raw input preservation if the LLM is unavailable.
+
+ ### VisionAgent
+ Sends the base64 image and clinical context to Qwen at temperature 0.0 with a strict JSON schema enforced via the system prompt. Handles malformed enum values from the LLM with safe conversion fallbacks, so a single bad field never drops a finding. Tracks token usage and anomaly counts in the output metadata.
+
+ ### ResearchAgent
+ Pre-filters the knowledge base to only the conditions compatible with the detected modality before sending it to the LLM, reducing prompt size and improving accuracy. Enforces strict output rules: only conditions from the KB, 2–4 differentials maximum, 5% minimum probability, exact ICD-10 codes, and evidence sentences that actually explain the match.
+
+ ### ReportAgent
+ Builds a structured prompt with clearly labeled sections (clinical history, imaging technique, findings block, differentials block) and asks the LLM to synthesize them into a formal ACR/NICE radiology report. The disclaimer is overwritten to the exact regulatory string after LLM generation, unconditionally.
+
+ ### CriticAgent
+ Operates at temperature 0.0 for fully deterministic QA. Receives the draft report and the full pipeline state, including raw vision findings. Checks that every anomaly is accounted for, flags low-confidence observations, and appends a `[QUALITY ASSESSMENT]` block to the recommendations section with the score, issues, and uncertainty warnings.
+
+ ### ClinicalAdvisorAgent
+ Activated only after report delivery and scoped to the specific report's findings. Strips all Qwen thinking output via multi-layer regex before returning the answer, handling `<think>` XML blocks, markdown think fences, and plain-text reasoning preambles.
+
+ ---
+
+ ## LLM Client
+
+ The `LLMClient` wraps the OpenAI Python SDK pointed at the local vLLM endpoint. It handles:
+
+ - Text completions with optional JSON mode enforcement
+ - Multimodal completions with base64 image injection
+ - Token-level streaming with an `on_token` callback
+ - A 3-attempt retry loop with 1-second flat backoff
+ - A 90-second timeout per call
+ - Dual-strategy JSON extraction: a direct parse first, then a character-by-character brace-matching fallback for responses where the LLM adds conversational padding (sketched below)
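+
+ The real client lives in `core/llm.py`; the standalone sketch below only shows the idea behind the brace-matching fallback (it deliberately ignores braces inside strings, which the simplified version tolerates):
+
+ ```python
+ # Simplified sketch of the dual-strategy extraction - not the core/llm.py implementation.
+ import json
+
+ def extract_json(text: str):
+     """Try a direct parse, then fall back to scanning for a balanced {...} block."""
+     try:
+         return json.loads(text)
+     except json.JSONDecodeError:
+         pass
+     depth, start = 0, None
+     for i, ch in enumerate(text):
+         if ch == "{":
+             if depth == 0:
+                 start = i
+             depth += 1
+         elif ch == "}" and depth:
+             depth -= 1
+             if depth == 0:
+                 try:
+                     return json.loads(text[start:i + 1])
+                 except json.JSONDecodeError:
+                     start = None  # keep scanning for a later balanced block
+     return None
+ ```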
+
+ ---
+
+ ## Medical Knowledge Base
+
+ 15 conditions covering the most common radiological findings across all supported modalities:
+
+ | Condition | ICD-10 | Modalities | Severity |
+ |---|---|---|---|
+ | Community-Acquired Pneumonia | J18.9 | X-RAY, CT | SIGNIFICANT |
+ | Cardiogenic Pulmonary Edema | J81.0 | X-RAY, CT | CRITICAL |
+ | Pleural Effusion | J90 | X-RAY, CT, MRI | SIGNIFICANT |
+ | Spontaneous Pneumothorax | J93.9 | X-RAY, CT | CRITICAL |
+ | Intracerebral Hemorrhage | I61.9 | CT, MRI | CRITICAL |
+ | Ischemic Stroke | I63.9 | CT, MRI | CRITICAL |
+ | Intracranial Neoplasm | C71.9 | MRI, CT | SIGNIFICANT |
+ | Abdominal Aortic Aneurysm | I71.4 | CT, MRI | CRITICAL |
+ | Nephrolithiasis | N20.0 | CT, X-RAY | SIGNIFICANT |
+ | Small Bowel Obstruction | K56.6 | X-RAY, CT | SIGNIFICANT |
+ | Long Bone Fracture | S82.902 | X-RAY, CT | SIGNIFICANT |
+ | Degenerative Joint Disease | M19.90 | X-RAY, MRI | INCIDENTAL |
+ | Hepatic Steatosis | K76.0 | CT, MRI | INCIDENTAL |
+ | Herniated Disc | M51.16 | MRI, CT | SIGNIFICANT |
+ | Pulmonary Nodule | R91.1 | X-RAY, CT | SIGNIFICANT |
+
+ ---
+
+ ## API Reference
+
+ | Method | Endpoint | Description |
+ |---|---|---|
+ | `GET` | `/` | Clinical dashboard UI |
+ | `GET` | `/health` | System health, version, active sessions |
+ | `GET` | `/metrics/gpu` | Live AMD GPU metrics (util, VRAM, temp, power) |
+ | `POST` | `/analyze` | Synchronous pipeline → full JSON report |
+ | `POST` | `/analyze/stream` | Real-time SSE streaming pipeline |
+ | `GET` | `/status/{report_id}` | Poll live pipeline state |
+ | `POST` | `/chat/{report_id}` | Post-report clinical Q&A |
+ | `GET` | `/api/docs` | Swagger UI |
+ | `GET` | `/api/redoc` | ReDoc UI |
+
+ ### `/analyze/stream` - SSE Event Types
+
+ ```json
+ // Agent status update (emitted on every state transition)
+ {"agent": "VISION", "status": "RUNNING"}
+ {"agent": "VISION", "status": "DONE"}
+
+ // Final report (emitted when pipeline completes)
+ {"type": "report", "data": {...}, "report_id": "REP-A3F9C2D1B4E7"}
+
+ // Error
+ {"type": "error", "message": "Pipeline produced no report"}
+ ```
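+
+ A minimal streaming client, assuming the standard `data:` SSE framing and the event shapes above (the `requests` package is not in `requirements.txt`; install it separately for this snippet):
+
+ ```python
+ # Hedged example client for /analyze/stream - field names follow this README, not verified against main.py.
+ import json
+ import requests
+
+ with open("chest_xray.png", "rb") as f:
+     resp = requests.post(
+         "http://localhost:8090/analyze/stream",
+         files={"image": f},
+         data={"symptoms": "shortness of breath", "age": 58, "sex": "F"},
+         stream=True,
+     )
+
+ for raw in resp.iter_lines():
+     if not raw or not raw.startswith(b"data:"):
+         continue  # skip keep-alives and non-data SSE fields
+     event = json.loads(raw[len(b"data:"):].strip())
+     if "agent" in event:
+         print(event["agent"], "->", event["status"])
+     elif event.get("type") == "report":
+         print("report id:", event["report_id"])
+     elif event.get("type") == "error":
+         print("error:", event["message"])
+ ```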
+
+ ### Form Fields (`/analyze`, `/analyze/stream`)
+
+ | Field | Type | Required | Notes |
+ |---|---|---|---|
+ | `image` | File | ✅ | PNG, JPG, or DICOM (.dcm), max 20 MB |
+ | `symptoms` | string | - | Free-text chief complaint |
+ | `age` | integer | - | 0–120 |
+ | `sex` | string | - | `M`, `F`, or `O` |
+ | `clinical_context` | string | - | Medical history, referral details |
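+
+ With those fields, a minimal synchronous call to `/analyze` looks like this (again assuming `requests`; the response is the structured report JSON described under Data Models, and the exact envelope comes from `main.py`):
+
+ ```python
+ # Hedged example - response structure may differ from what main.py actually returns.
+ import json
+ import requests
+
+ with open("abdominal_ct.dcm", "rb") as f:
+     resp = requests.post(
+         "http://localhost:8090/analyze",
+         files={"image": f},
+         data={"symptoms": "acute abdominal pain", "age": 71, "sex": "M"},
+         timeout=600,
+     )
+ resp.raise_for_status()
+ print(json.dumps(resp.json(), indent=2))
+ ```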
+
+ ---
+
+ ## Data Models
+
+ ```
+ PatientInput
+ └── image_base64, symptoms, age, sex, clinical_context
+
+ PipelineState
+ ├── agent_statuses: {INTAKE, VISION, RESEARCH, REPORT, CRITIC}
+ ├── intake_output: IntakeOutput
+ ├── vision_output: VisionOutput
+ │   └── findings: [VisionFinding, ...]
+ │         └── anatomical_region, description, severity,
+ │             confidence, confidence_score, is_anomaly
+ ├── research_output: ResearchOutput
+ │   └── differential_diagnoses: [KnowledgeMatch, ...]
+ │         └── condition_name, match_probability,
+ │             supporting_evidence, differential_rank, icd10_code
+ ├── report_draft: ReportSection
+ │   └── clinical_history, technique, findings, impression,
+ │       recommendations, disclaimer
+ └── final_report: FinalReport
+       └── report_id, patient_metadata, sections, vision_summary,
+           research_summary, overall_severity, agent_pipeline_status,
+           generation_timestamp
+ ```
+
+ ---
+
+ ## Project Structure
+
+ ```
+ mediagent/
+ ├── main.py            ← FastAPI server, all routes, SSE orchestration
+ ├── core/
+ │   ├── llm.py         ← LLM client (retry, vision, streaming, JSON extraction)
+ │   ├── models.py      ← All Pydantic v2 data models
+ │   ├── pipeline.py    ← Parallel pipeline orchestrator
+ │   ├── dicom.py       ← DICOM parser (pydicom + numpy + Pillow)
+ │   └── fhir.py        ← FHIR R4 DiagnosticReport builder
+ ├── agents/
+ │   ├── intake.py      ← Input validation, normalization, safety triage
+ │   ├── vision.py      ← Multimodal image analysis
+ │   ├── research.py    ← KB matching, ICD-10, differential diagnosis
+ │   ├── report.py      ← ACR/NICE radiology report synthesis
+ │   ├── critic.py      ← QA validation, quality scoring
+ │   └── advisor.py     ← Post-report clinical Q&A
+ ├── static/
+ │   └── index.html     ← Full dashboard (Tailwind + Chart.js + SSE)
+ ├── requirements.txt
+ └── .env.example
+ ```
+
+ ---
+
+ ## Getting Started
+
+ ### Prerequisites
+
+ - Python 3.12+
+ - vLLM running a Qwen multimodal model on ROCm, accessible at `http://localhost:8000/v1` (see the example below)
+ - ROCm-compatible AMD GPU (MI300X recommended)
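+
+ One way to satisfy the vLLM prerequisite is shown below; this is an assumption rather than the project's documented launch command, so adjust the model path and flags to your ROCm setup:
+
+ ```bash
+ # Serve a multimodal Qwen checkpoint mounted at /model on the OpenAI-compatible port the app expects
+ vllm serve /model --host 0.0.0.0 --port 8000
+ ```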
+
+ ### Installation
+
+ ```bash
+ # Clone the repository
+ git clone https://github.com/Ramyar2007/mediagent
+ cd mediagent
+
+ # Install Python dependencies
+ pip install -r requirements.txt
+
+ # Configure environment
+ cp .env.example .env
+ # Edit .env and set LLM_BASE_URL to your vLLM endpoint
+ ```
+
+ ### Environment Variables
+
+ ```env
+ LLM_BASE_URL=http://localhost:8000/v1   # vLLM OpenAI-compatible endpoint
+ LLM_MODEL=/model                        # Model path served by vLLM
+ APP_PORT=8090                           # Server port
+ ```
+
+ ### Run
+
+ ```bash
+ python main.py
+ ```
+
+ Dashboard available at **http://localhost:8090**
+
+ Swagger docs at **http://localhost:8090/api/docs**
+
  ---
+
+ ## Dependencies
+
+ | Package | Version | Purpose |
+ |---|---|---|
+ | `fastapi` | 0.115.6 | Web framework |
+ | `uvicorn[standard]` | 0.34.0 | ASGI server |
+ | `openai` | 1.58.1 | SDK for vLLM's OpenAI-compatible API |
+ | `python-multipart` | 0.0.20 | Multipart form / file upload |
+ | `pydantic` | 2.10.5 | Data validation and serialization |
+ | `Pillow` | 11.1.0 | Image processing for DICOM conversion |
+ | `pydicom` | 2.4.4 | DICOM file parsing and metadata extraction |
+ | `numpy` | 1.26.4 | Pixel array normalization for DICOM |
+
+ Optional: the `amdsmi` Python library is used automatically when available for more accurate GPU metrics than the `rocm-smi` CLI fallback.
+
  ---
 
+ ## Clinical Safety
+
+ MediAgent is built with clinical safety as a first-class concern, not an afterthought.
+
+ **Mandatory disclaimer**, enforced at two independent code layers and impossible to override from any LLM output:
+
+ > *"This analysis is AI-generated and must be reviewed by a licensed radiologist before any clinical decisions are made."*
+
+ **Hard safety rules that run deterministically, without LLM involvement:**
+ - 16 urgent clinical keywords trigger immediate flags before any AI processing
+ - Pediatric and geriatric age thresholds auto-flag for specialist review
+ - The quality score is hard-capped at 40/100 if core agents (Vision, Report) fail
+ - Low-confidence findings are always flagged with confirmatory imaging recommendations
+ - The disclaimer is re-applied after every LLM call, unconditionally
+
+ **This system is a decision support tool, not a clinical decision maker.** Every output is intended to assist, not replace, a licensed radiologist.
+
+ ---
+
+ ## Dashboard Preview
+
+ The single-page clinical dashboard provides:
+
+ - **Live pipeline panel**: real-time agent status cards with per-step runtime counters
+ - **Analytics tab**: severity distribution donut chart, differential diagnosis confidence bar chart, and agent timing bar chart, all populated from structured model output
+ - **Report panel**: severity banner, safety flags, all six report sections, and finding cards color-coded by severity
+ - **DICOM metadata card**: study date, institution, modality, body part, technical parameters
+ - **PDF export**: the full formatted report generated client-side
+ - **Clinical chat**: a slide-up Q&A panel backed by the ClinicalAdvisorAgent
+ - **AMD GPU panel**: live utilization %, VRAM used/total, temperature, and power draw, polled every 3 seconds
+
+ ---
+
+ ## Built For
+
+ **AMD Developer Hackathon 2026**
+ Track: Vision & Multimodal AI
+
+ This project demonstrates that AMD's ROCm ecosystem is a complete, production-viable alternative for serious AI workloads: medical imaging analysis, with real multimodal vision, structured clinical reasoning, and standards-compliant output, running fully on AMD MI300X without a single NVIDIA or cloud dependency.
+
+ ---
+
+ <div align="center">
+
+ **Built by Ramyar · Sulaymaniyah, Iraq**
+
+ *#AMDDevChallenge · AMD Instinct MI300X · ROCm · vLLM · Qwen*
+
+ </div>
agents/__init__.py ADDED
File without changes
agents/advisor.py ADDED
@@ -0,0 +1,99 @@
1
+ # mediagent/agents/advisor.py
2
+ """
3
+ Clinical Advisor Agent for MediAgent.
4
+ Post-report interactive Q&A. Answers follow-up clinical questions
5
+ from radiologists/clinicians in full context of the generated report.
6
+ Acts as a 24/7 senior radiology consultant.
7
+ """
8
+
9
+ import logging
10
+ import re
11
+ from typing import Optional
12
+
13
+ from core.llm import LLMClient
14
+ from core.models import FinalReport
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ class ClinicalAdvisorAgent:
20
+ """
21
+ Interactive clinical consultation agent activated after report generation.
22
+ Answers follow-up questions with access to all pipeline outputs.
23
+ Scope is limited to radiological interpretation β€” no treatment prescriptions.
24
+ """
25
+
26
+ SYSTEM_PROMPT = """You are a senior radiologist consultant. Answer the clinician's question directly and concisely β€” 2-4 sentences maximum. No preamble, no thinking out loud, no reasoning steps. Just the answer.
27
+ Rules: reference report findings; no fabrication; no medications/dosages; formal radiological tone. If management decision, end with "Clinical correlation recommended." """
28
+
29
+ def __init__(self, llm_client: Optional[LLMClient] = None):
30
+ self.llm = llm_client or LLMClient()
31
+
32
+ def answer(self, question: str, report: FinalReport) -> str:
33
+ """
34
+ Answer a follow-up clinical question in the context of the generated report.
35
+
36
+ Args:
37
+ question: Free-text clinical question from the user
38
+ report: The FinalReport from the pipeline
39
+
40
+ Returns:
41
+ str: Clinical answer text
42
+ """
43
+ logger.info("💬 Clinical Advisor processing question: %.80s", question)
44
+
45
+ sections = report.sections
46
+ severity = report.overall_severity.value if hasattr(report.overall_severity, "value") else str(report.overall_severity)
47
+
48
+ # Send only the most relevant report fields - fewer tokens, faster response
49
+ context = (
50
+ f"Severity: {severity} | Impression: {sections.impression} | "
51
+ f"Findings: {sections.findings[:600]} | "
52
+ f"Recommendations: {sections.recommendations[:300]}"
53
+ )
54
+
55
+ prompt = f"Report: {context}\n\nQuestion: {question}\n\nAnswer:"
56
+
57
+ result = self.llm.generate_text(
58
+ prompt=prompt,
59
+ system_prompt=self.SYSTEM_PROMPT,
60
+ temperature=0.0,
61
+ max_tokens=200,
62
+ # Disable Qwen3 thinking/reasoning mode entirely
63
+ extra_body={"chat_template_kwargs": {"enable_thinking": False}},
64
+ )
65
+
66
+ if result.get("success") and result.get("content"):
67
+ answer = self._strip_thinking(result["content"].strip())
68
+ logger.info("✅ Clinical Advisor answered | tokens=%s", result.get("usage", {}).get("total_tokens", 0))
69
+ return answer
70
+
71
+ logger.warning("⚠️ Clinical Advisor LLM call failed")
72
+ return "Unable to process this question. Please review the report directly and consult a licensed radiologist."
73
+
74
+ @staticmethod
75
+ def _strip_thinking(text: str) -> str:
76
+ """
77
+ Remove all thinking/reasoning output that Qwen and similar models emit.
78
+ Handles both structured tags and plain-text reasoning patterns.
79
+ """
80
+ # Remove <think>...</think> XML blocks
81
+ text = re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)
82
+ # Remove ```think ... ``` markdown blocks
83
+ text = re.sub(r"```think.*?```", "", text, flags=re.DOTALL)
84
+ # Remove plain-text reasoning preambles Qwen3 emits without tags:
85
+ # "Here's a thinking process: 1. ..." or "Let me think: ..." etc.
86
+ text = re.sub(
87
+ r"(?i)^(here'?s?\s+(a\s+)?thinking\s+process:?|let me (think|analyze|consider):?|thinking:?).*?(\n\n|\Z)",
88
+ "", text, flags=re.DOTALL
89
+ )
90
+ # Remove numbered reasoning lists at the start (1. **Title:** ...)
91
+ text = re.sub(r"^(\s*\d+\.\s+\*\*[^*]+\*\*:.*\n?)+", "", text, flags=re.MULTILINE)
92
+ # If after stripping we have a clear section break, take only what's after it
93
+ if "\n\n" in text:
94
+ parts = [p.strip() for p in text.split("\n\n") if p.strip()]
95
+ # Take the last substantial chunk (the actual answer)
96
+ for part in reversed(parts):
97
+ if len(part) > 20:
98
+ return part
99
+ return text.strip()
agents/critic.py ADDED
@@ -0,0 +1,194 @@
1
+ # mediagent/agents/critic.py
2
+ """
3
+ Critic Agent for MediAgent.
4
+ Final quality control and peer-review layer. Validates report consistency,
5
+ flags low-confidence observations, enforces regulatory disclaimers, assigns
6
+ quality scores, and applies corrective refinements before final delivery.
7
+ """
8
+
9
+ import logging
10
+ from typing import Any, Dict, List, Optional
11
+
12
+ from core.llm import LLMClient
13
+ from core.models import AgentStatus, PipelineState, ReportSection, VisionOutput
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ class CriticAgent:
19
+ """
20
+ Medical QA/QC engine. Cross-validates the synthesized report against
21
+ upstream agent outputs, detects logical inconsistencies, enforces
22
+ clinical safety thresholds, and produces a finalized, auditable report.
23
+ """
24
+
25
+ STANDARD_DISCLAIMER = (
26
+ "This analysis is AI-generated and must be reviewed by a licensed radiologist "
27
+ "before any clinical decisions are made."
28
+ )
29
+
30
+ SYSTEM_PROMPT = """You are a senior radiology peer-reviewer. Evaluate the draft report against the vision findings and return ONLY valid JSON:
31
+ {"clinical_history":"string","technique":"string","findings":"string","impression":"string","recommendations":"string","disclaimer":"string","quality_score":0-100,"review_issues":["string"],"uncertainty_warnings":["string"]}
32
+
33
+ Review criteria:
34
+ 1. CONSISTENCY: Every vision anomaly must appear in the report. Flag contradictions.
35
+ 2. CONFIDENCE: Findings < 50% confidence or LOW → add uncertainty warning, recommend confirmatory imaging.
36
+ 3. DISCLAIMER: Must be exactly: "This analysis is AI-generated and must be reviewed by a licensed radiologist before any clinical decisions are made."
37
+ 4. TONE: Formal, objective radiological language. No speculative phrasing or definitive claims without imaging evidence.
38
+ 5. QUALITY SCORE: completeness 30% + accuracy/consistency 40% + clinical safety 20% + compliance 10%.
39
+ 6. Apply corrections directly. No placeholders or TODOs.
40
+ No markdown. Never fabricate findings. Lower score for failed pipeline agents."""
41
+
42
+ def __init__(self, llm_client: Optional[LLMClient] = None):
43
+ self.llm = llm_client or LLMClient()
44
+ self.last_quality_score: int = 100
45
+ self.last_review_issues: List[str] = []
46
+ self.last_uncertainty_warnings: List[str] = []
47
+
48
+ def process(self, draft_report: ReportSection, pipeline_state: PipelineState) -> ReportSection:
49
+ """
50
+ Execute final peer review and quality enforcement.
51
+
52
+ Args:
53
+ draft_report: Unreviewed ReportSection from Report Agent
54
+ pipeline_state: Full execution context including vision findings and agent statuses
55
+
56
+ Returns:
57
+ ReportSection: Finalized, QA-reviewed clinical report
58
+ """
59
+ logger.info("πŸ›‘οΈ Critic Agent initiated final quality review")
60
+
61
+ user_prompt = self._build_review_prompt(draft_report, pipeline_state)
62
+
63
+ result = self.llm.generate_text(
64
+ prompt=user_prompt,
65
+ system_prompt=self.SYSTEM_PROMPT,
66
+ temperature=0.0,
67
+ force_json=True
68
+ )
69
+
70
+ if not result.get("success"):
71
+ logger.error(f"❌ Critic LLM call failed: {result.get('error')}")
72
+ return self._apply_deterministic_qa(draft_report, pipeline_state)
73
+
74
+ raw_content = result.get("content", "")
75
+ parsed = LLMClient.extract_json_from_response(raw_content)
76
+ if not parsed:
77
+ logger.warning("⚠️ Failed to parse critic JSON response. Applying deterministic QA.")
78
+ return self._apply_deterministic_qa(draft_report, pipeline_state)
79
+
80
+ try:
81
+ return self._parse_qa_response(parsed, pipeline_state)
82
+ except Exception as e:
83
+ logger.error(f"πŸ’₯ Critic mapping failed: {e}")
84
+ return self._apply_deterministic_qa(draft_report, pipeline_state)
85
+
86
+ def _build_review_prompt(self, draft: ReportSection, state: PipelineState) -> str:
87
+ """Format pipeline context and draft report for LLM critique."""
88
+ # Extract vision findings for cross-reference
89
+ vision_text = "No vision findings available."
90
+ if state.vision_output:
91
+ findings_list = []
92
+ for f in state.vision_output.findings:
93
+ findings_list.append(
94
+ f"- Region: {f.anatomical_region} | Desc: {f.description} | "
95
+ f"Severity: {f.severity.value} | Confidence: {f.confidence.value} ({f.confidence_score}%) | "
96
+ f"Anomaly: {f.is_anomaly}"
97
+ )
98
+ vision_text = "\n".join(findings_list) if findings_list else "No specific findings."
99
+
100
+ # Agent execution status
101
+ agent_status = ", ".join([f"{k}: {v.value}" for k, v in state.agent_statuses.items()])
102
+
103
+ return f"""[PIPELINE EXECUTION STATUS]
104
+ {agent_status}
105
+
106
+ [VISION AGENT FINDINGS FOR CROSS-REFERENCE]
107
+ {vision_text}
108
+
109
+ [DRAFT REPORT FOR REVIEW]
110
+ Clinical History: {draft.clinical_history}
111
+ Technique: {draft.technique}
112
+ Findings: {draft.findings}
113
+ Impression: {draft.impression}
114
+ Recommendations: {draft.recommendations}
115
+ Disclaimer: {draft.disclaimer}
116
+
117
+ Critique the draft against the vision findings and pipeline status. Apply corrections, flag uncertainties, verify compliance, and output the refined JSON report."""
118
+
119
+ def _parse_qa_response(self, data: Dict[str, Any], state: PipelineState) -> ReportSection:
120
+ """Validate, extract, and enforce QA metadata on the report."""
121
+ # Extract core report fields
122
+ draft = ReportSection(
123
+ clinical_history=str(data.get("clinical_history", "Not provided.")),
124
+ technique=str(data.get("technique", "Imaging technique not specified.")),
125
+ findings=str(data.get("findings", "No abnormalities detected.")),
126
+ impression=str(data.get("impression", "Within normal limits.")),
127
+ recommendations=str(data.get("recommendations", "Routine follow-up as clinically indicated.")),
128
+ disclaimer=str(data.get("disclaimer", self.STANDARD_DISCLAIMER))
129
+ )
130
+
131
+ # Extract QA metadata
132
+ self.last_quality_score = int(data.get("quality_score", 85))
133
+ self.last_review_issues = data.get("review_issues", [])
134
+ self.last_uncertainty_warnings = data.get("uncertainty_warnings", [])
135
+
136
+ # Append QA summary to recommendations for frontend visibility
137
+ qa_summary = "\n\n[QUALITY ASSESSMENT]\nScore: {score}/100\nIssues: {issues}\nUncertainties: {warnings}".format(
138
+ score=self.last_quality_score,
139
+ issues=" | ".join(self.last_review_issues) if self.last_review_issues else "None",
140
+ warnings=" | ".join(self.last_uncertainty_warnings) if self.last_uncertainty_warnings else "None"
141
+ )
142
+ draft.recommendations += qa_summary
143
+
144
+ return self._apply_deterministic_qa(draft, state)
145
+
146
+ def _apply_deterministic_qa(self, draft: ReportSection, state: PipelineState) -> ReportSection:
147
+ """Hard-rule safety checks that cannot be overridden by LLM output."""
148
+ # 1. Enforce exact disclaimer
149
+ if draft.disclaimer != self.STANDARD_DISCLAIMER:
150
+ draft.disclaimer = self.STANDARD_DISCLAIMER
151
+ if self.last_review_issues:
152
+ self.last_review_issues.append("Disclaimer corrected to regulatory standard.")
153
+ else:
154
+ self.last_review_issues = ["Disclaimer corrected to regulatory standard."]
155
+
156
+ # 2. Cap quality score if critical pipeline failures occurred
157
+ critical_failures = [
158
+ k for k, v in state.agent_statuses.items()
159
+ if v == AgentStatus.ERROR and k in ["VISION", "REPORT"]
160
+ ]
161
+ if critical_failures:
162
+ self.last_quality_score = min(self.last_quality_score, 40)
163
+ self.last_review_issues.append(f"Pipeline degraded: {', '.join(critical_failures)} agents failed.")
164
+
165
+ # 3. Flag low-confidence vision findings if not already warned
166
+ if state.vision_output:
167
+ low_conf_findings = [
168
+ f.anatomical_region for f in state.vision_output.findings
169
+ if f.confidence.value == "LOW" or f.confidence_score < 50.0
170
+ ]
171
+ if low_conf_findings:
172
+ warning = f"Low confidence observations in: {', '.join(low_conf_findings)}. Confirmatory imaging recommended."
173
+ if warning not in self.last_uncertainty_warnings:
174
+ self.last_uncertainty_warnings.append(warning)
175
+
176
+ # 4. Re-append updated QA summary if modified
177
+ qa_summary = "\n\n[QUALITY ASSESSMENT]\nScore: {score}/100\nIssues: {issues}\nUncertainties: {warnings}".format(
178
+ score=self.last_quality_score,
179
+ issues=" | ".join(self.last_review_issues) if self.last_review_issues else "None",
180
+ warnings=" | ".join(self.last_uncertainty_warnings) if self.last_uncertainty_warnings else "None"
181
+ )
182
+ if qa_summary not in draft.recommendations:
183
+ draft.recommendations += qa_summary
184
+
185
+ logger.info(f"βœ… Critic Agent completed | QA Score: {self.last_quality_score}/100")
186
+ return draft
187
+
188
+ def _get_fallback_report(self, draft: ReportSection) -> ReportSection:
189
+ """Safe fallback when critic review cannot be completed."""
190
+ draft.disclaimer = self.STANDARD_DISCLAIMER
191
+ self.last_quality_score = 50
192
+ self.last_review_issues = ["Critic agent unavailable. Report delivered unreviewed."]
193
+ self.last_uncertainty_warnings = ["Peer review skipped. Manual radiologist verification mandatory."]
194
+ return self._apply_deterministic_qa(draft, PipelineState())
agents/intake.py ADDED
@@ -0,0 +1,216 @@
1
+ # mediagent/agents/intake.py
2
+ """
3
+ Intake Agent for MediAgent.
4
+ Validates patient submissions, normalizes clinical terminology,
5
+ extracts demographics, detects imaging modality hints, and flags
6
+ urgent safety concerns before routing to downstream agents.
7
+ """
8
+
9
+ import logging
10
+ import re
11
+ from typing import Any, Dict, List, Optional
12
+
13
+ from core.llm import LLMClient
14
+ from core.models import IntakeOutput, ImageModality, PatientInput
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ class IntakeAgent:
20
+ """
21
+ First-stage pipeline agent responsible for input validation,
22
+ clinical text normalization, demographic extraction, and safety triage.
23
+ Ensures downstream agents receive structured, standardized input.
24
+ """
25
+
26
+ # Deterministic safety keywords for immediate flagging
27
+ SAFETY_KEYWORDS = [
28
+ "acute trauma", "chest pain", "shortness of breath", "dyspnea",
29
+ "stroke symptoms", "neurological deficit", "hemoptysis", "massive bleed",
30
+ "pediatric emergency", "pregnant", "anaphylaxis", "sepsis", "fever",
31
+ "head injury", "spinal trauma", "acute abdomen", "suspected fracture"
32
+ ]
33
+
34
+ MODALITY_KEYWORDS = {
35
+ "x-ray": ImageModality.XRAY,
36
+ "xr": ImageModality.XRAY,
37
+ "radiograph": ImageModality.XRAY,
38
+ "ct scan": ImageModality.CT,
39
+ "ct": ImageModality.CT,
40
+ "computed tomography": ImageModality.CT,
41
+ "mri": ImageModality.MRI,
42
+ "magnetic resonance": ImageModality.MRI,
43
+ "mammogram": ImageModality.XRAY, # Technically X-ray based
44
+ }
45
+
46
+ def __init__(self, llm_client: Optional[LLMClient] = None):
47
+ self.llm = llm_client or LLMClient()
48
+
49
+ def process(self, patient_input: PatientInput) -> IntakeOutput:
50
+ """
51
+ Main intake processing method.
52
+
53
+ Args:
54
+ patient_input: Raw validated patient submission
55
+
56
+ Returns:
57
+ IntakeOutput: Structured, normalized, safety-checked data
58
+ """
59
+ logger.info("📋 Intake Agent processing initiated")
60
+
61
+ # 1. Validate image payload
62
+ if not self._validate_image_payload(patient_input.image_base64):
63
+ logger.warning("⚠️ Image payload validation failed. Proceeding with warnings.")
64
+
65
+ # 2. Apply deterministic safety triage
66
+ safety_flags = self._check_deterministic_safety(patient_input)
67
+
68
+ # 3. Clinical normalization & demographic extraction via LLM
69
+ structured_data = self._normalize_with_llm(patient_input, safety_flags)
70
+
71
+ # 4. Enrich modality detection
72
+ modality = self._infer_modality(patient_input, structured_data)
73
+
74
+ # 5. Assemble & validate output
75
+ try:
76
+ output = IntakeOutput(
77
+ validated=True,
78
+ standardized_symptoms=structured_data.get("standardized_symptoms", patient_input.symptoms or ""),
79
+ extracted_demographics=structured_data.get("extracted_demographics", {}),
80
+ safety_flags=list(set(safety_flags + structured_data.get("safety_flags", []))),
81
+ recommended_modality=modality,
82
+ processing_notes=structured_data.get("processing_notes", "")
83
+ )
84
+ logger.info("✅ Intake Agent completed successfully")
85
+ return output
86
+ except Exception as e:
87
+ logger.error(f"💥 IntakeOutput validation failed: {e}")
88
+ return self._get_fallback_output(patient_input, safety_flags)
89
+
90
+ def _validate_image_payload(self, base64_data: str) -> bool:
91
+ """Validate base64 image integrity and size constraints."""
92
+ if not base64_data or len(base64_data) < 500:
93
+ return False
94
+ # Check for valid base64 pattern (ignoring data URI prefix)
95
+ clean = re.sub(r"^data:image/[a-z]+;base64,", "", base64_data)
96
+ try:
97
+ import base64
98
+ base64.b64decode(clean)
99
+ return len(clean) < 20 * 1024 * 1024 # < 20MB limit
100
+ except Exception:
101
+ return False
102
+
103
+ def _check_deterministic_safety(self, inp: PatientInput) -> List[str]:
104
+ """Scan raw input for high-priority clinical safety terms."""
105
+ text = f"{inp.symptoms} {inp.clinical_context}".lower()
106
+ flags = []
107
+ for kw in self.SAFETY_KEYWORDS:
108
+ if kw.lower() in text:
109
+ flags.append(f"URGENT_TERM_DETECTED: {kw}")
110
+ if inp.age is not None and inp.age < 18:
111
+ flags.append("PATIENT_AGE: PEDIATRIC_REQUIRES_EXPERT_REVIEW")
112
+ if inp.age is not None and inp.age > 75:
113
+ flags.append("PATIENT_AGE: GERIATRIC_CONSIDERATIONS_RECOMMENDED")
114
+ return flags
115
+
116
+ # Layman-to-medical term map for fast deterministic normalization
117
+ LAYMAN_TERMS = {
118
+ "can't breathe": "dyspnea", "hard to breathe": "dyspnea", "difficulty breathing": "dyspnea",
119
+ "stomach pain": "abdominal pain", "belly pain": "abdominal pain", "tummy pain": "abdominal pain",
120
+ "chest tightness": "chest pain/pressure", "heart racing": "palpitations",
121
+ "blurry vision": "visual disturbance", "can't see clearly": "visual disturbance",
122
+ "dizzy": "dizziness/vertigo", "feel faint": "presyncope", "passed out": "syncope",
123
+ "throwing up": "vomiting", "nausea and vomiting": "nausea/emesis",
124
+ "back pain": "dorsal pain", "leg pain": "lower extremity pain",
125
+ "arm pain": "upper extremity pain", "neck pain": "cervicalgia",
126
+ "headache": "cephalgia", "head pain": "cephalgia",
127
+ "swollen": "edema", "swelling": "edema", "bruise": "ecchymosis",
128
+ "lump": "mass/nodule", "bump": "mass/nodule"
129
+ }
130
+
131
+ def _normalize_with_llm(self, inp: PatientInput, existing_flags: List[str]) -> Dict[str, Any]:
132
+ """
133
+ Normalize clinical text. Uses fast deterministic mapping for simple inputs;
134
+ falls back to LLM only for complex or lengthy clinical context.
135
+ """
136
+ combined_text = f"{inp.symptoms or ''} {inp.clinical_context or ''}".strip()
137
+
138
+ # Skip LLM for short/simple inputs - deterministic normalization is sufficient
139
+ if len(combined_text) <= 120 and not any(
140
+ indicator in combined_text.lower()
141
+ for indicator in ["history of", "diagnosed with", "chronic", "prior", "previous", "medication", "allerg"]
142
+ ):
143
+ logger.debug("⚡ Short input detected - using fast deterministic normalization (skipping LLM)")
144
+ return self._fast_normalize(inp, existing_flags)
145
+
146
+ prompt = f"""You are a clinical data standardization expert.
147
+ Convert raw patient input to standardized clinical terminology. Respond ONLY with JSON:
148
+ {{"standardized_symptoms":"string","extracted_demographics":{{"age":int|null,"sex":"M|F|O"|null,"comorbidities":["string"]}},"safety_flags":["string"],"processing_notes":"string"}}
149
+
150
+ Input:
151
+ - Symptoms: "{inp.symptoms or 'Not provided'}"
152
+ - Age: {inp.age}
153
+ - Sex: {inp.sex}
154
+ - Clinical Context: "{inp.clinical_context or 'Not provided'}"
155
+ - Existing Flags: {existing_flags}
156
+
157
+ Rules: convert layman terms to medical terminology; extract comorbidities; add safety flags; no markdown."""
158
+
159
+ result = self.llm.generate_text(prompt=prompt, force_json=True)
160
+ if result.get("success") and result.get("content"):
161
+ parsed = LLMClient.extract_json_from_response(result["content"])
162
+ if parsed:
163
+ return parsed
164
+
165
+ logger.warning("⚠️ LLM normalization failed. Using deterministic fallback.")
166
+ return self._build_fallback_dict(inp, existing_flags)
167
+
168
+ def _fast_normalize(self, inp: PatientInput, flags: List[str]) -> Dict[str, Any]:
169
+ """Deterministic normalization using term mapping - zero LLM calls."""
170
+ text = f"{inp.symptoms or ''} {inp.clinical_context or ''}".lower()
171
+ normalized = inp.symptoms or "No symptoms provided"
172
+ for layman, medical in self.LAYMAN_TERMS.items():
173
+ if layman in text:
174
+ normalized = normalized.lower().replace(layman, medical)
175
+ return {
176
+ "standardized_symptoms": normalized.strip(),
177
+ "extracted_demographics": {
178
+ "age": inp.age,
179
+ "sex": inp.sex,
180
+ "comorbidities": []
181
+ },
182
+ "safety_flags": flags,
183
+ "processing_notes": "Fast deterministic normalization applied."
184
+ }
185
+
186
+ def _infer_modality(self, inp: PatientInput, llm_data: Dict[str, Any]) -> ImageModality:
187
+ """Infer imaging modality from text hints or default to UNKNOWN."""
188
+ text = f"{inp.symptoms} {inp.clinical_context}".lower()
189
+ for kw, mod in self.MODALITY_KEYWORDS.items():
190
+ if kw in text:
191
+ return mod
192
+ return ImageModality.UNKNOWN
193
+
194
+ def _build_fallback_dict(self, inp: PatientInput, flags: List[str]) -> Dict[str, Any]:
195
+ """Deterministic fallback when LLM is unavailable."""
196
+ return {
197
+ "standardized_symptoms": inp.symptoms or "No symptoms provided",
198
+ "extracted_demographics": {
199
+ "age": inp.age,
200
+ "sex": inp.sex,
201
+ "comorbidities": []
202
+ },
203
+ "safety_flags": flags,
204
+ "processing_notes": "LLM normalization unavailable. Raw input preserved."
205
+ }
206
+
207
+ def _get_fallback_output(self, inp: PatientInput, flags: List[str]) -> IntakeOutput:
208
+ """Return a safe, minimally structured IntakeOutput on critical failure."""
209
+ return IntakeOutput(
210
+ validated=False,
211
+ standardized_symptoms=inp.symptoms or "",
212
+ extracted_demographics={"age": inp.age, "sex": inp.sex, "comorbidities": []},
213
+ safety_flags=flags + ["INTAKE_AGENT_FALLBACK_MODE"],
214
+ recommended_modality=ImageModality.UNKNOWN,
215
+ processing_notes="Intake agent encountered critical validation failure. Pipeline continues with degraded state."
216
+ )
agents/report.py ADDED
@@ -0,0 +1,201 @@
1
+ # mediagent/agents/report.py
2
+ """
3
+ Report Agent for MediAgent.
4
+ Synthesizes outputs from Intake, Vision, and Research agents into a
5
+ structured, professional clinical radiology report. Follows standard
6
+ ACR/NICE radiological reporting conventions with strict JSON enforcement.
7
+ """
8
+
9
+ import logging
10
+ from typing import Optional, Any
11
+
12
+ from core.llm import LLMClient
13
+ from core.models import IntakeOutput, ReportSection, ResearchOutput, VisionOutput
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ class ReportAgent:
19
+ """
20
+ Clinical report synthesis engine. Transforms structured agent outputs
21
+ into a formal radiology report suitable for clinician review.
22
+ """
23
+
24
+ STANDARD_DISCLAIMER = (
25
+ "This analysis is AI-generated and must be reviewed by a licensed radiologist "
26
+ "before any clinical decisions are made."
27
+ )
28
+
29
+ SYSTEM_PROMPT = """You are a senior board-certified radiologist. Synthesize the provided findings into a formal radiology report. Return ONLY valid JSON:
30
+ {"clinical_history":"string","technique":"string","findings":"string","impression":"string","recommendations":"string","disclaimer":"string"}
31
+
32
+ Section rules:
33
+ 1. clinical_history: Age, sex, chief complaint, relevant context - formal clinical language.
34
+ 2. technique: Modality, planes/sequences, contrast status, image quality.
35
+ 3. findings: Anatomical region-by-region description; severity levels (NORMAL/INCIDENTAL/SIGNIFICANT/CRITICAL); size, shape, density, location.
36
+ 4. impression: Top 3 differentials with probabilities; clinical significance; end with "Confidence Level: High/Medium/Low - [reason]".
37
+ 5. recommendations: Next steps based on severity and confidence (further imaging, referral, urgent eval, follow-up).
38
+ 6. disclaimer: MUST be exactly: "This analysis is AI-generated and must be reviewed by a licensed radiologist before any clinical decisions are made."
39
+ No markdown, no preamble, no extra text. Use standard radiological terminology. State "Not provided" for missing data."""
40
+
41
+ def __init__(self, llm_client: Optional[LLMClient] = None):
42
+ self.llm = llm_client or LLMClient()
43
+
44
+ def process(
45
+ self,
46
+ intake: Optional[IntakeOutput] = None,
47
+ vision: Optional[VisionOutput] = None,
48
+ research: Optional[ResearchOutput] = None,
49
+ ) -> ReportSection:
50
+ """
51
+ Synthesize agent outputs into a formal clinical radiology report.
52
+
53
+ Args:
54
+ intake: Structured patient context and safety flags
55
+ vision: Imaging analysis findings and technical details
56
+ research: Ranked differential diagnoses and clinical correlations
57
+
58
+ Returns:
59
+ ReportSection: Complete, structured radiology report
60
+ """
61
+ logger.info("📝 Report Agent initiated report synthesis")
62
+
63
+ user_prompt = self._build_user_prompt(intake, vision, research)
64
+
65
+ result = self.llm.generate_text(
66
+ prompt=user_prompt,
67
+ system_prompt=self.SYSTEM_PROMPT,
68
+ temperature=0.1,
69
+ force_json=True,
70
+ )
71
+
72
+ if not result.get("success"):
73
+ logger.error(f"❌ Report generation LLM call failed: {result.get('error')}")
74
+ return self._get_fallback_report(intake, vision, research)
75
+
76
+ raw_content = result.get("content", "")
77
+ parsed = LLMClient.extract_json_from_response(raw_content)
78
+ if not parsed:
79
+ logger.warning("⚠️ Failed to parse report LLM JSON response. Using fallback.")
80
+ return self._get_fallback_report(intake, vision, research)
81
+
82
+ try:
83
+ return self._parse_report_response(parsed)
84
+ except Exception as e:
85
+ logger.error(f"💥 Report mapping failed: {e}")
86
+ return self._get_fallback_report(intake, vision, research)
87
+
88
+ def _build_user_prompt(self, intake, vision, research) -> str:
89
+ """Construct a highly structured prompt for the LLM synthesizer."""
90
+ # Clinical History Block
91
+ history = "Patient information not provided."
92
+ if intake:
93
+ age = f"{intake.extracted_demographics.get('age', 'Unknown')} years"
94
+ sex = intake.extracted_demographics.get('sex', 'Unknown')
95
+ symptoms = intake.standardized_symptoms or "No symptoms reported"
96
+ context = intake.processing_notes or ""
97
+ history = f"Age: {age} | Sex: {sex} | Chief Complaint: {symptoms} | Additional Context: {context}"
98
+
99
+ # Technique Block
100
+ technique = "Imaging technique not specified."
101
+ if vision:
102
+ modality = vision.modality_detected.value
103
+ quality = vision.technical_quality
104
+ technique = f"Modality: {modality} | Technical Assessment: {quality}"
105
+
106
+ # Findings Block
107
+ findings_text = "No imaging findings available."
108
+ if vision and vision.findings:
109
+ finding_blocks = []
110
+ for f in vision.findings:
111
+ finding_blocks.append(
112
+ f"- {f.anatomical_region}: {f.description} (Severity: {f.severity.value}, "
113
+ f"Confidence: {f.confidence.value} {f.confidence_score:.0f}%, Anomaly: {'Yes' if f.is_anomaly else 'No'})"
114
+ )
115
+ findings_text = "\n".join(finding_blocks)
116
+
117
+ # Differentials Block
118
+ diff_text = "No differential diagnoses generated."
119
+ if research and research.differential_diagnoses:
120
+ top_three = research.differential_diagnoses[:3]
121
+ diff_blocks = []
122
+ for d in top_three:
123
+ diff_blocks.append(
124
+ f"{d.differential_rank}. {d.condition_name} (ICD-10: {d.icd10_code}) - "
125
+ f"Probability: {d.match_probability:.1f}% | Evidence: {d.supporting_evidence}"
126
+ )
127
+ diff_text = "\n".join(diff_blocks)
128
+
129
+ return f"""Synthesize the following structured medical data into a formal radiology report:
130
+
131
+ [CLINICAL HISTORY]
132
+ {history}
133
+
134
+ [IMAGING TECHNIQUE]
135
+ {technique}
136
+
137
+ [IMAGING FINDINGS]
138
+ {findings_text}
139
+
140
+ [DIFFERENTIAL DIAGNOSES]
141
+ {diff_text}
142
+
143
+ Generate the complete report following the exact JSON schema and clinical guidelines provided in the system prompt."""
144
+
145
+ def _parse_report_response(self, data: dict) -> ReportSection:
146
+ """Validate and map LLM output to ReportSection model with safety enforcement."""
147
+ # Extract fields with safe defaults
148
+ clinical_history = str(data.get("clinical_history", "Not provided."))
149
+ technique = str(data.get("technique", "Digital advanced imaging acquisition."))
150
+ findings = str(data.get("findings", "No abnormalities detected."))
151
+ impression = str(data.get("impression", "Within normal limits."))
152
+ recommendations = str(data.get("recommendations", "Routine follow-up as clinically indicated."))
153
+
154
+ # Enforce exact disclaimer for regulatory compliance
155
+ disclaimer = self.STANDARD_DISCLAIMER
156
+
157
+ return ReportSection(
158
+ clinical_history=clinical_history,
159
+ technique=technique,
160
+ findings=findings,
161
+ impression=impression,
162
+ recommendations=recommendations,
163
+ disclaimer=disclaimer
164
+ )
165
+
166
+ def _get_fallback_report(
167
+ self,
168
+ intake: Optional[IntakeOutput],
169
+ vision: Optional[VisionOutput],
170
+ research: Optional[ResearchOutput]
171
+ ) -> ReportSection:
172
+ """Deterministic fallback report when LLM synthesis fails."""
173
+ logger.warning("⚠️ Generating deterministic fallback radiology report.")
174
+
175
+ # Build minimal clinically safe sections from raw data
176
+ history = "Patient data available. See raw intake logs for details."
177
+ if intake:
178
+ history = f"Symptoms: {intake.standardized_symptoms} | Demographics: {intake.extracted_demographics}"
179
+
180
+ technique = "Imaging modality unspecified due to processing limitation."
181
+ if vision:
182
+ technique = f"Modality: {vision.modality_detected.value} | Quality: {vision.technical_quality}"
183
+
184
+ findings = "Imaging analysis incomplete. Manual radiologist review strongly recommended."
185
+ if vision and vision.findings:
186
+ findings = "; ".join([f"{f.anatomical_region}: {f.description}" for f in vision.findings])
187
+
188
+ impression = "Diagnostic certainty limited. Top clinical considerations based on available data:"
189
+ if research and research.differential_diagnoses:
190
+ impression += " " + ", ".join([d.condition_name for d in research.differential_diagnoses[:3]])
191
+
192
+ recommendations = "Urgent manual review by a licensed radiologist is required. Correlate with clinical presentation."
193
+
194
+ return ReportSection(
195
+ clinical_history=history,
196
+ technique=technique,
197
+ findings=findings,
198
+ impression=impression,
199
+ recommendations=recommendations,
200
+ disclaimer=self.STANDARD_DISCLAIMER
201
+ )
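A minimal synthesis sketch for agents/report.py, assuming the class above is exported as ReportAgent and that the local vLLM endpoint the LLMClient defaults to is running; all field values are illustrative:

from agents.report import ReportAgent
from core.models import IntakeOutput, VisionOutput

intake = IntakeOutput(
    standardized_symptoms="Productive cough and fever for three days",
    extracted_demographics={"age": 58, "sex": "M"},
)
vision = VisionOutput()  # no findings, so the prompt builder emits "No imaging findings available."

section = ReportAgent().process(intake=intake, vision=vision, research=None)
print(section.impression)
print(section.disclaimer)  # always the enforced STANDARD_DISCLAIMER, regardless of LLM output

If the LLM call or JSON parsing fails, the same call returns the deterministic fallback report instead of raising.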
agents/research.py ADDED
@@ -0,0 +1,281 @@
1
+ # mediagent/agents/research.py
2
+ """
3
+ Research Agent for MediAgent.
4
+ Cross-references vision agent findings against a built-in medical knowledge
5
+ base to generate ranked differential diagnoses, ICD-10 mappings, and clinical
6
+ correlations. Uses LLM reasoning to weigh evidence and account for demographics.
7
+ """
8
+
9
+ import logging
10
+ from typing import Any, Dict, List, Optional
11
+
12
+ from core.llm import LLMClient
13
+ from core.models import KnowledgeMatch, ResearchOutput, VisionFinding
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+ # ─────────────────────────────────────────────────────────────────────────────
18
+ # BUILT-IN MEDICAL KNOWLEDGE BASE
19
+ # Curated set of common radiological findings mapped to clinical conditions.
20
+ # Designed for deterministic cross-referencing with LLM reasoning overlay.
21
+ # ─────────────────────────────────────────────────────────────────────────────
22
+
23
+ MEDICAL_KB = [
24
+ {
25
+ "condition": "Community-Acquired Pneumonia",
26
+ "icd10": "J18.9",
27
+ "key_findings": ["lobar consolidation", "alveolar opacity", "air bronchograms", "focal infiltrate"],
28
+ "modalities": ["X-RAY", "CT"],
29
+ "typical_severity": "SIGNIFICANT"
30
+ },
31
+ {
32
+ "condition": "Cardiogenic Pulmonary Edema",
33
+ "icd10": "J81.0",
34
+ "key_findings": ["bilateral perihilar opacities", "kerley B lines", "cephalization", "pleural effusion", "cardiomegaly"],
35
+ "modalities": ["X-RAY", "CT"],
36
+ "typical_severity": "CRITICAL"
37
+ },
38
+ {
39
+ "condition": "Pleural Effusion",
40
+ "icd10": "J90",
41
+ "key_findings": ["blunting of costophrenic angle", "meniscus sign", "layering fluid", "hemothorax"],
42
+ "modalities": ["X-RAY", "CT", "MRI"],
43
+ "typical_severity": "SIGNIFICANT"
44
+ },
45
+ {
46
+ "condition": "Spontaneous Pneumothorax",
47
+ "icd10": "J93.9",
48
+ "key_findings": ["visceral pleural line", "absence of lung markings", "lung collapse", "hyperlucent hemithorax"],
49
+ "modalities": ["X-RAY", "CT"],
50
+ "typical_severity": "CRITICAL"
51
+ },
52
+ {
53
+ "condition": "Intracerebral Hemorrhage",
54
+ "icd10": "I61.9",
55
+ "key_findings": ["hyperdense collection", "mass effect", "midline shift", "sulcal effacement", "edema"],
56
+ "modalities": ["CT", "MRI"],
57
+ "typical_severity": "CRITICAL"
58
+ },
59
+ {
60
+ "condition": "Ischemic Stroke",
61
+ "icd10": "I63.9",
62
+ "key_findings": ["hypodensity", "loss of gray-white differentiation", "hypoenhancement", "restricted diffusion"],
63
+ "modalities": ["CT", "MRI"],
64
+ "typical_severity": "CRITICAL"
65
+ },
66
+ {
67
+ "condition": "Intracranial Neoplasm",
68
+ "icd10": "C71.9",
69
+ "key_findings": ["space-occupying lesion", "ring enhancement", "vasogenic edema", "midline shift", "mass effect"],
70
+ "modalities": ["MRI", "CT"],
71
+ "typical_severity": "SIGNIFICANT"
72
+ },
73
+ {
74
+ "condition": "Abdominal Aortic Aneurysm",
75
+ "icd10": "I71.4",
76
+ "key_findings": ["aortic dilation", "circumferential calcification", "thrombus", "rupture signs"],
77
+ "modalities": ["CT", "MRI"],
78
+ "typical_severity": "CRITICAL"
79
+ },
80
+ {
81
+ "condition": "Nephrolithiasis",
82
+ "icd10": "N20.0",
83
+ "key_findings": ["hyperdense calculus", "hydronephrosis", "ureteral dilation", "perinephric stranding"],
84
+ "modalities": ["CT", "X-RAY"],
85
+ "typical_severity": "SIGNIFICANT"
86
+ },
87
+ {
88
+ "condition": "Small Bowel Obstruction",
89
+ "icd10": "K56.6",
90
+ "key_findings": ["dilated loops", "air-fluid levels", "transition point", "collapsed distal bowel"],
91
+ "modalities": ["X-RAY", "CT"],
92
+ "typical_severity": "SIGNIFICANT"
93
+ },
94
+ {
95
+ "condition": "Long Bone Fracture",
96
+ "icd10": "S82.902",
97
+ "key_findings": ["cortical discontinuity", "displacement", "callus formation", "periosteal reaction", "fracture line"],
98
+ "modalities": ["X-RAY", "CT"],
99
+ "typical_severity": "SIGNIFICANT"
100
+ },
101
+ {
102
+ "condition": "Degenerative Joint Disease",
103
+ "icd10": "M19.90",
104
+ "key_findings": ["joint space narrowing", "osteophytes", "subchondral sclerosis", "subchondral cysts"],
105
+ "modalities": ["X-RAY", "MRI"],
106
+ "typical_severity": "INCIDENTAL"
107
+ },
108
+ {
109
+ "condition": "Hepatic Steatosis",
110
+ "icd10": "K76.0",
111
+ "key_findings": ["decreased hepatic attenuation", "liver brighter than spleen", "fatty infiltration", "hepatomegaly"],
112
+ "modalities": ["CT", "MRI", "X-RAY"],
113
+ "typical_severity": "INCIDENTAL"
114
+ },
115
+ {
116
+ "condition": "Herniated Disc",
117
+ "icd10": "M51.16",
118
+ "key_findings": ["disc protrusion", "nerve root compression", "thecal sac indentation", "annular tear"],
119
+ "modalities": ["MRI", "CT"],
120
+ "typical_severity": "SIGNIFICANT"
121
+ },
122
+ {
123
+ "condition": "Pulmonary Nodule",
124
+ "icd10": "R91.1",
125
+ "key_findings": ["solitary pulmonary nodule", "ground-glass opacity", "spiculated margins", "calcification pattern"],
126
+ "modalities": ["X-RAY", "CT"],
127
+ "typical_severity": "SIGNIFICANT"
128
+ }
129
+ ]
130
+
131
+
132
+ class ResearchAgent:
133
+ """
134
+ Knowledge-driven differential diagnosis engine. Matches imaging findings
135
+ to a curated clinical knowledge base, applies demographic weighting, and
136
+ returns ranked diagnostic hypotheses with ICD-10 codes and confidence.
137
+ """
138
+
139
+ SYSTEM_PROMPT = """You are a clinical radiology research specialist. Cross-reference imaging findings against the provided knowledge base and return ONLY valid JSON:
140
+ {"differential_diagnoses":[{"condition_name":"string","match_probability":0-100,"supporting_evidence":"string","differential_rank":1,"icd10_code":"string"}],"matched_conditions":["string"],"relevant_guidelines":["string"],"research_notes":"string"}
141
+
142
+ Rules:
143
+ 1. ONLY use conditions from the provided KB. Do not invent diagnoses.
144
+ 2. Match anatomical regions and radiological descriptors to KB key_findings.
145
+ 3. Factor in demographics (age, sex, comorbidities) to adjust probabilities.
146
+ 4. Output 2-4 differentials maximum, ranked highest to lowest probability.
147
+ 5. Use exact ICD-10 codes from the KB.
148
+ 6. Skip conditions with no imaging evidence. Never force-fit.
149
+ 7. Minimum probability 5%. Never output 0%.
150
+ 8. Each supporting_evidence must explain WHY the condition matches (one full sentence minimum).
151
+ 9. No markdown, no commentary; JSON only."""
152
+
153
+ def __init__(self, llm_client: Optional[LLMClient] = None):
154
+ self.llm = llm_client or LLMClient()
155
+
156
+ def process(self, vision_findings: List[VisionFinding], demographics: Dict[str, Any] = None, detected_modality: str = "UNKNOWN") -> ResearchOutput:
157
+ """
158
+ Execute knowledge-base cross-referencing and differential generation.
159
+
160
+ Args:
161
+ vision_findings: List of structured findings from Vision Agent
162
+ demographics: Patient metadata from Intake Agent
163
+
164
+ Returns:
165
+ ResearchOutput: Ranked differentials, matched conditions, and clinical notes
166
+ """
167
+ logger.info("🔍 Research Agent initiated differential diagnosis matching")
168
+
169
+ demographics = demographics or {}
170
+ findings_text = self._format_findings_for_prompt(vision_findings)
171
+ kb_text = self._format_kb_for_prompt(detected_modality)
172
+
173
+ user_prompt = f"""Patient Demographics:
174
+ - Age: {demographics.get('age', 'Unknown')}
175
+ - Sex: {demographics.get('sex', 'Unknown')}
176
+ - Comorbidities: {demographics.get('comorbidities', 'None reported')}
177
+
178
+ Vision Agent Findings:
179
+ {findings_text}
180
+
181
+ Medical Knowledge Base:
182
+ {kb_text}
183
+
184
+ Analyze the findings, match them against the knowledge base, factor in demographics, and return the ranked differential diagnosis in the specified JSON format."""
185
+
186
+ result = self.llm.generate_text(
187
+ prompt=user_prompt,
188
+ system_prompt=self.SYSTEM_PROMPT,
189
+ temperature=0.1,
190
+ force_json=True
191
+ )
192
+
193
+ if not result.get("success"):
194
+ logger.error(f"❌ Research LLM call failed: {result.get('error')}")
195
+ return self._get_fallback_output()
196
+
197
+ raw_content = result.get("content", "")
198
+ parsed = LLMClient.extract_json_from_response(raw_content)
199
+ if not parsed:
200
+ logger.warning("⚠️ Failed to parse research LLM JSON response. Using fallback.")
201
+ return self._get_fallback_output()
202
+
203
+ try:
204
+ return self._parse_research_response(parsed)
205
+ except Exception as e:
206
+ logger.error(f"💥 Research response mapping failed: {e}")
207
+ return self._get_fallback_output()
208
+
209
+ def _format_findings_for_prompt(self, findings: List[VisionFinding]) -> str:
210
+ """Convert VisionFinding objects into LLM-readable text blocks."""
211
+ if not findings:
212
+ return "No specific findings reported by vision agent. Image appears unremarkable."
213
+ blocks = []
214
+ for i, f in enumerate(findings, 1):
215
+ blocks.append(
216
+ f"[{i}] Region: {f.anatomical_region} | "
217
+ f"Description: {f.description} | "
218
+ f"Severity: {f.severity.value} | "
219
+ f"Confidence: {f.confidence.value} ({f.confidence_score:.1f}%) | "
220
+ f"Anomaly: {'Yes' if f.is_anomaly else 'No'}"
221
+ )
222
+ return "\n".join(blocks)
223
+
224
+ def _format_kb_for_prompt(self, modality: str = "UNKNOWN") -> str:
225
+ """Format the KB into a structured reference block, pre-filtered by modality."""
226
+ # Filter to only conditions compatible with the detected modality
227
+ if modality in ("X-RAY", "CT", "MRI"):
228
+ relevant = [e for e in MEDICAL_KB if modality in e["modalities"]]
229
+ else:
230
+ relevant = MEDICAL_KB
231
+
232
+ lines = ["[CONDITION REFERENCE TABLE]"]
233
+ for entry in relevant:
234
+ lines.append(
235
+ f"- {entry['condition']} (ICD-10: {entry['icd10']}) | "
236
+ f"Findings: {', '.join(entry['key_findings'])} | "
237
+ f"Severity: {entry['typical_severity']}"
238
+ )
239
+ return "\n".join(lines)
240
+
241
+ def _parse_research_response(self, data: Dict[str, Any]) -> ResearchOutput:
242
+ """Validate and map LLM output to ResearchOutput model."""
243
+ raw_diffs = data.get("differential_diagnoses", [])
244
+ differentials = []
245
+
246
+ for rank, item in enumerate(raw_diffs, 1):
247
+ try:
248
+ match = KnowledgeMatch(
249
+ condition_name=str(item.get("condition_name", "Unknown Condition")),
250
+ match_probability=float(item.get("match_probability", 0.0)),
251
+ supporting_evidence=str(item.get("supporting_evidence", "Insufficient data for correlation.")),
252
+ differential_rank=rank,
253
+ icd10_code=str(item.get("icd10_code", "Z00.00"))
254
+ )
255
+ differentials.append(match)
256
+ except Exception as e:
257
+ logger.warning(f"⚠️ Skipping malformed differential entry: {e}")
258
+ continue
259
+
260
+ matched_conditions = [d.condition_name for d in differentials]
261
+ guidelines = data.get("relevant_guidelines", ["ACR Appropriateness Criteria", "NICE Imaging Guidelines"])
262
+ notes = data.get("research_notes", "Standard knowledge-base cross-referencing applied.")
263
+
264
+ return ResearchOutput(
265
+ differential_diagnoses=differentials,
266
+ matched_conditions=matched_conditions,
267
+ relevant_guidelines=guidelines,
268
+ research_notes=notes,
269
+ sources_used=["internal_knowledge_base", "ac_radiology_standards"]
270
+ )
271
+
272
+ def _get_fallback_output(self) -> ResearchOutput:
273
+ """Safe fallback when KB matching fails."""
274
+ logger.warning("⚠️ Returning fallback ResearchOutput.")
275
+ return ResearchOutput(
276
+ differential_diagnoses=[],
277
+ matched_conditions=[],
278
+ relevant_guidelines=["Manual radiologist review required"],
279
+ research_notes="Knowledge base matching failed. Clinical correlation strongly recommended.",
280
+ sources_used=["internal_knowledge_base"]
281
+ )
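A hedged usage sketch for agents/research.py; the finding below is illustrative, and the call needs the local LLM endpoint to be reachable (otherwise the deterministic fallback output is returned):

from agents.research import ResearchAgent
from core.models import ConfidenceLevel, SeverityLevel, VisionFinding

finding = VisionFinding(
    anatomical_region="Right Lower Lobe",
    description="Focal consolidation with air bronchograms",
    severity=SeverityLevel.SIGNIFICANT,
    confidence=ConfidenceLevel.HIGH,
    confidence_score=88.0,
    is_anomaly=True,
)
result = ResearchAgent().process(
    vision_findings=[finding],
    demographics={"age": 58, "sex": "M"},
    detected_modality="X-RAY",  # pre-filters MEDICAL_KB to X-RAY-compatible conditions
)
for d in result.differential_diagnoses:
    print(d.differential_rank, d.condition_name, d.icd10_code, f"{d.match_probability:.0f}%")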
agents/vision.py ADDED
@@ -0,0 +1,157 @@
1
+ # mediagent/agents/vision.py
2
+ """
3
+ Vision Agent for MediAgent.
4
+ Multimodal medical image analysis engine that processes base64-encoded
5
+ diagnostic images using local Qwen vision capabilities. Extracts
6
+ anatomical structures, detects pathologies, and structures findings
7
+ into standardized radiological observations with confidence scoring.
8
+ """
9
+
10
+ import logging
11
+ from typing import Any, Dict, List, Optional
12
+
13
+ from core.llm import LLMClient
14
+ from core.models import (
15
+ ConfidenceLevel,
16
+ ImageModality,
17
+ SeverityLevel,
18
+ VisionFinding,
19
+ VisionOutput,
20
+ )
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
+ class VisionAgent:
26
+ """
27
+ Specialized radiological analysis agent. Interprets medical imagery
28
+ (X-ray, MRI, CT) and outputs structured findings with severity and
29
+ confidence classifications. Operates with deterministic fallbacks
30
+ to maintain pipeline continuity on LLM failures.
31
+ """
32
+
33
+ SYSTEM_PROMPT = """You are a board-certified radiologist. Analyze the medical image and return ONLY valid JSON:
34
+ {
35
+ "modality_detected": "X-RAY"|"MRI"|"CT"|"UNKNOWN",
36
+ "technical_quality": "brief quality/artifact/positioning note",
37
+ "findings": [
38
+ {
39
+ "anatomical_region": "string",
40
+ "description": "concise radiological observation using standard terminology",
41
+ "severity": "NORMAL"|"INCIDENTAL"|"SIGNIFICANT"|"CRITICAL",
42
+ "confidence": "LOW"|"MEDIUM"|"HIGH",
43
+ "confidence_score": 0.0-100.0,
44
+ "is_anomaly": boolean
45
+ }
46
+ ],
47
+ "overall_assessment": "concise clinical summary"
48
+ }
49
+ Rules: precise radiological terms; correct anatomical orientation; distinguish normal variants from pathology; realistic confidence scores; no treatment plans; no markdown; no extra text."""
50
+
51
+ def __init__(self, llm_client: Optional[LLMClient] = None):
52
+ self.llm = llm_client or LLMClient()
53
+
54
+ def process(self, image_base64: str, clinical_context: str = "") -> VisionOutput:
55
+ """
56
+ Execute multimodal image analysis.
57
+
58
+ Args:
59
+ image_base64: Base64 encoded medical image
60
+ clinical_context: Standardized symptoms/demographics from Intake Agent
61
+
62
+ Returns:
63
+ VisionOutput: Structured radiological findings and metadata
64
+ """
65
+ logger.info("👁️ Vision Agent initiated multimodal analysis")
66
+
67
+ user_prompt = "Analyze this medical image carefully."
68
+ if clinical_context:
69
+ user_prompt += f"\n\nClinical Context: {clinical_context}"
70
+ user_prompt += "\n\nProvide a structured radiological assessment following the specified JSON schema."
71
+
72
+ result = self.llm.generate_vision(
73
+ base64_image=image_base64,
74
+ prompt=user_prompt,
75
+ system_prompt=self.SYSTEM_PROMPT,
76
+ temperature=0.0,
77
+ max_tokens=2000
78
+ )
79
+
80
+ if not result.get("success"):
81
+ logger.error(f"❌ Vision LLM call failed: {result.get('error')}")
82
+ return self._get_fallback_output()
83
+
84
+ raw_content = result.get("content", "")
85
+ parsed = LLMClient.extract_json_from_response(raw_content)
86
+ if not parsed:
87
+ logger.warning("⚠️ Failed to parse vision LLM JSON response. Using fallback.")
88
+ return self._get_fallback_output()
89
+
90
+ try:
91
+ return self._parse_vision_response(parsed, result.get("usage", {}))
92
+ except Exception as e:
93
+ logger.error(f"💥 Vision response mapping failed: {e}")
94
+ return self._get_fallback_output()
95
+
96
+ def _parse_vision_response(self, data: Dict[str, Any], usage: Dict[str, int]) -> VisionOutput:
97
+ """Map raw LLM JSON to validated Pydantic models with safe enum conversion."""
98
+ findings = []
99
+ raw_findings = data.get("findings", [])
100
+
101
+ for item in raw_findings:
102
+ try:
103
+ finding = VisionFinding(
104
+ anatomical_region=item.get("anatomical_region", "Unspecified Region"),
105
+ description=item.get("description", "Unable to generate detailed description."),
106
+ severity=self._safe_enum(SeverityLevel, item.get("severity"), SeverityLevel.NORMAL),
107
+ confidence=self._safe_enum(ConfidenceLevel, item.get("confidence"), ConfidenceLevel.MEDIUM),
108
+ confidence_score=float(item.get("confidence_score", 50.0)),
109
+ is_anomaly=bool(item.get("is_anomaly", False))
110
+ )
111
+ findings.append(finding)
112
+ except Exception as e:
113
+ logger.warning(f"⚠️ Skipping malformed finding due to validation error: {e}")
114
+ continue
115
+
116
+ modality_str = data.get("modality_detected", "UNKNOWN").upper()
117
+ try:
118
+ modality = ImageModality(modality_str)
119
+ except ValueError:
120
+ modality = ImageModality.UNKNOWN
121
+
122
+ return VisionOutput(
123
+ modality_detected=modality,
124
+ technical_quality=data.get(
125
+ "technical_quality",
126
+ "Image quality acceptable for preliminary assessment."
127
+ ),
128
+ findings=findings,
129
+ overall_assessment=data.get(
130
+ "overall_assessment",
131
+ "Unable to generate overall assessment from provided data."
132
+ ),
133
+ metadata={
134
+ "llm_usage": usage,
135
+ "findings_count": len(findings),
136
+ "anomalies_detected": sum(1 for f in findings if f.is_anomaly)
137
+ }
138
+ )
139
+
140
+ @staticmethod
141
+ def _safe_enum(enum_cls, value, default):
142
+ """Safely convert string to enum, falling back gracefully."""
143
+ try:
144
+ return enum_cls(str(value).strip().upper())
145
+ except (ValueError, AttributeError, TypeError):
146
+ return default
147
+
148
+ def _get_fallback_output(self) -> VisionOutput:
149
+ """Return a safe VisionOutput that keeps the pipeline running when processing fails."""
150
+ logger.warning("⚠️ Returning fallback VisionOutput due to processing failure.")
151
+ return VisionOutput(
152
+ modality_detected=ImageModality.UNKNOWN,
153
+ technical_quality="Analysis unavailable due to system error. Manual review required.",
154
+ findings=[],
155
+ overall_assessment="Vision analysis could not be completed. Please verify image quality and system connectivity.",
156
+ metadata={"error": "VISION_AGENT_FALLBACK", "llm_usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}}
157
+ )
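A short invocation sketch for agents/vision.py; the file path is hypothetical, and the call assumes the multimodal endpoint is reachable (on failure the safe fallback VisionOutput is returned):

import base64
from agents.vision import VisionAgent

with open("chest_xray.png", "rb") as fh:  # hypothetical sample image
    image_b64 = base64.b64encode(fh.read()).decode("utf-8")

output = VisionAgent().process(image_b64, clinical_context="58M, productive cough, fever")
print(output.modality_detected.value, "|", output.technical_quality)
for f in output.findings:
    print(f"{f.anatomical_region}: {f.description} [{f.severity.value}]")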
core/__init__.py ADDED
File without changes
core/dicom.py ADDED
@@ -0,0 +1,128 @@
1
+ # mediagent/core/dicom.py
2
+ """
3
+ DICOM file parser for MediAgent.
4
+ Extracts pixel data + clinical metadata from .dcm files,
5
+ converts to base64 PNG for the vision pipeline, and returns
6
+ structured metadata to pre-populate the intake form.
7
+ """
8
+
9
+ import base64
10
+ import io
11
+ import logging
12
+ from typing import Any, Dict, Optional, Tuple
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
+ def parse_dicom(file_bytes: bytes) -> Tuple[str, Dict[str, Any]]:
18
+ """
19
+ Parse a DICOM (.dcm) file.
20
+
21
+ Returns:
22
+ (base64_image_string, metadata_dict)
23
+ base64_image_string: "data:image/png;base64,..." ready for vision pipeline
24
+ metadata_dict: extracted clinical metadata for intake pre-population
25
+ """
26
+ try:
27
+ import pydicom
28
+ import numpy as np
29
+ from PIL import Image
30
+ except ImportError as e:
31
+ raise ImportError(f"DICOM support requires pydicom, numpy, Pillow: {e}")
32
+
33
+ ds = pydicom.dcmread(io.BytesIO(file_bytes), force=True)
34
+
35
+ # ── Metadata extraction ───────────────────────────────────────────────────
36
+ metadata: Dict[str, Any] = {}
37
+
38
+ _tag_map = {
39
+ "PatientName": "patient_name",
40
+ "PatientID": "patient_id",
41
+ "PatientBirthDate": "birth_date",
42
+ "PatientSex": "sex",
43
+ "PatientAge": "age_str",
44
+ "StudyDate": "study_date",
45
+ "StudyDescription": "study_description",
46
+ "SeriesDescription": "series_description",
47
+ "Modality": "modality",
48
+ "InstitutionName": "institution",
49
+ "Manufacturer": "manufacturer",
50
+ "ManufacturerModelName": "device_model",
51
+ "KVP": "kvp",
52
+ "ExposureTime": "exposure_time_ms",
53
+ "SliceThickness": "slice_thickness_mm",
54
+ "BodyPartExamined": "body_part",
55
+ "StudyInstanceUID": "study_uid",
56
+ "SOPInstanceUID": "instance_uid",
57
+ "Rows": "image_rows",
58
+ "Columns": "image_cols",
59
+ "PixelSpacing": "pixel_spacing_mm",
60
+ }
61
+
62
+ for dicom_tag, key in _tag_map.items():
63
+ try:
64
+ val = getattr(ds, dicom_tag, None)
65
+ if val is not None:
66
+ metadata[key] = str(val)
67
+ except Exception:
68
+ pass
69
+
70
+ # Normalise age: DICOM age strings look like "045Y", "006M", "010D"
71
+ age: Optional[int] = None
72
+ age_str = metadata.pop("age_str", None)
73
+ if age_str:
74
+ try:
75
+ if age_str.endswith("Y"):
76
+ age = int(age_str[:-1])
77
+ elif age_str.endswith("M"):
78
+ age = max(0, int(int(age_str[:-1]) / 12))
79
+ except ValueError:
80
+ pass
81
+ if age is not None:
82
+ metadata["age"] = age
83
+
84
+ # Normalise sex: DICOM uses M/F/O
85
+ sex = metadata.get("sex", "")
86
+ if sex and sex.upper() in ("M", "F", "O"):
87
+ metadata["sex"] = sex.upper()
88
+ else:
89
+ metadata.pop("sex", None)
90
+
91
+ # ── Pixel data β†’ PNG base64 ───────────────────────────────────────────────
92
+ try:
93
+ pixel_array = ds.pixel_array.astype(float)
94
+ except Exception as e:
95
+ raise ValueError(f"Could not read DICOM pixel data: {e}")
96
+
97
+ # MONOCHROME1 means bright = low value β†’ invert
98
+ photometric = str(getattr(ds, "PhotometricInterpretation", "MONOCHROME2")).strip()
99
+ if photometric == "MONOCHROME1":
100
+ pixel_array = pixel_array.max() - pixel_array
101
+
102
+ # Normalise to 0–255
103
+ p_min, p_max = pixel_array.min(), pixel_array.max()
104
+ if p_max > p_min:
105
+ pixel_array = ((pixel_array - p_min) / (p_max - p_min) * 255).astype("uint8")
106
+ else:
107
+ pixel_array = pixel_array.astype("uint8")
108
+
109
+ # Handle grayscale, RGB, multi-frame (take first frame)
110
+ if pixel_array.ndim == 3 and pixel_array.shape[0] > 3:
111
+ pixel_array = pixel_array[0] # first frame of multi-frame
112
+
113
+ if pixel_array.ndim == 2:
114
+ img = Image.fromarray(pixel_array, mode="L").convert("RGB")
115
+ else:
116
+ img = Image.fromarray(pixel_array.astype("uint8"))
117
+
118
+ buf = io.BytesIO()
119
+ img.save(buf, format="PNG", optimize=True)
120
+ b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
121
+ base64_image = f"data:image/png;base64,{b64}"
122
+
123
+ logger.info(
124
+ f"DICOM parsed | modality={metadata.get('modality','?')} "
125
+ f"body_part={metadata.get('body_part','?')} "
126
+ f"size={metadata.get('image_rows','?')}x{metadata.get('image_cols','?')}"
127
+ )
128
+ return base64_image, metadata
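A quick sketch of the DICOM entry point above; the file name is hypothetical, and pydicom, numpy, and Pillow must be installed:

from core.dicom import parse_dicom

with open("study_0001.dcm", "rb") as fh:  # hypothetical DICOM file
    b64_png, meta = parse_dicom(fh.read())

print(meta.get("modality"), meta.get("body_part"), meta.get("age"))
# b64_png is a "data:image/png;base64,..." string that VisionAgent.process() accepts as-is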
core/fhir.py ADDED
@@ -0,0 +1,210 @@
1
+ # mediagent/core/fhir.py
2
+ """
3
+ FHIR R4 DiagnosticReport export for MediAgent.
4
+ Converts a FinalReport into a standards-compliant HL7 FHIR R4 resource
5
+ suitable for import into any EMR system (Epic, Cerner, etc.).
6
+ """
7
+
8
+ import base64
9
+ import uuid
10
+ from datetime import datetime
11
+ from typing import Any, Dict, List
12
+
13
+ from core.models import FinalReport
14
+
15
+
16
+ def to_fhir_diagnostic_report(report: FinalReport) -> Dict[str, Any]:
17
+ """
18
+ Convert a MediAgent FinalReport into a FHIR R4 DiagnosticReport resource.
19
+
20
+ Conforms to:
21
+ http://hl7.org/fhir/R4/diagnosticreport.html
22
+ LOINC 18748-4 (Diagnostic imaging study)
23
+ """
24
+ meta = report.patient_metadata or {}
25
+ sections = report.sections
26
+ ts = report.generation_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
27
+ patient_uid = f"patient-{uuid.uuid4().hex[:8]}"
28
+
29
+ # ── Contained Patient resource ────────────────────────────────────────────
30
+ patient: Dict[str, Any] = {
31
+ "resourceType": "Patient",
32
+ "id": patient_uid,
33
+ }
34
+ if meta.get("sex"):
35
+ patient["gender"] = _map_sex(str(meta["sex"]))
36
+ age = meta.get("age")
37
+ if age:
38
+ birth_year = datetime.now().year - int(age)
39
+ patient["birthDate"] = str(birth_year)
40
+ if meta.get("patient_name"):
41
+ patient["name"] = [{"text": str(meta["patient_name"])}]
42
+
43
+ # ── Imaging study Observations (one per finding section) ─────────────────
44
+ observations: List[Dict[str, Any]] = []
45
+ finding_text = sections.findings or ""
46
+ if finding_text:
47
+ obs_id = f"obs-{uuid.uuid4().hex[:8]}"
48
+ observations.append({
49
+ "resourceType": "Observation",
50
+ "id": obs_id,
51
+ "status": "final",
52
+ "category": [{
53
+ "coding": [{
54
+ "system": "http://terminology.hl7.org/CodeSystem/observation-category",
55
+ "code": "imaging",
56
+ "display": "Imaging"
57
+ }]
58
+ }],
59
+ "code": {
60
+ "coding": [{
61
+ "system": "http://loinc.org",
62
+ "code": "59776-5",
63
+ "display": "Findings"
64
+ }]
65
+ },
66
+ "subject": {"reference": f"#{patient_uid}"},
67
+ "effectiveDateTime": ts,
68
+ "valueString": finding_text[:2000] # FHIR string limit safeguard
69
+ })
70
+
71
+ # ── Build contained resources list ───────────────────────────────────────
72
+ contained: List[Dict] = [patient] + observations
73
+ obs_refs = [{"reference": f"#{o['id']}"} for o in observations]
74
+
75
+ # ── Severity β†’ SNOMED code ────────────────────────────────────────────────
76
+ severity_val = report.overall_severity.value if hasattr(report.overall_severity, "value") else str(report.overall_severity)
77
+ conclusion_codes = _severity_to_snomed(severity_val)
78
+
79
+ # ── Modality coding from vision summary ──────────────────────────────────
80
+ modality_code = _extract_modality_code(report.vision_summary)
81
+
82
+ # ── Presented form: full plain-text report ────────────────────────────────
83
+ report_text = (
84
+ f"CLINICAL HISTORY\n{sections.clinical_history}\n\n"
85
+ f"TECHNIQUE\n{sections.technique}\n\n"
86
+ f"FINDINGS\n{sections.findings}\n\n"
87
+ f"IMPRESSION\n{sections.impression}\n\n"
88
+ f"RECOMMENDATIONS\n{sections.recommendations}\n\n"
89
+ f"DISCLAIMER\n{sections.disclaimer}"
90
+ )
91
+
92
+ # ── Agent pipeline extension ──────────────────────────────────────────────
93
+ pipeline_str = ", ".join(
94
+ f"{k}:{v.value}" for k, v in report.agent_pipeline_status.items()
95
+ )
96
+
97
+ resource: Dict[str, Any] = {
98
+ "resourceType": "DiagnosticReport",
99
+ "id": report.report_id,
100
+ "meta": {
101
+ "profile": [
102
+ "http://hl7.org/fhir/StructureDefinition/DiagnosticReport"
103
+ ],
104
+ "lastUpdated": ts,
105
+ "tag": [{
106
+ "system": "https://mediagent.ai/tags",
107
+ "code": "ai-generated",
108
+ "display": "AI Generated - Requires Radiologist Review"
109
+ }]
110
+ },
111
+ "contained": contained,
112
+ "status": "final",
113
+ "category": [{
114
+ "coding": [{
115
+ "system": "http://terminology.hl7.org/CodeSystem/v2-0074",
116
+ "code": "RAD",
117
+ "display": "Radiology"
118
+ }]
119
+ }],
120
+ "code": {
121
+ "coding": [
122
+ {
123
+ "system": "http://loinc.org",
124
+ "code": "18748-4",
125
+ "display": "Diagnostic imaging study"
126
+ },
127
+ modality_code
128
+ ],
129
+ "text": "Medical Imaging Analysis - AI-Assisted Radiology Report"
130
+ },
131
+ "subject": {
132
+ "reference": f"#{patient_uid}"
133
+ },
134
+ "effectiveDateTime": ts,
135
+ "issued": ts,
136
+ "performer": [{
137
+ "display": "MediAgent AI System (AMD Instinct MI300X)"
138
+ }],
139
+ "result": obs_refs,
140
+ "conclusion": sections.impression,
141
+ "conclusionCode": conclusion_codes,
142
+ "presentedForm": [{
143
+ "contentType": "text/plain; charset=utf-8",
144
+ "data": base64.b64encode(report_text.encode("utf-8")).decode("utf-8"),
145
+ "title": f"Radiology Report {report.report_id}",
146
+ "creation": ts
147
+ }],
148
+ "extension": [
149
+ {
150
+ "url": "https://mediagent.ai/fhir/StructureDefinition/ai-quality-score",
151
+ "valueInteger": _extract_qa_score(sections.recommendations)
152
+ },
153
+ {
154
+ "url": "https://mediagent.ai/fhir/StructureDefinition/overall-severity",
155
+ "valueCode": severity_val
156
+ },
157
+ {
158
+ "url": "https://mediagent.ai/fhir/StructureDefinition/pipeline-status",
159
+ "valueString": pipeline_str
160
+ },
161
+ {
162
+ "url": "https://mediagent.ai/fhir/StructureDefinition/inference-platform",
163
+ "valueString": "AMD Instinct MI300X / ROCm / vLLM / Qwen"
164
+ }
165
+ ]
166
+ }
167
+
168
+ return resource
169
+
170
+
171
+ # ── Helpers ───────────────────────────────────────────────────────────────────
172
+
173
+ def _map_sex(sex: str) -> str:
174
+ return {"M": "male", "F": "female", "O": "other"}.get(sex.upper(), "unknown")
175
+
176
+
177
+ def _severity_to_snomed(severity: str) -> List[Dict]:
178
+ snomed = {
179
+ "NORMAL": ("260313008", "Normal"),
180
+ "INCIDENTAL": ("102483000", "Incidental finding"),
181
+ "SIGNIFICANT": ("404684003", "Clinical finding"),
182
+ "CRITICAL": ("399625000", "Critical finding"),
183
+ }
184
+ code, display = snomed.get(severity.upper(), ("404684003", "Clinical finding"))
185
+ return [{
186
+ "coding": [{
187
+ "system": "http://snomed.info/sct",
188
+ "code": code,
189
+ "display": display
190
+ }]
191
+ }]
192
+
193
+
194
+ def _extract_modality_code(vision_summary: str) -> Dict:
195
+ """Map detected modality to DICOM/RadLex LOINC codes."""
196
+ loinc_map = {
197
+ "X-RAY": ("24627-2", "Chest X-ray"),
198
+ "CT": ("18747-6", "CT study"),
199
+ "MRI": ("18755-9", "MRI study"),
200
+ }
201
+ for key, (code, display) in loinc_map.items():
202
+ if key in vision_summary.upper():
203
+ return {"system": "http://loinc.org", "code": code, "display": display}
204
+ return {"system": "http://loinc.org", "code": "18748-4", "display": "Diagnostic imaging study"}
205
+
206
+
207
+ def _extract_qa_score(recommendations: str) -> int:
208
+ import re
209
+ m = re.search(r"Score[:\s]+(\d+)", recommendations or "")
210
+ return int(m.group(1)) if m else 85
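A minimal export sketch for core/fhir.py; the FinalReport fields below are illustrative defaults and the output path is arbitrary:

import json

from core.fhir import to_fhir_diagnostic_report
from core.models import FinalReport, ReportSection

report = FinalReport(
    sections=ReportSection(findings="Lungs are clear. No pleural effusion or pneumothorax."),
    patient_metadata={"age": 58, "sex": "M"},
)
resource = to_fhir_diagnostic_report(report)

with open(f"{report.report_id}.fhir.json", "w", encoding="utf-8") as fh:
    json.dump(resource, fh, indent=2)
# resource["resourceType"] == "DiagnosticReport"; "contained" holds the Patient and Observation entries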
core/llm.py ADDED
@@ -0,0 +1,315 @@
1
+ # mediagent/core/llm.py
2
+ """
3
+ Production-grade LLM client wrapper for MediAgent.
4
+ Handles text and multimodal (vision) completions against local Qwen model.
5
+ Implements retry logic, error handling, response parsing, and OpenAI-compatible API calls.
6
+ """
7
+
8
+ import logging
9
+ import time
10
+ import re
11
+ from typing import Any, Dict, List, Optional, Union
12
+
13
+ import openai
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ class LLMClient:
19
+ """
20
+ Lightweight, framework-agnostic LLM client wrapping OpenAI Python SDK.
21
+ Designed for local inference endpoints (vLLM, Ollama, TensorRT-LLM)
22
+ running at http://localhost:8000/v1 with model path "/model".
23
+ """
24
+
25
+ DEFAULT_BASE_URL = "http://localhost:8000/v1"
26
+ DEFAULT_MODEL = "/model"
27
+ DEFAULT_API_KEY = "none"
28
+
29
+ def __init__(
30
+ self,
31
+ base_url: str = DEFAULT_BASE_URL,
32
+ model: str = DEFAULT_MODEL,
33
+ max_retries: int = 3,
34
+ timeout: float = 90.0,
35
+ temperature: float = 0.0
36
+ ):
37
+ self.model = model
38
+ self.max_retries = max_retries
39
+ self.default_temperature = temperature
40
+ self.timeout = timeout
41
+
42
+ self.client = openai.OpenAI(
43
+ base_url=base_url,
44
+ api_key=self.DEFAULT_API_KEY,
45
+ timeout=timeout
46
+ )
47
+ logger.info(f"LLMClient initialized | Model: {self.model} | Endpoint: {base_url}")
48
+
49
+ # ─────────────────────────────────────────────────────────────────────────
50
+ # CORE GENERATION METHODS
51
+ # ─────────────────────────────────────────────────────────────────────────
52
+
53
+ def generate_text(
54
+ self,
55
+ prompt: str,
56
+ system_prompt: str = "",
57
+ temperature: Optional[float] = None,
58
+ force_json: bool = False,
59
+ max_tokens: Optional[int] = None,
60
+ extra_body: Optional[Dict] = None,
61
+ ) -> Dict[str, Any]:
62
+ """
63
+ Send a text-only completion request to the LLM.
64
+ Returns standardized response dict with content, usage, success flag, and error.
65
+ """
66
+ messages = self._build_messages(system_prompt, prompt)
67
+ response_format = {"type": "json_object"} if force_json else None
68
+ return self._execute_with_retry(
69
+ messages=messages,
70
+ temperature=temperature,
71
+ response_format=response_format,
72
+ call_type="TEXT",
73
+ max_tokens=max_tokens,
74
+ extra_body=extra_body,
75
+ )
76
+
77
+ def generate_text_streaming(
78
+ self,
79
+ prompt: str,
80
+ system_prompt: str = "",
81
+ temperature: Optional[float] = None,
82
+ on_token: Optional[Any] = None,
83
+ ) -> Dict[str, Any]:
84
+ """
85
+ Text completion with optional token-level streaming callback.
86
+ When on_token is provided, calls on_token(chunk: str) for every token chunk
87
+ as it arrives from the model. Returns the full response dict at the end.
88
+ Falls back to standard generate_text if streaming fails.
89
+ """
90
+ if on_token is None:
91
+ return self.generate_text(prompt, system_prompt, temperature)
92
+
93
+ messages = self._build_messages(system_prompt, prompt)
94
+ temp = temperature if temperature is not None else self.default_temperature
95
+
96
+ try:
97
+ stream = self.client.chat.completions.create(
98
+ model=self.model,
99
+ messages=messages,
100
+ temperature=temp,
101
+ stream=True,
102
+ )
103
+ full_content = ""
104
+ for chunk in stream:
105
+ delta = (chunk.choices[0].delta.content or "") if chunk.choices else ""
106
+ if delta:
107
+ full_content += delta
108
+ try:
109
+ on_token(delta)
110
+ except Exception:
111
+ pass # callback errors must not break generation
112
+
113
+ logger.debug("Streaming TEXT generation completed | chars=%d", len(full_content))
114
+ return {
115
+ "success": True,
116
+ "content": full_content,
117
+ "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
118
+ "model": self.model,
119
+ "error": None,
120
+ }
121
+ except Exception as e:
122
+ logger.warning("Streaming failed (%s), falling back to standard call", e)
123
+ return self.generate_text(prompt, system_prompt, temperature)
124
+
125
+ def generate_vision(
126
+ self,
127
+ base64_image: str,
128
+ prompt: str,
129
+ system_prompt: str = "",
130
+ temperature: Optional[float] = None,
131
+ max_tokens: Optional[int] = None
132
+ ) -> Dict[str, Any]:
133
+ """
134
+ Send a multimodal completion request with a base64 encoded medical image.
135
+ Automatically detects image MIME type and formats per OpenAI vision spec.
136
+ """
137
+ img_url = self._format_image_url(base64_image)
138
+ user_content = [
139
+ {"type": "text", "text": prompt},
140
+ {"type": "image_url", "image_url": {"url": img_url}}
141
+ ]
142
+ messages = self._build_messages(system_prompt, user_content)
143
+ return self._execute_with_retry(
144
+ messages=messages,
145
+ temperature=temperature,
146
+ response_format=None,
147
+ call_type="VISION",
148
+ max_tokens=max_tokens
149
+ )
150
+
151
+ # ─────────────────────────────────────────────────────────────────────────
152
+ # INTERNAL HELPERS
153
+ # ─────────────────────────────────────────────────────────────────────────
154
+
155
+ def _build_messages(
156
+ self,
157
+ system_prompt: str,
158
+ user_content: Union[str, List[Dict]]
159
+ ) -> List[Dict]:
160
+ """Construct OpenAI-compatible message array."""
161
+ messages = []
162
+ if system_prompt:
163
+ messages.append({"role": "system", "content": system_prompt})
164
+ if isinstance(user_content, str):
165
+ messages.append({"role": "user", "content": user_content})
166
+ else:
167
+ messages.append({"role": "user", "content": user_content})
168
+ return messages
169
+
170
+ def _format_image_url(self, base64_data: str) -> str:
171
+ """Normalize base64 image data into OpenAI vision-compatible URL format."""
172
+ if base64_data.startswith(("data:image/png;base64,", "data:image/jpeg;base64,", "data:image/jpg;base64,")):
173
+ return base64_data
174
+ # Default to JPEG if no MIME prefix is present
175
+ return f"data:image/jpeg;base64,{base64_data}"
176
+
177
+ def _attempt_call(
178
+ self,
179
+ messages: List[Dict],
180
+ temperature: Optional[float],
181
+ response_format: Optional[Dict],
182
+ max_tokens: Optional[int] = None,
183
+ extra_body: Optional[Dict] = None,
184
+ ) -> Dict[str, Any]:
185
+ """Execute a single API call with the OpenAI client."""
186
+ kwargs = {
187
+ "model": self.model,
188
+ "messages": messages,
189
+ "temperature": temperature if temperature is not None else self.default_temperature,
190
+ }
191
+ if max_tokens:
192
+ kwargs["max_tokens"] = max_tokens
193
+ if response_format:
194
+ kwargs["response_format"] = response_format
195
+ if extra_body:
196
+ kwargs["extra_body"] = extra_body
197
+
198
+ response = self.client.chat.completions.create(**kwargs)
199
+ content = response.choices[0].message.content or ""
200
+
201
+ usage = response.usage
202
+ return {
203
+ "success": True,
204
+ "content": content,
205
+ "raw_response": response,
206
+ "usage": {
207
+ "prompt_tokens": usage.prompt_tokens if usage else 0,
208
+ "completion_tokens": usage.completion_tokens if usage else 0,
209
+ "total_tokens": usage.total_tokens if usage else 0,
210
+ },
211
+ "model": response.model,
212
+ "error": None
213
+ }
214
+
215
+ def _execute_with_retry(
216
+ self,
217
+ messages: List[Dict],
218
+ temperature: Optional[float],
219
+ response_format: Optional[Dict],
220
+ call_type: str,
221
+ max_tokens: Optional[int] = None,
222
+ extra_body: Optional[Dict] = None,
223
+ ) -> Dict[str, Any]:
224
+ """Retry wrapper with a short fixed backoff for robust local inference."""
225
+ last_error = None
226
+ for attempt in range(1, self.max_retries + 1):
227
+ try:
228
+ result = self._attempt_call(messages, temperature, response_format, max_tokens, extra_body)
229
+ if result["success"]:
230
+ logger.debug(f"{call_type} generation successful on attempt {attempt}")
231
+ return result
232
+ except Exception as e:
233
+ last_error = str(e)
234
+ logger.warning(f"{call_type} generation failed on attempt {attempt}/{self.max_retries}: {e}")
235
+ if attempt < self.max_retries:
236
+ # Short fixed backoff for local inference; no need for exponential waits
237
+ backoff = 1.0
238
+ logger.info(f"Retrying in {backoff}s...")
239
+ time.sleep(backoff)
240
+
241
+ logger.error(f"{call_type} generation failed permanently after {self.max_retries} attempts.")
242
+ return {
243
+ "success": False,
244
+ "content": "",
245
+ "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
246
+ "model": self.model,
247
+ "error": last_error or f"{call_type} endpoint unreachable or max retries exceeded."
248
+ }
249
+
250
+ # ─────────────────────────────────────────────────────────────────────────
251
+ # RESPONSE PARSING UTILITIES
252
+ # ─────────────────────────────────────────────────────────────────────────
253
+
254
+ @staticmethod
255
+ def extract_json_from_response(content: str) -> Optional[Dict[str, Any]]:
256
+ """
257
+ Safely extract JSON from LLM output, stripping markdown formatting
258
+ and falling back to scanning for the first embedded JSON object or array.
259
+ """
260
+ if not content:
261
+ return None
262
+ try:
263
+ # Strip markdown code fences if present
264
+ cleaned = re.sub(r"^```(?:json)?\s*|\s*```$", "", content.strip(), flags=re.MULTILINE)
265
+ # First try direct JSON decode
266
+ return LLMClient._safe_json_decode(cleaned)
267
+ except Exception:
268
+ logger.debug("Direct JSON extraction failed. Attempting fallback parsing...")
269
+ return LLMClient._fallback_json_parse(cleaned)
270
+
271
+ @staticmethod
272
+ def _safe_json_decode(text: str):
273
+ """Import json lazily and decode, raising cleanly on failure."""
274
+ import json
275
+ return json.loads(text)
276
+
277
+ @staticmethod
278
+ def _fallback_json_parse(text: str) -> Optional[Dict[str, Any]]:
279
+ """
280
+ Fallback: scan for first valid JSON object or array in the text.
281
+ Handles cases where the LLM adds conversational padding.
282
+ """
283
+ import json
284
+ brace_depth = 0
285
+ start_idx = None
286
+ for i, char in enumerate(text):
287
+ if char == "{":
288
+ if brace_depth == 0:
289
+ start_idx = i
290
+ brace_depth += 1
291
+ elif char == "}":
292
+ brace_depth -= 1
293
+ if brace_depth == 0 and start_idx is not None:
294
+ candidate = text[start_idx:i+1]
295
+ try:
296
+ return json.loads(candidate)
297
+ except json.JSONDecodeError:
298
+ continue
299
+ # Try array fallback
300
+ bracket_depth = 0
301
+ start_idx = None
302
+ for i, char in enumerate(text):
303
+ if char == "[":
304
+ if bracket_depth == 0:
305
+ start_idx = i
306
+ bracket_depth += 1
307
+ elif char == "]":
308
+ bracket_depth -= 1
309
+ if bracket_depth == 0 and start_idx is not None:
310
+ candidate = text[start_idx:i+1]
311
+ try:
312
+ return json.loads(candidate)
313
+ except json.JSONDecodeError:
314
+ continue
315
+ return None
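A small sketch exercising the client above against its default local endpoint (DEFAULT_BASE_URL / DEFAULT_MODEL); the prompts are illustrative, while extract_json_from_response works offline on any string:

from core.llm import LLMClient

client = LLMClient(base_url="http://localhost:8000/v1", model="/model")
result = client.generate_text(
    prompt='Return {"status": "ok"} and nothing else.',
    system_prompt="You are a JSON-only responder.",
    force_json=True,
)
if result["success"]:
    print(LLMClient.extract_json_from_response(result["content"]))

# The parser also strips code fences and conversational padding, e.g.:
print(LLMClient.extract_json_from_response('Here you go: {"a": 1} Hope that helps.'))  # -> {'a': 1}

Token-level streaming is available through generate_text_streaming(prompt, on_token=callback), which falls back to a standard call if streaming fails.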
core/models.py ADDED
@@ -0,0 +1,215 @@
1
+ # mediagent/core/models.py
2
+ """
3
+ Pydantic data models for MediAgent multi-agent medical imaging pipeline.
4
+ Defines input, agent outputs, report structure, and pipeline state tracking.
5
+ """
6
+
7
+ import enum
8
+ import uuid
9
+ from datetime import datetime
10
+ from typing import Any, Dict, List, Optional
11
+
12
+ from pydantic import BaseModel, Field, field_validator
13
+
14
+
15
+ # ─────────────────────────────────────────────────────────────────────────────
16
+ # ENUMERATIONS
17
+ # ─────────────────────────────────────────────────────────────────────────────
18
+
19
+ class SeverityLevel(str, enum.Enum):
20
+ """Clinical severity classification for findings."""
21
+ NORMAL = "NORMAL"
22
+ INCIDENTAL = "INCIDENTAL"
23
+ SIGNIFICANT = "SIGNIFICANT"
24
+ CRITICAL = "CRITICAL"
25
+
26
+
27
+ class ConfidenceLevel(str, enum.Enum):
28
+ """AI confidence classification for model outputs."""
29
+ LOW = "LOW"
30
+ MEDIUM = "MEDIUM"
31
+ HIGH = "HIGH"
32
+
33
+
34
+ class AgentStatus(str, enum.Enum):
35
+ """Real-time pipeline agent execution states."""
36
+ WAITING = "WAITING"
37
+ RUNNING = "RUNNING"
38
+ DONE = "DONE"
39
+ ERROR = "ERROR"
40
+
41
+
42
+ class ImageModality(str, enum.Enum):
43
+ """Supported medical imaging modalities."""
44
+ XRAY = "X-RAY"
45
+ MRI = "MRI"
46
+ CT = "CT"
47
+ UNKNOWN = "UNKNOWN"
48
+
49
+
50
+ # ─────────────────────────────────────────────────────────────────────────────
51
+ # INPUT MODELS
52
+ # ─────────────────────────────────────────────────────────────────────────────
53
+
54
+ class PatientInput(BaseModel):
55
+ """Initial client submission containing image and clinical context."""
56
+ image_base64: str = Field(
57
+ ...,
58
+ description="Base64 encoded medical image (PNG/JPG format)"
59
+ )
60
+ symptoms: str = Field(
61
+ default="",
62
+ description="Patient reported symptoms or chief complaint"
63
+ )
64
+ age: Optional[int] = Field(
65
+ default=None, ge=0, le=120, description="Patient age in years"
66
+ )
67
+ sex: Optional[str] = Field(
68
+ default=None, pattern="^(M|F|O)$", description="Patient biological sex"
69
+ )
70
+ clinical_context: Optional[str] = Field(
71
+ default=None, description="Relevant medical history or referral details"
72
+ )
73
+
74
+ @field_validator("image_base64")
75
+ @classmethod
76
+ def validate_image_data(cls, v: str) -> str:
77
+ if not v or len(v) < 10:
78
+ raise ValueError("Invalid or empty base64 image data provided.")
79
+ return v
80
+
81
+
82
+ # ─────────────────────────────────────────────────────────────────────────────
83
+ # AGENT OUTPUT MODELS
84
+ # ─────────────────────────────────────────────────────────────────────────────
85
+
86
+ class IntakeOutput(BaseModel):
87
+ """Structured data produced by the Intake Agent."""
88
+ validated: bool = Field(default=True, description="Whether input passed validation checks")
89
+ standardized_symptoms: str = Field(default="", description="Clinically normalized symptom description")
90
+ extracted_demographics: Dict[str, Any] = Field(default_factory=dict)
91
+ safety_flags: List[str] = Field(default_factory=list, description="Pre-analysis safety/alert flags")
92
+ recommended_modality: ImageModality = Field(default=ImageModality.UNKNOWN)
93
+ processing_notes: str = Field(default="")
94
+
95
+
96
+ class VisionFinding(BaseModel):
97
+ """Individual anatomical observation from the Vision Agent."""
98
+ anatomical_region: str = Field(..., description="e.g., Left Lung Field, Medial Patella")
99
+ description: str = Field(..., description="Detailed radiological description")
100
+ severity: SeverityLevel = Field(default=SeverityLevel.NORMAL)
101
+ confidence: ConfidenceLevel = Field(default=ConfidenceLevel.LOW)
102
+ confidence_score: float = Field(default=0.0, ge=0.0, le=100.0)
103
+ is_anomaly: bool = Field(default=False)
104
+
105
+
106
+ class VisionOutput(BaseModel):
107
+ """Complete visual analysis result from the Vision Agent."""
108
+ modality_detected: ImageModality = Field(default=ImageModality.UNKNOWN)
109
+ technical_quality: str = Field(default="Acceptable", description="Image quality/artifact assessment")
110
+ findings: List[VisionFinding] = Field(default_factory=list)
111
+ overall_assessment: str = Field(default="No obvious abnormalities detected.")
112
+ metadata: Dict[str, Any] = Field(default_factory=dict)
113
+
114
+
115
+ class KnowledgeMatch(BaseModel):
116
+ """Differential diagnosis entry from the Research Agent."""
117
+ condition_name: str = Field(..., description="Medical condition or diagnosis")
118
+ match_probability: float = Field(..., ge=0.0, le=100.0, description="Confidence percentage")
119
+ supporting_evidence: str = Field(..., description="Pathophysiological/clinical correlation")
120
+ differential_rank: int = Field(default=1, ge=1)
121
+ icd10_code: Optional[str] = Field(default=None)
122
+
123
+
124
+ class ResearchOutput(BaseModel):
125
+ """Knowledge base search and differential diagnosis result."""
126
+ differential_diagnoses: List[KnowledgeMatch] = Field(default_factory=list)
127
+ matched_conditions: List[str] = Field(default_factory=list)
128
+ relevant_guidelines: List[str] = Field(default_factory=list)
129
+ research_notes: str = Field(default="")
130
+ sources_used: List[str] = Field(default=["internal_knowledge_base"])
131
+
132
+
133
+ # ─────────────────────────────────────────────────────────────────────────────
134
+ # REPORT MODELS
135
+ # ─────────────────────────────────────────────────────────────────────────────
136
+
137
+ class ReportSection(BaseModel):
138
+ """Standard radiology report structure."""
139
+ clinical_history: str = Field(default="Not provided.")
140
+ technique: str = Field(default="Digital advanced imaging acquisition.")
141
+ findings: str = Field(default="No abnormalities detected.")
142
+ impression: str = Field(default="Within normal limits.")
143
+ recommendations: str = Field(default="Routine follow-up as clinically indicated.")
144
+ disclaimer: str = Field(
145
+ default="This analysis is AI-generated and must be reviewed by a licensed radiologist before any clinical decisions are made."
146
+ )
147
+
148
+
149
+ class FinalReport(BaseModel):
150
+ """Complete synthesized clinical report ready for delivery."""
151
+ report_id: str = Field(default_factory=lambda: f"REP-{uuid.uuid4().hex[:12].upper()}")
152
+ patient_metadata: Dict[str, Any] = Field(default_factory=dict)
153
+ sections: ReportSection = Field(default_factory=ReportSection)
154
+ vision_summary: str = Field(default="")
155
+ research_summary: str = Field(default="")
156
+ overall_severity: SeverityLevel = Field(default=SeverityLevel.NORMAL)
157
+ generation_timestamp: datetime = Field(default_factory=datetime.now)
158
+ agent_pipeline_status: Dict[str, AgentStatus] = Field(default_factory=dict)
159
+
160
+
161
+ # ─────────────────────────────────────────────────────────────────────────────
162
+ # CHAT / ADVISOR MODELS
163
+ # ─────────────────────────────────────────────────────────────────────────────
164
+
165
+ class ChatMessage(BaseModel):
166
+ """Single turn in the post-report clinical advisor chat."""
167
+ role: str = Field(..., description="'user' or 'assistant'")
168
+ content: str = Field(..., description="Message text")
169
+ timestamp: datetime = Field(default_factory=datetime.now)
170
+
171
+
172
+ class ChatRequest(BaseModel):
173
+ """Incoming question for the ClinicalAdvisorAgent."""
174
+ question: str = Field(..., min_length=3, max_length=1000)
175
+
176
+
177
+ class ChatResponse(BaseModel):
178
+ """Response from the ClinicalAdvisorAgent."""
179
+ answer: str
180
+ report_id: str
181
+ timestamp: datetime = Field(default_factory=datetime.now)
182
+
183
+
184
+ # ─────────────────────────────────────────────────────────────────────────────
185
+ # PIPELINE STATE MODEL
186
+ # ─────────────────────────────────────────────────────────────────────────────
187
+
188
+ class PipelineState(BaseModel):
189
+ """Tracks real-time execution state across all agents."""
190
+ current_step: str = Field(default="INIT")
191
+ agent_statuses: Dict[str, AgentStatus] = Field(
192
+ default_factory=lambda: {
193
+ "INTAKE": AgentStatus.WAITING,
194
+ "VISION": AgentStatus.WAITING,
195
+ "RESEARCH": AgentStatus.WAITING,
196
+ "REPORT": AgentStatus.WAITING,
197
+ "CRITIC": AgentStatus.WAITING
198
+ }
199
+ )
200
+ intake_output: Optional[IntakeOutput] = None
201
+ vision_output: Optional[VisionOutput] = None
202
+ research_output: Optional[ResearchOutput] = None
203
+ report_draft: Optional[ReportSection] = None
204
+ final_report: Optional[FinalReport] = None
205
+ error_log: List[str] = Field(default_factory=list)
206
+
207
+ def mark_running(self, agent_name: str) -> None:
208
+ self.agent_statuses[agent_name] = AgentStatus.RUNNING
209
+
210
+ def mark_done(self, agent_name: str) -> None:
211
+ self.agent_statuses[agent_name] = AgentStatus.DONE
212
+
213
+ def mark_error(self, agent_name: str, error_msg: str) -> None:
214
+ self.agent_statuses[agent_name] = AgentStatus.ERROR
215
+ self.error_log.append(f"[{agent_name}] {error_msg}")
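A brief validation sketch for core/models.py; the values are illustrative, and the truncated base64 string only needs to pass the minimum-length check:

from core.models import PatientInput, PipelineState

patient = PatientInput(image_base64="iVBORw0KGgoAAAANSUhEUg...", symptoms="Chest pain", age=58, sex="M")

state = PipelineState()
state.mark_running("VISION")
state.mark_error("RESEARCH", "knowledge base unavailable")
print(state.agent_statuses["VISION"].value, state.error_log)

Out-of-range values (for example age=150 or sex="X") raise pydantic.ValidationError at construction time.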
core/pipeline.py ADDED
@@ -0,0 +1,238 @@
1
+ # mediagent/core/pipeline.py
2
+ """
3
+ Pipeline Orchestrator for MediAgent.
4
+ Manages execution of all medical imaging analysis agents with parallel
5
+ scheduling where possible, tracks real-time state, handles graceful error
6
+ recovery, and coordinates data flow between specialized AI components.
7
+
8
+ Parallelism strategy:
9
+ - INTAKE + VISION run concurrently (vision only needs the raw image)
10
+ - RESEARCH runs after VISION completes (needs findings)
11
+ - REPORT runs after all three complete
12
+ - CRITIC runs after REPORT
13
+ """
14
+
15
+ import logging
16
+ import time
17
+ from concurrent.futures import ThreadPoolExecutor, wait as futures_wait
18
+ from datetime import datetime
19
+ from typing import Any, Callable, Dict, Optional
20
+
21
+ from core.models import (
22
+ AgentStatus,
23
+ FinalReport,
24
+ IntakeOutput,
25
+ PipelineState,
26
+ PatientInput,
27
+ ResearchOutput,
28
+ ReportSection,
29
+ VisionOutput,
30
+ )
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+
35
+ class PipelineOrchestrator:
36
+ """
37
+ Central execution engine that routes patient data through the 5-agent
38
+ medical analysis pipeline. Runs INTAKE and VISION in parallel to cut
39
+ wall-clock latency, then sequences RESEARCH β†’ REPORT β†’ CRITIC.
40
+ """
41
+
42
+ AGENT_ORDER = ["INTAKE", "VISION", "RESEARCH", "REPORT", "CRITIC"]
43
+
44
+ def __init__(
45
+ self,
46
+ intake_agent: Any,
47
+ vision_agent: Any,
48
+ research_agent: Any,
49
+ report_agent: Any,
50
+ critic_agent: Any,
51
+ on_status_update: Optional[Callable[[PipelineState], None]] = None,
52
+ ):
53
+ self.agents = {
54
+ "INTAKE": intake_agent,
55
+ "VISION": vision_agent,
56
+ "RESEARCH": research_agent,
57
+ "REPORT": report_agent,
58
+ "CRITIC": critic_agent,
59
+ }
60
+ self.on_status_update = on_status_update
61
+
62
+ def run(self, patient_input: PatientInput) -> PipelineState:
63
+ """
64
+ Execute the full diagnostic pipeline with parallel INTAKE+VISION stage.
65
+
66
+ Returns:
67
+ PipelineState: Complete execution state containing all outputs,
68
+ agent statuses, and final report.
69
+ """
70
+ logger.info("πŸš€ Pipeline execution started | Input ID: %s", id(patient_input))
71
+ state = PipelineState()
72
+
73
+ try:
74
+ # ─────────────────────────────────────────────────────────────
75
+ # STAGE 1: INTAKE + VISION in parallel
76
+ # Vision uses raw symptoms as clinical context so it doesn't
77
+ # have to wait for intake normalization to complete.
78
+ # ─────────────────────────────────────────────────────────────
79
+ raw_context = patient_input.symptoms or ""
80
+ with ThreadPoolExecutor(max_workers=2) as pool:
81
+ intake_fut = pool.submit(
82
+ self._execute_step, state, "INTAKE",
83
+ patient_input=patient_input
84
+ )
85
+ vision_fut = pool.submit(
86
+ self._execute_step, state, "VISION",
87
+ image_base64=patient_input.image_base64,
88
+ clinical_context=raw_context
89
+ )
90
+ futures_wait([intake_fut, vision_fut])
91
+
92
+ if state.agent_statuses["INTAKE"] == AgentStatus.ERROR:
93
+ logger.warning("⚠️ Intake failed. Halting pipeline for safety.")
94
+ return state
95
+ if state.agent_statuses["VISION"] == AgentStatus.ERROR:
96
+ logger.warning("⚠️ Vision analysis failed. Continuing with degraded pipeline.")
97
+
98
+ # ─────────────────────────────────────────────────────────────
99
+ # STAGE 2: RESEARCH (needs vision findings + intake demographics)
100
+ # ─────────────────────────────────────────────────────────────
101
+ self._execute_step(
102
+ state, "RESEARCH",
103
+ vision_findings=state.vision_output.findings if state.vision_output else [],
104
+ demographics=state.intake_output.extracted_demographics if state.intake_output else {},
105
+ detected_modality=state.vision_output.modality_detected.value if state.vision_output else "UNKNOWN"
106
+ )
107
+ if state.agent_statuses["RESEARCH"] == AgentStatus.ERROR:
108
+ logger.warning("⚠️ Research failed. Generating report without differential augmentation.")
109
+
110
+ # ─────────────────────────────────────────────────────────────
111
+ # STAGE 3: REPORT (synthesizes all three upstream outputs)
112
+ # ─────────────────────────────────────────────────────────────
113
+ self._execute_step(
114
+ state, "REPORT",
115
+ intake=state.intake_output,
116
+ vision=state.vision_output,
117
+ research=state.research_output,
118
+ )
119
+ if state.agent_statuses["REPORT"] == AgentStatus.ERROR:
120
+ logger.error("❌ Report generation failed. Pipeline cannot complete safely.")
121
+ return state
122
+
123
+ # ─────────────────────────────────────────────────────────────
124
+ # STAGE 4: CRITIC (QA review of final draft)
125
+ # ─────────────────────────────────────────────────────────────
126
+ self._execute_step(
127
+ state, "CRITIC",
128
+ draft_report=state.report_draft,
129
+ pipeline_state=state
130
+ )
131
+
132
+ state.final_report = self._assemble_final_report(state)
133
+ state.current_step = "COMPLETE"
134
+ logger.info("βœ… Pipeline execution completed successfully.")
135
+
136
+ except Exception as e:
137
+ logger.exception("πŸ’₯ Unhandled pipeline failure: %s", str(e))
138
+ state.current_step = "FAILED"
139
+ state.error_log.append(f"SYSTEM_FAILURE: {str(e)}")
140
+
141
+ return state
142
+
143
+ def _execute_step(
144
+ self,
145
+ state: PipelineState,
146
+ agent_name: str,
147
+ **kwargs
148
+ ) -> None:
149
+ """
150
+ Generic step executor with state management, timing, and error isolation.
151
+ Thread-safe: each agent writes to its own dedicated state field.
152
+ """
153
+ logger.info(f"▢️ Executing agent: {agent_name}")
154
+ state.current_step = agent_name
155
+ state.mark_running(agent_name)
156
+ self._notify(state)
157
+
158
+ start_time = time.perf_counter()
159
+ try:
160
+ agent = self.agents[agent_name]
161
+ output = agent.process(**kwargs)
162
+ elapsed = time.perf_counter() - start_time
163
+
164
+ if output is not None:
165
+ if agent_name == "INTAKE":
166
+ state.intake_output = output
167
+ elif agent_name == "VISION":
168
+ state.vision_output = output
169
+ elif agent_name == "RESEARCH":
170
+ state.research_output = output
171
+ elif agent_name == "REPORT":
172
+ state.report_draft = output
173
+
174
+ state.mark_done(agent_name)
175
+ logger.info(f"βœ… {agent_name} completed in {elapsed:.3f}s")
176
+
177
+ except Exception as e:
178
+ elapsed = time.perf_counter() - start_time
179
+ error_msg = f"{agent_name} execution failed after {elapsed:.3f}s: {str(e)}"
180
+ logger.error("❌ %s", error_msg, exc_info=True)
181
+ state.mark_error(agent_name, str(e))
182
+
183
+ finally:
184
+ self._notify(state)
185
+
186
+ def _assemble_final_report(self, state: PipelineState) -> FinalReport:
187
+ """
188
+ Synthesize all agent outputs into the final deliverable report structure.
189
+ Applies critic modifications and standardizes formatting.
190
+ """
191
+ report_draft = state.report_draft or ReportSection()
192
+
193
+ # Determine overall severity from vision findings
194
+ overall_severity = "NORMAL"
195
+ if state.vision_output and state.vision_output.findings:
196
+ severity_hierarchy = {"CRITICAL": 4, "SIGNIFICANT": 3, "INCIDENTAL": 2, "NORMAL": 1}
197
+ highest = max(
198
+ (severity_hierarchy.get(f.severity.value, 1) for f in state.vision_output.findings),
199
+ default=1
200
+ )
201
+ severity_map = {4: "CRITICAL", 3: "SIGNIFICANT", 2: "INCIDENTAL", 1: "NORMAL"}
202
+ overall_severity = severity_map.get(highest, "NORMAL")
203
+
204
+ # Build vision summary
205
+ vision_summary = "No imaging analysis performed."
206
+ if state.vision_output:
207
+ anomalies = [f.description for f in state.vision_output.findings if f.is_anomaly]
208
+ vision_summary = (
209
+ f"Modality: {state.vision_output.modality_detected.value} | "
210
+ f"Quality: {state.vision_output.technical_quality} | "
211
+ f"Anomalies Detected: {len(anomalies)} | "
212
+ f"Overall: {state.vision_output.overall_assessment}"
213
+ )
214
+
215
+ # Build research summary
216
+ research_summary = "No differential diagnosis generated."
217
+ if state.research_output and state.research_output.differential_diagnoses:
218
+ top_dx = state.research_output.differential_diagnoses[:3]
219
+ dx_list = " | ".join([d.condition_name for d in top_dx])
220
+ research_summary = f"Top Differentials: {dx_list} | Confidence: {'/'.join([f'{d.match_probability:.0f}%' for d in top_dx])}"
221
+
222
+ return FinalReport(
223
+ patient_metadata=state.intake_output.extracted_demographics if state.intake_output else {},
224
+ sections=report_draft,
225
+ vision_summary=vision_summary,
226
+ research_summary=research_summary,
227
+ overall_severity=overall_severity,
228
+ agent_pipeline_status=state.agent_statuses,
229
+ generation_timestamp=datetime.now()
230
+ )
231
+
232
+ def _notify(self, state: PipelineState) -> None:
233
+ """Invoke status callback if registered (used for SSE/UI polling)."""
234
+ if self.on_status_update:
235
+ try:
236
+ self.on_status_update(state)
237
+ except Exception as e:
238
+ logger.warning("⚠️ Status callback failed: %s", str(e))
main.py ADDED
@@ -0,0 +1,521 @@
1
+ # mediagent/main.py
2
+ """
3
+ MediAgent - Autonomous Multi-Agent Medical Imaging Analysis System
4
+ Production FastAPI Server & Orchestrator Entry Point
5
+
6
+ New in v2.0:
7
+ - DICOM (.dcm) file support with automatic metadata extraction
8
+ - Real-time token-level streaming during report generation
9
+ - AMD GPU metrics endpoint (/metrics/gpu)
10
+ - Post-report clinical advisor chat (/chat/{report_id})
11
+ - FHIR R4 DiagnosticReport export (/export/fhir/{report_id})
12
+ """
13
+
14
+ import json
15
+ import logging
16
+ import base64
17
+ import uuid
18
+ import asyncio
19
+ import subprocess
20
+ import uvicorn
21
+ import os
22
+ from datetime import datetime
23
+ from typing import Dict, Optional, Any, AsyncGenerator
24
+ from concurrent.futures import ThreadPoolExecutor
25
+
26
+ from fastapi import FastAPI, UploadFile, File, Form, HTTPException
27
+ from fastapi.middleware.cors import CORSMiddleware
28
+ from fastapi.responses import HTMLResponse, JSONResponse, FileResponse, StreamingResponse
29
+ from fastapi.staticfiles import StaticFiles
30
+
31
+ from core.llm import LLMClient
32
+ from core.models import PatientInput, PipelineState, ChatRequest, ChatResponse
33
+ from core.pipeline import PipelineOrchestrator
34
+ from agents.intake import IntakeAgent
35
+ from agents.vision import VisionAgent
36
+ from agents.research import ResearchAgent
37
+ from agents.report import ReportAgent
38
+ from agents.critic import CriticAgent
39
+ from agents.advisor import ClinicalAdvisorAgent
40
+
41
+ # ─────────────────────────────────────────────────────────────────────────────
42
+ # LOGGING
43
+ # ─────────────────────────────────────────────────────────────────────────────
44
+ logging.basicConfig(
45
+ level=logging.INFO,
46
+ format="%(asctime)s | %(levelname)-8s | %(name)s | %(message)s",
47
+ datefmt="%Y-%m-%d %H:%M:%S",
48
+ force=True
49
+ )
50
+ logger = logging.getLogger("mediagent.server")
51
+
52
+ # ─────────────────────────────────────────────────────────────────────────────
53
+ # CONFIG
54
+ # ─────────────────────────────────────────────────────────────────────────────
55
+ LLM_BASE_URL = os.getenv("LLM_BASE_URL", "http://localhost:8000/v1")
56
+ DEMO_MODE = os.getenv("DEMO_MODE", "false").lower() == "true"
57
+
58
+ # ─────────────────────────────────────────────────────────────────────────────
59
+ # APP
60
+ # ─────────────────────────────────────────────────────────────────────────────
61
+ app = FastAPI(
62
+ title="MediAgent API",
63
+ version="2.0.0",
64
+ description="Autonomous Multi-Agent Medical Imaging Analysis System β€” AMD MI300X",
65
+ docs_url="/api/docs",
66
+ redoc_url="/api/redoc"
67
+ )
68
+
69
+ app.add_middleware(
70
+ CORSMiddleware,
71
+ allow_origins=["*"],
72
+ allow_credentials=True,
73
+ allow_methods=["*"],
74
+ allow_headers=["*"],
75
+ )
76
+
77
+ # Registry: report_id β†’ PipelineState (capped at 200 entries)
78
+ pipeline_registry: Dict[str, PipelineState] = {}
79
+ REGISTRY_MAX_SIZE = 200
80
+
81
+
82
+ # ─────────────────────────────────────────────────────────────────────────────
83
+ # STARTUP
84
+ # ─────────────────────────────────────────────────────────────────────────────
85
+ @app.on_event("startup")
86
+ async def startup_event():
87
+ logger.info("πŸš€ MediAgent v2.0 β€” System Startup")
88
+ if DEMO_MODE:
89
+ logger.info("⚠️ DEMO MODE ACTIVE β€” no real inference will be performed")
90
+ try:
91
+ llm_client = LLMClient()
92
+ app.state.llm_client = llm_client
93
+ app.state.intake_agent = IntakeAgent(llm_client=llm_client)
94
+ app.state.vision_agent = VisionAgent(llm_client=llm_client)
95
+ app.state.research_agent = ResearchAgent(llm_client=llm_client)
96
+ app.state.report_agent = ReportAgent(llm_client=llm_client)
97
+ app.state.critic_agent = CriticAgent(llm_client=llm_client)
98
+ app.state.advisor_agent = ClinicalAdvisorAgent(llm_client=llm_client)
99
+
100
+ def _default_cb(state: PipelineState):
101
+ pass
102
+
103
+ app.state.orchestrator = PipelineOrchestrator(
104
+ intake_agent=app.state.intake_agent,
105
+ vision_agent=app.state.vision_agent,
106
+ research_agent=app.state.research_agent,
107
+ report_agent=app.state.report_agent,
108
+ critic_agent=app.state.critic_agent,
109
+ on_status_update=_default_cb,
110
+ )
111
+ logger.info("βœ… All agents initialised. MediAgent v2.0 online.")
112
+ except Exception as e:
113
+ logger.critical("πŸ’₯ Startup failure: %s", e)
114
+ raise SystemExit(str(e))
115
+
116
+
117
+ # ─────────────────────────────────────────────────────────────────────────────
118
+ # HELPERS
119
+ # ─────────────────────────────────────────────────────────────────────────────
120
+
121
+ def _evict_registry():
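+ # Drops the oldest entry: plain dicts preserve insertion order (Python 3.7+),
+ # so next(iter(...)) yields the first-inserted report_id, i.e. FIFO eviction.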
122
+ if len(pipeline_registry) >= REGISTRY_MAX_SIZE:
123
+ del pipeline_registry[next(iter(pipeline_registry))]
124
+
125
+
126
+ async def _read_and_encode_image(image: UploadFile):
127
+ image_bytes = await image.read()
128
+ if len(image_bytes) > 20 * 1024 * 1024:
129
+ raise HTTPException(status_code=413, detail="Image exceeds 20 MB size limit.")
130
+
131
+ filename = (image.filename or "").lower()
132
+ content_type = image.content_type or ""
133
+
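+ # DICOM Part 10 files carry a 128-byte preamble followed by the "DICM" magic
+ # bytes; the leading-zero check is a looser heuristic for files whose extension
+ # and MIME type were lost in transit.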
134
+ is_dicom = (
135
+ filename.endswith(".dcm")
136
+ or "dicom" in content_type
137
+ or image_bytes[:4] == b"\x00\x00\x00\x00"
138
+ or image_bytes[128:132] == b"DICM"
139
+ )
140
+
141
+ if is_dicom:
142
+ try:
143
+ from core.dicom import parse_dicom
144
+ b64_image, dicom_meta = parse_dicom(image_bytes)
145
+ logger.info("DICOM parsed | meta keys: %s", list(dicom_meta.keys()))
146
+ return b64_image, dicom_meta
147
+ except Exception as e:
148
+ logger.warning("DICOM parse failed (%s), treating as regular image", e)
149
+
150
+ b64_data = base64.b64encode(image_bytes).decode("utf-8")
151
+ mime = content_type if content_type.startswith("image/") else "image/jpeg"
152
+ return f"data:{mime};base64,{b64_data}", None
153
+
154
+
155
+ # ─────────────────────────────────────────────────────────────────────────────
156
+ # ROUTES β€” STATIC / HEALTH
157
+ # ─────────────────────────────────────────────────────────────────────────────
158
+
159
+ @app.get("/", response_class=HTMLResponse)
160
+ async def serve_frontend():
161
+ return FileResponse("static/index.html")
162
+
163
+
164
+ @app.get("/health")
165
+ async def health_check():
166
+ return {
167
+ "status": "healthy",
168
+ "version": "2.0.0",
169
+ "timestamp": datetime.utcnow().isoformat() + "Z",
170
+ "system": "MediAgent",
171
+ "infrastructure": "AMD Instinct MI300X / ROCm / vLLM",
172
+ "agents_loaded": hasattr(app.state, "orchestrator"),
173
+ "active_sessions": len(pipeline_registry),
174
+ "demo_mode": DEMO_MODE,
175
+ "features": ["dicom", "clinical_chat", "gpu_metrics"],
176
+ }
177
+
178
+
179
+ # ─────────────────────────────────────────────────────────────────────────────
180
+ # ROUTES β€” AMD GPU METRICS
181
+ # ─────────────────────────────────────────────────────────────────────────────
182
+
183
+ @app.get("/metrics/gpu")
184
+ async def get_gpu_metrics():
185
+ metrics: Dict[str, Any] = {
186
+ "available": False,
187
+ "timestamp": datetime.utcnow().isoformat() + "Z",
188
+ }
189
+
190
+ try:
191
+ import amdsmi
192
+ amdsmi.amdsmi_init()
193
+ devices = amdsmi.amdsmi_get_processor_handles()
194
+ cards = []
195
+ for i, dev in enumerate(devices):
196
+ try:
197
+ usage = amdsmi.amdsmi_get_gpu_activity(dev)
198
+ vram = amdsmi.amdsmi_get_gpu_memory_usage(dev, amdsmi.AmdSmiMemoryType.VRAM)
199
+ vtotal = amdsmi.amdsmi_get_gpu_memory_total(dev, amdsmi.AmdSmiMemoryType.VRAM)
200
+ temp = amdsmi.amdsmi_get_temp_metric(dev, amdsmi.AmdSmiTemperatureType.JUNCTION,
201
+ amdsmi.AmdSmiTemperatureMetric.CURRENT)
202
+ power = amdsmi.amdsmi_get_power_info(dev)
203
+ clk = amdsmi.amdsmi_get_clk_freq(dev, amdsmi.AmdSmiClkType.GFX)
204
+ cards.append({
205
+ "card": f"GPU {i}",
206
+ "gpu_use_pct": usage.get("gfx_activity", 0),
207
+ "vram_used_mb": round(vram / 1024 / 1024, 1),
208
+ "vram_total_mb": round(vtotal / 1024 / 1024, 1),
209
+ "temp_c": temp,
210
+ "power_w": round(power.get("current_socket_power", 0), 1),
211
+ "clk_mhz": clk.get("cur_clk", 0),
212
+ })
213
+ except Exception:
214
+ pass
215
+ amdsmi.amdsmi_shut_down()
216
+ if cards:
217
+ metrics["available"] = True
218
+ metrics["source"] = "amdsmi"
219
+ metrics["cards"] = cards
220
+ return metrics
221
+ except Exception:
222
+ pass
223
+
224
+ try:
225
+ result = subprocess.run(
226
+ ["rocm-smi", "--showuse", "--showmeminfo", "vram", "--showtemp", "--showpower", "--json"],
227
+ capture_output=True, text=True, timeout=5
228
+ )
229
+ if result.returncode == 0 and result.stdout.strip():
230
+ raw = json.loads(result.stdout)
231
+ cards = []
232
+ for key, val in raw.items():
233
+ if not isinstance(val, dict):
234
+ continue
235
+
236
+ def _pick(d, *keys, default=0):
237
+ for k in keys:
238
+ v = d.get(k)
239
+ if v is not None:
240
+ try: return float(str(v).replace("%","").strip())
241
+ except ValueError: pass
242
+ return default
243
+
244
+ vram_used = _pick(val,
245
+ "VRAM Total Used Memory (B)", "Used VRAM (B)", "vram_used",
246
+ "VRAM Total Used Memory (MiB)", "Used VRAM (MiB)", "VRAM Total Memory Used (MiB)")
247
+ vram_total = _pick(val,
248
+ "VRAM Total Memory (B)", "Total VRAM (B)", "vram_total",
249
+ "VRAM Total Memory (MiB)", "Total VRAM (MiB)", "VRAM Total Memory Size (MiB)")
250
+ if vram_used > 1_000_000: vram_used = round(vram_used / 1024 / 1024, 1)
251
+ if vram_total > 1_000_000: vram_total = round(vram_total / 1024 / 1024, 1)
252
+
253
+ cards.append({
254
+ "card": key,
255
+ "gpu_use_pct": _pick(val, "GPU use (%)", "GPU Use (%)", "GFX Activity (%)", "GPU activity (%)"),
256
+ "vram_used_mb": vram_used,
257
+ "vram_total_mb": vram_total,
258
+ "temp_c": _pick(val, "Temperature (Sensor junction) (C)",
259
+ "Temperature (Sensor HBM 0) (C)", "Junction Temperature (C)"),
260
+ "power_w": _pick(val, "Current Socket Graphics Package Power (W)",
261
+ "Average Graphics Package Power (W)", "Socket Power (W)"),
262
+ })
263
+
264
+ if cards:
265
+ metrics["available"] = True
266
+ metrics["source"] = "rocm-smi"
267
+ metrics["cards"] = cards
268
+ return metrics
269
+ except FileNotFoundError:
270
+ pass
271
+ except Exception as e:
272
+ logger.debug("rocm-smi JSON failed: %s", e)
273
+
274
+ metrics["note"] = "AMD GPU metrics unavailable β€” is ROCm installed?"
275
+ return metrics
276
+
277
+
278
+ # ─────────────────────────────────────────────────────────────────────────────
279
+ # ROUTES β€” ANALYSIS
280
+ # ─────────────────────────────────────────────────────────────────────────────
281
+
282
+ @app.post("/analyze")
283
+ async def analyze_image(
284
+ image: UploadFile = File(...),
285
+ symptoms: str = Form(default=""),
286
+ age: Optional[int] = Form(default=None, ge=0, le=120),
287
+ sex: Optional[str] = Form(default=None),
288
+ clinical_context: str = Form(default=""),
289
+ ):
290
+ logger.info("[SYNC] New analysis request | file=%s", image.filename)
291
+ report_id = f"REP-{uuid.uuid4().hex[:12].upper()}"
292
+
293
+ try:
294
+ b64_image, dicom_meta = await _read_and_encode_image(image)
295
+ except HTTPException:
296
+ raise
297
+ except Exception as e:
298
+ raise HTTPException(status_code=400, detail=f"Image processing failed: {e}")
299
+
300
+ if dicom_meta:
301
+ if age is None and dicom_meta.get("age"):
302
+ try: age = int(dicom_meta["age"])
303
+ except (ValueError, TypeError): pass
304
+ if not sex and dicom_meta.get("sex"):
305
+ sex = dicom_meta["sex"]
306
+ if not clinical_context and dicom_meta.get("study_description"):
307
+ clinical_context = f"DICOM: {dicom_meta.get('study_description','')} | {dicom_meta.get('body_part','')}"
308
+
309
+ patient_input = PatientInput(
310
+ image_base64=b64_image, symptoms=symptoms, age=age,
311
+ sex=sex, clinical_context=clinical_context
312
+ )
313
+
314
+ _evict_registry()
315
+ pipeline_registry[report_id] = PipelineState()
316
+
317
+ try:
318
+ state = app.state.orchestrator.run(patient_input)
319
+ pipeline_registry[report_id] = state
320
+ if not state.final_report:
321
+ raise HTTPException(status_code=500, detail="Pipeline completed without final report.")
322
+ report_dict = state.final_report.model_dump()
323
+ if dicom_meta:
324
+ report_dict["dicom_metadata"] = dicom_meta
325
+ logger.info("[SYNC] Complete | report_id=%s", report_id)
326
+ return JSONResponse(content=report_dict)
327
+ except HTTPException:
328
+ raise
329
+ except Exception as e:
330
+ logger.exception("Pipeline failure: %s", e)
331
+ raise HTTPException(status_code=500, detail=str(e))
332
+
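+ # Example request (assumes the server is on its default port 8090 and that a
+ # local sample.png exists; both are placeholders):
+ #   curl -X POST http://localhost:8090/analyze \
+ #        -F "image=@sample.png" \
+ #        -F "symptoms=Chest pain, shortness of breath" \
+ #        -F "age=54" -F "sex=M"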
333
+
334
+ @app.post("/analyze/stream")
335
+ async def analyze_stream(
336
+ image: UploadFile = File(...),
337
+ symptoms: str = Form(default=""),
338
+ age: Optional[int] = Form(default=None, ge=0, le=120),
339
+ sex: Optional[str] = Form(default=None),
340
+ clinical_context: str = Form(default=""),
341
+ ):
342
+ logger.info("[STREAM] New streaming analysis request | file=%s", image.filename)
343
+
344
+ try:
345
+ b64_image, dicom_meta = await _read_and_encode_image(image)
346
+ except HTTPException:
347
+ raise
348
+ except Exception as e:
349
+ raise HTTPException(status_code=400, detail=str(e))
350
+
351
+ if dicom_meta:
352
+ if age is None and dicom_meta.get("age"):
353
+ try: age = int(dicom_meta["age"])
354
+ except (ValueError, TypeError): pass
355
+ if not sex and dicom_meta.get("sex"):
356
+ sex = dicom_meta["sex"]
357
+ if not clinical_context and dicom_meta.get("study_description"):
358
+ clinical_context = f"DICOM: {dicom_meta.get('study_description','')} | {dicom_meta.get('body_part','')}"
359
+
360
+ patient_input = PatientInput(
361
+ image_base64=b64_image, symptoms=symptoms, age=age,
362
+ sex=sex, clinical_context=clinical_context
363
+ )
364
+
365
+ async def event_generator() -> AsyncGenerator[str, None]:
366
+ queue: asyncio.Queue = asyncio.Queue()
367
+ executor = ThreadPoolExecutor(max_workers=1)
368
+
369
+ _last_statuses: dict = {}
370
+
371
+ def _status_cb(state: PipelineState):
372
+ for agent_name, status in state.agent_statuses.items():
373
+ if _last_statuses.get(agent_name) != status:
374
+ _last_statuses[agent_name] = status
375
+ payload = {"agent": agent_name, "status": status.value}
376
+ queue.put_nowait(f"data: {json.dumps(payload)}\n\n")
377
+
378
+ def _run_pipeline():
379
+ # ── DEMO MODE ──────────────────────────────────────────────────────
380
+ if DEMO_MODE:
381
+ import time
382
+ for agent in ["INTAKE", "VISION", "RESEARCH", "REPORT", "CRITIC"]:
383
+ queue.put_nowait(f'data: {json.dumps({"agent": agent, "status": "RUNNING"})}\n\n')
384
+ time.sleep(1.2)
385
+ queue.put_nowait(f'data: {json.dumps({"agent": agent, "status": "DONE"})}\n\n')
386
+ demo_report = {
387
+ "type": "report",
388
+ "report_id": "REP-DEMO0000001",
389
+ "data": {
390
+ "report_id": "REP-DEMO0000001",
391
+ "overall_severity": "SIGNIFICANT",
392
+ "vision_summary": "Demo mode active β€” live inference runs on AMD Instinct MI300X via ROCm + vLLM.",
393
+ "research_summary": "This is a demonstration deployment. Real analysis requires the AMD MI300X inference backend.",
394
+ "agent_pipeline_status": "DEMO",
395
+ "sections": {
396
+ "clinical_history": "Demo submission β€” AMD Developer Hackathon 2026.",
397
+ "technique": "Demo mode active. No live GPU inference on this host.",
398
+ "findings": "This Space demonstrates the full MediAgent UI and pipeline architecture. Live multimodal analysis runs on AMD Instinct MI300X via ROCm + vLLM. See the video demo for live inference on real medical images.",
399
+ "impression": "1. Demo mode active β€” no real image analysis performed (85%)\n\nConfidence Level: N/A β€” Demo deployment",
400
+ "recommendations": "Deploy with LLM_BASE_URL pointed at a live vLLM ROCm endpoint to enable full analysis.\n\n[QUALITY ASSESSMENT]\nScore: N/A | Demo mode",
401
+ "disclaimer": "This analysis is AI-generated and must be reviewed by a licensed radiologist before any clinical decisions are made."
402
+ },
403
+ "vision_findings": [
404
+ {"severity": "SIGNIFICANT", "confidence_score": 85, "description": "Demo mode β€” live analysis requires AMD MI300X backend"},
405
+ {"severity": "NORMAL", "confidence_score": 95, "description": "Demo mode β€” system operational"}
406
+ ],
407
+ "differential_diagnoses": [
408
+ {"condition_name": "Demo Mode Active", "match_probability": 100},
409
+ {"condition_name": "Requires AMD MI300X", "match_probability": 85}
410
+ ]
411
+ }
412
+ }
413
+ queue.put_nowait(f'data: {json.dumps(demo_report)}\n\n')
414
+ queue.put_nowait(None)
415
+ return
416
+ # ── LIVE MODE ──────────────────────────────────────────────────────
417
+ try:
418
+ report_id = f"REP-{uuid.uuid4().hex[:12].upper()}"
419
+ orchestrator = PipelineOrchestrator(
420
+ intake_agent=app.state.intake_agent,
421
+ vision_agent=app.state.vision_agent,
422
+ research_agent=app.state.research_agent,
423
+ report_agent=app.state.report_agent,
424
+ critic_agent=app.state.critic_agent,
425
+ on_status_update=_status_cb,
426
+ )
427
+ state = orchestrator.run(patient_input)
428
+ _evict_registry()
429
+ pipeline_registry[report_id] = state
430
+
431
+ if state.final_report:
432
+ report_dict = state.final_report.model_dump()
433
+ if dicom_meta:
434
+ report_dict["dicom_metadata"] = dicom_meta
435
+ if state.vision_output:
436
+ report_dict["vision_findings"] = [
437
+ {"severity": f.severity.value, "confidence_score": f.confidence_score, "description": f.description}
438
+ for f in state.vision_output.findings
439
+ ]
440
+ if state.research_output:
441
+ report_dict["differential_diagnoses"] = [
442
+ {"condition_name": d.condition_name, "match_probability": d.match_probability}
443
+ for d in state.research_output.differential_diagnoses[:5]
444
+ ]
445
+ payload = {"type": "report", "data": report_dict, "report_id": report_id}
446
+ queue.put_nowait(f"data: {json.dumps(payload, default=str)}\n\n")
447
+ else:
448
+ queue.put_nowait(f"data: {json.dumps({'type':'error','message':'Pipeline produced no report'})}\n\n")
449
+ except Exception as e:
450
+ logger.exception("Stream pipeline crashed: %s", e)
451
+ queue.put_nowait(f"data: {json.dumps({'type':'error','message':str(e)})}\n\n")
452
+ finally:
453
+ queue.put_nowait(None)
454
+
455
+ asyncio.get_running_loop().run_in_executor(executor, _run_pipeline)
456
+
457
+ while True:
458
+ event = await queue.get()
459
+ if event is None:
460
+ break
461
+ yield event
+ executor.shutdown(wait=False)  # release the worker thread once the stream is drained
462
+
463
+ return StreamingResponse(event_generator(), media_type="text/event-stream")
464
+
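+ # The stream emits Server-Sent Events: per-agent status lines such as
+ # data: {"agent": "VISION", "status": "RUNNING"} followed by a final
+ # data: {"type": "report", ...} payload. A minimal consumer sketch
+ # (sample.png is a placeholder):
+ #   curl -N -X POST http://localhost:8090/analyze/stream -F "image=@sample.png"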
465
+
466
+ # ─────────────────────────────────────────────────────────────────────────────
467
+ # ROUTES β€” CLINICAL ADVISOR CHAT
468
+ # ─────────────────────────────────────────────────────────────────────────────
469
+
470
+ @app.post("/chat/{report_id}", response_model=ChatResponse)
471
+ async def clinical_chat(report_id: str, request: ChatRequest):
472
+ if DEMO_MODE:
473
+ return ChatResponse(
474
+ answer="This is a demo deployment. Clinical Q&A is available when running on AMD Instinct MI300X with live inference. See the video demo for full functionality.",
475
+ report_id=report_id
476
+ )
477
+
478
+ if report_id not in pipeline_registry:
479
+ raise HTTPException(status_code=404, detail="Report not found. Run analysis first.")
480
+
481
+ state = pipeline_registry[report_id]
482
+ if not state.final_report:
483
+ raise HTTPException(status_code=400, detail="Report not yet complete.")
484
+
485
+ loop = asyncio.get_running_loop()
486
+ answer = await loop.run_in_executor(
487
+ None,
488
+ app.state.advisor_agent.answer,
489
+ request.question,
490
+ state.final_report,
491
+ )
492
+
493
+ return ChatResponse(answer=answer, report_id=report_id)
494
+
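+ # Example follow-up question (the report_id comes from a prior analysis run;
+ # the value below is a placeholder):
+ #   curl -X POST http://localhost:8090/chat/REP-XXXXXXXXXXXX \
+ #        -H "Content-Type: application/json" \
+ #        -d '{"question": "What follow-up imaging is recommended?"}'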
495
+
496
+ # ─────────────────────────────────────────────────────────────────────────────
497
+ # ROUTES β€” STATUS
498
+ # ─────────────────────────────────────────────────────────────────────────────
499
+
500
+ @app.get("/status/{report_id}")
501
+ async def get_pipeline_status(report_id: str):
502
+ if report_id not in pipeline_registry:
503
+ raise HTTPException(status_code=404, detail="Report ID not found.")
504
+ state = pipeline_registry[report_id]
505
+ return {
506
+ "report_id": report_id,
507
+ "current_step": state.current_step,
508
+ "agent_statuses": {k: v.value for k, v in state.agent_statuses.items()},
509
+ "error_log": state.error_log,
510
+ "completed": state.current_step == "COMPLETE",
511
+ }
512
+
513
+
514
+ # ─────────────────────────────────────────────────────────────────────────────
515
+ # STATIC + ENTRY
516
+ # ─────────────────────────────────────────────────────────────────────────────
517
+ app.mount("/static", StaticFiles(directory="static"), name="static")
518
+
519
+ if __name__ == "__main__":
520
+ logger.info("πŸ₯ Starting MediAgent v2.0 on port 8090")
521
+ uvicorn.run("main:app", host="0.0.0.0", port=8090, log_level="info", reload=False)
requirements.txt ADDED
@@ -0,0 +1,12 @@
1
+ # MediAgent Core Dependencies
2
+ # Compatible with Python 3.12+
3
+ # Optimized for AMD MI300X local inference stack
4
+
5
+ fastapi==0.115.6
6
+ uvicorn[standard]==0.34.0
7
+ openai==1.58.1
8
+ python-multipart==0.0.20
9
+ pydantic==2.10.5
10
+ Pillow==11.1.0
11
+ pydicom==2.4.4
12
+ numpy==1.26.4
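+
+ # Optional, host-provided: the /metrics/gpu endpoint uses the amdsmi Python
+ # bindings or the rocm-smi CLI when available; neither is pinned here.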
static/index.html ADDED
@@ -0,0 +1,833 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>MediAgent v2 | AMD MI300X Radiology AI</title>
7
+ <script src="https://cdn.tailwindcss.com"></script>
8
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/jspdf/2.5.1/jspdf.umd.min.js"></script>
9
+ <script src="https://cdn.jsdelivr.net/npm/chart.js@4.4.0/dist/chart.umd.min.js"></script>
10
+ <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap" rel="stylesheet">
11
+ <script>
12
+ tailwind.config = {
13
+ theme: {
14
+ extend: {
15
+ fontFamily: { sans: ['Inter','sans-serif'], mono: ['JetBrains Mono','monospace'] },
16
+ colors: {
17
+ med: { 50:'#eff6ff',100:'#dbeafe',200:'#bfdbfe',500:'#3b82f6',600:'#2563eb',700:'#1d4ed8',800:'#1e40af',900:'#1e3a8a' },
18
+ amd: { 400:'#f97316',500:'#ea580c',600:'#c2410c' }
19
+ }
20
+ }
21
+ }
22
+ }
23
+ </script>
24
+ <style>
25
+ * { box-sizing: border-box; }
26
+ body { height:100vh; overflow:hidden; background:#f1f5f9; }
27
+ ::-webkit-scrollbar { width:5px; height:5px; }
28
+ ::-webkit-scrollbar-track { background:transparent; }
29
+ ::-webkit-scrollbar-thumb { background:#cbd5e1; border-radius:3px; }
30
+ @keyframes fadeIn { from{opacity:0;transform:translateY(4px)} to{opacity:1;transform:translateY(0)} }
31
+ @keyframes typing { from{opacity:0} to{opacity:1} }
32
+ .animate-fade-in { animation:fadeIn 0.3s ease-out forwards; }
33
+ .drop-zone.drag-over { border-color:#2563eb !important; background:#eff6ff !important; transform:scale(1.01); }
34
+ .finding-NORMAL { border-left-color:#10b981; }
35
+ .finding-INCIDENTAL{ border-left-color:#f59e0b; }
36
+ .finding-SIGNIFICANT{border-left-color:#f97316; }
37
+ .finding-CRITICAL { border-left-color:#ef4444; }
38
+ .chat-bubble-user { background:#1e40af; color:#fff; border-radius:12px 12px 2px 12px; }
39
+ .chat-bubble-ai { background:#f1f5f9; color:#1e293b; border-radius:12px 12px 12px 2px; }
40
+ .token-stream { white-space:pre-wrap; }
41
+ .gpu-bar { transition: width 0.8s ease; }
42
+ .panel-tab.active { border-bottom:2px solid #2563eb; color:#1d4ed8; font-weight:600; }
43
+ .dicom-badge { background:linear-gradient(135deg,#1e40af,#7c3aed); }
44
+ </style>
45
+ </head>
46
+ <body class="flex flex-col text-slate-800 font-sans">
47
+
48
+ <!-- ═══════════════════════════════════════════════════════════════════ HEADER -->
49
+ <header class="h-14 bg-slate-900 text-white flex items-center justify-between px-5 shrink-0 z-20 shadow-lg">
50
+ <div class="flex items-center gap-3">
51
+ <div class="w-8 h-8 bg-med-600 rounded-lg flex items-center justify-center font-black text-white text-sm">M</div>
52
+ <div>
53
+ <div class="flex items-center gap-2">
54
+ <h1 class="font-bold text-base tracking-tight leading-none">MediAgent <span class="text-med-400">v2</span></h1>
55
+ <span class="text-[9px] bg-amd-500 text-white px-1.5 py-0.5 rounded font-bold uppercase tracking-wider">AMD MI300X</span>
56
+ </div>
57
+ <div class="flex items-center gap-1.5 mt-0.5">
58
+ <span class="w-1.5 h-1.5 rounded-full bg-green-400 animate-pulse"></span>
59
+ <span class="text-[9px] uppercase tracking-widest text-slate-400">System Online</span>
60
+ </div>
61
+ </div>
62
+ </div>
63
+ <div class="flex items-center gap-3">
64
+ <div id="gpu-header-badge" class="hidden items-center gap-2 bg-slate-800 border border-orange-500/30 px-3 py-1 rounded-full">
65
+ <span class="w-1.5 h-1.5 rounded-full bg-orange-400 animate-pulse"></span>
66
+ <span class="text-[10px] font-mono text-orange-300" id="gpu-header-text">GPU: --</span>
67
+ </div>
68
+ <span id="clock" class="text-xs font-mono text-slate-400"></span>
69
+ </div>
70
+ </header>
71
+
72
+ <!-- ═══════════════════════════════════════════════════════════════════ MAIN -->
73
+ <main class="flex-1 grid grid-cols-12 overflow-hidden" style="height:calc(100vh - 56px)">
74
+
75
+ <!-- ══ LEFT: INPUT ══════════════════════════════════════════════════════ -->
76
+ <section class="col-span-3 bg-slate-50 border-r border-slate-200 flex flex-col overflow-y-auto">
77
+ <div class="p-4 flex flex-col gap-4">
78
+
79
+ <!-- Upload zone -->
80
+ <div>
81
+ <p class="text-[10px] font-bold text-slate-500 uppercase tracking-widest mb-2">Medical Image</p>
82
+ <div id="drop-zone" class="drop-zone border-2 border-dashed border-slate-300 rounded-xl p-5 text-center cursor-pointer bg-white hover:border-med-500 transition-all relative group">
83
+ <input type="file" id="file-input" class="hidden" accept="image/png,image/jpeg,.dcm,application/dicom">
84
+ <div id="upload-placeholder">
85
+ <svg class="w-9 h-9 text-slate-300 mx-auto mb-2 group-hover:text-med-500 transition-colors" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5" d="M4 16l4.586-4.586a2 2 0 012.828 0L16 16m-2-2l1.586-1.586a2 2 0 012.828 0L20 14m-6-6h.01M6 20h12a2 2 0 002-2V6a2 2 0 00-2-2H6a2 2 0 00-2 2v12a2 2 0 002 2z"></path></svg>
86
+ <p class="text-xs font-semibold text-slate-500">Drop image or click to upload</p>
87
+ <p class="text-[10px] text-slate-400 mt-1">PNG Β· JPG Β· <span class="text-purple-600 font-semibold">DICOM (.dcm)</span></p>
88
+ </div>
89
+ <img id="image-preview" class="hidden w-full h-36 object-contain rounded-lg bg-slate-100" alt="Preview">
90
+ <div id="dicom-info" class="hidden mt-2 p-2 dicom-badge rounded-lg text-white text-[10px] font-mono leading-relaxed"></div>
91
+ </div>
92
+ </div>
93
+
94
+ <!-- Form -->
95
+ <form id="analysis-form" class="space-y-3">
96
+ <div>
97
+ <label class="block text-[10px] font-bold text-slate-600 uppercase tracking-wider mb-1">Chief Complaint</label>
98
+ <textarea id="symptoms" rows="2" class="w-full text-xs border border-slate-300 rounded-lg p-2.5 focus:ring-2 focus:ring-med-500 focus:border-med-500 outline-none resize-none" placeholder="e.g. Chest pain, shortness of breath..."></textarea>
99
+ </div>
100
+ <div class="grid grid-cols-2 gap-2">
101
+ <div>
102
+ <label class="block text-[10px] font-bold text-slate-600 uppercase tracking-wider mb-1">Age</label>
103
+ <input type="number" id="age" min="0" max="120" class="w-full text-xs border border-slate-300 rounded-lg p-2.5 focus:ring-2 focus:ring-med-500 outline-none" placeholder="Yrs">
104
+ </div>
105
+ <div>
106
+ <label class="block text-[10px] font-bold text-slate-600 uppercase tracking-wider mb-1">Sex</label>
107
+ <select id="sex" class="w-full text-xs border border-slate-300 rounded-lg p-2.5 focus:ring-2 focus:ring-med-500 outline-none bg-white">
108
+ <option value="">Select</option>
109
+ <option value="M">Male</option>
110
+ <option value="F">Female</option>
111
+ <option value="O">Other</option>
112
+ </select>
113
+ </div>
114
+ </div>
115
+ <div>
116
+ <label class="block text-[10px] font-bold text-slate-600 uppercase tracking-wider mb-1">Clinical History</label>
117
+ <textarea id="clinical_context" rows="2" class="w-full text-xs border border-slate-300 rounded-lg p-2.5 focus:ring-2 focus:ring-med-500 outline-none resize-none" placeholder="Relevant history, medications..."></textarea>
118
+ </div>
119
+ <button type="submit" id="submit-btn" class="w-full bg-med-800 hover:bg-med-900 text-white font-bold py-2.5 rounded-lg shadow transition-all flex items-center justify-center gap-2 disabled:opacity-40 disabled:cursor-not-allowed text-sm">
120
+ <span id="btn-text">Run Analysis</span>
121
+ <svg id="btn-spinner" class="hidden w-4 h-4 animate-spin" fill="none" viewBox="0 0 24 24"><circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle><path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4z"></path></svg>
122
+ </button>
123
+ </form>
124
+
125
+ <!-- AMD GPU Panel -->
126
+ <div style="background: var(--color-background-primary); border-radius: var(--border-radius-lg); border: 0.5px solid var(--color-border-tertiary); padding: 14px 16px;">
127
+ <div style="display: flex; align-items: center; justify-content: space-between; margin-bottom: 14px;">
128
+ <div style="display: flex; align-items: center; gap: 8px;">
129
+ <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="#f97316" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><rect x="2" y="6" width="20" height="12" rx="2" /><path d="M6 6V4M10 6V4M14 6V4M18 6V4M6 20v-2M10 20v-2M14 20v-2M18 20v-2" /></svg>
130
+ <span style="font-size: 11px; font-weight: 500; color: #f97316; letter-spacing: 0.06em; text-transform: uppercase;">AMD GPU Metrics</span>
131
+ </div>
132
+ <span id="gpu-status-dot" style="width: 8px; height: 8px; border-radius: 50%; background: var(--color-border-secondary); display: inline-block;"></span>
133
+ </div>
134
+ <div id="gpu-panel-content">
135
+ <p style="font-size: 11px; color: var(--color-text-tertiary); font-family: var(--font-mono); text-align: center; padding: 8px 0; margin: 0;">Polling GPU...</p>
136
+ </div>
137
+ </div>
138
+
139
+ </div>
140
+ </section>
141
+
142
+ <!-- ══ CENTER: PIPELINE + CHARTS ════════════════════════════════════════ -->
143
+ <section class="col-span-3 bg-white border-r border-slate-200 flex flex-col overflow-hidden">
144
+ <!-- Tabs -->
145
+ <div class="flex border-b border-slate-200 px-4 pt-3 gap-4 shrink-0">
146
+ <button class="panel-tab active text-[11px] pb-2 px-1 text-slate-500 transition-colors" onclick="switchTab('pipeline',this)">Pipeline</button>
147
+ <button class="panel-tab text-[11px] pb-2 px-1 text-slate-500 transition-colors" onclick="switchTab('charts',this)">Analytics</button>
148
+ </div>
149
+
150
+ <!-- Pipeline tab -->
151
+ <div id="tab-pipeline" class="flex-1 overflow-y-auto p-4 space-y-3">
152
+ <div id="pipeline-tracker"></div>
153
+ <div class="p-3 bg-slate-50 rounded-lg border border-slate-200 mt-2">
154
+ <p class="text-[9px] font-bold text-slate-400 uppercase tracking-widest mb-1">System Log</p>
155
+ <div id="pipeline-log" class="text-[11px] font-mono text-slate-600 leading-relaxed">Waiting for input...</div>
156
+ <div id="pipeline-timer" class="text-[11px] font-mono text-med-600 mt-1 hidden"></div>
157
+ </div>
158
+ </div>
159
+
160
+ <!-- Analytics tab -->
161
+ <div id="tab-charts" class="hidden flex-1 overflow-y-auto p-4 space-y-4">
162
+ <div>
163
+ <p class="text-[10px] font-bold text-slate-500 uppercase tracking-widest mb-2">Severity Distribution</p>
164
+ <div class="flex justify-center"><canvas id="severityChart" width="160" height="160"></canvas></div>
165
+ </div>
166
+ <div>
167
+ <p class="text-[10px] font-bold text-slate-500 uppercase tracking-widest mb-2">Finding Confidence</p>
168
+ <canvas id="confidenceChart" height="160"></canvas>
169
+ </div>
170
+ <div>
171
+ <p class="text-[10px] font-bold text-slate-500 uppercase tracking-widest mb-2">Agent Timing (s)</p>
172
+ <canvas id="timingChart" height="120"></canvas>
173
+ </div>
174
+ </div>
175
+ </section>
176
+
177
+ <!-- ══ RIGHT: REPORT + CHAT ════════════════════════════════════════════ -->
178
+ <section class="col-span-6 flex flex-col overflow-hidden bg-white">
179
+
180
+ <!-- Report area -->
181
+ <div id="report-scroll" class="flex-1 overflow-y-auto px-6 py-5">
182
+ <!-- Empty state -->
183
+ <div id="empty-state" class="h-full flex flex-col items-center justify-center text-slate-300">
184
+ <svg class="w-16 h-16 mb-4" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="1" d="M9 12h6m-6 4h6m2 5H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"></path></svg>
185
+ <p class="text-sm font-medium text-slate-400">Upload an image and run analysis</p>
186
+ <p class="text-xs text-slate-300 mt-1">Supports PNG, JPG, DICOM</p>
187
+ </div>
188
+
189
+ <!-- Final report -->
190
+ <div id="report-content" class="hidden space-y-5 animate-fade-in">
191
+
192
+ <!-- Severity banner -->
193
+ <div id="severity-banner" class="px-4 py-3 rounded-xl flex items-center justify-between">
194
+ <div class="flex items-center gap-3">
195
+ <span id="banner-icon" class="w-8 h-8 rounded-full flex items-center justify-center bg-white/50"></span>
196
+ <span id="severity-text" class="font-bold text-sm uppercase tracking-wide">Severity: NORMAL</span>
197
+ </div>
198
+ <div class="text-[10px] font-mono opacity-75 flex items-center gap-3">
199
+ <span id="meta-id">---</span>
200
+ <span>|</span>
201
+ <span id="meta-time">---</span>
202
+ <span>|</span>
203
+ <span id="meta-qa" class="font-bold">QA ---</span>
204
+ </div>
205
+ </div>
206
+
207
+ <!-- DICOM metadata card -->
208
+ <div id="dicom-meta-card" class="hidden p-3 rounded-xl border border-purple-200 bg-purple-50">
209
+ <p class="text-[10px] font-bold text-purple-700 uppercase tracking-widest mb-2">DICOM Metadata</p>
210
+ <div id="dicom-meta-content" class="grid grid-cols-2 gap-x-4 gap-y-1 text-[11px] font-mono text-purple-800"></div>
211
+ </div>
212
+
213
+ <!-- Report sections -->
214
+ <div class="space-y-4">
215
+ <div><h3 class="text-xs font-bold text-slate-700 border-b border-slate-100 pb-1 mb-2 uppercase tracking-wide">Clinical History</h3><p id="sec-history" class="text-sm text-slate-600 leading-relaxed"></p></div>
216
+ <div><h3 class="text-xs font-bold text-slate-700 border-b border-slate-100 pb-1 mb-2 uppercase tracking-wide">Technique</h3><p id="sec-technique" class="text-sm text-slate-600 leading-relaxed"></p></div>
217
+ <div><h3 class="text-xs font-bold text-slate-700 border-b border-slate-100 pb-1 mb-2 uppercase tracking-wide">Findings</h3><div id="sec-findings" class="space-y-2"></div></div>
218
+ <div><h3 class="text-xs font-bold text-slate-700 border-b border-slate-100 pb-1 mb-2 uppercase tracking-wide">Impression</h3><p id="sec-impression" class="text-sm text-slate-700 leading-relaxed font-medium"></p></div>
219
+ <div><h3 class="text-xs font-bold text-slate-700 border-b border-slate-100 pb-1 mb-2 uppercase tracking-wide">Recommendations</h3><p id="sec-recommendations" class="text-sm text-slate-600 leading-relaxed"></p></div>
220
+ <div class="bg-red-50 border border-red-100 rounded-xl p-4"><p class="text-[11px] text-red-800 italic font-medium" id="sec-disclaimer"></p></div>
221
+ </div>
222
+
223
+ </div>
224
+ </div>
225
+
226
+ <!-- Bottom bar: actions + chat -->
227
+ <div class="border-t border-slate-200 bg-white shrink-0">
228
+
229
+ <!-- Action buttons -->
230
+ <div class="flex items-center gap-2 px-4 py-2 border-b border-slate-100">
231
+ <button id="export-pdf-btn" disabled class="flex items-center gap-1.5 bg-slate-800 hover:bg-slate-700 text-white text-xs font-semibold px-4 py-1.5 rounded-lg shadow disabled:opacity-30 disabled:cursor-not-allowed transition-all">
232
+ <svg class="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 10v6m0 0l-3-3m3 3l3-3m2 8H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"></path></svg>
233
+ Export PDF
234
+ </button>
235
+ <button id="toggle-chat-btn" disabled class="ml-auto flex items-center gap-1.5 bg-blue-600 hover:bg-blue-700 text-white text-xs font-semibold px-4 py-1.5 rounded-lg shadow disabled:opacity-30 disabled:cursor-not-allowed transition-all">
236
+ <svg class="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M8 12h.01M12 12h.01M16 12h.01M21 12c0 4.418-4.03 8-9 8a9.863 9.863 0 01-4.255-.949L3 20l1.395-3.72C3.512 15.042 3 13.574 3 12c0-4.418 4.03-8 9-8s9 3.582 9 8z"></path></svg>
237
+ Ask AI Consultant
238
+ </button>
239
+ </div>
240
+
241
+ <!-- Clinical chat -->
242
+ <div id="chat-panel" class="hidden flex flex-col" style="height:240px">
243
+ <div id="chat-messages" class="flex-1 overflow-y-auto px-4 py-3 space-y-2 bg-slate-50"></div>
244
+ <div class="flex gap-2 px-4 py-2 bg-white border-t border-slate-100">
245
+ <input id="chat-input" type="text" placeholder="Ask a follow-up question about the report..." class="flex-1 text-xs border border-slate-300 rounded-lg px-3 py-2 focus:ring-2 focus:ring-blue-400 outline-none">
246
+ <button id="chat-send-btn" class="bg-blue-600 hover:bg-blue-700 text-white text-xs font-bold px-4 py-2 rounded-lg transition-all">Send</button>
247
+ </div>
248
+ </div>
249
+ </div>
250
+ </section>
251
+
252
+ </main>
253
+
254
+ <!-- Toast -->
255
+ <div id="toast" class="fixed bottom-5 right-5 translate-y-16 opacity-0 transition-all duration-300 z-50 pointer-events-none">
256
+ <div id="toast-inner" class="bg-slate-800 text-white px-4 py-2.5 rounded-lg shadow-xl text-xs font-medium flex items-center gap-2">
257
+ <span id="toast-message">Message</span>
258
+ </div>
259
+ </div>
260
+
261
+ <script>
262
+ // ═══════════════════════════════════════════════════════ GLOBALS
263
+ const AGENTS = ['INTAKE','VISION','RESEARCH','REPORT','CRITIC'];
264
+ let lastReportData = null;
265
+ let currentReportId = null;
266
+ let startTime = 0;
267
+ let timerInterval = null;
268
+ let agentTimings = {};
269
+ let agentStartTimes = {};
270
+ let severityChart = null;
271
+ let confidenceChart = null;
272
+ let timingChart = null;
273
+ let gpuPollInterval = null;
274
+
275
+ // ═══════════════════════════════════════════════════════ INIT
276
+ function init() {
277
+ renderAgents();
278
+ setInterval(() => {
279
+ document.getElementById('clock').textContent = new Date().toLocaleTimeString([],{hour:'2-digit',minute:'2-digit',second:'2-digit'});
280
+ }, 1000);
281
+ startGpuPolling();
282
+ }
283
+
284
+ // ═══════════════════════════════════════════════════════ TAB SWITCHING
285
+ function switchTab(name, btn) {
286
+ document.querySelectorAll('.panel-tab').forEach(b => b.classList.remove('active'));
287
+ btn.classList.add('active');
288
+ document.getElementById('tab-pipeline').classList.toggle('hidden', name !== 'pipeline');
289
+ document.getElementById('tab-charts').classList.toggle('hidden', name !== 'charts');
290
+ }
291
+
292
+ // ═══════════════════════════════════════════════════════ FILE UPLOAD
293
+ const dropZone = document.getElementById('drop-zone');
294
+ const fileInput = document.getElementById('file-input');
295
+ dropZone.addEventListener('click', () => fileInput.click());
296
+ dropZone.addEventListener('dragover', e => { e.preventDefault(); dropZone.classList.add('drag-over'); });
297
+ dropZone.addEventListener('dragleave', () => dropZone.classList.remove('drag-over'));
298
+ dropZone.addEventListener('drop', e => { e.preventDefault(); dropZone.classList.remove('drag-over'); if (e.dataTransfer.files.length) handleFile(e.dataTransfer.files[0]); });
299
+ fileInput.addEventListener('change', e => { if (e.target.files.length) handleFile(e.target.files[0]); });
300
+
301
+ function handleFile(file) {
302
+ const isDicom = file.name.toLowerCase().endsWith('.dcm') || file.type === 'application/dicom';
303
+ if (!isDicom && !file.type.startsWith('image/')) { showToast('Unsupported file type.', 'error'); return; }
304
+
305
+ const reader = new FileReader();
306
+ reader.onload = e => {
307
+ const preview = document.getElementById('image-preview');
308
+ const placeholder = document.getElementById('upload-placeholder');
309
+ const dicomInfo = document.getElementById('dicom-info');
310
+
311
+ if (isDicom) {
312
+ preview.classList.add('hidden');
313
+ placeholder.classList.add('hidden');
314
+ dicomInfo.classList.remove('hidden');
315
+ dicomInfo.innerHTML = `πŸ“‚ DICOM File Detected<br><span class="opacity-75">${file.name}</span><br><span class="opacity-60">${(file.size/1024).toFixed(1)} KB</span>`;
316
+ showToast('DICOM file loaded β€” metadata will be auto-extracted', 'success');
317
+ } else {
318
+ preview.src = e.target.result;
319
+ preview.classList.remove('hidden');
320
+ placeholder.classList.add('hidden');
321
+ dicomInfo.classList.add('hidden');
322
+ showToast('Image loaded.', 'success');
323
+ }
324
+ };
325
+ if (isDicom) {
326
+ reader.readAsArrayBuffer(file);
327
+ } else {
328
+ reader.readAsDataURL(file);
329
+ }
330
+ }
331
+
332
+ // ═══════════════════════════════════════════════════════ AGENT PIPELINE UI
333
+ function renderAgents() {
334
+ document.getElementById('pipeline-tracker').innerHTML = AGENTS.map(a => `
335
+ <div id="card-${a}" class="rounded-xl border border-slate-200 bg-white p-3 flex items-center gap-3 transition-all duration-300">
336
+ <div id="icon-${a}" class="w-8 h-8 rounded-full bg-slate-100 flex items-center justify-center shrink-0 transition-all">
337
+ <svg class="w-4 h-4 text-slate-400" fill="none" stroke="currentColor" viewBox="0 0 24 24"><circle cx="12" cy="12" r="9" stroke-width="2"/></svg>
338
+ </div>
339
+ <div class="flex-1 min-w-0">
340
+ <div class="flex justify-between items-center">
341
+ <span class="text-[11px] font-bold text-slate-700 uppercase tracking-wider">${a}</span>
342
+ <span id="badge-${a}" class="text-[9px] font-mono font-bold px-1.5 py-0.5 rounded bg-slate-100 text-slate-500">WAITING</span>
343
+ </div>
344
+ <span id="timing-${a}" class="text-[9px] font-mono text-slate-400"></span>
345
+ </div>
346
+ </div>
347
+ `).join('');
348
+ }
349
+
350
+ function updateAgent(name, status, elapsed) {
351
+ const card = document.getElementById(`card-${name}`);
352
+ const badge = document.getElementById(`badge-${name}`);
353
+ const icon = document.getElementById(`icon-${name}`);
354
+ const timEl = document.getElementById(`timing-${name}`);
355
+ if (!card) return;
356
+
357
+ const cfg = {
358
+ WAITING: { card:'border-slate-200 bg-white', badge:'bg-slate-100 text-slate-500', icon:'bg-slate-100', iconHtml:'<svg class="w-4 h-4 text-slate-400" fill="none" stroke="currentColor" viewBox="0 0 24 24"><circle cx="12" cy="12" r="9" stroke-width="2"/></svg>' },
359
+ RUNNING: { card:'border-blue-400 bg-blue-50', badge:'bg-blue-100 text-blue-700', icon:'bg-blue-100', iconHtml:'<svg class="w-4 h-4 text-blue-600 animate-spin" fill="none" viewBox="0 0 24 24"><circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle><path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4z"></path></svg>' },
360
+ DONE: { card:'border-green-400 bg-green-50', badge:'bg-green-100 text-green-700', icon:'bg-green-100', iconHtml:'<svg class="w-4 h-4 text-green-600" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2.5" d="M5 13l4 4L19 7"/></svg>' },
361
+ ERROR: { card:'border-red-400 bg-red-50', badge:'bg-red-100 text-red-700', icon:'bg-red-100', iconHtml:'<svg class="w-4 h-4 text-red-600" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2.5" d="M6 18L18 6M6 6l12 12"/></svg>' },
362
+ };
363
+ const c = cfg[status] || cfg.WAITING;
364
+ card.className = `rounded-xl border p-3 flex items-center gap-3 transition-all duration-300 ${c.card}`;
365
+ badge.className = `text-[9px] font-mono font-bold px-1.5 py-0.5 rounded ${c.badge}`;
366
+ badge.textContent = status;
367
+ icon.className = `w-8 h-8 rounded-full flex items-center justify-center shrink-0 transition-all ${c.icon}`;
368
+ icon.innerHTML = c.iconHtml;
369
+ if (elapsed !== undefined) timEl.textContent = `${elapsed.toFixed(2)}s`;
370
+ }
371
+
372
+ // ═══════════════════════════════════════════════════════ SUBMIT
373
+ document.getElementById('analysis-form').addEventListener('submit', async e => {
374
+ e.preventDefault();
375
+ const files = fileInput.files;
376
+ if (!files.length) { showToast('Upload an image first.', 'error'); return; }
377
+
378
+ setLoading(true);
379
+ renderAgents();
380
+ resetReport();
381
+ agentTimings = {};
382
+ agentStartTimes = {};
383
+ startTimer();
384
+ log('Sending to AMD MI300X pipeline...');
385
+
386
+ const fd = new FormData();
387
+ fd.append('image', files[0]);
388
+ fd.append('symptoms', document.getElementById('symptoms').value);
389
+ fd.append('age', document.getElementById('age').value || '');
390
+ fd.append('sex', document.getElementById('sex').value);
391
+ fd.append('clinical_context', document.getElementById('clinical_context').value);
392
+
393
+ try {
394
+ const res = await fetch('/analyze/stream', { method:'POST', body:fd });
395
+ if (!res.ok) throw new Error(`HTTP ${res.status}`);
396
+
397
+ const reader = res.body.getReader();
398
+ const dec = new TextDecoder();
399
+ let buf = '';
400
+
401
+ while (true) {
402
+ const { done, value } = await reader.read();
403
+ if (done) break;
404
+ buf += dec.decode(value, { stream:true });
405
+ const parts = buf.split('\n\n');
406
+ buf = parts.pop();
407
+ for (const part of parts) {
408
+ if (part.startsWith('data: ')) {
409
+ try { handleSSE(JSON.parse(part.slice(6))); } catch {}
410
+ }
411
+ }
412
+ }
413
+ } catch (err) {
414
+ stopTimer();
415
+ showToast(`Error: ${err.message}`, 'error');
416
+ log(`ERROR: ${err.message}`);
417
+ } finally {
418
+ setLoading(false);
419
+ }
420
+ });
421
+
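+ // handleSSE(data) distinguishes three event shapes coming off the stream:
+ //   { agent, status }                  -> per-agent progress (RUNNING / DONE / ERROR)
+ //   { type:'report', report_id, data } -> final report payload, rendered with charts
+ //   { type:'error', message }          -> pipeline failure, surfaced as a toast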
422
+ function handleSSE(data) {
423
+ if (data.agent && data.status) {
424
+ const now = performance.now();
425
+ if (data.status === 'RUNNING') {
426
+ agentStartTimes[data.agent] = now;
427
+ } else if (data.status === 'DONE' || data.status === 'ERROR') {
428
+ const elapsed = agentStartTimes[data.agent] ? (now - agentStartTimes[data.agent]) / 1000 : 0;
429
+ agentTimings[data.agent] = elapsed;
430
+ updateAgent(data.agent, data.status, elapsed);
431
+ }
432
+ if (data.status === 'RUNNING') updateAgent(data.agent, 'RUNNING');
433
+ log(`${data.agent} → ${data.status}`);
434
+ return;
435
+ }
436
+
437
+ if (data.type === 'report') {
438
+ stopTimer();
439
+ lastReportData = data.data;
440
+ currentReportId = data.report_id || data.data?.report_id;
441
+ renderReport(data.data);
442
+ updateCharts(data.data);
443
+ document.getElementById('export-pdf-btn').disabled = false;
444
+ document.getElementById('toggle-chat-btn').disabled = false;
445
+ showToast('Analysis complete.', 'success');
446
+ return;
447
+ }
448
+
449
+ if (data.type === 'error') {
450
+ stopTimer();
451
+ showToast(`Pipeline error: ${data.message}`, 'error');
452
+ log(`ERROR: ${data.message}`);
453
+ }
454
+ }
455
+
456
+ // ═══════════════════════════════════════════════════════ REPORT RENDER
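+ // resetReport() returns the right-hand pane to its empty state and disables
+ // PDF export and chat until the next report arrives.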
457
+ function resetReport() {
458
+ document.getElementById('empty-state').classList.remove('hidden');
459
+ document.getElementById('report-content').classList.add('hidden');
460
+ document.getElementById('dicom-meta-card').classList.add('hidden');
461
+ document.getElementById('export-pdf-btn').disabled = true;
462
+ document.getElementById('toggle-chat-btn').disabled = true;
463
+ document.getElementById('chat-panel').classList.add('hidden');
464
+ document.getElementById('chat-messages').innerHTML = '';
465
+ }
466
+
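+ // renderReport(data) expects data.sections (clinical_history, technique, findings,
+ // impression, recommendations, disclaimer) plus overall_severity, report_id,
+ // generation_timestamp and optional dicom_metadata.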
467
+ function renderReport(data) {
468
+ const s = data.sections;
469
+ document.getElementById('empty-state').classList.add('hidden');
470
+ document.getElementById('report-content').classList.remove('hidden');
471
+
472
+ // Severity banner
473
+ const sev = data.overall_severity || 'NORMAL';
474
+ const bannerCfg = {
475
+ NORMAL: 'bg-emerald-50 text-emerald-800 border border-emerald-200',
476
+ INCIDENTAL: 'bg-amber-50 text-amber-800 border border-amber-200',
477
+ SIGNIFICANT: 'bg-orange-50 text-orange-800 border border-orange-200',
478
+ CRITICAL: 'bg-red-50 text-red-800 border border-red-200 animate-pulse',
479
+ };
480
+ const banner = document.getElementById('severity-banner');
481
+ banner.className = `px-4 py-3 rounded-xl flex items-center justify-between ${bannerCfg[sev] || bannerCfg.NORMAL}`;
482
+ document.getElementById('severity-text').textContent = `Severity: ${sev}`;
483
+
484
+ const iconCfg = { NORMAL:'✅', INCIDENTAL:'⚠️', SIGNIFICANT:'🔶', CRITICAL:'🚨' };
485
+ document.getElementById('banner-icon').textContent = iconCfg[sev] || '✅';
486
+
487
+ // Meta
488
+ document.getElementById('meta-id').textContent = data.report_id || '---';
489
+ document.getElementById('meta-time').textContent = data.generation_timestamp
490
+ ? new Date(data.generation_timestamp).toLocaleTimeString() : '---';
491
+ const qaM = (s.recommendations || '').match(/Score[:\s]+(\d+)/);
492
+ document.getElementById('meta-qa').textContent = `QA ${qaM ? qaM[1] : '85'}/100`;
493
+
494
+ // DICOM metadata
495
+ if (data.dicom_metadata && Object.keys(data.dicom_metadata).length) {
496
+ const card = document.getElementById('dicom-meta-card');
497
+ const content = document.getElementById('dicom-meta-content');
498
+ card.classList.remove('hidden');
499
+ const show = ['modality','body_part','study_date','institution','kvp','slice_thickness_mm','image_rows','image_cols'];
500
+ content.innerHTML = show
501
+ .filter(k => data.dicom_metadata[k])
502
+ .map(k => `<div><span class="opacity-60">${k.replace(/_/g,' ')}:</span> <span class="font-semibold">${data.dicom_metadata[k]}</span></div>`)
503
+ .join('');
504
+ }
505
+
506
+ // Sections
507
+ document.getElementById('sec-history').textContent = s.clinical_history || 'Not provided.';
508
+ document.getElementById('sec-technique').textContent = s.technique || 'Not provided.';
509
+ document.getElementById('sec-impression').textContent = s.impression || 'Not provided.';
510
+ document.getElementById('sec-recommendations').textContent = s.recommendations || 'None.';
511
+ document.getElementById('sec-disclaimer').textContent = s.disclaimer;
512
+ document.getElementById('sec-findings').innerHTML = renderFindings(s.findings);
513
+
514
+ document.getElementById('report-scroll').scrollTop = 0;
515
+ }
516
+
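+ // renderFindings(text) builds a single summary card from the free-text findings:
+ // the highest severity keyword found in the text sets the colour scheme, and any
+ // "NN%" figures are averaged into a confidence bar (defaulting to 75% when absent).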
517
+ function renderFindings(text) {
518
+ if (!text) return '<p class="text-xs text-slate-400 italic">No findings.</p>';
519
+ const sevOrder = { CRITICAL:4, SIGNIFICANT:3, INCIDENTAL:2, NORMAL:1 };
520
+ const sevColors = {
521
+ NORMAL: { border:'finding-NORMAL', badge:'bg-emerald-100 text-emerald-800', bar:'bg-emerald-500' },
522
+ INCIDENTAL: { border:'finding-INCIDENTAL', badge:'bg-amber-100 text-amber-800', bar:'bg-amber-500' },
523
+ SIGNIFICANT: { border:'finding-SIGNIFICANT', badge:'bg-orange-100 text-orange-800', bar:'bg-orange-500' },
524
+ CRITICAL: { border:'finding-CRITICAL', badge:'bg-red-100 text-red-800', bar:'bg-red-500' },
525
+ };
526
+
527
+ // Find highest severity in text
528
+ let topSev = 'NORMAL';
529
+ for (const sev of ['CRITICAL','SIGNIFICANT','INCIDENTAL','NORMAL']) {
530
+ if (text.toUpperCase().includes(sev)) { topSev = sev; break; }
531
+ }
532
+
533
+ // Extract confidence scores
534
+ const confMatches = [...text.matchAll(/(\d+(?:\.\d+)?)%/g)];
535
+ const avgConf = confMatches.length
536
+ ? Math.round(confMatches.reduce((a,m) => a + parseFloat(m[1]), 0) / confMatches.length)
537
+ : 75;
538
+
539
+ const c = sevColors[topSev] || sevColors.NORMAL;
540
+ const confColor = avgConf >= 75 ? 'bg-emerald-500' : avgConf >= 50 ? 'bg-amber-500' : 'bg-red-500';
541
+
542
+ return `
543
+ <div class="pl-4 pr-4 py-4 rounded-lg border-l-4 ${c.border} bg-white shadow-sm">
544
+ <div class="flex justify-between items-start mb-3">
545
+ <span class="text-[10px] font-bold px-2 py-0.5 rounded ${c.badge} uppercase">${topSev}</span>
546
+ <span class="text-[10px] font-mono text-slate-500">${avgConf}% avg confidence</span>
547
+ </div>
548
+ <p class="text-sm text-slate-700 leading-relaxed whitespace-pre-wrap mb-3">${text}</p>
549
+ <div class="flex items-center gap-2">
550
+ <span class="text-[9px] font-mono text-slate-400 uppercase w-16">Confidence</span>
551
+ <div class="flex-1 h-1.5 bg-slate-100 rounded-full overflow-hidden">
552
+ <div class="h-full rounded-full ${confColor} transition-all duration-1000" style="width:${avgConf}%"></div>
553
+ </div>
554
+ <span class="text-[10px] font-bold font-mono text-slate-600">${avgConf}%</span>
555
+ </div>
556
+ </div>`;
557
+ }
558
+
559
+ // ═══════════════════════════════════════════════════════ CHARTS
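+ // updateCharts(data) rebuilds three Chart.js charts per report: a severity doughnut
+ // from data.vision_findings, horizontal probability bars from
+ // data.differential_diagnoses, and per-agent timing bars from the agentTimings
+ // collected while the SSE stream was running.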
560
+ function updateCharts(data) {
561
+ // Severity donut β€” read directly from structured vision findings
562
+ const sevCounts = { NORMAL: 0, INCIDENTAL: 0, SIGNIFICANT: 0, CRITICAL: 0 };
563
+ const visionFindings = data.vision_findings || [];
564
+ if (visionFindings.length > 0) {
565
+ for (const f of visionFindings) {
566
+ const sev = (f.severity || 'NORMAL').toUpperCase();
567
+ if (sev in sevCounts) sevCounts[sev]++;
568
+ }
569
+ }
570
+ if (Object.values(sevCounts).every(v => v === 0)) sevCounts[data.overall_severity || 'NORMAL'] = 1;
571
+
572
+ if (severityChart) severityChart.destroy();
573
+ severityChart = new Chart(document.getElementById('severityChart'), {
574
+ type: 'doughnut',
575
+ data: {
576
+ labels: Object.keys(sevCounts),
577
+ datasets: [{ data: Object.values(sevCounts), backgroundColor: ['#10b981', '#f59e0b', '#f97316', '#ef4444'], borderWidth: 2, borderColor: '#fff' }]
578
+ },
579
+ options: { responsive: false, plugins: { legend: { position: 'bottom', labels: { font: { size: 9 }, padding: 8 } } }, cutout: '65%' }
580
+ });
581
+
582
+ // Confidence bar β€” read directly from structured differential diagnoses
583
+ const differentials = data.differential_diagnoses || [];
584
+ const labels = differentials.length > 0
585
+ ? differentials.map(d => (d.condition_name || 'Unknown').slice(0, 22))
586
+ : ['Dx 1', 'Dx 2', 'Dx 3'];
587
+ const probs = differentials.length > 0
588
+ ? differentials.map(d => Math.round(parseFloat(d.match_probability) || 0))
589
+ : [0, 0, 0];
590
+
591
+ if (confidenceChart) confidenceChart.destroy();
592
+ confidenceChart = new Chart(document.getElementById('confidenceChart'), {
593
+ type: 'bar',
594
+ data: {
595
+ labels: labels,
596
+ datasets: [{ label: 'Probability %', data: probs, backgroundColor: ['#3b82f6', '#8b5cf6', '#06b6d4', '#10b981', '#f97316'], borderRadius: 4 }]
597
+ },
598
+ options: { responsive: true, indexAxis: 'y', scales: { x: { max: 100, ticks: { font: { size: 9 } } }, y: { ticks: { font: { size: 9 } } } }, plugins: { legend: { display: false } } }
599
+ });
600
+
601
+ // Agent timing
602
+ const agentLabels = Object.keys(agentTimings);
603
+ const agentValues = agentLabels.map(k => parseFloat(agentTimings[k].toFixed(2)));
604
+ if (timingChart) timingChart.destroy();
605
+ if (agentLabels.length) {
606
+ timingChart = new Chart(document.getElementById('timingChart'), {
607
+ type: 'bar',
608
+ data: {
609
+ labels: agentLabels,
610
+ datasets: [{ label:'Seconds', data:agentValues, backgroundColor:'#f97316', borderRadius:4 }]
611
+ },
612
+ options: { responsive:true, scales:{ y:{ ticks:{font:{size:9}} }, x:{ ticks:{font:{size:9}} } }, plugins:{ legend:{display:false} } }
613
+ });
614
+ }
615
+ }
616
+
617
+ // ═══════════════════════════════════════════════════════ CLINICAL CHAT
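+ // Chat is only enabled once a report exists; the first time the panel is opened,
+ // a greeting bubble is seeded so the thread never starts empty.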
618
+ document.getElementById('toggle-chat-btn').addEventListener('click', () => {
619
+ const panel = document.getElementById('chat-panel');
620
+ const isHidden = panel.classList.contains('hidden');
621
+ panel.classList.toggle('hidden', !isHidden);
622
+ if (isHidden && document.getElementById('chat-messages').children.length === 0) {
623
+ appendChat('assistant', "Report loaded. Ask me anything about the findings, differentials, or recommendations.");
624
+ }
625
+ });
626
+
627
+ document.getElementById('chat-send-btn').addEventListener('click', sendChat);
628
+ document.getElementById('chat-input').addEventListener('keydown', e => { if (e.key === 'Enter' && !e.shiftKey) { e.preventDefault(); sendChat(); } });
629
+
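+ // sendChat() POSTs { question } as JSON to /chat/<currentReportId> and replaces
+ // the "Consulting AI..." placeholder bubble with the returned answer.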
630
+ async function sendChat() {
631
+ const input = document.getElementById('chat-input');
632
+ const q = input.value.trim();
633
+ if (!q || !currentReportId) return;
634
+ input.value = '';
635
+ appendChat('user', q);
636
+ const thinking = appendChat('assistant', '⏳ Consulting AI...');
637
+
638
+ try {
639
+ const res = await fetch(`/chat/${currentReportId}`, {
640
+ method: 'POST',
641
+ headers: { 'Content-Type': 'application/json' },
642
+ body: JSON.stringify({ question: q })
643
+ });
644
+ const data = await res.json();
645
+ thinking.textContent = data.answer || 'No response.';
646
+ } catch (e) {
647
+ thinking.textContent = 'Failed to get response. Please try again.';
648
+ }
649
+ }
650
+
651
+ function appendChat(role, text) {
652
+ const container = document.getElementById('chat-messages');
653
+ const div = document.createElement('div');
654
+ div.className = `text-xs leading-relaxed p-2.5 max-w-[90%] animate-fade-in ${role === 'user' ? 'chat-bubble-user ml-auto' : 'chat-bubble-ai'}`;
655
+ div.textContent = text;
656
+ container.appendChild(div);
657
+ container.scrollTop = container.scrollHeight;
658
+ return div;
659
+ }
660
+
661
+ // ═══════════════════════════════════════════════════════ PDF EXPORT
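+ // The PDF is generated entirely client-side with jsPDF: W/LM/CW/TM/FY define the A4
+ // page geometry in jsPDF's default millimetre units, and the footer/brk/txt/sec
+ // closures handle page breaks, wrapped body text and section headings before each
+ // report field is laid out in order.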
662
+ document.getElementById('export-pdf-btn').addEventListener('click', () => {
663
+ if (!lastReportData) return;
664
+ try {
665
+ const { jsPDF } = window.jspdf;
666
+ const doc = new jsPDF();
667
+ const d = lastReportData;
668
+ const s = d.sections;
669
+ const W=210, LM=14, CW=182, TM=32, FY=285;
670
+ let y = TM;
671
+ const NAVY=[30,58,138], BLUE=[30,64,175], GRAY=[60,60,60], DARK=[20,20,20];
672
+ const sevRGB = {NORMAL:[16,185,129],INCIDENTAL:[245,158,11],SIGNIFICANT:[249,115,22],CRITICAL:[239,68,68]}[d.overall_severity]||[16,185,129];
673
+
674
+ doc.setFillColor(...NAVY); doc.rect(0,0,W,26,'F');
675
+ doc.setFillColor(59,130,246); doc.roundedRect(LM,6,10,14,2,2,'F');
676
+ doc.setTextColor(255,255,255); doc.setFont('helvetica','bold'); doc.setFontSize(10); doc.text('M',LM+3.5,14.5);
677
+ doc.setFontSize(16); doc.text('MediAgent Clinical Report',28,14);
678
+ doc.setFontSize(8); doc.setFont('helvetica','normal'); doc.text('AMD Instinct MI300X | ROCm | vLLM | Qwen',28,20);
679
+ const now=new Date(); doc.text(`${d.report_id} | ${now.toLocaleString()}`,196,12,{align:'right'});
680
+ doc.text(`Overall: ${d.overall_severity||'NORMAL'}`,196,18,{align:'right'});
681
+ doc.setFillColor(...sevRGB); doc.rect(0,26,W,4,'F');
682
+
683
+ const footer=pg=>{ doc.setDrawColor(200); doc.line(LM,FY-5,196,FY-5); doc.setFont('helvetica','normal'); doc.setFontSize(7); doc.setTextColor(150,150,150); doc.text('MediAgent v2.0 | AMD MI300X | AI-generated - requires licensed radiologist review',LM,FY); doc.text(`${pg}`,196,FY,{align:'right'}); };
684
+ const brk=(h=10)=>{ if(y+h>FY-8){ footer(doc.internal.getNumberOfPages()); doc.addPage(); y=TM; } };
685
+ const txt=(t,{sz=10,bold=false,col=GRAY,gap=5}={})=>{ if(!t)t='Not provided.'; doc.setFont('helvetica',bold?'bold':'normal'); doc.setFontSize(sz); doc.setTextColor(...col); const lines=doc.splitTextToSize(t,CW); for(const l of lines){ brk(gap); doc.text(l,LM,y); y+=gap; } y+=1; };
686
+ const sec=t=>{ brk(12); doc.setDrawColor(...BLUE); doc.setLineWidth(0.4); doc.line(LM,y,196,y); y+=4; doc.setFont('helvetica','bold'); doc.setFontSize(11); doc.setTextColor(...NAVY); doc.text(t,LM,y); y+=6; };
687
+
688
+ brk(22); doc.setFillColor(241,245,249); doc.rect(LM,y,CW,18,'F');
689
+ doc.setFont('helvetica','normal'); doc.setFontSize(9); doc.setTextColor(...GRAY);
690
+ doc.text(`Report ID: ${d.report_id}`,LM+3,y+6); doc.text(`Generated: ${now.toLocaleString()}`,LM+3,y+11);
691
+ doc.text(`Severity: ${d.overall_severity||'NORMAL'}`,LM+3,y+16);
692
+ const qa=((s.recommendations||'').match(/Score[:\s]+(\d+)/)||[])[1]||'85';
693
+ doc.text(`QA Score: ${qa}/100`,LM+100,y+6);
694
+ y+=24;
695
+
696
+ if(d.dicom_metadata && Object.keys(d.dicom_metadata).length>0) {
697
+ sec('DICOM Metadata');
698
+ const dm=d.dicom_metadata;
699
+ const pairs=[['Modality',dm.modality],['Body Part',dm.body_part],['Study Date',dm.study_date],['Institution',dm.institution]].filter(p=>p[1]);
700
+ txt(pairs.map(p=>`${p[0]}: ${p[1]}`).join(' | '),{sz:9});
701
+ }
702
+ sec('Clinical History'); txt(s.clinical_history);
703
+ sec('Technique'); txt(s.technique);
704
+ sec('Findings'); brk(20); doc.setFillColor(249,250,251); const fl=doc.splitTextToSize(s.findings||'None.',CW-4); const fh=Math.max(18,fl.length*5+8); doc.rect(LM,y,CW,fh,'F'); txt(s.findings||'None.');
705
+ sec('Impression'); txt(s.impression,{sz:11,bold:true,col:DARK,gap:6});
706
+ sec('Recommendations'); txt(s.recommendations);
707
+ sec('Disclaimer'); brk(18); doc.setFillColor(254,226,226); doc.rect(LM,y,CW,16,'F'); doc.setFont('helvetica','bold'); doc.setFontSize(8); doc.setTextColor(127,29,29); doc.splitTextToSize('DISCLAIMER: '+(s.disclaimer||''),CW-6).forEach((l,i)=>doc.text(l,LM+3,y+5+i*4.5));
708
+ footer(doc.internal.getNumberOfPages());
709
+ doc.save(`MediAgent_${d.report_id}_${now.toISOString().split('T')[0]}.pdf`);
710
+ showToast('PDF exported.', 'success');
711
+ } catch(e) { console.error(e); showToast('PDF export failed.', 'error'); }
712
+ });
713
+
714
+ // ═══════════════════════════════════════════════════════ AMD GPU POLLING
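+ // Polls /metrics/gpu every 3 s. renderGpuPanel expects { available, note?, cards:
+ // [{ gpu_use_pct, vram_used_mb, vram_total_mb, temp_c, power_w }] } and falls back
+ // to a "No GPU detected" note when no card data is returned.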
715
+ function startGpuPolling() {
716
+ pollGpu();
717
+ gpuPollInterval = setInterval(pollGpu, 3000);
718
+ }
719
+
720
+ async function pollGpu() {
721
+ try {
722
+ const res = await fetch('/metrics/gpu');
723
+ const data = await res.json();
724
+ renderGpuPanel(data);
725
+ } catch(e) {
726
+ // silently ignore
727
+ }
728
+ }
729
+
730
+ function renderGpuPanel(data) {
731
+ const panel = document.getElementById('gpu-panel-content');
732
+ const dot = document.getElementById('gpu-status-dot');
733
+ const headerBadge = document.getElementById('gpu-header-badge');
734
+ const headerText = document.getElementById('gpu-header-text');
735
+
736
+ if (!data.available || !data.cards || !data.cards.length) {
737
+ dot.className = 'w-1.5 h-1.5 rounded-full bg-slate-600';
738
+ panel.innerHTML = `<p class="text-[10px] text-slate-500 font-mono text-center py-1">${data.note || 'No GPU detected'}</p>`;
739
+ return;
740
+ }
741
+
742
+ dot.className = 'w-1.5 h-1.5 rounded-full bg-orange-400 animate-pulse';
743
+ const card = data.cards[0];
744
+ const gpuPct = Math.round(parseFloat(card.gpu_use_pct) || 0);
745
+ const vramUsed = Math.round(parseFloat(card.vram_used_mb) || 0);
746
+ const vramTotal = Math.round(parseFloat(card.vram_total_mb) || 1);
747
+ const vramPct = vramTotal > 0 ? Math.round((vramUsed / vramTotal) * 100) : 0;
748
+ const temp = Math.round(parseFloat(card.temp_c) || 0);
749
+ const power = Math.round(parseFloat(card.power_w) || 0);
750
+
751
+ // Header badge
752
+ headerBadge.classList.remove('hidden');
753
+ headerBadge.classList.add('flex');
754
+ headerText.textContent = `GPU ${gpuPct}% | ${temp}°C`;
755
+
756
+ const bar = (pct, color) => `
757
+ <div class="h-1.5 bg-slate-700 rounded-full overflow-hidden">
758
+ <div class="h-full rounded-full ${color} gpu-bar" style="width:${pct}%"></div>
759
+ </div>`;
760
+
761
+ const gpuColor = gpuPct > 80 ? 'bg-red-500' : gpuPct > 50 ? 'bg-orange-400' : 'bg-green-400';
762
+ const vramColor = vramPct > 85 ? 'bg-red-500' : vramPct > 60 ? 'bg-orange-400' : 'bg-blue-400';
763
+
764
+ panel.innerHTML = `
765
+ <div class="space-y-2">
766
+ <div>
767
+ <div class="flex justify-between items-center mb-0.5">
768
+ <span class="text-[9px] font-mono text-slate-400">GPU UTIL</span>
769
+ <span class="text-[10px] font-bold font-mono text-orange-300">${gpuPct}%</span>
770
+ </div>
771
+ ${bar(gpuPct, gpuColor)}
772
+ </div>
773
+ <div>
774
+ <div class="flex justify-between items-center mb-0.5">
775
+ <span class="text-[9px] font-mono text-slate-400">VRAM</span>
776
+ <span class="text-[10px] font-bold font-mono text-blue-300">${vramUsed}/${vramTotal} MiB</span>
777
+ </div>
778
+ ${bar(vramPct, vramColor)}
779
+ </div>
780
+ <div class="grid grid-cols-2 gap-2 pt-1 border-t border-slate-700">
781
+ <div class="text-center">
782
+ <div class="text-[9px] text-slate-500 font-mono">TEMP</div>
783
+ <div class="text-[11px] font-bold font-mono ${temp>80?'text-red-400':temp>65?'text-orange-400':'text-green-400'}">${temp}Β°C</div>
784
+ </div>
785
+ <div class="text-center">
786
+ <div class="text-[9px] text-slate-500 font-mono">POWER</div>
787
+ <div class="text-[11px] font-bold font-mono text-slate-300">${power}W</div>
788
+ </div>
789
+ </div>
790
+ ${data.note ? `<p class="text-[9px] text-slate-600 font-mono text-center">${data.note}</p>` : ''}
791
+ </div>`;
792
+ }
793
+
794
+ // ═══════════════════════════════════════════════════════ UTILITIES
795
+ function setLoading(v) {
796
+ document.getElementById('submit-btn').disabled = v;
797
+ document.getElementById('btn-text').textContent = v ? 'Processing...' : 'Run Analysis';
798
+ document.getElementById('btn-spinner').classList.toggle('hidden', !v);
799
+ }
800
+
801
+ function log(msg) {
802
+ document.getElementById('pipeline-log').innerHTML = `<span class="text-slate-700">&gt; ${msg}</span>`;
803
+ }
804
+
805
+ function startTimer() {
806
+ startTime = performance.now();
807
+ const timerEl = document.getElementById('pipeline-timer');
808
+ timerEl.classList.remove('hidden');
809
+ timerInterval = setInterval(() => {
810
+ timerEl.textContent = `⏱ ${((performance.now()-startTime)/1000).toFixed(1)}s elapsed`;
811
+ }, 100);
812
+ }
813
+
814
+ function stopTimer() {
815
+ clearInterval(timerInterval);
816
+ const elapsed = ((performance.now()-startTime)/1000).toFixed(1);
817
+ document.getElementById('pipeline-log').innerHTML = `<span class="text-green-600 font-bold">✅ Complete in ${elapsed}s</span>`;
818
+ document.getElementById('pipeline-timer').classList.add('hidden');
819
+ }
820
+
821
+ function showToast(msg, type='success') {
822
+ const toast = document.getElementById('toast');
823
+ const inner = document.getElementById('toast-inner');
824
+ document.getElementById('toast-message').textContent = msg;
825
+ inner.className = `${type==='error'?'bg-red-600':'bg-slate-800'} text-white px-4 py-2.5 rounded-lg shadow-xl text-xs font-medium flex items-center gap-2`;
826
+ toast.classList.remove('translate-y-16','opacity-0');
827
+ setTimeout(() => toast.classList.add('translate-y-16','opacity-0'), 3500);
828
+ }
829
+
830
+ init();
831
+ </script>
832
+ </body>
833
+ </html>