| from dotenv import load_dotenv |
| import os |
| import httpx |
| import base64 |
| load_dotenv() |
|
|
| from fastapi import FastAPI, HTTPException, Form |
| from fastapi.middleware.cors import CORSMiddleware |
| from pydantic import BaseModel |
| from typing import List, Dict, Any, Optional |
| from models import Patient, AgentState |
| from agents.intake import run_intake_agent |
| from agents.anamnesis import run_anamnesis_agent |
| from agents.diagnosis import run_diagnosis_agent |
| from agents.planner import run_planner_agent |
|
|
|
|
app = FastAPI(title="Medical Multi-Agent POC")

# CORS is wide open for the POC front-end.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec — browsers refuse credentialed requests against
# a wildcard origin. Confirm whether credentials are needed; if so, pin the
# front-end origin(s) explicitly.
origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
class IntakeRequest(BaseModel):
    """Request body for /api/intake: free-text patient input for the intake agent."""

    input: str
|
|
class AnamnesisRequest(BaseModel):
    """Request body for /api/anamnesis: prior chat turns plus the new user message."""

    # Conversation so far; each entry is a flat string-to-string mapping
    # (presumably role/content pairs — verify against the front-end caller).
    history: List[Dict[str, str]]
    input: str
|
|
class DiagnosisRequest(BaseModel):
    """Request body for /api/diagnosis: inputs forwarded to the diagnosis agent."""

    patient_info: Dict[str, Any]
    symptom_summary: str
    # Optional laboratory/biological results; passed through unchanged.
    bio_data: Optional[Dict[str, Any]] = None
|
|
class PlannerRequest(BaseModel):
    """Request body for /api/planner: the diagnosis report to plan treatment from."""

    diagnosis_report: str
|
|
@app.get("/")
async def root():
    """Health-check endpoint confirming the backend is up."""
    status_message = "Medical Multi-Agent POC Backend is running"
    return {"message": status_message}
|
|
@app.post("/api/intake", response_model=Patient)
async def intake_endpoint(request: IntakeRequest):
    """Parse the free-text intake message into a structured Patient record."""
    patient = await run_intake_agent(request.input)
    return patient
|
|
@app.post("/api/anamnesis")
async def anamnesis_endpoint(request: AnamnesisRequest):
    """Continue the anamnesis dialogue given the prior turns and the new input."""
    reply = await run_anamnesis_agent(request.history, request.input)
    return {"response": reply}
|
|
@app.post("/api/diagnosis")
async def diagnosis_endpoint(request: DiagnosisRequest):
    """Run the diagnosis agent over patient info, symptoms and optional bio data."""
    return {
        "report": await run_diagnosis_agent(
            request.patient_info, request.symptom_summary, request.bio_data
        )
    }
|
|
@app.post("/api/planner")
async def planner_endpoint(request: PlannerRequest):
    """Derive a treatment plan from the supplied diagnosis report."""
    treatment_plan = await run_planner_agent(request.diagnosis_report)
    return {"plan": treatment_plan}
|
|
| |
|
|
| from agents.triage import run_triage_agent |
| from agents.pharmacist import run_pharmacist_agent |
| from agents.imaging import run_imaging_agent |
| from agents.followup import run_followup_agent |
|
|
class TriageRequest(BaseModel):
    """Request body for /api/triage: free-text symptom description."""

    symptoms: str
|
|
class PharmacistRequest(BaseModel):
    """Request body for /api/pharmacist: patient profile plus the prescription to review."""

    patient_name: str
    patient_age: int
    # Free-text medical history, as a single string (unlike AnamnesisRequest.history).
    history: str
    prescription: str
|
|
class ImagingRequest(BaseModel):
    """Request body for /api/imaging: textual imaging description and its clinical context."""

    imaging_desc: str
    clinical_context: str
|
|
class FollowupRequest(BaseModel):
    """Request body for /api/followup: the diagnosis and treatment to follow up on."""

    diagnosis: str
    treatment: str
|
|
@app.post("/api/triage")
async def triage_endpoint(request: TriageRequest):
    """Run the triage agent on the submitted symptom description."""
    return {"result": await run_triage_agent(request.symptoms)}
|
|
@app.post("/api/pharmacist")
async def pharmacist_endpoint(request: PharmacistRequest):
    """Run the pharmacist agent over the patient profile and prescription."""
    review = await run_pharmacist_agent(
        request.patient_name,
        request.patient_age,
        request.history,
        request.prescription,
    )
    return {"result": review}
|
|
@app.post("/api/imaging")
async def imaging_endpoint(request: ImagingRequest):
    """Run the imaging agent on a description plus its clinical context."""
    return {
        "result": await run_imaging_agent(request.imaging_desc, request.clinical_context)
    }
|
|
@app.post("/api/followup")
async def followup_endpoint(request: FollowupRequest):
    """Run the follow-up agent for a given diagnosis and treatment."""
    followup = await run_followup_agent(request.diagnosis, request.treatment)
    return {"result": followup}
|
|
| from agents.bio_analysis import run_bio_analysis_agent |
| from agents.radiology import run_radiology_agent |
| from agents.report import generate_report_content, create_pdf_report |
| from fastapi.responses import FileResponse |
|
|
| from fastapi import UploadFile, File |
|
|
class ReportRequest(BaseModel):
    """Request body for /api/generate-report: inputs for the final PDF report."""

    # Expected to contain at least a "name" key (used to build the PDF filename).
    patient_info: Dict[str, Any]
    diagnosis: str
    plan: str
|
|
@app.post("/api/bio-analysis")
async def bio_analysis_endpoint(file: UploadFile = File(...)):
    """Run the bio-analysis agent on an uploaded file's raw bytes."""
    raw_bytes = await file.read()
    return {"analysis": await run_bio_analysis_agent(raw_bytes, file.content_type)}
|
|
@app.post("/api/radiology-analysis")
async def radiology_analysis_endpoint(
    file: UploadFile = File(...),
    model_type: str = Form(...),
    question: Optional[str] = Form(None)
):
    """Run the radiology agent on an uploaded image with the chosen model and optional question."""
    image_bytes = await file.read()
    analysis = await run_radiology_agent(image_bytes, model_type, question)
    return {"result": analysis}
|
|
@app.post("/api/generate-report")
async def report_endpoint(request: ReportRequest):
    """Generate the report text, render it to a PDF, and return both.

    The returned ``pdf_url`` points at the ``/reports/{filename}`` route below,
    which serves files out of the local ``reports/`` directory.
    """
    content = await generate_report_content(request.patient_info, request.diagnosis, request.plan)
    # Build a filesystem-safe filename from the patient's name.
    patient_name = request.patient_info.get("name", "patient").replace(" ", "_").lower()
    filename = f"{patient_name}_report.pdf"
    # Called for its side effect of writing the PDF; its return value was unused.
    create_pdf_report(content, request.patient_info.get("name", "Unknown"), filename)
    # Fix: the URL must interpolate the actual filename, not a literal placeholder.
    return {"content": content, "pdf_url": f"/reports/{filename}"}
|
|
@app.get("/reports/{filename}")
async def get_report(filename: str):
    """Serve a previously generated PDF from the local ``reports/`` directory.

    ``filename`` comes straight from the URL, so directory components are
    stripped to prevent path traversal outside ``reports/``; missing files
    yield a 404 instead of an unhandled server error.
    """
    safe_name = os.path.basename(filename)
    file_path = os.path.join("reports", safe_name)
    if not os.path.isfile(file_path):
        raise HTTPException(status_code=404, detail="Report not found")
    return FileResponse(file_path)
|
|
| import google.generativeai as genai |
| import json |
|
|
@app.post("/api/collab-chat")
async def collab_chat_endpoint(
    text: str = Form(...),
    history: str = Form(...),
    file: Optional[UploadFile] = File(None)
):
    """Collaborative chat endpoint backed by the Z.AI GLM vision model.

    Accepts the user's message, the JSON-encoded chat history, and an optional
    attachment. Images are forwarded to the VLM as base64 data URLs; PDFs have
    their text extracted locally (pypdf) and appended to the prompt text.

    All failures are returned as a normal ``{"response": ...}`` payload
    (French, user-facing) rather than HTTP errors, so the chat UI can display
    them inline.
    """
    try:
        chat_history = json.loads(history)

        z_api_key = os.getenv("Z_AI_API_KEY")
        if not z_api_key:
            return {"response": "Erreur: Clé API IA Vision (VLM) non configurée dans le fichier .env."}

        api_url = "https://api.z.ai/api/paas/v4/chat/completions"
        headers = {
            "Authorization": f"Bearer {z_api_key}",
            "Content-Type": "application/json"
        }

        messages = []

        messages.append({
            "role": "system",
            "content": "Tu es un expert médical SmartDiag. Analyse les documents et réponds aux médecins avec précision clinique."
        })

        # Replay prior turns. Each history entry carries "parts" either as a
        # list of {"text": ...} dicts (Gemini-style) or as a plain string.
        for h in chat_history:
            role = "user" if h["role"] == "user" else "assistant"

            msg_text = ""
            if isinstance(h["parts"], list) and len(h["parts"]) > 0:
                msg_text = h["parts"][0].get("text", "")
            elif isinstance(h["parts"], str):
                msg_text = h["parts"]

            if msg_text:
                messages.append({"role": role, "content": msg_text})

        if file:
            current_content = []
            extracted_text = ""
            file_bytes = await file.read()
            # Fix: UploadFile.content_type can be None; guard before .startswith.
            content_type = file.content_type or ""

            if content_type.startswith("image/"):
                img_b64 = base64.b64encode(file_bytes).decode("utf-8")
                current_content.append({
                    "type": "image_url",
                    "image_url": {"url": f"data:{file.content_type};base64,{img_b64}"}
                })
            elif content_type == "application/pdf":
                try:
                    import io
                    from pypdf import PdfReader
                    pdf_file = io.BytesIO(file_bytes)
                    reader = PdfReader(pdf_file)
                    for page in reader.pages:
                        # Fix: extract_text() returns None on image-only pages;
                        # previously that raised TypeError and was misreported
                        # as a generic read error.
                        extracted_text += (page.extract_text() or "") + "\n"

                    # strip() so a scan-only PDF (newlines only) still takes the
                    # "no extractable text" branch below.
                    if extracted_text.strip():
                        text = f"{text}\n\n[Contenu du PDF joint ({file.filename})]:\n{extracted_text}"
                    else:
                        return {"response": "Impossible d'extraire le texte de ce PDF (scan/image). Merci d'envoyer une capture d'écran."}
                except Exception as pdf_err:
                    print(f"PDF Error: {pdf_err}")
                    return {"response": "Erreur lecture PDF."}

            # Other content types fall through with only the text part.
            current_content.append({"type": "text", "text": text})
            messages.append({"role": "user", "content": current_content})
            model_name = "glm-4.6v"
        else:
            if not text.strip():
                return {"response": "Veuillez saisir un message."}

            messages.append({
                "role": "user",
                "content": [{"type": "text", "text": text}]
            })
            model_name = "glm-4.6v"

        payload = {
            "model": model_name,
            "messages": messages,
            "temperature": 0.7,
            "top_p": 0.9,
            "stream": False
        }

        async with httpx.AsyncClient(timeout=120.0) as client:
            response = await client.post(api_url, headers=headers, json=payload)
            if response.status_code == 200:
                result = response.json()
                return {"response": result["choices"][0]["message"]["content"]}
            else:
                error_msg = response.text
                print(f"GLM API Error: {error_msg}")
                return {"response": f"Erreur VLM : Impossible d'analyser le document ({response.status_code})."}

    except Exception as e:
        # Top-level boundary: log the traceback, return a friendly chat message.
        print(f"Critical error in collab-chat: {str(e)}")
        import traceback
        traceback.print_exc()
        return {"response": f"Désolé, une erreur technique est survenue : {str(e)}"}
|
|