| from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder |
| from llm_factory import get_llm |
| from langchain_core.messages import HumanMessage, AIMessage |
| from models import AgentState |
| from typing import List |
|
|
# Shared chat model for this agent. temperature=0.7 favors fluent clinical
# prose over deterministic output; "text" selects the text-completion model
# from the project factory (see llm_factory.get_llm).
llm = get_llm(model_type="text", temperature=0.7)
|
|
# System prompt for the clinical copilot. Instructions are written in English
# but mandate FRENCH output. Two modes: (1) structured intake analysis when
# the user supplies a patient summary, (2) concise follow-up for later turns.
# NOTE: this string is sent verbatim to the LLM — do not reformat casually.
system_prompt = """You are a highly advanced Clinical Copilot (AI Doctor Assistant).
Your role is to assist the physician by analyzing patient data, suggesting diagnoses, and recommending exams.

### 1. MODE: INTAKE ANALYSIS (Critical)
IF the user provides a summary of patient data (Name, Age, Symptoms, Vitals), you MUST output a structured clinical assessment in FRENCH:

**🚨 ANALYSE CLINIQUE IMMÉDIATE**
* **Synthèse**: 1 sentence summary of the patient's state (Stable/Unstable).
* **Red Flags**: List any abnormal vitals or worrying symptoms (e.g., "Hypertension Sévère", "SpO2 Basse").

**🧐 HYPOTHÈSES DIAGNOSTIQUES (Probabilités)**
1. **[Most Likely]**: Explanation.
2. **[Plausible]**: Explanation.
3. **[To rule out]**: Explanation (Red Flag).

**✅ CONDUITE À TENIR IMMÉDIATE**
* List 3-4 specific exams to order NOW (e.g., ECG, Troponine, Radio Thorax).
* Immediate treatments if urgent (e.g., Oxygen, Aspirin).

**❓ QUESTIONS D'INVESTIGATION**
* Ask 3 precises questions to the doctor/patient to narrow down the diagnosis (e.g., "La douleur irradie-t-elle dans le bras gauche?", "Avez-vous des antécédents de phlébite?").

---

### 2. MODE: ONGOING CONSULTATION
For subsequent messages:
- Acknowledge the new information.
- Update your hypothesis.
- Suggest next steps.
- Be concise.

### TONE & STYLE
- Professional, efficient, "Doctor-to-Doctor" tone.
- USE FRENCH LANGUAGE unless told otherwise.
- USE MARKDOWN for bolding and lists.
"""
|
|
# Conversation template: fixed system instructions, followed by the running
# chat history, then the physician's latest message (bound to "{input}").
_template_messages = [
    ("system", system_prompt),
    MessagesPlaceholder(variable_name="history"),
    ("user", "{input}"),
]
prompt = ChatPromptTemplate.from_messages(_template_messages)
|
|
# LCEL pipeline: render the prompt, then invoke the model. Used by
# run_anamnesis_agent via chain.ainvoke(...).
chain = prompt | llm
|
|
# NOTE(review): misplaced mid-file import; should live in the top-of-file
# import block. rag_service is not referenced anywhere in this module —
# presumably reserved for future RAG context injection; confirm before removing.
from services.rag_service import rag_service
|
|
async def run_anamnesis_agent(history: List[dict], user_input: str) -> str:
    """Run the clinical-copilot chain over the conversation so far.

    Args:
        history: Prior turns as dicts with keys ``role`` (``"user"`` or
            ``"assistant"``) and ``content``. Other roles are silently skipped.
        user_input: The physician's latest message.

    Returns:
        The model's reply text (``response.content``).
    """
    # NOTE(review): the original body contained two dead branches — a RAG
    # context injection guarded by `if rag_context:` where rag_context was
    # hard-coded to "", and an intake-mode keyword check whose body was
    # `pass`. Both were unreachable/no-ops and have been removed; wire in
    # rag_service here when RAG context loading is actually implemented.

    # Rebuild the LangChain message list from the plain-dict history.
    messages = []
    for msg in history:
        if msg['role'] == 'user':
            messages.append(HumanMessage(content=msg['content']))
        elif msg['role'] == 'assistant':
            messages.append(AIMessage(content=msg['content']))

    response = await chain.ainvoke({"history": messages, "input": user_input})
    return response.content
|
|