from langchain_core.prompts import ChatPromptTemplate

from llm_factory import get_llm

# Low temperature keeps the diagnostic output deterministic and conservative.
llm = get_llm(model_type="text", temperature=0.2)

# System contract for the model: French-only output, lab-value integration,
# and a fixed four-section response format.
system_prompt = """You are an expert medical Diagnostician. Your goal is to analyze patient symptoms, history, and biological lab results (if available) to provide a differential diagnosis.

Rules:
1. **LANGUAGE: FRENCH ONLY.** The entire output must be in French.
2. **INTEGRATION:** If biological results are provided, use them to CONFIRM or RULE OUT hypotheses. Cite specific values (e.g., "L'anémie est confirmée par l'hémoglobine à 9g/dL").
3. Format your response clearly:
- **Diagnostic Principal**: The most likely cause.
- **Justification**: Combine clinical signs + biological proofs.
- **Diagnostics Différentiels**: Other possibilities.
- **Signes de Gravité (Red Flags)**: Any urgent warnings.
4. Be concise and professional.
"""

prompt = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    ("user", """Patient Info: {patient_info}

Symptom Summary (Chat): {symptom_summary}

Biological Analysis (Labs): {bio_data}
"""),
])

# LCEL pipeline: render the prompt, then invoke the LLM.
chain = prompt | llm


async def run_diagnosis_agent(patient_info: dict, symptom_summary: str, bio_data: dict | None = None) -> str:
    """Run the diagnosis chain and return the model's answer (French text).

    Args:
        patient_info: Patient demographics/history; stringified into the prompt.
        symptom_summary: Free-text symptom summary from the chat.
        bio_data: Optional lab results; when absent (None or empty), a French
            placeholder sentence is injected instead.

    Returns:
        The LLM response content. NOTE(review): assumed to be a plain string
        for a text model — confirm with the llm_factory return type.
    """
    # Falsy bio_data (None or {}) is treated as "no labs provided".
    bio_text = str(bio_data) if bio_data else "Aucune donnée biologique fournie."
    response = await chain.ainvoke({
        "patient_info": str(patient_info),
        "symptom_summary": symptom_summary,
        "bio_data": bio_text,
    })
    return response.content