| from langchain_core.prompts import ChatPromptTemplate |
| from llm_factory import get_llm |
|
|
# Low temperature keeps the plan deterministic and clinically conservative.
llm = get_llm(model_type="text", temperature=0.2)


# System instructions for the planner: French-only output, fixed section
# structure, and an emergency-escalation rule.
system_prompt = """You are a Medical Action Planner.
Based on the provided diagnosis, recommend the next steps.

Rules:
1. **LANGUAGE: FRENCH ONLY.**
2. Structure the plan as follows:
   - **Examens Complémentaires**: Labs, Imaging (X-Ray, MRI, etc.). Mark URGENT if needed.
   - **Traitement**: Medications (Generic names), dosage instructions.
   - **Orientation**: Specialist referrals (Cardiologist, Pneumologist, etc.).
   - **Conseils**: Lifestyle or monitoring advice.
3. If the diagnosis suggests an emergency, emphasize immediate hospital admission or urgent specialist consultation.
Be specific about which agent (e.g., "Bio Analysis Agent") would handle which part if applicable, but primarily output the medical actions.
"""


# Message layout: static system instructions followed by the user turn
# carrying the diagnosis report placeholder.
_messages = [
    ("system", system_prompt),
    ("user", "Diagnosis Report: {diagnosis_report}"),
]
prompt = ChatPromptTemplate.from_messages(_messages)


# LCEL pipeline: render the prompt, then invoke the model.
chain = prompt | llm
|
|
async def run_planner_agent(diagnosis_report: str) -> str:
    """Run the planner chain on a diagnosis report.

    Args:
        diagnosis_report: Free-text diagnosis to plan next steps for.

    Returns:
        The model's plan as a string (the message's ``content`` field).
    """
    result = await chain.ainvoke({"diagnosis_report": diagnosis_report})
    return result.content
|
|