import logging
import os

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate

from llm_factory import get_llm
from models import Patient
|
|
| |
# LLM used for extraction. Low temperature (0.1) keeps the output close to
# deterministic so the JSON stays schema-faithful.
# NOTE(review): get_llm comes from llm_factory (not visible here) — presumably
# returns a LangChain-compatible chat/text model; confirm in llm_factory.
llm = get_llm(model_type="text", temperature=0.1)

# Parses the model's raw text output into a dict shaped by the Patient schema.
parser = JsonOutputParser(pydantic_object=Patient)

system_prompt = """You are a medical receptionist agent. Your goal is to extract patient information from a natural language introduction.
Extract the following fields: name, age, gender, and any mentioned medical history.
If a field is missing, leave it as null or infer it if obvious.
Return the result as a JSON object matching the Patient schema.
"""

# Two-message template: the fixed system instructions above, plus the user's
# free-text introduction injected as the "input" template variable.
prompt = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    ("user", "{input}")
])

# LCEL pipeline: render prompt -> call LLM -> parse JSON into a dict.
chain = prompt | llm | parser
|
|
async def run_intake_agent(user_input: str) -> Patient:
    """Extract structured patient data from a free-text introduction.

    Runs the intake chain (prompt -> LLM -> JSON parser) on *user_input*
    and validates the parsed dict into a ``Patient`` model.

    Args:
        user_input: Natural-language text in which the patient introduces
            themselves (name, age, gender, medical history, ...).

    Returns:
        A ``Patient`` built from the LLM's JSON output. On any failure
        (LLM error, unparseable JSON, schema validation error) a sentinel
        ``Patient(name="Unknown", age=0, gender="Unknown")`` is returned,
        preserving the existing caller contract that this function always
        yields a Patient rather than raising.
    """
    try:
        result = await chain.ainvoke({"input": user_input})
        return Patient(**result)
    except Exception:
        # Broad catch is deliberate: this is the top-level boundary for the
        # agent and callers expect a Patient, not an exception. But record
        # the full traceback via logging instead of print() so failures are
        # visible and diagnosable in production logs.
        logging.getLogger(__name__).exception(
            "Error in intake agent for input: %r", user_input
        )
        return Patient(name="Unknown", age=0, gender="Unknown")
|
|