Álvaro Valenzuela Valdes committed on
Commit ·
080b51f
1
Parent(s): 73126b2
fix: improve chatbot self-awareness and expand model engine mapping
Browse files
backend/app/routers/analysis.py
CHANGED
|
@@ -42,20 +42,21 @@ def get_analysis_history():
|
|
| 42 |
@router.post("/chat")
|
| 43 |
async def agent_chat(request: ChatRequest):
|
| 44 |
# Construct context
|
| 45 |
-
history_str = "\n".join([f"{m.role.upper()}: {m.content}" for m in request.history])
|
| 46 |
|
| 47 |
prompt = (
|
| 48 |
-
f"Eres {request.agent} en AndesOps AI, un consultor experto de élite.
|
|
|
|
| 49 |
f"CONTEXTO DE LA LICITACIÓN:\n{request.tender.model_dump_json()}\n\n"
|
| 50 |
f"DATOS DE MI EMPRESA:\n{request.company_profile.model_dump_json()}\n\n"
|
| 51 |
f"HISTORIAL DE CHAT:\n{history_str}\n\n"
|
| 52 |
f"PREGUNTA DEL USUARIO: {request.message}\n\n"
|
| 53 |
f"INSTRUCCIONES CRÍTICAS:\n"
|
| 54 |
f"1. Responde con la personalidad de {request.agent}. Sé agudo, profesional y estratégico.\n"
|
| 55 |
-
f"2.
|
| 56 |
-
f"3.
|
| 57 |
-
f"4.
|
| 58 |
-
f"5.
|
| 59 |
f"RESPONDE EN ESPAÑOL."
|
| 60 |
)
|
| 61 |
|
|
|
|
| 42 |
@router.post("/chat")
|
| 43 |
async def agent_chat(request: ChatRequest):
|
| 44 |
# Construct context
|
| 45 |
+
history_str = "\n".join([f"{m.role.upper()}{f' ({m.agent_name})' if m.agent_name else ''}: {m.content}" for m in request.history])
|
| 46 |
|
| 47 |
prompt = (
|
| 48 |
+
f"Eres {request.agent} en AndesOps AI, un consultor experto de élite. "
|
| 49 |
+
f"Actualmente estás operando bajo el motor de IA: {request.model}.\n\n"
|
| 50 |
f"CONTEXTO DE LA LICITACIÓN:\n{request.tender.model_dump_json()}\n\n"
|
| 51 |
f"DATOS DE MI EMPRESA:\n{request.company_profile.model_dump_json()}\n\n"
|
| 52 |
f"HISTORIAL DE CHAT:\n{history_str}\n\n"
|
| 53 |
f"PREGUNTA DEL USUARIO: {request.message}\n\n"
|
| 54 |
f"INSTRUCCIONES CRÍTICAS:\n"
|
| 55 |
f"1. Responde con la personalidad de {request.agent}. Sé agudo, profesional y estratégico.\n"
|
| 56 |
+
f"2. IDENTIDAD: Si el usuario pregunta qué modelo eres o quién te potencia, menciona que eres {request.agent} de AndesOps, funcionando sobre {request.model}.\n"
|
| 57 |
+
f"3. ANALIZA LAS BASES: Revisa el campo 'description' para responder.\n"
|
| 58 |
+
f"4. CITA EL DOCUMENTO: Menciona montos, multas o plazos explícitos si están disponibles.\n"
|
| 59 |
+
f"5. CONSEJO ESTRATÉGICO: Sugiere mejoras basadas en la experiencia de la empresa ({request.company_profile.experience}).\n"
|
| 60 |
f"RESPONDE EN ESPAÑOL."
|
| 61 |
)
|
| 62 |
|
backend/app/schemas/analysis.py
CHANGED
|
@@ -9,6 +9,7 @@ from app.schemas.tender import Tender
|
|
| 9 |
class ChatMessage(BaseModel):
|
| 10 |
role: str
|
| 11 |
content: str
|
|
|
|
| 12 |
|
| 13 |
|
| 14 |
class ChatRequest(BaseModel):
|
|
|
|
| 9 |
class ChatMessage(BaseModel):
|
| 10 |
role: str
|
| 11 |
content: str
|
| 12 |
+
agent_name: str | None = None
|
| 13 |
|
| 14 |
|
| 15 |
class ChatRequest(BaseModel):
|
backend/app/services/llm.py
CHANGED
|
@@ -107,6 +107,9 @@ async def call_gemini_with_model(prompt: str, model_name: str | None = None, is_
|
|
| 107 |
"Llama-3.3-70B (Groq)": "groq:llama-3.3-70b-versatile",
|
| 108 |
"Llama-3.1-8B (Groq)": "groq:llama-3.1-8b-instant",
|
| 109 |
"Llama-3.1-70B (Groq)": "groq:llama-3.1-70b-versatile",
|
|
|
|
|
|
|
|
|
|
| 110 |
}
|
| 111 |
|
| 112 |
model_id = model_map.get(model_name, "gemini")
|
|
|
|
| 107 |
"Llama-3.3-70B (Groq)": "groq:llama-3.3-70b-versatile",
|
| 108 |
"Llama-3.1-8B (Groq)": "groq:llama-3.1-8b-instant",
|
| 109 |
"Llama-3.1-70B (Groq)": "groq:llama-3.1-70b-versatile",
|
| 110 |
+
"Mixtral-8x7B (Groq)": "groq:mixtral-8x7b-32768",
|
| 111 |
+
"Gemma-2-9B (Featherless)": "google/gemma-2-9b-it",
|
| 112 |
+
"Llama-3.1-8B (Featherless)": "meta-llama/Meta-Llama-3.1-8B-Instruct",
|
| 113 |
}
|
| 114 |
|
| 115 |
model_id = model_map.get(model_name, "gemini")
|
frontend/components/AgentChat.tsx
CHANGED
|
@@ -122,7 +122,7 @@ export default function AgentChat({ tender, companyProfile }: Props) {
|
|
| 122 |
message: contextText ? `[DOC CONTEXT: ${contextText.slice(0, 3000)}]\n\nUSER QUESTION: ${messageToSend}` : messageToSend,
|
| 123 |
agent: selectedAgent.id,
|
| 124 |
model: selectedModel,
|
| 125 |
-
history: messages.map(({role, content}) => ({role, content})),
|
| 126 |
}),
|
| 127 |
});
|
| 128 |
|
|
|
|
| 122 |
message: contextText ? `[DOC CONTEXT: ${contextText.slice(0, 3000)}]\n\nUSER QUESTION: ${messageToSend}` : messageToSend,
|
| 123 |
agent: selectedAgent.id,
|
| 124 |
model: selectedModel,
|
| 125 |
+
history: messages.map(({role, content, agent}) => ({role, content, agent_name: agent})),
|
| 126 |
}),
|
| 127 |
});
|
| 128 |
|