File size: 17,904 Bytes
import hashlib
import json
from datetime import datetime
import httpx
import google.generativeai as genai
from app.config import settings
from app.schemas.analysis import AnalysisResult, RiskItem, ActionItem, CompanyProfile, Tender
from app.services.report import generate_markdown_report
# Configure Gemini
genai.configure(api_key=settings.gemini_api_key)

async def call_gemini(prompt: str, is_json: bool = False) -> str:
    """Call Gemini; on error, fall back to Groq or Featherless when keys are configured."""
    if not settings.gemini_api_key:
        return ""
    try:
        generation_config = {
            "temperature": 0.2,
            "top_p": 0.95,
            "top_k": 40,
            "max_output_tokens": 8192,
        }
        if is_json:
            generation_config["response_mime_type"] = "application/json"
        model = genai.GenerativeModel(
            model_name="gemini-2.0-flash",
            generation_config=generation_config,
        )
        response = await model.generate_content_async(prompt)
        return response.text
    except Exception as e:
        print(f"Error calling Gemini (is_json={is_json}): {e}, trying fallback...")
        if settings.groq_api_key:
            return await call_groq(prompt, "llama-3.3-70b-versatile")
        return await call_featherless(prompt, "Qwen/Qwen2.5-72B-Instruct")

async def call_featherless(prompt: str, model: str = "Qwen/Qwen2.5-72B-Instruct") -> str:
    if not settings.featherless_api_key:
        return ""
    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            payload = {
                "model": model,
                "messages": [{"role": "user", "content": prompt}],
                "temperature": 0.2
            }
            if "json" in prompt.lower():
                payload["response_format"] = {"type": "json_object"}
            response = await client.post(
                "https://api.featherless.ai/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {settings.featherless_api_key}",
                    "Content-Type": "application/json"
                },
                json=payload
            )
            if response.status_code != 200:
                print(f"Featherless Error ({model}): {response.status_code} - {response.text}")
                return ""
            data = response.json()
            return data["choices"][0]["message"]["content"]
    except Exception as e:
        print(f"Error calling Featherless ({model}): {e}")
        return ""

async def call_groq(prompt: str, model: str = "llama-3.3-70b-versatile") -> str:
    if not settings.groq_api_key:
        return ""
    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            payload = {
                "model": model,
                "messages": [{"role": "user", "content": prompt}],
                "temperature": 0.2
            }
            if "json" in prompt.lower():
                payload["response_format"] = {"type": "json_object"}
            response = await client.post(
                "https://api.groq.com/openai/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {settings.groq_api_key}",
                    "Content-Type": "application/json"
                },
                json=payload
            )
            if response.status_code != 200:
                print(f"Groq Error ({model}): {response.status_code} - {response.text}")
                return ""
            data = response.json()
            return data["choices"][0]["message"]["content"]
    except Exception as e:
        print(f"Error calling Groq ({model}): {e}")
        return ""

async def call_gemini_with_model(prompt: str, model_name: str | None = None, is_json: bool = False) -> str:
    """Route the prompt to the provider implied by the UI model label, with cross-provider fallbacks."""
    model_map = {
        "Gemini 2.5 Flash": "gemini",
        "DeepSeek-V3 (Featherless)": "deepseek-ai/DeepSeek-V3",
        "Qwen-2.5 (Featherless)": "Qwen/Qwen2.5-72B-Instruct",
        "Llama-3.3-70B (Groq)": "groq:llama-3.3-70b-versatile",
        "Llama-3.1-8B (Groq)": "groq:llama-3.1-8b-instant",
        "Llama-3.1-70B (Groq)": "groq:llama-3.1-70b-versatile",
        "Mixtral-8x7B (Groq)": "groq:mixtral-8x7b-32768",
        "Gemma-2-9B (Featherless)": "google/gemma-2-9b-it",
        "Llama-3.1-8B (Featherless)": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "Llama-3.2-11B-Vision (Groq)": "groq:llama-3.2-11b-vision-preview",
    }
    model_id = model_map.get(model_name, "gemini")
    print(f"DEBUG: Calling LLM with model_name='{model_name}' -> model_id='{model_id}'")
    # Check keys
    if model_id.startswith("groq:") and not settings.groq_api_key:
        print("DEBUG WARNING: GROQ_API_KEY is missing! Falling back to Gemini.")
        model_id = "gemini"
    if model_id == "gemini":
        res = await call_gemini(prompt, is_json=is_json)
        if not res and settings.groq_api_key:
            print("DEBUG: Gemini failed or returned empty. Trying Groq fallback.")
            return await call_groq(prompt, "llama-3.3-70b-versatile")
        return res
    elif model_id.startswith("groq:"):
        # Check if it's a vision call (hacky way for now, but effective)
        if "IMAGE_DATA:" in prompt:
            parts = prompt.split("IMAGE_DATA:", 1)
            text_prompt = parts[0].strip()
            image_b64 = parts[1].strip()
            res = await call_groq_vision(text_prompt, image_b64, model=model_id[5:])
        else:
            res = await call_groq(prompt, model=model_id[5:])
        if not res and settings.gemini_api_key:
            print("DEBUG: Groq failed or returned empty. Trying Gemini fallback.")
            return await call_gemini(prompt, is_json=is_json)
        return res
    else:
        res = await call_featherless(prompt, model=model_id)
        if not res and settings.groq_api_key:
            print("DEBUG: Featherless failed. Trying Groq fallback.")
            return await call_groq(prompt, "llama-3.3-70b-versatile")
        return res
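
# Illustrative sketch (assumed caller-side convention, not defined elsewhere in this file):
# a Groq vision model is selected by appending the base64 image after the "IMAGE_DATA:"
# marker, which call_gemini_with_model splits back out above, e.g.:
#   prompt = f"Describe el anexo adjunto.\nIMAGE_DATA:{image_b64}"
#   text = await call_gemini_with_model(prompt, model_name="Llama-3.2-11B-Vision (Groq)")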

async def call_groq_vision(prompt: str, image_b64: str, model: str = "llama-3.2-11b-vision-preview") -> str:
    if not settings.groq_api_key:
        return ""
    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            # Ensure proper data URL format
            if not image_b64.startswith("data:image"):
                image_b64 = f"data:image/jpeg;base64,{image_b64}"
            payload = {
                "model": model,
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": prompt},
                            {
                                "type": "image_url",
                                "image_url": {"url": image_b64}
                            }
                        ]
                    }
                ],
                "temperature": 0.2
            }
            response = await client.post(
                "https://api.groq.com/openai/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {settings.groq_api_key}",
                    "Content-Type": "application/json"
                },
                json=payload
            )
            if response.status_code != 200:
                print(f"Groq Vision Error ({model}): {response.status_code} - {response.text}")
                return ""
            data = response.json()
            return data["choices"][0]["message"]["content"]
    except Exception as e:
        print(f"Error calling Groq Vision ({model}): {e}")
        return ""

def _parse_gemini_response(output: str) -> dict | None:
    """Parse an LLM JSON reply and normalize it to the AnalysisResult field shapes."""
    if not output:
        return None
    # Remove Markdown code blocks if present
    clean_output = output.strip()
    if clean_output.startswith("```json"):
        clean_output = clean_output[7:-3].strip()
    elif clean_output.startswith("```"):
        clean_output = clean_output[3:-3].strip()
    try:
        data = json.loads(clean_output)
    except Exception as e:
        print(f"JSON Parsing Error: {e}\nRaw Output: {output[:200]}...")
        return None
    # Only dict payloads can be unpacked into AnalysisResult downstream
    if isinstance(data, dict) and data:
        # Handle nesting (LLMs sometimes wrap the result in a key)
        if not all(k in data for k in ["fit_score", "decision", "risks"]):
            for val in data.values():
                if isinstance(val, dict) and any(k in val for k in ["fit_score", "decision", "risks"]):
                    data = val
                    break
        # Ensure strategic_roadmap is a string
        if "strategic_roadmap" in data:
            if isinstance(data["strategic_roadmap"], list):
                data["strategic_roadmap"] = "\n".join([str(item) for item in data["strategic_roadmap"]])
            elif isinstance(data["strategic_roadmap"], dict):
                data["strategic_roadmap"] = json.dumps(data["strategic_roadmap"], indent=2, ensure_ascii=False)
        # Ensure risks is a list of objects
        if "risks" in data and isinstance(data["risks"], list):
            new_risks = []
            for item in data["risks"]:
                if isinstance(item, str):
                    new_risks.append({"title": item, "severity": "Medium", "explanation": item})
                elif isinstance(item, dict):
                    new_risks.append(item)
            data["risks"] = new_risks
        # Ensure action_plan is a list of objects
        if "action_plan" in data and isinstance(data["action_plan"], list):
            new_plan = []
            for item in data["action_plan"]:
                if isinstance(item, str):
                    new_plan.append({"task": item, "priority": "Medium", "owner": "Team", "timeline": "TBD"})
                elif isinstance(item, dict):
                    new_plan.append(item)
            data["action_plan"] = new_plan
        # Ensure fit_score is int
        if "fit_score" in data:
            try:
                data["fit_score"] = int(data["fit_score"])
            except (TypeError, ValueError):
                data["fit_score"] = 0
        return data
    return None

def generate_mock_analysis(tender: Tender, company: CompanyProfile) -> AnalysisResult:
    """Deterministic fallback analysis used when every LLM provider fails."""
    raw = f"{tender.code}:{tender.name}:{company.name}"
    digest = hashlib.sha256(raw.encode("utf-8")).hexdigest()
    # Hash-derived score in the 55-95 range, stable for the same tender/company pair
    score = int(digest[:8], 16) % 41 + 55
    return AnalysisResult(
        fit_score=score,
        decision="Recommended" if score > 75 else "Review Carefully",
        executive_summary=f"Análisis automático para {tender.name}. Se observa un encaje técnico razonable.",
        key_requirements=["Documentación legal", "Experiencia técnica", "Garantía de seriedad"],
        risks=[{"title": "Plazo ajustado", "severity": "Medium", "explanation": "El tiempo de entrega es crítico."}],
        compliance_gaps=["Validar boleta de garantía"],
        action_plan=[{"task": "Revisar bases", "priority": "High", "owner": "Legal", "timeline": "2 días"}],
        proposal_draft="Borrador generado automáticamente...",
        report_markdown="# Reporte de Licitación",
        audit_log=["Iniciando análisis de respaldo...", "Generando datos mock."]
    )

async def generate_analysis(tender: Tender, company: CompanyProfile, document_text: str | None = None, models: dict | None = None) -> AnalysisResult:
    """Run the multi-agent (legal/tech/risk) pipeline and synthesize a final AnalysisResult."""
    chosen = models or {
        "legal": "Llama-3.3-70B (Groq)" if settings.groq_api_key else "Gemini 2.5 Flash",
        "tech": "Llama-3.1-8B (Groq)" if settings.groq_api_key else "Qwen-2.5 (Featherless)",
        "risk": "Llama-3.3-70B (Groq)" if settings.groq_api_key else "Qwen-2.5 (Featherless)"
    }
    audit_messages = ["🚀 Launching Multi-Agent Orchestration Pipeline."]
    agent_outputs = {}
    agent_definitions = {
        "legal": "Experto Legal & Cumplimiento: Evalúa bases administrativas, multas y garantías. Pon especial atención a los ANEXOS de Sustentabilidad y Admisibilidad.",
        "tech": "Ingeniero Técnico: Evalúa arquitectura, stack tecnológico y capacidad de ejecución. Considera si se requieren certificaciones ambientales.",
        "risk": "Estratega Comercial: Evalúa rentabilidad, competencia y riesgos de mercado. Analiza el impacto de los criterios de evaluación ESG en el puntaje final."
    }
    for agent_id, role_desc in agent_definitions.items():
        model_name = chosen.get(agent_id, "Gemini 2.5 Flash")
        audit_messages.append(f"🤖 Agent {agent_id.upper()} calling {model_name}...")
        agent_prompt = f"""
        Actúa como {role_desc}
        Licitación: {tender.name} ({tender.code})
        Empresa: {company.name}
        Contexto Adicional: {document_text[:5000] if document_text else 'No adjunto.'}
        PROPORCIONA TU ANÁLISIS ESPECÍFICO (Máx 200 palabras) EN ESPAÑOL.
        """
        res = await call_gemini_with_model(agent_prompt, model_name=model_name)
        agent_outputs[agent_id] = res or "Análisis no disponible debido a error de conexión."
    audit_messages.append("🧠 Synthesis phase: Consolidating agent insights...")
    synthesis_prompt = f"""
    SISTEMA DE CONSENSO ANDESOPS AI
    Licitación: {tender.name}
    Resultados de Agentes:
    - LEGAL: {agent_outputs.get('legal')}
    - TECH: {agent_outputs.get('tech')}
    - RISK: {agent_outputs.get('risk')}
    Genera el JSON final AnalysisResult con una decisión fundamentada.
    RESPONDE SOLO EL JSON.
    """
    final_json = await call_gemini(synthesis_prompt, is_json=True)
    # Try each remaining provider until one returns a non-empty synthesis
    if not final_json and settings.groq_api_key:
        final_json = await call_groq(synthesis_prompt, model="llama-3.3-70b-versatile")
    if not final_json and settings.featherless_api_key:
        final_json = await call_featherless(synthesis_prompt, model="Qwen/Qwen2.5-72B-Instruct")
    parse_result = _parse_gemini_response(final_json)
    if parse_result:
        try:
            if not parse_result.get("report_markdown"):
                parse_result["report_markdown"] = generate_markdown_report(parse_result)
            if not parse_result.get("proposal_draft") or len(parse_result["proposal_draft"]) < 100:
                audit_messages.append("📝 Generating specialized proposal draft...")
                parse_result["proposal_draft"] = await generate_proposal_draft(parse_result, company)
            result = AnalysisResult(**parse_result)
            result.audit_log = audit_messages + (result.audit_log or [])
            return result
        except Exception as e:
            print(f"Validation Error in generate_analysis: {e}")
    analysis = generate_mock_analysis(tender, company)
    analysis.audit_log = audit_messages + ["⚠️ Synthesis failed, using emergency fallback."]
    return analysis

async def generate_proposal_draft(analysis: dict, company: CompanyProfile) -> str:
    prompt = f"""
    Como experto redactor de propuestas de licitación, genera un borrador profesional (en Markdown) basado en este análisis técnico:
    {analysis.get('executive_summary', 'Analizar bases adjuntas.')}
    Perfil de la Empresa: {company.name} - {company.experience}
    Requisitos Críticos a Abordar: {', '.join(analysis.get('key_requirements', []))}
    Estructura la propuesta en ESPAÑOL con:
    1. Introducción Ejecutiva
    2. Resumen de la Solución Técnica
    3. Aseguramiento de Cumplimiento (Compliance)
    4. Propuesta de Valor Estratégica
    """
    return await call_gemini_with_model(prompt, model_name="Llama-3.3-70B (Groq)" if settings.groq_api_key else "Gemini 2.5 Flash")

async def generate_synthetic_tenders(keyword: str) -> list[Tender]:
    """
    Generates realistic synthetic tenders with coherent bidding documents (bases)
    when official sources are unavailable or empty.
    """
    prompt = f"""
    Genera 4 licitaciones de Mercado Público CHILE realistas para el rubro: {keyword}
    Para cada licitación, genera un JSON con:
    - code: Formato XXXXX-XX-XX26
    - name: Nombre profesional
    - buyer: Una institución pública chilena real
    - description: UN DOCUMENTO EXTENSO de 'Bases Administrativas y Técnicas' (mínimo 300 palabras)
      que incluya: Objeto de licitación, Requisitos técnicos, Plazos, Multas y Criterios de Evaluación.
    - status: 'Publicada'
    - closing_date: ISO date en 2 semanas
    - estimated_amount: Monto en CLP entre 5M y 50M
    - region: Una región de Chile
    RESPONDE SOLO EL JSON (Lista de objetos).
    """
    res = await call_gemini(prompt, is_json=True)
    items = []
    try:
        data = json.loads(res)
        # Handle if LLM wraps in a key
        if isinstance(data, dict):
            for v in data.values():
                if isinstance(v, list):
                    data = v
                    break
        for i in data:
            items.append(Tender(
                code=i.get("code", "000-00-00"),
                name=i.get("name", "Licitación Sintética"),
                description=i.get("description", "Documento de bases en proceso..."),
                buyer=i.get("buyer", "Organismo Público"),
                status=i.get("status", "Publicada"),
                closing_date=i.get("closing_date", datetime.now().isoformat()),
                estimated_amount=float(i.get("estimated_amount", 0)),
                source="AndesOps AI - Intelligent Discovery",
                region=i.get("region", "Nacional"),
                sector="Privado/Público",
                items=[],
                attachments=[{
                    "name": "Bases_Tecnicas_y_Administrativas.pdf",
                    "url": "#synthetic-doc",
                    "type": "pdf"
                }]
            ))
    except Exception as e:
        print(f"Error generating synthetic tenders: {e}")
    return items
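
if __name__ == "__main__":
    # Illustrative, self-contained sketch only (not part of the service API): shows how
    # _parse_gemini_response strips Markdown fences, unwraps a nested key and normalizes
    # field types. The sample payload below is invented for demonstration purposes.
    sample = (
        "```json\n"
        '{"analysis": {"fit_score": "82", "decision": "Recommended",'
        ' "risks": ["Plazo ajustado"], "action_plan": ["Revisar bases"]}}\n'
        "```"
    )
    # Expected shape: fit_score coerced to int, risks/action_plan expanded into dicts.
    print(_parse_gemini_response(sample))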