| """ |
| Agente de processamento de contexto inicial para sugestão de queries SQL |
| """ |
| import logging |
| import asyncio |
| from typing import Optional, Dict, Any |
| from langchain_openai import ChatOpenAI |
| from langchain_anthropic import ChatAnthropic |
| from langchain_google_genai import ChatGoogleGenerativeAI |
| from langchain_community.llms import HuggingFaceEndpoint |
| from langchain.schema import HumanMessage |
|
|
| from utils.config import ( |
| TEMPERATURE, |
| AVAILABLE_MODELS, |
| OPENAI_MODELS, |
| ANTHROPIC_MODELS, |
| GOOGLE_MODELS, |
| REFINEMENT_MODELS |
| ) |
|
|
|
|
| class ProcessingAgentManager: |
| """ |
| Gerenciador do agente de processamento de contexto inicial |
| """ |
| |
| def __init__(self, model_name: str = "gpt-4o-mini"): |
| self.model_name = model_name |
| self.llm = None |
| self._initialize_llm() |
| |
| def _initialize_llm(self): |
| """Inicializa o modelo LLM baseado no nome fornecido""" |
| try: |
| |
| model_id = AVAILABLE_MODELS.get(self.model_name, self.model_name) |
| |
| |
| if model_id not in AVAILABLE_MODELS.values(): |
| model_id = REFINEMENT_MODELS.get(self.model_name, model_id) |
| |
| |
| if model_id in OPENAI_MODELS: |
| |
| if model_id == "o3-mini": |
| |
| self.llm = ChatOpenAI(model=model_id) |
| else: |
| |
| self.llm = ChatOpenAI(model=model_id, temperature=TEMPERATURE) |
| |
| elif model_id in ANTHROPIC_MODELS: |
| |
| self.llm = ChatAnthropic( |
| model=model_id, |
| temperature=TEMPERATURE, |
| max_tokens=4096, |
| max_retries=2, |
| timeout=60.0 |
| ) |
|
|
| elif model_id in GOOGLE_MODELS: |
| |
| self.llm = ChatGoogleGenerativeAI( |
| model=model_id, |
| temperature=TEMPERATURE, |
| max_tokens=4096, |
| max_retries=2, |
| timeout=60.0 |
| ) |
|
|
| else: |
| |
| self.llm = HuggingFaceEndpoint( |
| endpoint_url=f"https://api-inference.huggingface.co/models/{model_id}", |
| temperature=TEMPERATURE, |
| max_new_tokens=1024, |
| timeout=120 |
| ) |
| |
| logging.info(f"Processing Agent inicializado com modelo {model_id}") |
| |
| except Exception as e: |
| logging.error(f"Erro ao inicializar Processing Agent: {e}") |
| |
| self.llm = ChatOpenAI(model="gpt-4o-mini", temperature=TEMPERATURE) |
| logging.warning("Usando GPT-4o-mini como fallback") |
| |
| def recreate_llm(self, new_model: str): |
| """ |
| Recria o LLM com novo modelo |
| |
| Args: |
| new_model: Nome do novo modelo |
| """ |
| old_model = self.model_name |
| self.model_name = new_model |
| self._initialize_llm() |
| logging.info(f"[PROCESSING] Modelo alterado de '{old_model}' para '{new_model}'") |
| |
| async def process_context(self, context_prompt: str) -> Dict[str, Any]: |
| """ |
| Processa o contexto inicial e retorna sugestão de query |
| |
| Args: |
| context_prompt: Prompt com contexto e pergunta do usuário |
| |
| Returns: |
| Resultado do processamento com pergunta e sugestão de query |
| """ |
| try: |
| logging.info(f"[PROCESSING] ===== INICIANDO PROCESSING AGENT =====") |
| logging.info(f"[PROCESSING] Modelo utilizado: {self.model_name}") |
| logging.info(f"[PROCESSING] Tamanho do contexto: {len(context_prompt)} caracteres") |
|
|
| |
| if hasattr(self.llm, 'ainvoke'): |
| |
| logging.info(f"[PROCESSING] Executando chamada assíncrona para {self.model_name}") |
| response = await self.llm.ainvoke([HumanMessage(content=context_prompt)]) |
| output = response.content |
| else: |
| |
| logging.info(f"[PROCESSING] Executando chamada síncrona para {self.model_name}") |
| response = await asyncio.get_event_loop().run_in_executor( |
| None, |
| lambda: self.llm.invoke([HumanMessage(content=context_prompt)]) |
| ) |
| output = response.content if hasattr(response, 'content') else str(response) |
|
|
| logging.info(f"[PROCESSING] Resposta recebida do modelo ({len(output)} caracteres)") |
|
|
| |
| processed_result = self._parse_processing_response(output) |
|
|
| result = { |
| "success": True, |
| "output": output, |
| "processed_question": processed_result.get("question", ""), |
| "suggested_query": processed_result.get("query", ""), |
| "query_observations": processed_result.get("observations", ""), |
| "model_used": self.model_name |
| } |
|
|
| |
| if result['suggested_query']: |
| logging.info(f"[PROCESSING] ✅ Query SQL extraída com sucesso") |
| else: |
| logging.warning(f"[PROCESSING] ❌ Nenhuma query SQL foi extraída") |
|
|
| logging.info(f"[PROCESSING] ===== PROCESSING AGENT CONCLUÍDO =====") |
| return result |
| |
| except Exception as e: |
| error_msg = f"Erro no Processing Agent: {e}" |
| logging.error(error_msg) |
| |
| return { |
| "success": False, |
| "output": error_msg, |
| "processed_question": "", |
| "suggested_query": "", |
| "model_used": self.model_name |
| } |
| |
| def _parse_processing_response(self, response: str) -> Dict[str, str]: |
| """ |
| Extrai query SQL e observações da resposta |
| |
| Args: |
| response: Resposta do modelo |
| |
| Returns: |
| Dicionário com query e observações extraídas |
| """ |
| try: |
| import re |
|
|
| query = "" |
| observations = "" |
|
|
| |
| obs_match = re.search(r'Observações:\s*(.*?)(?:\n|$)', response, re.IGNORECASE) |
| if obs_match: |
| observations = obs_match.group(1).strip() |
|
|
| |
| sql_patterns = [ |
| |
| r'```sql\s*(.*?)\s*```', |
| |
| r'```\s*(WITH.*?)\s*```', |
| r'```\s*(SELECT.*?)\s*```', |
| |
| r'Opção de querySQL:\s*(WITH.*?)(?=Observações:|$)', |
| r'Opção de querySQL:\s*(SELECT.*?)(?=Observações:|$)', |
| |
| r'(WITH\s+.*?;)', |
| r'(SELECT\s+.*?;)' |
| ] |
|
|
| for pattern in sql_patterns: |
| match = re.search(pattern, response, re.DOTALL | re.IGNORECASE) |
| if match: |
| query = match.group(1).strip() |
| break |
|
|
| |
| if query: |
| |
| query = query.replace('```', '').replace('sql', '').strip() |
|
|
| |
| query = query.strip('\n').strip() |
|
|
| |
| if not observations: |
| obs_patterns = [ |
| r'Observações:\s*(.*)', |
| r'Observacoes:\s*(.*)', |
| ] |
| for pattern in obs_patterns: |
| match = re.search(pattern, response, re.IGNORECASE | re.DOTALL) |
| if match: |
| observations = match.group(1).strip() |
| break |
|
|
| return { |
| "question": "", |
| "query": query, |
| "observations": observations |
| } |
|
|
| except Exception as e: |
| logging.error(f"Erro ao extrair query e observações: {e}") |
| return { |
| "question": "", |
| "query": "", |
| "observations": "" |
| } |
|
|
|
|
| def get_default_processing_agent(model_name: str = "gpt-4o-mini") -> ProcessingAgentManager: |
| """ |
| Cria um Processing Agent com configurações padrão |
| |
| Args: |
| model_name: Nome do modelo a usar |
| |
| Returns: |
| ProcessingAgentManager configurado |
| """ |
| return ProcessingAgentManager(model_name) |
|
|