RepoPeft-data / oracle_context_cache / JoshuaC215__agent-service-toolkit.json
{"repo": "JoshuaC215/agent-service-toolkit", "n_pairs": 115, "version": "v2_function_scoped", "contexts": {"tests/service/test_service.py::21": {"resolved_imports": ["src/agents/agents.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AIMessage", "ChatMessage", "json"], "enclosing_function": "test_invoke", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 10, "n_files_resolved": 3, "n_chars_extracted": 393}, "tests/voice/providers/test_openai_stt.py::55": {"resolved_imports": ["src/voice/providers/openai_stt.py"], "used_names": ["OpenAISTT", "patch"], "enclosing_function": "test_transcribe_api_error", "extracted_code": "# Source: src/voice/providers/openai_stt.py\nclass OpenAISTT:\n \"\"\"OpenAI Whisper STT provider.\"\"\"\n\n def __init__(self, api_key: str | None = None):\n \"\"\"Initialize OpenAI STT.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n\n Raises:\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n logger.info(\"OpenAI STT initialized\")\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio using OpenAI Whisper.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n\n Note:\n Errors are logged but not raised - returns empty string instead.\n This allows graceful degradation in user-facing applications.\n \"\"\"\n try:\n # Reset file pointer to beginning (may have been read elsewhere)\n audio_file.seek(0)\n\n # Call OpenAI Whisper API for transcription\n result = self.client.audio.transcriptions.create(\n model=\"whisper-1\", file=audio_file, response_format=\"text\"\n )\n\n # Clean up whitespace from result\n transcribed = result.strip()\n logger.info(f\"OpenAI STT: transcribed {len(transcribed)} chars\")\n return transcribed\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI STT failed: {e}\", exc_info=True)\n # Return empty string to allow graceful degradation\n return \"\"", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1726}, "tests/agents/test_github_mcp_agent.py::66": {"resolved_imports": ["src/agents/github_mcp_agent/github_mcp_agent.py", "src/core/settings.py"], "used_names": ["AsyncMock", "GitHubMCPAgent", "Mock", "Tool", "patch", "pytest", "settings"], "enclosing_function": "test_load_with_github_pat", "extracted_code": "# Source: src/agents/github_mcp_agent/github_mcp_agent.py\nclass GitHubMCPAgent(LazyLoadingAgent):\n \"\"\"GitHub MCP Agent with async initialization.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._mcp_tools: list[BaseTool] = []\n self._mcp_client: MultiServerMCPClient | None = None\n\n async def load(self) -> None:\n \"\"\"Initialize the GitHub MCP agent by loading MCP tools.\"\"\"\n if not settings.GITHUB_PAT:\n logger.info(\"GITHUB_PAT is not set, GitHub MCP agent will have no tools\")\n self._mcp_tools = []\n self._graph = self._create_graph()\n self._loaded = True\n return\n\n try:\n # Initialize MCP client directly\n github_pat = 
settings.GITHUB_PAT.get_secret_value()\n connections = {\n \"github\": StreamableHttpConnection(\n transport=\"streamable_http\",\n url=settings.MCP_GITHUB_SERVER_URL,\n headers={\n \"Authorization\": f\"Bearer {github_pat}\",\n },\n )\n }\n\n self._mcp_client = MultiServerMCPClient(connections)\n logger.info(\"MCP client initialized successfully\")\n\n # Get tools from the client\n self._mcp_tools = await self._mcp_client.get_tools()\n logger.info(f\"GitHub MCP agent initialized with {len(self._mcp_tools)} tools\")\n\n except Exception as e:\n logger.error(f\"Failed to initialize GitHub MCP agent: {e}\")\n self._mcp_tools = []\n self._mcp_client = None\n\n # Create and store the graph\n self._graph = self._create_graph()\n self._loaded = True\n\n def _create_graph(self) -> CompiledStateGraph:\n \"\"\"Create the GitHub MCP agent graph.\"\"\"\n model = get_model(settings.DEFAULT_MODEL)\n\n return create_agent(\n model=model,\n tools=self._mcp_tools,\n name=\"github-mcp-agent\",\n system_prompt=prompt,\n )\n\n\n# Source: src/core/settings.py\nsettings = Settings()", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 2116}, "tests/service/test_service_message_generator.py::68": {"resolved_imports": ["src/schema/__init__.py", "src/agents/langgraph_supervisor_hierarchy_agent.py", "src/service/service.py"], "used_names": ["AIMessage", "ChatMessage", "MemorySaver", "StreamInput", "ToolCall", "json", "message_generator", "patch", "pytest", "workflow"], "enclosing_function": "test_three_layer_supervisor_hierarchy_agent_with_fake_model", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]\n\n\n# Source: src/agents/langgraph_supervisor_hierarchy_agent.py\ndef workflow(chosen_model):\n math_agent = create_agent(\n model=chosen_model,\n tools=[add, multiply],\n name=\"sub-agent-math_expert\", # Identify the graph node as a sub-agent\n system_prompt=\"You are a math expert. Always use one tool at a time.\",\n ).with_config(tags=[\"skip_stream\"])\n\n research_agent = (\n create_supervisor(\n [math_agent],\n model=chosen_model,\n tools=[web_search],\n prompt=\"You are a world class researcher with access to web search. Do not do any math, you have a math expert for that. \",\n supervisor_name=\"supervisor-research_expert\", # Identify the graph node as a supervisor to the math agent\n )\n .compile(\n name=\"sub-agent-research_expert\"\n ) # Identify the graph node as a sub-agent to the main supervisor\n .with_config(tags=[\"skip_stream\"])\n ) # Stream tokens are ignored for sub-agents in the UI\n\n # Create supervisor workflow\n return create_supervisor(\n [research_agent],\n model=chosen_model,\n prompt=(\n \"You are a team supervisor managing a research expert with math capabilities.\"\n \"For current events, use research_agent. 
\"\n ),\n add_handoff_back_messages=True,\n # UI now expects this to be True so we don't have to guess when a handoff back occurs\n output_mode=\"full_history\", # otherwise when reloading conversations, the sub-agents' messages are not included\n )\n\n\n# Source: src/service/service.py\nasync def message_generator(\n user_input: StreamInput, agent_id: str = DEFAULT_AGENT\n) -> AsyncGenerator[str, None]:\n \"\"\"\n Generate a stream of messages from the agent.\n\n This is the workhorse method for the /stream endpoint.\n \"\"\"\n agent: AgentGraph = get_agent(agent_id)\n kwargs, run_id = await _handle_input(user_input, agent)\n\n try:\n # Process streamed events from the graph and yield messages over the SSE stream.\n async for stream_event in agent.astream(\n **kwargs, stream_mode=[\"updates\", \"messages\", \"custom\"], subgraphs=True\n ):\n if not isinstance(stream_event, tuple):\n continue\n # Handle different stream event structures based on subgraphs\n if len(stream_event) == 3:\n # With subgraphs=True: (node_path, stream_mode, event)\n _, stream_mode, event = stream_event\n else:\n # Without subgraphs: (stream_mode, event)\n stream_mode, event = stream_event\n new_messages = []\n if stream_mode == \"updates\":\n for node, updates in event.items():\n # A simple approach to handle agent interrupts.\n # In a more sophisticated implementation, we could add\n # some structured ChatMessage type to return the interrupt value.\n if node == \"__interrupt__\":\n interrupt: Interrupt\n for interrupt in updates:\n new_messages.append(AIMessage(content=interrupt.value))\n continue\n updates = updates or {}\n update_messages = updates.get(\"messages\", [])\n # special cases for using langgraph-supervisor library\n if \"supervisor\" in node or \"sub-agent\" in node:\n # the only tools that come from the actual agent are the handoff and handback tools\n if isinstance(update_messages[-1], ToolMessage):\n if \"sub-agent\" in node and len(update_messages) > 1:\n # If this is a sub-agent, we want to keep the last 2 messages - the handback tool, and it's result\n update_messages = update_messages[-2:]\n else:\n # If this is a supervisor, we want to keep the last message only - the handoff result. The tool comes from the 'agent' node.\n update_messages = [update_messages[-1]]\n else:\n update_messages = []\n new_messages.extend(update_messages)\n\n if stream_mode == \"custom\":\n new_messages = [event]\n\n # LangGraph streaming may emit tuples: (field_name, field_value)\n # e.g. 
('content', <str>), ('tool_calls', [ToolCall,...]), ('additional_kwargs', {...}), etc.\n # We accumulate only supported fields into `parts` and skip unsupported metadata.\n # More info at: https://langchain-ai.github.io/langgraph/cloud/how-tos/stream_messages/\n processed_messages = []\n current_message: dict[str, Any] = {}\n for message in new_messages:\n if isinstance(message, tuple):\n key, value = message\n # Store parts in temporary dict\n current_message[key] = value\n else:\n # Add complete message if we have one in progress\n if current_message:\n processed_messages.append(_create_ai_message(current_message))\n current_message = {}\n processed_messages.append(message)\n\n # Add any remaining message parts\n if current_message:\n processed_messages.append(_create_ai_message(current_message))\n\n for message in processed_messages:\n try:\n chat_message = langchain_to_chat_message(message)\n chat_message.run_id = str(run_id)\n except Exception as e:\n logger.error(f\"Error parsing message: {e}\")\n yield f\"data: {json.dumps({'type': 'error', 'content': 'Unexpected error'})}\\n\\n\"\n continue\n # LangGraph re-sends the input message, which feels weird, so drop it\n if chat_message.type == \"human\" and chat_message.content == user_input.message:\n continue\n yield f\"data: {json.dumps({'type': 'message', 'content': chat_message.model_dump()})}\\n\\n\"\n\n if stream_mode == \"messages\":\n if not user_input.stream_tokens:\n continue\n msg, metadata = event\n if \"skip_stream\" in metadata.get(\"tags\", []):\n continue\n # For some reason, astream(\"messages\") causes non-LLM nodes to send extra messages.\n # Drop them.\n if not isinstance(msg, AIMessageChunk):\n continue\n content = remove_tool_calls(msg.content)\n if content:\n # Empty content in the context of OpenAI usually means\n # that the model is asking for a tool to be invoked.\n # So we only print non-empty content.\n yield f\"data: {json.dumps({'type': 'token', 'content': convert_message_content_to_string(content)})}\\n\\n\"\n except Exception as e:\n logger.error(f\"Error in message generator: {e}\")\n yield f\"data: {json.dumps({'type': 'error', 'content': 'Internal server error'})}\\n\\n\"\n finally:\n yield \"data: [DONE]\\n\\n\"", "n_imports_parsed": 9, "n_files_resolved": 3, "n_chars_extracted": 8204}, "tests/core/test_settings.py::32": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["Settings"], "enclosing_function": "test_settings_default_values", "extracted_code": "# Source: src/core/settings.py\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: 
set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, BeforeValidator(check_str_is_http)] = \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if 
self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 8669}, "tests/voice/test_manager.py::15": {"resolved_imports": ["src/voice/manager.py"], "used_names": ["Mock", "VoiceManager"], "enclosing_function": "test_init_with_both_stt_and_tts", "extracted_code": "# Source: src/voice/manager.py\nclass VoiceManager:\n \"\"\"Streamlit convenience layer for voice features.\n\n This class provides Streamlit-specific methods for voice input/output.\n It handles UI feedback (spinners, errors) while delegating actual\n voice processing to STT and TTS modules.\n\n Example:\n >>> voice = VoiceManager.from_env()\n >>>\n >>> if voice:\n ... user_input = voice.get_chat_input()\n ... if user_input:\n ... with st.chat_message(\"ai\"):\n ... 
voice.render_message(\"Hello!\")\n \"\"\"\n\n def __init__(self, stt: SpeechToText | None = None, tts: TextToSpeech | None = None):\n \"\"\"Initialize VoiceManager.\n\n Args:\n stt: SpeechToText instance (None to disable STT)\n tts: TextToSpeech instance (None to disable TTS)\n \"\"\"\n self.stt = stt\n self.tts = tts\n\n logger.info(\n f\"VoiceManager: STT={'enabled' if stt else 'disabled'}, \"\n f\"TTS={'enabled' if tts else 'disabled'}\"\n )\n\n @classmethod\n def from_env(cls) -> Optional[\"VoiceManager\"]:\n \"\"\"Create VoiceManager from environment variables.\n\n Reads VOICE_STT_PROVIDER and VOICE_TTS_PROVIDER to configure\n speech-to-text and text-to-speech providers.\n\n Returns:\n VoiceManager if either STT or TTS is configured, None otherwise\n\n Example:\n >>> # In .env:\n >>> # VOICE_STT_PROVIDER=openai\n >>> # VOICE_TTS_PROVIDER=openai\n >>>\n >>> voice = VoiceManager.from_env()\n >>> # Returns configured VoiceManager or None if disabled\n \"\"\"\n # Create STT and TTS from environment\n stt = SpeechToText.from_env()\n tts = TextToSpeech.from_env()\n\n # If both disabled, return None (no voice features)\n if not stt and not tts:\n logger.debug(\"Voice features not configured\")\n return None\n\n return cls(stt=stt, tts=tts)\n\n def _transcribe_audio(self, audio) -> str | None:\n \"\"\"Transcribe audio with UI feedback.\n\n Shows spinner during transcription and error message on failure.\n\n Args:\n audio: Audio file object from Streamlit chat input\n\n Returns:\n Transcribed text, or None if transcription failed\n \"\"\"\n # Defensive check (should not happen if called correctly)\n if not self.stt:\n st.error(\"⚠️ Speech-to-text not configured.\")\n return None\n\n # Show spinner while transcribing\n with st.spinner(\"🎤 Transcribing audio...\"):\n transcribed = self.stt.transcribe(audio)\n\n # Check if transcription succeeded\n if not transcribed:\n st.error(\"⚠️ Transcription failed. 
Please try again or type your message.\")\n return None\n\n return transcribed\n\n def get_chat_input(self, placeholder: str = \"Your message\") -> str | None:\n \"\"\"Get chat input with optional voice transcription.\n\n Handles Streamlit UI including audio input widget and transcription\n feedback (spinner, errors).\n\n Args:\n placeholder: Placeholder text for input\n\n Returns:\n User's message (transcribed if audio, otherwise text), or None if no input\n \"\"\"\n # No STT - use regular text input\n if not self.stt:\n return st.chat_input(placeholder)\n\n # STT enabled - use audio-capable input\n chat_value = st.chat_input(placeholder, accept_audio=True)\n\n if not chat_value:\n return None\n\n # Handle string return (text-only input)\n if isinstance(chat_value, str):\n return chat_value\n\n # Handle object/dict return (audio-capable input)\n # Extract text - support both attribute and dict access\n text_content = None\n if hasattr(chat_value, \"text\"):\n text_content = chat_value.text\n elif isinstance(chat_value, dict):\n text_content = chat_value.get(\"text\", \"\")\n\n # Extract audio - support both attribute and dict access\n audio_content = None\n if hasattr(chat_value, \"audio\"):\n audio_content = chat_value.audio\n elif isinstance(chat_value, dict):\n audio_content = chat_value.get(\"audio\")\n\n # If audio is provided, transcribe it\n if audio_content:\n return self._transcribe_audio(audio_content)\n\n # If no audio, return the text content\n if text_content:\n return text_content\n\n # No text or audio provided\n return None\n\n def render_message(self, content: str, container=None, audio_only: bool = False) -> None:\n \"\"\"Render message with optional TTS audio.\n\n Handles Streamlit UI including text display and audio player.\n Saves generated audio in session state so it persists across reruns.\n\n Args:\n content: Message content to display\n container: Streamlit container (defaults to current context)\n audio_only: If True, only render audio (text already displayed)\n \"\"\"\n if container is None:\n container = st\n\n # Show text unless audio_only mode (for streaming where text is already shown)\n if not audio_only:\n container.write(content)\n\n # Add audio if TTS enabled and content is not empty\n if self.tts and content.strip():\n # Show placeholder while generating audio\n placeholder = container.empty()\n with placeholder:\n st.caption(\"🎙️ Generating audio...\")\n\n # Generate TTS audio\n audio = self.tts.generate(content)\n\n # Save audio in session state for the last AI message\n # This allows it to persist across st.rerun() calls\n if audio:\n st.session_state.last_audio = {\"data\": audio, \"format\": self.tts.get_format()}\n\n # Replace placeholder with audio player or error message\n if audio:\n placeholder.audio(audio, format=self.tts.get_format())\n else:\n placeholder.caption(\"🔇 Audio generation unavailable\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 6225}, "tests/voice/providers/test_openai_tts.py::58": {"resolved_imports": ["src/voice/providers/openai_tts.py"], "used_names": ["OpenAITTS", "patch"], "enclosing_function": "test_generate_success", "extracted_code": "# Source: src/voice/providers/openai_tts.py\nclass OpenAITTS:\n \"\"\"OpenAI TTS provider.\"\"\"\n\n # API constraints\n MAX_TEXT_LENGTH = 4096\n MIN_TEXT_LENGTH = 3\n\n # Available configuration options\n VALID_VOICES = [\"alloy\", \"echo\", \"fable\", \"onyx\", \"nova\", \"shimmer\"]\n VALID_MODELS = [\"tts-1\", \"tts-1-hd\"]\n\n def __init__(self, api_key: str 
| None = None, voice: str = \"alloy\", model: str = \"tts-1\"):\n \"\"\"Initialize OpenAI TTS.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n voice: Voice name (alloy, echo, fable, onyx, nova, shimmer)\n model: Model name (tts-1 or tts-1-hd)\n\n Raises:\n ValueError: If voice or model is invalid\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Validate voice parameter\n if voice not in self.VALID_VOICES:\n raise ValueError(f\"Invalid voice '{voice}'. Must be one of {self.VALID_VOICES}\")\n\n # Validate model parameter\n if model not in self.VALID_MODELS:\n raise ValueError(f\"Invalid model '{model}'. Must be one of {self.VALID_MODELS}\")\n\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n self.voice = voice\n self.model = model\n\n logger.info(f\"OpenAI TTS initialized: voice={voice}, model={model}\")\n\n def _validate_and_prepare_text(self, text: str) -> str | None:\n \"\"\"Validate and prepare text for TTS generation.\n\n Args:\n text: Raw text input\n\n Returns:\n Prepared text ready for TTS, or None if text is too short\n\n Note:\n - Strips whitespace\n - Returns None if text is below minimum length\n - Truncates text if above maximum length\n \"\"\"\n # Remove leading/trailing whitespace\n text = text.strip()\n\n # Skip very short text (not worth API call)\n if len(text) < self.MIN_TEXT_LENGTH:\n logger.debug(f\"OpenAI TTS: skipping short text ({len(text)} chars)\")\n return None\n\n # Truncate to API limit if needed\n if len(text) > self.MAX_TEXT_LENGTH:\n logger.warning(\n f\"OpenAI TTS: truncating from {len(text)} to {self.MAX_TEXT_LENGTH} chars\"\n )\n text = text[: self.MAX_TEXT_LENGTH]\n\n return text\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n MP3 audio bytes, or None if text is too short or generation fails\n\n Note:\n - Text shorter than 3 chars returns None\n - Text longer than 4096 chars is truncated\n - Errors are logged but not raised - returns None instead\n \"\"\"\n # Validate and prepare text\n prepared_text = self._validate_and_prepare_text(text)\n if not prepared_text:\n return None\n\n try:\n # Call OpenAI TTS API\n response = self.client.audio.speech.create(\n model=self.model,\n voice=self.voice,\n input=prepared_text,\n response_format=\"mp3\",\n )\n\n # Extract audio bytes from response\n audio_bytes = response.content\n logger.info(f\"OpenAI TTS: generated {len(audio_bytes)} bytes\")\n return audio_bytes\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI TTS failed: {e}\", exc_info=True)\n # Return None to allow graceful degradation\n return None\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type).\n\n Returns:\n MIME type string for generated audio\n \"\"\"\n return \"audio/mp3\"", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 3890}, "tests/voice/providers/test_openai_stt.py::13": {"resolved_imports": ["src/voice/providers/openai_stt.py"], "used_names": ["OpenAISTT", "patch"], "enclosing_function": "test_init_with_api_key", "extracted_code": "# Source: src/voice/providers/openai_stt.py\nclass OpenAISTT:\n \"\"\"OpenAI Whisper STT provider.\"\"\"\n\n def __init__(self, api_key: str | None = None):\n \"\"\"Initialize OpenAI STT.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n\n Raises:\n Exception: If OpenAI client initialization fails\n 
\"\"\"\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n logger.info(\"OpenAI STT initialized\")\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio using OpenAI Whisper.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n\n Note:\n Errors are logged but not raised - returns empty string instead.\n This allows graceful degradation in user-facing applications.\n \"\"\"\n try:\n # Reset file pointer to beginning (may have been read elsewhere)\n audio_file.seek(0)\n\n # Call OpenAI Whisper API for transcription\n result = self.client.audio.transcriptions.create(\n model=\"whisper-1\", file=audio_file, response_format=\"text\"\n )\n\n # Clean up whitespace from result\n transcribed = result.strip()\n logger.info(f\"OpenAI STT: transcribed {len(transcribed)} chars\")\n return transcribed\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI STT failed: {e}\", exc_info=True)\n # Return empty string to allow graceful degradation\n return \"\"", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1726}, "tests/integration/test_docker_e2e.py::33": {"resolved_imports": ["src/client/__init__.py"], "used_names": ["AppTest", "pytest"], "enclosing_function": "test_service_with_app", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/core/test_settings.py::113": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["AzureOpenAIModelName", "Settings", "os", "patch"], "enclosing_function": "test_settings_with_azure_openai_key", "extracted_code": "# Source: src/core/settings.py\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, BeforeValidator(check_str_is_http)] 
= \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n 
self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"\n\n\n# Source: src/schema/models.py\nclass AzureOpenAIModelName(StrEnum):\n \"\"\"Azure OpenAI model names\"\"\"\n\n AZURE_GPT_4O = \"azure-gpt-4o\"\n AZURE_GPT_4O_MINI = \"azure-gpt-4o-mini\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 8853}, "tests/core/test_settings.py::96": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["Settings"], "enclosing_function": "test_settings_is_dev", "extracted_code": "# Source: src/core/settings.py\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP 
Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, BeforeValidator(check_str_is_http)] = \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case 
Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 8669}, "tests/voice/test_tts.py::38": {"resolved_imports": ["src/voice/tts.py"], "used_names": ["TextToSpeech", "os", "patch"], "enclosing_function": "test_from_env_provider_not_set", "extracted_code": "# Source: src/voice/tts.py\nclass TextToSpeech:\n \"\"\"Text-to-speech factory.\n\n Loads and delegates to specific TTS provider implementations.\n\n Example:\n >>> tts = TextToSpeech(provider=\"openai\", voice=\"nova\")\n >>> audio = tts.generate(\"Hello world\")\n >>>\n >>> # Or from environment\n >>> tts = TextToSpeech.from_env()\n >>> if tts:\n ... 
audio = tts.generate(\"Hello world\")\n \"\"\"\n\n def __init__(self, provider: Provider = \"openai\", api_key: str | None = None, **config):\n \"\"\"Initialize TTS with specified provider.\n\n Args:\n provider: Provider name (\"openai\", \"elevenlabs\", etc.)\n api_key: API key (uses env var if not provided)\n **config: Provider-specific configuration\n OpenAI: voice=\"alloy\", model=\"tts-1\"\n ElevenLabs: voice_id=\"...\", model_id=\"...\"\n\n Raises:\n ValueError: If provider is unknown\n \"\"\"\n self._provider_name = provider\n\n # Resolve API key from parameter or environment\n resolved_api_key = self._get_api_key(provider, api_key)\n\n # Load and configure the provider\n self._provider = self._load_provider(provider, resolved_api_key, config)\n\n logger.info(f\"TextToSpeech created with provider={provider}\")\n\n def _get_api_key(self, provider: Provider, api_key: str | None) -> str | None:\n \"\"\"Get API key from parameter or environment.\n\n Args:\n provider: Provider name\n api_key: API key from parameter (takes precedence)\n\n Returns:\n Resolved API key or None\n \"\"\"\n # If API key provided explicitly, use it\n if api_key:\n return api_key\n\n # Otherwise, get from environment based on provider\n match provider:\n case \"openai\":\n return os.getenv(\"OPENAI_API_KEY\")\n case \"elevenlabs\":\n return os.getenv(\"ELEVENLABS_API_KEY\")\n case _:\n return None\n\n def _load_provider(self, provider: Provider, api_key: str | None, config: dict):\n \"\"\"Load the appropriate TTS provider implementation.\n\n Args:\n provider: Provider name\n api_key: Resolved API key\n config: Provider-specific configuration\n\n Returns:\n Provider instance\n\n Raises:\n ValueError: If provider is unknown\n NotImplementedError: If provider not yet implemented\n \"\"\"\n match provider:\n case \"openai\":\n from voice.providers.openai_tts import OpenAITTS\n\n # Extract OpenAI-specific config with defaults\n voice = config.get(\"voice\", \"alloy\")\n model = config.get(\"model\", \"tts-1\")\n\n return OpenAITTS(api_key=api_key, voice=voice, model=model)\n\n case \"elevenlabs\":\n # Example for future extensions: to add ElevenLabs support, implement ElevenLabsTTS provider and uncomment:\n # from voice.providers.elevenlabs_tts import ElevenLabsTTS\n # voice_id = config.get(\"voice_id\")\n # model_id = config.get(\"model_id\", \"eleven_monolingual_v1\")\n # return ElevenLabsTTS(api_key=api_key, voice_id=voice_id, model_id=model_id)\n raise NotImplementedError(\"ElevenLabs TTS provider not yet implemented\")\n\n case _:\n # Catch-all for unknown providers\n raise ValueError(f\"Unknown TTS provider: {provider}. Available providers: openai\")\n\n @property\n def provider(self) -> str:\n \"\"\"Get the provider name.\n\n Returns:\n Provider name string\n \"\"\"\n return self._provider_name\n\n @classmethod\n def from_env(cls) -> \"TextToSpeech | None\":\n \"\"\"Create TTS from environment variables.\n\n Reads VOICE_TTS_PROVIDER env var to determine which provider to use.\n Returns None if not configured.\n\n Returns:\n TextToSpeech instance or None\n\n Example:\n >>> # In .env: VOICE_TTS_PROVIDER=openai\n >>> tts = TextToSpeech.from_env()\n >>> if tts:\n ... 
audio = tts.generate(\"Hello world\")\n \"\"\"\n provider = os.getenv(\"VOICE_TTS_PROVIDER\")\n\n # If provider not set, voice features are disabled\n if not provider:\n logger.debug(\"VOICE_TTS_PROVIDER not set, TTS disabled\")\n return None\n\n try:\n # Create instance with provider from environment\n # Validates provider and raises ValueError if invalid\n return cls(provider=cast(Provider, provider))\n except Exception as e:\n # Log error but don't crash - allow app to continue without voice\n logger.error(f\"Failed to create TTS provider: {e}\", exc_info=True)\n return None\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Delegates to the underlying provider implementation.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n Audio bytes (format depends on provider), or None on failure\n \"\"\"\n return self._provider.generate(text)\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type) for this provider.\n\n Returns:\n MIME type string (e.g., \"audio/mp3\")\n \"\"\"\n return self._provider.get_format()", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 5482}, "tests/service/test_service_streaming.py::55": {"resolved_imports": ["src/service/service.py"], "used_names": ["ValidationError", "_create_ai_message", "pytest"], "enclosing_function": "test_create_ai_message_missing_required_content_raises", "extracted_code": "# Source: src/service/service.py\ndef _create_ai_message(parts: dict) -> AIMessage:\n sig = inspect.signature(AIMessage)\n valid_keys = set(sig.parameters)\n filtered = {k: v for k, v in parts.items() if k in valid_keys}\n return AIMessage(**filtered)", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 258}, "tests/voice/test_stt.py::22": {"resolved_imports": ["src/voice/stt.py"], "used_names": ["SpeechToText", "pytest"], "enclosing_function": "test_init_with_invalid_provider", "extracted_code": "# Source: src/voice/stt.py\nclass SpeechToText:\n \"\"\"Speech-to-text factory.\n\n Loads and delegates to specific STT provider implementations.\n\n Example:\n >>> stt = SpeechToText(provider=\"openai\")\n >>> text = stt.transcribe(audio_file)\n >>>\n >>> # Or from environment\n >>> stt = SpeechToText.from_env()\n >>> if stt:\n ... 
text = stt.transcribe(audio_file)\n \"\"\"\n\n def __init__(self, provider: Provider = \"openai\", api_key: str | None = None, **config):\n \"\"\"Initialize STT with specified provider.\n\n Args:\n provider: Provider name (\"openai\", \"deepgram\", etc.)\n api_key: API key (uses env var if not provided)\n **config: Provider-specific configuration\n\n Raises:\n ValueError: If provider is unknown\n \"\"\"\n self._provider_name = provider\n\n # Resolve API key from parameter or environment\n resolved_api_key = self._get_api_key(provider, api_key)\n\n # Load and configure the provider\n self._provider = self._load_provider(provider, resolved_api_key, config)\n\n logger.info(f\"SpeechToText created with provider={provider}\")\n\n def _get_api_key(self, provider: Provider, api_key: str | None) -> str | None:\n \"\"\"Get API key from parameter or environment.\n\n Args:\n provider: Provider name\n api_key: API key from parameter (takes precedence)\n\n Returns:\n Resolved API key or None\n \"\"\"\n # If API key provided explicitly, use it\n if api_key:\n return api_key\n\n # Otherwise, get from environment based on provider\n match provider:\n case \"openai\":\n return os.getenv(\"OPENAI_API_KEY\")\n case \"deepgram\":\n return os.getenv(\"DEEPGRAM_API_KEY\")\n case _:\n return None\n\n def _load_provider(self, provider: Provider, api_key: str | None, config: dict):\n \"\"\"Load the appropriate STT provider implementation.\n\n Args:\n provider: Provider name\n api_key: Resolved API key\n config: Provider-specific configuration\n\n Returns:\n Provider instance\n\n Raises:\n ValueError: If provider is unknown\n NotImplementedError: If provider not yet implemented\n \"\"\"\n match provider:\n case \"openai\":\n from voice.providers.openai_stt import OpenAISTT\n\n return OpenAISTT(api_key=api_key, **config)\n\n case \"deepgram\":\n # Example for future extensions: to add Deepgram support, implement DeepgramSTT provider and uncomment:\n # from voice.providers.deepgram_stt import DeepgramSTT\n # return DeepgramSTT(api_key=api_key, **config)\n raise NotImplementedError(\"Deepgram STT provider not yet implemented\")\n\n case _:\n # Catch-all for unknown providers\n raise ValueError(f\"Unknown STT provider: {provider}. Available providers: openai\")\n\n @property\n def provider(self) -> str:\n \"\"\"Get the provider name.\n\n Returns:\n Provider name string\n \"\"\"\n return self._provider_name\n\n @classmethod\n def from_env(cls) -> \"SpeechToText | None\":\n \"\"\"Create STT from environment variables.\n\n Reads VOICE_STT_PROVIDER env var to determine which provider to use.\n Returns None if not configured.\n\n Returns:\n SpeechToText instance or None\n\n Example:\n >>> # In .env: VOICE_STT_PROVIDER=openai\n >>> stt = SpeechToText.from_env()\n >>> if stt:\n ... 
text = stt.transcribe(audio_file)\n \"\"\"\n provider = os.getenv(\"VOICE_STT_PROVIDER\")\n\n # If provider not set, voice features are disabled\n if not provider:\n logger.debug(\"VOICE_STT_PROVIDER not set, STT disabled\")\n return None\n\n try:\n # Create instance with provider from environment\n # Validates provider and raises ValueError if invalid\n return cls(provider=cast(Provider, provider))\n except Exception as e:\n # Log error but don't crash - allow app to continue without voice\n logger.error(f\"Failed to create STT provider: {e}\", exc_info=True)\n return None\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio to text.\n\n Delegates to the underlying provider implementation.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n \"\"\"\n return self._provider.transcribe(audio_file)", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 4763}, "tests/voice/test_manager.py::14": {"resolved_imports": ["src/voice/manager.py"], "used_names": ["Mock", "VoiceManager"], "enclosing_function": "test_init_with_both_stt_and_tts", "extracted_code": "# Source: src/voice/manager.py\nclass VoiceManager:\n \"\"\"Streamlit convenience layer for voice features.\n\n This class provides Streamlit-specific methods for voice input/output.\n It handles UI feedback (spinners, errors) while delegating actual\n voice processing to STT and TTS modules.\n\n Example:\n >>> voice = VoiceManager.from_env()\n >>>\n >>> if voice:\n ... user_input = voice.get_chat_input()\n ... if user_input:\n ... with st.chat_message(\"ai\"):\n ... voice.render_message(\"Hello!\")\n \"\"\"\n\n def __init__(self, stt: SpeechToText | None = None, tts: TextToSpeech | None = None):\n \"\"\"Initialize VoiceManager.\n\n Args:\n stt: SpeechToText instance (None to disable STT)\n tts: TextToSpeech instance (None to disable TTS)\n \"\"\"\n self.stt = stt\n self.tts = tts\n\n logger.info(\n f\"VoiceManager: STT={'enabled' if stt else 'disabled'}, \"\n f\"TTS={'enabled' if tts else 'disabled'}\"\n )\n\n @classmethod\n def from_env(cls) -> Optional[\"VoiceManager\"]:\n \"\"\"Create VoiceManager from environment variables.\n\n Reads VOICE_STT_PROVIDER and VOICE_TTS_PROVIDER to configure\n speech-to-text and text-to-speech providers.\n\n Returns:\n VoiceManager if either STT or TTS is configured, None otherwise\n\n Example:\n >>> # In .env:\n >>> # VOICE_STT_PROVIDER=openai\n >>> # VOICE_TTS_PROVIDER=openai\n >>>\n >>> voice = VoiceManager.from_env()\n >>> # Returns configured VoiceManager or None if disabled\n \"\"\"\n # Create STT and TTS from environment\n stt = SpeechToText.from_env()\n tts = TextToSpeech.from_env()\n\n # If both disabled, return None (no voice features)\n if not stt and not tts:\n logger.debug(\"Voice features not configured\")\n return None\n\n return cls(stt=stt, tts=tts)\n\n def _transcribe_audio(self, audio) -> str | None:\n \"\"\"Transcribe audio with UI feedback.\n\n Shows spinner during transcription and error message on failure.\n\n Args:\n audio: Audio file object from Streamlit chat input\n\n Returns:\n Transcribed text, or None if transcription failed\n \"\"\"\n # Defensive check (should not happen if called correctly)\n if not self.stt:\n st.error(\"⚠️ Speech-to-text not configured.\")\n return None\n\n # Show spinner while transcribing\n with st.spinner(\"🎤 Transcribing audio...\"):\n transcribed = self.stt.transcribe(audio)\n\n # Check if transcription succeeded\n if not transcribed:\n st.error(\"⚠️ Transcription 
failed. Please try again or type your message.\")\n return None\n\n return transcribed\n\n def get_chat_input(self, placeholder: str = \"Your message\") -> str | None:\n \"\"\"Get chat input with optional voice transcription.\n\n Handles Streamlit UI including audio input widget and transcription\n feedback (spinner, errors).\n\n Args:\n placeholder: Placeholder text for input\n\n Returns:\n User's message (transcribed if audio, otherwise text), or None if no input\n \"\"\"\n # No STT - use regular text input\n if not self.stt:\n return st.chat_input(placeholder)\n\n # STT enabled - use audio-capable input\n chat_value = st.chat_input(placeholder, accept_audio=True)\n\n if not chat_value:\n return None\n\n # Handle string return (text-only input)\n if isinstance(chat_value, str):\n return chat_value\n\n # Handle object/dict return (audio-capable input)\n # Extract text - support both attribute and dict access\n text_content = None\n if hasattr(chat_value, \"text\"):\n text_content = chat_value.text\n elif isinstance(chat_value, dict):\n text_content = chat_value.get(\"text\", \"\")\n\n # Extract audio - support both attribute and dict access\n audio_content = None\n if hasattr(chat_value, \"audio\"):\n audio_content = chat_value.audio\n elif isinstance(chat_value, dict):\n audio_content = chat_value.get(\"audio\")\n\n # If audio is provided, transcribe it\n if audio_content:\n return self._transcribe_audio(audio_content)\n\n # If no audio, return the text content\n if text_content:\n return text_content\n\n # No text or audio provided\n return None\n\n def render_message(self, content: str, container=None, audio_only: bool = False) -> None:\n \"\"\"Render message with optional TTS audio.\n\n Handles Streamlit UI including text display and audio player.\n Saves generated audio in session state so it persists across reruns.\n\n Args:\n content: Message content to display\n container: Streamlit container (defaults to current context)\n audio_only: If True, only render audio (text already displayed)\n \"\"\"\n if container is None:\n container = st\n\n # Show text unless audio_only mode (for streaming where text is already shown)\n if not audio_only:\n container.write(content)\n\n # Add audio if TTS enabled and content is not empty\n if self.tts and content.strip():\n # Show placeholder while generating audio\n placeholder = container.empty()\n with placeholder:\n st.caption(\"🎙️ Generating audio...\")\n\n # Generate TTS audio\n audio = self.tts.generate(content)\n\n # Save audio in session state for the last AI message\n # This allows it to persist across st.rerun() calls\n if audio:\n st.session_state.last_audio = {\"data\": audio, \"format\": self.tts.get_format()}\n\n # Replace placeholder with audio player or error message\n if audio:\n placeholder.audio(audio, format=self.tts.get_format())\n else:\n placeholder.caption(\"🔇 Audio generation unavailable\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 6225}, "tests/app/test_streamlit_app.py::30": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AppTest", "AsyncMock", "ChatMessage"], "enclosing_function": "test_app_simple_non_streaming", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n 
\"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 393}, "tests/voice/test_stt.py::16": {"resolved_imports": ["src/voice/stt.py"], "used_names": ["SpeechToText", "patch"], "enclosing_function": "test_init_with_openai_provider", "extracted_code": "# Source: src/voice/stt.py\nclass SpeechToText:\n \"\"\"Speech-to-text factory.\n\n Loads and delegates to specific STT provider implementations.\n\n Example:\n >>> stt = SpeechToText(provider=\"openai\")\n >>> text = stt.transcribe(audio_file)\n >>>\n >>> # Or from environment\n >>> stt = SpeechToText.from_env()\n >>> if stt:\n ... text = stt.transcribe(audio_file)\n \"\"\"\n\n def __init__(self, provider: Provider = \"openai\", api_key: str | None = None, **config):\n \"\"\"Initialize STT with specified provider.\n\n Args:\n provider: Provider name (\"openai\", \"deepgram\", etc.)\n api_key: API key (uses env var if not provided)\n **config: Provider-specific configuration\n\n Raises:\n ValueError: If provider is unknown\n \"\"\"\n self._provider_name = provider\n\n # Resolve API key from parameter or environment\n resolved_api_key = self._get_api_key(provider, api_key)\n\n # Load and configure the provider\n self._provider = self._load_provider(provider, resolved_api_key, config)\n\n logger.info(f\"SpeechToText created with provider={provider}\")\n\n def _get_api_key(self, provider: Provider, api_key: str | None) -> str | None:\n \"\"\"Get API key from parameter or environment.\n\n Args:\n provider: Provider name\n api_key: API key from parameter (takes precedence)\n\n Returns:\n Resolved API key or None\n \"\"\"\n # If API key provided explicitly, use it\n if api_key:\n return api_key\n\n # Otherwise, get from environment based on provider\n match provider:\n case \"openai\":\n return os.getenv(\"OPENAI_API_KEY\")\n case \"deepgram\":\n return os.getenv(\"DEEPGRAM_API_KEY\")\n case _:\n return None\n\n def _load_provider(self, provider: Provider, api_key: str | None, config: dict):\n \"\"\"Load the appropriate STT provider implementation.\n\n Args:\n provider: Provider name\n api_key: Resolved API key\n config: Provider-specific configuration\n\n Returns:\n Provider instance\n\n Raises:\n ValueError: If provider is unknown\n NotImplementedError: If provider not yet implemented\n \"\"\"\n match provider:\n case \"openai\":\n from voice.providers.openai_stt import OpenAISTT\n\n return OpenAISTT(api_key=api_key, **config)\n\n case \"deepgram\":\n # Example for future extensions: to add Deepgram support, implement DeepgramSTT provider and uncomment:\n # from voice.providers.deepgram_stt import DeepgramSTT\n # return DeepgramSTT(api_key=api_key, **config)\n raise NotImplementedError(\"Deepgram STT provider not yet implemented\")\n\n case _:\n # Catch-all for unknown providers\n raise ValueError(f\"Unknown STT provider: {provider}. Available providers: openai\")\n\n @property\n def provider(self) -> str:\n \"\"\"Get the provider name.\n\n Returns:\n Provider name string\n \"\"\"\n return self._provider_name\n\n @classmethod\n def from_env(cls) -> \"SpeechToText | None\":\n \"\"\"Create STT from environment variables.\n\n Reads VOICE_STT_PROVIDER env var to determine which provider to use.\n Returns None if not configured.\n\n Returns:\n SpeechToText instance or None\n\n Example:\n >>> # In .env: VOICE_STT_PROVIDER=openai\n >>> stt = SpeechToText.from_env()\n >>> if stt:\n ... 
text = stt.transcribe(audio_file)\n \"\"\"\n provider = os.getenv(\"VOICE_STT_PROVIDER\")\n\n # If provider not set, voice features are disabled\n if not provider:\n logger.debug(\"VOICE_STT_PROVIDER not set, STT disabled\")\n return None\n\n try:\n # Create instance with provider from environment\n # Validates provider and raises ValueError if invalid\n return cls(provider=cast(Provider, provider))\n except Exception as e:\n # Log error but don't crash - allow app to continue without voice\n logger.error(f\"Failed to create STT provider: {e}\", exc_info=True)\n return None\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio to text.\n\n Delegates to the underlying provider implementation.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n \"\"\"\n return self._provider.transcribe(audio_file)", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 4763}, "tests/service/test_service_e2e.py::45": {"resolved_imports": ["src/agents/agents.py", "src/agents/utils.py", "src/client/__init__.py", "src/schema/schema.py", "src/service/utils.py"], "used_names": [], "enclosing_function": "test_messages_conversion", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/service/test_service_message_generator.py::67": {"resolved_imports": ["src/schema/__init__.py", "src/agents/langgraph_supervisor_hierarchy_agent.py", "src/service/service.py"], "used_names": ["AIMessage", "ChatMessage", "MemorySaver", "StreamInput", "ToolCall", "json", "message_generator", "patch", "pytest", "workflow"], "enclosing_function": "test_three_layer_supervisor_hierarchy_agent_with_fake_model", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]\n\n\n# Source: src/agents/langgraph_supervisor_hierarchy_agent.py\ndef workflow(chosen_model):\n math_agent = create_agent(\n model=chosen_model,\n tools=[add, multiply],\n name=\"sub-agent-math_expert\", # Identify the graph node as a sub-agent\n system_prompt=\"You are a math expert. Always use one tool at a time.\",\n ).with_config(tags=[\"skip_stream\"])\n\n research_agent = (\n create_supervisor(\n [math_agent],\n model=chosen_model,\n tools=[web_search],\n prompt=\"You are a world class researcher with access to web search. Do not do any math, you have a math expert for that. 
\",\n supervisor_name=\"supervisor-research_expert\", # Identify the graph node as a supervisor to the math agent\n )\n .compile(\n name=\"sub-agent-research_expert\"\n ) # Identify the graph node as a sub-agent to the main supervisor\n .with_config(tags=[\"skip_stream\"])\n ) # Stream tokens are ignored for sub-agents in the UI\n\n # Create supervisor workflow\n return create_supervisor(\n [research_agent],\n model=chosen_model,\n prompt=(\n \"You are a team supervisor managing a research expert with math capabilities.\"\n \"For current events, use research_agent. \"\n ),\n add_handoff_back_messages=True,\n # UI now expects this to be True so we don't have to guess when a handoff back occurs\n output_mode=\"full_history\", # otherwise when reloading conversations, the sub-agents' messages are not included\n )\n\n\n# Source: src/service/service.py\nasync def message_generator(\n user_input: StreamInput, agent_id: str = DEFAULT_AGENT\n) -> AsyncGenerator[str, None]:\n \"\"\"\n Generate a stream of messages from the agent.\n\n This is the workhorse method for the /stream endpoint.\n \"\"\"\n agent: AgentGraph = get_agent(agent_id)\n kwargs, run_id = await _handle_input(user_input, agent)\n\n try:\n # Process streamed events from the graph and yield messages over the SSE stream.\n async for stream_event in agent.astream(\n **kwargs, stream_mode=[\"updates\", \"messages\", \"custom\"], subgraphs=True\n ):\n if not isinstance(stream_event, tuple):\n continue\n # Handle different stream event structures based on subgraphs\n if len(stream_event) == 3:\n # With subgraphs=True: (node_path, stream_mode, event)\n _, stream_mode, event = stream_event\n else:\n # Without subgraphs: (stream_mode, event)\n stream_mode, event = stream_event\n new_messages = []\n if stream_mode == \"updates\":\n for node, updates in event.items():\n # A simple approach to handle agent interrupts.\n # In a more sophisticated implementation, we could add\n # some structured ChatMessage type to return the interrupt value.\n if node == \"__interrupt__\":\n interrupt: Interrupt\n for interrupt in updates:\n new_messages.append(AIMessage(content=interrupt.value))\n continue\n updates = updates or {}\n update_messages = updates.get(\"messages\", [])\n # special cases for using langgraph-supervisor library\n if \"supervisor\" in node or \"sub-agent\" in node:\n # the only tools that come from the actual agent are the handoff and handback tools\n if isinstance(update_messages[-1], ToolMessage):\n if \"sub-agent\" in node and len(update_messages) > 1:\n # If this is a sub-agent, we want to keep the last 2 messages - the handback tool, and it's result\n update_messages = update_messages[-2:]\n else:\n # If this is a supervisor, we want to keep the last message only - the handoff result. The tool comes from the 'agent' node.\n update_messages = [update_messages[-1]]\n else:\n update_messages = []\n new_messages.extend(update_messages)\n\n if stream_mode == \"custom\":\n new_messages = [event]\n\n # LangGraph streaming may emit tuples: (field_name, field_value)\n # e.g. 
('content', <str>), ('tool_calls', [ToolCall,...]), ('additional_kwargs', {...}), etc.\n # We accumulate only supported fields into `parts` and skip unsupported metadata.\n # More info at: https://langchain-ai.github.io/langgraph/cloud/how-tos/stream_messages/\n processed_messages = []\n current_message: dict[str, Any] = {}\n for message in new_messages:\n if isinstance(message, tuple):\n key, value = message\n # Store parts in temporary dict\n current_message[key] = value\n else:\n # Add complete message if we have one in progress\n if current_message:\n processed_messages.append(_create_ai_message(current_message))\n current_message = {}\n processed_messages.append(message)\n\n # Add any remaining message parts\n if current_message:\n processed_messages.append(_create_ai_message(current_message))\n\n for message in processed_messages:\n try:\n chat_message = langchain_to_chat_message(message)\n chat_message.run_id = str(run_id)\n except Exception as e:\n logger.error(f\"Error parsing message: {e}\")\n yield f\"data: {json.dumps({'type': 'error', 'content': 'Unexpected error'})}\\n\\n\"\n continue\n # LangGraph re-sends the input message, which feels weird, so drop it\n if chat_message.type == \"human\" and chat_message.content == user_input.message:\n continue\n yield f\"data: {json.dumps({'type': 'message', 'content': chat_message.model_dump()})}\\n\\n\"\n\n if stream_mode == \"messages\":\n if not user_input.stream_tokens:\n continue\n msg, metadata = event\n if \"skip_stream\" in metadata.get(\"tags\", []):\n continue\n # For some reason, astream(\"messages\") causes non-LLM nodes to send extra messages.\n # Drop them.\n if not isinstance(msg, AIMessageChunk):\n continue\n content = remove_tool_calls(msg.content)\n if content:\n # Empty content in the context of OpenAI usually means\n # that the model is asking for a tool to be invoked.\n # So we only print non-empty content.\n yield f\"data: {json.dumps({'type': 'token', 'content': convert_message_content_to_string(content)})}\\n\\n\"\n except Exception as e:\n logger.error(f\"Error in message generator: {e}\")\n yield f\"data: {json.dumps({'type': 'error', 'content': 'Internal server error'})}\\n\\n\"\n finally:\n yield \"data: [DONE]\\n\\n\"", "n_imports_parsed": 9, "n_files_resolved": 3, "n_chars_extracted": 8204}, "tests/voice/test_manager.py::23": {"resolved_imports": ["src/voice/manager.py"], "used_names": ["Mock", "VoiceManager"], "enclosing_function": "test_init_with_only_tts", "extracted_code": "# Source: src/voice/manager.py\nclass VoiceManager:\n \"\"\"Streamlit convenience layer for voice features.\n\n This class provides Streamlit-specific methods for voice input/output.\n It handles UI feedback (spinners, errors) while delegating actual\n voice processing to STT and TTS modules.\n\n Example:\n >>> voice = VoiceManager.from_env()\n >>>\n >>> if voice:\n ... user_input = voice.get_chat_input()\n ... if user_input:\n ... with st.chat_message(\"ai\"):\n ... 
voice.render_message(\"Hello!\")\n \"\"\"\n\n def __init__(self, stt: SpeechToText | None = None, tts: TextToSpeech | None = None):\n \"\"\"Initialize VoiceManager.\n\n Args:\n stt: SpeechToText instance (None to disable STT)\n tts: TextToSpeech instance (None to disable TTS)\n \"\"\"\n self.stt = stt\n self.tts = tts\n\n logger.info(\n f\"VoiceManager: STT={'enabled' if stt else 'disabled'}, \"\n f\"TTS={'enabled' if tts else 'disabled'}\"\n )\n\n @classmethod\n def from_env(cls) -> Optional[\"VoiceManager\"]:\n \"\"\"Create VoiceManager from environment variables.\n\n Reads VOICE_STT_PROVIDER and VOICE_TTS_PROVIDER to configure\n speech-to-text and text-to-speech providers.\n\n Returns:\n VoiceManager if either STT or TTS is configured, None otherwise\n\n Example:\n >>> # In .env:\n >>> # VOICE_STT_PROVIDER=openai\n >>> # VOICE_TTS_PROVIDER=openai\n >>>\n >>> voice = VoiceManager.from_env()\n >>> # Returns configured VoiceManager or None if disabled\n \"\"\"\n # Create STT and TTS from environment\n stt = SpeechToText.from_env()\n tts = TextToSpeech.from_env()\n\n # If both disabled, return None (no voice features)\n if not stt and not tts:\n logger.debug(\"Voice features not configured\")\n return None\n\n return cls(stt=stt, tts=tts)\n\n def _transcribe_audio(self, audio) -> str | None:\n \"\"\"Transcribe audio with UI feedback.\n\n Shows spinner during transcription and error message on failure.\n\n Args:\n audio: Audio file object from Streamlit chat input\n\n Returns:\n Transcribed text, or None if transcription failed\n \"\"\"\n # Defensive check (should not happen if called correctly)\n if not self.stt:\n st.error(\"⚠️ Speech-to-text not configured.\")\n return None\n\n # Show spinner while transcribing\n with st.spinner(\"🎤 Transcribing audio...\"):\n transcribed = self.stt.transcribe(audio)\n\n # Check if transcription succeeded\n if not transcribed:\n st.error(\"⚠️ Transcription failed. 
Please try again or type your message.\")\n return None\n\n return transcribed\n\n def get_chat_input(self, placeholder: str = \"Your message\") -> str | None:\n \"\"\"Get chat input with optional voice transcription.\n\n Handles Streamlit UI including audio input widget and transcription\n feedback (spinner, errors).\n\n Args:\n placeholder: Placeholder text for input\n\n Returns:\n User's message (transcribed if audio, otherwise text), or None if no input\n \"\"\"\n # No STT - use regular text input\n if not self.stt:\n return st.chat_input(placeholder)\n\n # STT enabled - use audio-capable input\n chat_value = st.chat_input(placeholder, accept_audio=True)\n\n if not chat_value:\n return None\n\n # Handle string return (text-only input)\n if isinstance(chat_value, str):\n return chat_value\n\n # Handle object/dict return (audio-capable input)\n # Extract text - support both attribute and dict access\n text_content = None\n if hasattr(chat_value, \"text\"):\n text_content = chat_value.text\n elif isinstance(chat_value, dict):\n text_content = chat_value.get(\"text\", \"\")\n\n # Extract audio - support both attribute and dict access\n audio_content = None\n if hasattr(chat_value, \"audio\"):\n audio_content = chat_value.audio\n elif isinstance(chat_value, dict):\n audio_content = chat_value.get(\"audio\")\n\n # If audio is provided, transcribe it\n if audio_content:\n return self._transcribe_audio(audio_content)\n\n # If no audio, return the text content\n if text_content:\n return text_content\n\n # No text or audio provided\n return None\n\n def render_message(self, content: str, container=None, audio_only: bool = False) -> None:\n \"\"\"Render message with optional TTS audio.\n\n Handles Streamlit UI including text display and audio player.\n Saves generated audio in session state so it persists across reruns.\n\n Args:\n content: Message content to display\n container: Streamlit container (defaults to current context)\n audio_only: If True, only render audio (text already displayed)\n \"\"\"\n if container is None:\n container = st\n\n # Show text unless audio_only mode (for streaming where text is already shown)\n if not audio_only:\n container.write(content)\n\n # Add audio if TTS enabled and content is not empty\n if self.tts and content.strip():\n # Show placeholder while generating audio\n placeholder = container.empty()\n with placeholder:\n st.caption(\"🎙️ Generating audio...\")\n\n # Generate TTS audio\n audio = self.tts.generate(content)\n\n # Save audio in session state for the last AI message\n # This allows it to persist across st.rerun() calls\n if audio:\n st.session_state.last_audio = {\"data\": audio, \"format\": self.tts.get_format()}\n\n # Replace placeholder with audio player or error message\n if audio:\n placeholder.audio(audio, format=self.tts.get_format())\n else:\n placeholder.caption(\"🔇 Audio generation unavailable\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 6225}, "tests/client/test_client.py::255": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClientError", "Request", "Response", "patch", "pytest"], "enclosing_function": "test_acreate_feedback", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 8, 
"n_files_resolved": 3, "n_chars_extracted": 239}, "tests/service/test_service_lifespan.py::71": {"resolved_imports": ["src/schema/__init__.py", "src/service/__init__.py", "src/service/service.py"], "used_names": ["AgentInfo", "FastAPI", "asynccontextmanager", "logging", "pytest", "service"], "enclosing_function": "test_lifespan", "extracted_code": "# Source: src/schema/__init__.py\nfrom schema.models import AllModelEnum\nfrom schema.schema import (\n AgentInfo,\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n\n__all__ = [\n \"AgentInfo\",\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n\n\n# Source: src/service/__init__.py\nfrom service.service import app\n\n__all__ = [\"app\"]", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 560}, "tests/core/test_llm.py::69": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["get_model", "pytest"], "enclosing_function": "test_get_model_invalid", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, 
base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3306}, "tests/integration/test_docker_e2e.py::15": {"resolved_imports": ["src/client/__init__.py"], "used_names": ["AgentClient", "pytest"], "enclosing_function": "test_service_with_fake_model", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 239}, "tests/core/test_settings.py::201": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["Settings", "json", "os", "patch"], "enclosing_function": "test_settings_azure_openai", "extracted_code": "# Source: src/core/settings.py\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, BeforeValidator(check_str_is_http)] = \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = 
None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if 
self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 8669}, "tests/client/test_client.py::18": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClient"], "enclosing_function": "test_init", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 239}, "tests/service/test_utils.py::10": {"resolved_imports": ["src/service/utils.py"], "used_names": ["AIMessage", "HumanMessage", "SystemMessage", "ToolMessage", "langchain_to_chat_message"], "enclosing_function": "test_messages_from_langchain", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: {message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/voice/providers/test_openai_stt.py::33": 
{"resolved_imports": ["src/voice/providers/openai_stt.py"], "used_names": ["OpenAISTT", "patch"], "enclosing_function": "test_transcribe_seeks_file_to_beginning", "extracted_code": "# Source: src/voice/providers/openai_stt.py\nclass OpenAISTT:\n \"\"\"OpenAI Whisper STT provider.\"\"\"\n\n def __init__(self, api_key: str | None = None):\n \"\"\"Initialize OpenAI STT.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n\n Raises:\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n logger.info(\"OpenAI STT initialized\")\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio using OpenAI Whisper.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n\n Note:\n Errors are logged but not raised - returns empty string instead.\n This allows graceful degradation in user-facing applications.\n \"\"\"\n try:\n # Reset file pointer to beginning (may have been read elsewhere)\n audio_file.seek(0)\n\n # Call OpenAI Whisper API for transcription\n result = self.client.audio.transcriptions.create(\n model=\"whisper-1\", file=audio_file, response_format=\"text\"\n )\n\n # Clean up whitespace from result\n transcribed = result.strip()\n logger.info(f\"OpenAI STT: transcribed {len(transcribed)} chars\")\n return transcribed\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI STT failed: {e}\", exc_info=True)\n # Return empty string to allow graceful degradation\n return \"\"", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1726}, "tests/service/test_service_message_generator.py::69": {"resolved_imports": ["src/schema/__init__.py", "src/agents/langgraph_supervisor_hierarchy_agent.py", "src/service/service.py"], "used_names": ["AIMessage", "ChatMessage", "MemorySaver", "StreamInput", "ToolCall", "json", "message_generator", "patch", "pytest", "workflow"], "enclosing_function": "test_three_layer_supervisor_hierarchy_agent_with_fake_model", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]\n\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]\n\n\n# Source: src/agents/langgraph_supervisor_hierarchy_agent.py\ndef workflow(chosen_model):\n math_agent = create_agent(\n model=chosen_model,\n tools=[add, multiply],\n name=\"sub-agent-math_expert\", # Identify the graph node as a sub-agent\n system_prompt=\"You are a math expert. Always use one tool at a time.\",\n ).with_config(tags=[\"skip_stream\"])\n\n research_agent = (\n create_supervisor(\n [math_agent],\n model=chosen_model,\n tools=[web_search],\n prompt=\"You are a world class researcher with access to web search. Do not do any math, you have a math expert for that. 
\",\n supervisor_name=\"supervisor-research_expert\", # Identify the graph node as a supervisor to the math agent\n )\n .compile(\n name=\"sub-agent-research_expert\"\n ) # Identify the graph node as a sub-agent to the main supervisor\n .with_config(tags=[\"skip_stream\"])\n ) # Stream tokens are ignored for sub-agents in the UI\n\n # Create supervisor workflow\n return create_supervisor(\n [research_agent],\n model=chosen_model,\n prompt=(\n \"You are a team supervisor managing a research expert with math capabilities.\"\n \"For current events, use research_agent. \"\n ),\n add_handoff_back_messages=True,\n # UI now expects this to be True so we don't have to guess when a handoff back occurs\n output_mode=\"full_history\", # otherwise when reloading conversations, the sub-agents' messages are not included\n )\n\n\n# Source: src/service/service.py\nasync def message_generator(\n user_input: StreamInput, agent_id: str = DEFAULT_AGENT\n) -> AsyncGenerator[str, None]:\n \"\"\"\n Generate a stream of messages from the agent.\n\n This is the workhorse method for the /stream endpoint.\n \"\"\"\n agent: AgentGraph = get_agent(agent_id)\n kwargs, run_id = await _handle_input(user_input, agent)\n\n try:\n # Process streamed events from the graph and yield messages over the SSE stream.\n async for stream_event in agent.astream(\n **kwargs, stream_mode=[\"updates\", \"messages\", \"custom\"], subgraphs=True\n ):\n if not isinstance(stream_event, tuple):\n continue\n # Handle different stream event structures based on subgraphs\n if len(stream_event) == 3:\n # With subgraphs=True: (node_path, stream_mode, event)\n _, stream_mode, event = stream_event\n else:\n # Without subgraphs: (stream_mode, event)\n stream_mode, event = stream_event\n new_messages = []\n if stream_mode == \"updates\":\n for node, updates in event.items():\n # A simple approach to handle agent interrupts.\n # In a more sophisticated implementation, we could add\n # some structured ChatMessage type to return the interrupt value.\n if node == \"__interrupt__\":\n interrupt: Interrupt\n for interrupt in updates:\n new_messages.append(AIMessage(content=interrupt.value))\n continue\n updates = updates or {}\n update_messages = updates.get(\"messages\", [])\n # special cases for using langgraph-supervisor library\n if \"supervisor\" in node or \"sub-agent\" in node:\n # the only tools that come from the actual agent are the handoff and handback tools\n if isinstance(update_messages[-1], ToolMessage):\n if \"sub-agent\" in node and len(update_messages) > 1:\n # If this is a sub-agent, we want to keep the last 2 messages - the handback tool, and it's result\n update_messages = update_messages[-2:]\n else:\n # If this is a supervisor, we want to keep the last message only - the handoff result. The tool comes from the 'agent' node.\n update_messages = [update_messages[-1]]\n else:\n update_messages = []\n new_messages.extend(update_messages)\n\n if stream_mode == \"custom\":\n new_messages = [event]\n\n # LangGraph streaming may emit tuples: (field_name, field_value)\n # e.g. 
('content', <str>), ('tool_calls', [ToolCall,...]), ('additional_kwargs', {...}), etc.\n # We accumulate only supported fields into `parts` and skip unsupported metadata.\n # More info at: https://langchain-ai.github.io/langgraph/cloud/how-tos/stream_messages/\n processed_messages = []\n current_message: dict[str, Any] = {}\n for message in new_messages:\n if isinstance(message, tuple):\n key, value = message\n # Store parts in temporary dict\n current_message[key] = value\n else:\n # Add complete message if we have one in progress\n if current_message:\n processed_messages.append(_create_ai_message(current_message))\n current_message = {}\n processed_messages.append(message)\n\n # Add any remaining message parts\n if current_message:\n processed_messages.append(_create_ai_message(current_message))\n\n for message in processed_messages:\n try:\n chat_message = langchain_to_chat_message(message)\n chat_message.run_id = str(run_id)\n except Exception as e:\n logger.error(f\"Error parsing message: {e}\")\n yield f\"data: {json.dumps({'type': 'error', 'content': 'Unexpected error'})}\\n\\n\"\n continue\n # LangGraph re-sends the input message, which feels weird, so drop it\n if chat_message.type == \"human\" and chat_message.content == user_input.message:\n continue\n yield f\"data: {json.dumps({'type': 'message', 'content': chat_message.model_dump()})}\\n\\n\"\n\n if stream_mode == \"messages\":\n if not user_input.stream_tokens:\n continue\n msg, metadata = event\n if \"skip_stream\" in metadata.get(\"tags\", []):\n continue\n # For some reason, astream(\"messages\") causes non-LLM nodes to send extra messages.\n # Drop them.\n if not isinstance(msg, AIMessageChunk):\n continue\n content = remove_tool_calls(msg.content)\n if content:\n # Empty content in the context of OpenAI usually means\n # that the model is asking for a tool to be invoked.\n # So we only print non-empty content.\n yield f\"data: {json.dumps({'type': 'token', 'content': convert_message_content_to_string(content)})}\\n\\n\"\n except Exception as e:\n logger.error(f\"Error in message generator: {e}\")\n yield f\"data: {json.dumps({'type': 'error', 'content': 'Internal server error'})}\\n\\n\"\n finally:\n yield \"data: [DONE]\\n\\n\"", "n_imports_parsed": 9, "n_files_resolved": 3, "n_chars_extracted": 8204}, "tests/service/test_utils.py::35": {"resolved_imports": ["src/service/utils.py"], "used_names": ["AIMessage", "langchain_to_chat_message"], "enclosing_function": "test_message_run_id_usage", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return 
custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: {message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/client/test_client.py::36": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClient", "os", "patch"], "enclosing_function": "test_headers", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 239}, "tests/service/test_utils.py::44": {"resolved_imports": ["src/service/utils.py"], "used_names": ["AIMessage", "ToolCall", "langchain_to_chat_message"], "enclosing_function": "test_messages_tool_calls", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: {message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/client/test_client.py::283": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClientError", "ChatHistory", "Request", "Response", "patch", "pytest"], "enclosing_function": "test_get_history", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\n\n# Source: src/schema/__init__.py\nfrom schema.schema import (\n AgentInfo,\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 797}, "tests/core/test_settings.py::208": {"resolved_imports": 
["src/core/settings.py", "src/schema/models.py"], "used_names": ["LogLevel", "logging"], "enclosing_function": "test_log_level_enum", "extracted_code": "# Source: src/core/settings.py\nclass LogLevel(StrEnum):\n DEBUG = \"DEBUG\"\n INFO = \"INFO\"\n WARNING = \"WARNING\"\n ERROR = \"ERROR\"\n CRITICAL = \"CRITICAL\"\n\n def to_logging_level(self) -> int:\n \"\"\"Convert to Python logging level constant.\"\"\"\n import logging\n\n mapping = {\n LogLevel.DEBUG: logging.DEBUG,\n LogLevel.INFO: logging.INFO,\n LogLevel.WARNING: logging.WARNING,\n LogLevel.ERROR: logging.ERROR,\n LogLevel.CRITICAL: logging.CRITICAL,\n }\n return mapping[self]", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 565}, "tests/agents/test_github_mcp_agent.py::132": {"resolved_imports": ["src/agents/github_mcp_agent/github_mcp_agent.py", "src/core/settings.py"], "used_names": ["GitHubMCPAgent", "Mock"], "enclosing_function": "test_get_graph_loaded", "extracted_code": "# Source: src/agents/github_mcp_agent/github_mcp_agent.py\nclass GitHubMCPAgent(LazyLoadingAgent):\n \"\"\"GitHub MCP Agent with async initialization.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._mcp_tools: list[BaseTool] = []\n self._mcp_client: MultiServerMCPClient | None = None\n\n async def load(self) -> None:\n \"\"\"Initialize the GitHub MCP agent by loading MCP tools.\"\"\"\n if not settings.GITHUB_PAT:\n logger.info(\"GITHUB_PAT is not set, GitHub MCP agent will have no tools\")\n self._mcp_tools = []\n self._graph = self._create_graph()\n self._loaded = True\n return\n\n try:\n # Initialize MCP client directly\n github_pat = settings.GITHUB_PAT.get_secret_value()\n connections = {\n \"github\": StreamableHttpConnection(\n transport=\"streamable_http\",\n url=settings.MCP_GITHUB_SERVER_URL,\n headers={\n \"Authorization\": f\"Bearer {github_pat}\",\n },\n )\n }\n\n self._mcp_client = MultiServerMCPClient(connections)\n logger.info(\"MCP client initialized successfully\")\n\n # Get tools from the client\n self._mcp_tools = await self._mcp_client.get_tools()\n logger.info(f\"GitHub MCP agent initialized with {len(self._mcp_tools)} tools\")\n\n except Exception as e:\n logger.error(f\"Failed to initialize GitHub MCP agent: {e}\")\n self._mcp_tools = []\n self._mcp_client = None\n\n # Create and store the graph\n self._graph = self._create_graph()\n self._loaded = True\n\n def _create_graph(self) -> CompiledStateGraph:\n \"\"\"Create the GitHub MCP agent graph.\"\"\"\n model = get_model(settings.DEFAULT_MODEL)\n\n return create_agent(\n model=model,\n tools=self._mcp_tools,\n name=\"github-mcp-agent\",\n system_prompt=prompt,\n )", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 2061}, "tests/voice/test_tts.py::16": {"resolved_imports": ["src/voice/tts.py"], "used_names": ["TextToSpeech", "patch"], "enclosing_function": "test_init_with_openai_provider", "extracted_code": "# Source: src/voice/tts.py\nclass TextToSpeech:\n \"\"\"Text-to-speech factory.\n\n Loads and delegates to specific TTS provider implementations.\n\n Example:\n >>> tts = TextToSpeech(provider=\"openai\", voice=\"nova\")\n >>> audio = tts.generate(\"Hello world\")\n >>>\n >>> # Or from environment\n >>> tts = TextToSpeech.from_env()\n >>> if tts:\n ... 
audio = tts.generate(\"Hello world\")\n \"\"\"\n\n def __init__(self, provider: Provider = \"openai\", api_key: str | None = None, **config):\n \"\"\"Initialize TTS with specified provider.\n\n Args:\n provider: Provider name (\"openai\", \"elevenlabs\", etc.)\n api_key: API key (uses env var if not provided)\n **config: Provider-specific configuration\n OpenAI: voice=\"alloy\", model=\"tts-1\"\n ElevenLabs: voice_id=\"...\", model_id=\"...\"\n\n Raises:\n ValueError: If provider is unknown\n \"\"\"\n self._provider_name = provider\n\n # Resolve API key from parameter or environment\n resolved_api_key = self._get_api_key(provider, api_key)\n\n # Load and configure the provider\n self._provider = self._load_provider(provider, resolved_api_key, config)\n\n logger.info(f\"TextToSpeech created with provider={provider}\")\n\n def _get_api_key(self, provider: Provider, api_key: str | None) -> str | None:\n \"\"\"Get API key from parameter or environment.\n\n Args:\n provider: Provider name\n api_key: API key from parameter (takes precedence)\n\n Returns:\n Resolved API key or None\n \"\"\"\n # If API key provided explicitly, use it\n if api_key:\n return api_key\n\n # Otherwise, get from environment based on provider\n match provider:\n case \"openai\":\n return os.getenv(\"OPENAI_API_KEY\")\n case \"elevenlabs\":\n return os.getenv(\"ELEVENLABS_API_KEY\")\n case _:\n return None\n\n def _load_provider(self, provider: Provider, api_key: str | None, config: dict):\n \"\"\"Load the appropriate TTS provider implementation.\n\n Args:\n provider: Provider name\n api_key: Resolved API key\n config: Provider-specific configuration\n\n Returns:\n Provider instance\n\n Raises:\n ValueError: If provider is unknown\n NotImplementedError: If provider not yet implemented\n \"\"\"\n match provider:\n case \"openai\":\n from voice.providers.openai_tts import OpenAITTS\n\n # Extract OpenAI-specific config with defaults\n voice = config.get(\"voice\", \"alloy\")\n model = config.get(\"model\", \"tts-1\")\n\n return OpenAITTS(api_key=api_key, voice=voice, model=model)\n\n case \"elevenlabs\":\n # Example for future extensions: to add ElevenLabs support, implement ElevenLabsTTS provider and uncomment:\n # from voice.providers.elevenlabs_tts import ElevenLabsTTS\n # voice_id = config.get(\"voice_id\")\n # model_id = config.get(\"model_id\", \"eleven_monolingual_v1\")\n # return ElevenLabsTTS(api_key=api_key, voice_id=voice_id, model_id=model_id)\n raise NotImplementedError(\"ElevenLabs TTS provider not yet implemented\")\n\n case _:\n # Catch-all for unknown providers\n raise ValueError(f\"Unknown TTS provider: {provider}. Available providers: openai\")\n\n @property\n def provider(self) -> str:\n \"\"\"Get the provider name.\n\n Returns:\n Provider name string\n \"\"\"\n return self._provider_name\n\n @classmethod\n def from_env(cls) -> \"TextToSpeech | None\":\n \"\"\"Create TTS from environment variables.\n\n Reads VOICE_TTS_PROVIDER env var to determine which provider to use.\n Returns None if not configured.\n\n Returns:\n TextToSpeech instance or None\n\n Example:\n >>> # In .env: VOICE_TTS_PROVIDER=openai\n >>> tts = TextToSpeech.from_env()\n >>> if tts:\n ... 
audio = tts.generate(\"Hello world\")\n \"\"\"\n provider = os.getenv(\"VOICE_TTS_PROVIDER\")\n\n # If provider not set, voice features are disabled\n if not provider:\n logger.debug(\"VOICE_TTS_PROVIDER not set, TTS disabled\")\n return None\n\n try:\n # Create instance with provider from environment\n # Validates provider and raises ValueError if invalid\n return cls(provider=cast(Provider, provider))\n except Exception as e:\n # Log error but don't crash - allow app to continue without voice\n logger.error(f\"Failed to create TTS provider: {e}\", exc_info=True)\n return None\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Delegates to the underlying provider implementation.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n Audio bytes (format depends on provider), or None on failure\n \"\"\"\n return self._provider.generate(text)\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type) for this provider.\n\n Returns:\n MIME type string (e.g., \"audio/mp3\")\n \"\"\"\n return self._provider.get_format()", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 5482}, "tests/app/test_streamlit_app.py::33": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AppTest", "AsyncMock", "ChatMessage"], "enclosing_function": "test_app_simple_non_streaming", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 393}, "tests/agents/test_lazy_agent.py::46": {"resolved_imports": ["src/agents/lazy_agent.py"], "used_names": ["pytest"], "enclosing_function": "test_get_graph_before_load", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/voice/providers/test_openai_tts.py::38": {"resolved_imports": ["src/voice/providers/openai_tts.py"], "used_names": ["OpenAITTS", "patch"], "enclosing_function": "test_validate_text_too_short", "extracted_code": "# Source: src/voice/providers/openai_tts.py\nclass OpenAITTS:\n \"\"\"OpenAI TTS provider.\"\"\"\n\n # API constraints\n MAX_TEXT_LENGTH = 4096\n MIN_TEXT_LENGTH = 3\n\n # Available configuration options\n VALID_VOICES = [\"alloy\", \"echo\", \"fable\", \"onyx\", \"nova\", \"shimmer\"]\n VALID_MODELS = [\"tts-1\", \"tts-1-hd\"]\n\n def __init__(self, api_key: str | None = None, voice: str = \"alloy\", model: str = \"tts-1\"):\n \"\"\"Initialize OpenAI TTS.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n voice: Voice name (alloy, echo, fable, onyx, nova, shimmer)\n model: Model name (tts-1 or tts-1-hd)\n\n Raises:\n ValueError: If voice or model is invalid\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Validate voice parameter\n if voice not in self.VALID_VOICES:\n raise ValueError(f\"Invalid voice '{voice}'. Must be one of {self.VALID_VOICES}\")\n\n # Validate model parameter\n if model not in self.VALID_MODELS:\n raise ValueError(f\"Invalid model '{model}'. 
Must be one of {self.VALID_MODELS}\")\n\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n self.voice = voice\n self.model = model\n\n logger.info(f\"OpenAI TTS initialized: voice={voice}, model={model}\")\n\n def _validate_and_prepare_text(self, text: str) -> str | None:\n \"\"\"Validate and prepare text for TTS generation.\n\n Args:\n text: Raw text input\n\n Returns:\n Prepared text ready for TTS, or None if text is too short\n\n Note:\n - Strips whitespace\n - Returns None if text is below minimum length\n - Truncates text if above maximum length\n \"\"\"\n # Remove leading/trailing whitespace\n text = text.strip()\n\n # Skip very short text (not worth API call)\n if len(text) < self.MIN_TEXT_LENGTH:\n logger.debug(f\"OpenAI TTS: skipping short text ({len(text)} chars)\")\n return None\n\n # Truncate to API limit if needed\n if len(text) > self.MAX_TEXT_LENGTH:\n logger.warning(\n f\"OpenAI TTS: truncating from {len(text)} to {self.MAX_TEXT_LENGTH} chars\"\n )\n text = text[: self.MAX_TEXT_LENGTH]\n\n return text\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n MP3 audio bytes, or None if text is too short or generation fails\n\n Note:\n - Text shorter than 3 chars returns None\n - Text longer than 4096 chars is truncated\n - Errors are logged but not raised - returns None instead\n \"\"\"\n # Validate and prepare text\n prepared_text = self._validate_and_prepare_text(text)\n if not prepared_text:\n return None\n\n try:\n # Call OpenAI TTS API\n response = self.client.audio.speech.create(\n model=self.model,\n voice=self.voice,\n input=prepared_text,\n response_format=\"mp3\",\n )\n\n # Extract audio bytes from response\n audio_bytes = response.content\n logger.info(f\"OpenAI TTS: generated {len(audio_bytes)} bytes\")\n return audio_bytes\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI TTS failed: {e}\", exc_info=True)\n # Return None to allow graceful degradation\n return None\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type).\n\n Returns:\n MIME type string for generated audio\n \"\"\"\n return \"audio/mp3\"", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 3890}, "tests/service/test_service_lifespan.py::76": {"resolved_imports": ["src/schema/__init__.py", "src/service/__init__.py", "src/service/service.py"], "used_names": ["AgentInfo", "FastAPI", "asynccontextmanager", "logging", "pytest", "service"], "enclosing_function": "test_lifespan", "extracted_code": "# Source: src/schema/__init__.py\nfrom schema.models import AllModelEnum\nfrom schema.schema import (\n AgentInfo,\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n\n__all__ = [\n \"AgentInfo\",\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n\n\n# Source: src/service/__init__.py\nfrom service.service import app\n\n__all__ = [\"app\"]", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 560}, "tests/agents/test_github_mcp_agent.py::18": {"resolved_imports": ["src/agents/github_mcp_agent/github_mcp_agent.py", "src/core/settings.py"], "used_names": ["GitHubMCPAgent"], "enclosing_function": "test_initialization", "extracted_code": "# 
Source: src/agents/github_mcp_agent/github_mcp_agent.py\nclass GitHubMCPAgent(LazyLoadingAgent):\n \"\"\"GitHub MCP Agent with async initialization.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._mcp_tools: list[BaseTool] = []\n self._mcp_client: MultiServerMCPClient | None = None\n\n async def load(self) -> None:\n \"\"\"Initialize the GitHub MCP agent by loading MCP tools.\"\"\"\n if not settings.GITHUB_PAT:\n logger.info(\"GITHUB_PAT is not set, GitHub MCP agent will have no tools\")\n self._mcp_tools = []\n self._graph = self._create_graph()\n self._loaded = True\n return\n\n try:\n # Initialize MCP client directly\n github_pat = settings.GITHUB_PAT.get_secret_value()\n connections = {\n \"github\": StreamableHttpConnection(\n transport=\"streamable_http\",\n url=settings.MCP_GITHUB_SERVER_URL,\n headers={\n \"Authorization\": f\"Bearer {github_pat}\",\n },\n )\n }\n\n self._mcp_client = MultiServerMCPClient(connections)\n logger.info(\"MCP client initialized successfully\")\n\n # Get tools from the client\n self._mcp_tools = await self._mcp_client.get_tools()\n logger.info(f\"GitHub MCP agent initialized with {len(self._mcp_tools)} tools\")\n\n except Exception as e:\n logger.error(f\"Failed to initialize GitHub MCP agent: {e}\")\n self._mcp_tools = []\n self._mcp_client = None\n\n # Create and store the graph\n self._graph = self._create_graph()\n self._loaded = True\n\n def _create_graph(self) -> CompiledStateGraph:\n \"\"\"Create the GitHub MCP agent graph.\"\"\"\n model = get_model(settings.DEFAULT_MODEL)\n\n return create_agent(\n model=model,\n tools=self._mcp_tools,\n name=\"github-mcp-agent\",\n system_prompt=prompt,\n )", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 2061}, "tests/core/test_settings.py::210": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["LogLevel", "logging"], "enclosing_function": "test_log_level_enum", "extracted_code": "# Source: src/core/settings.py\nclass LogLevel(StrEnum):\n DEBUG = \"DEBUG\"\n INFO = \"INFO\"\n WARNING = \"WARNING\"\n ERROR = \"ERROR\"\n CRITICAL = \"CRITICAL\"\n\n def to_logging_level(self) -> int:\n \"\"\"Convert to Python logging level constant.\"\"\"\n import logging\n\n mapping = {\n LogLevel.DEBUG: logging.DEBUG,\n LogLevel.INFO: logging.INFO,\n LogLevel.WARNING: logging.WARNING,\n LogLevel.ERROR: logging.ERROR,\n LogLevel.CRITICAL: logging.CRITICAL,\n }\n return mapping[self]", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 565}, "tests/core/test_settings.py::24": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["ValidationError", "check_str_is_http", "pytest"], "enclosing_function": "test_check_str_is_http", "extracted_code": "# Source: src/core/settings.py\ndef check_str_is_http(x: str) -> str:\n http_url_adapter = TypeAdapter(HttpUrl)\n return str(http_url_adapter.validate_python(x))", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 164}, "tests/core/test_llm.py::58": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["ChatOllama", "OllamaModelName", "get_model", "patch"], "enclosing_function": "test_get_model_ollama", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n 
raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass OllamaModelName(StrEnum):\n \"\"\"https://ollama.com/search\"\"\"\n\n OLLAMA_GENERIC = \"ollama\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3438}, "tests/agents/test_github_mcp_agent.py::65": {"resolved_imports": ["src/agents/github_mcp_agent/github_mcp_agent.py", "src/core/settings.py"], "used_names": ["AsyncMock", "GitHubMCPAgent", "Mock", "Tool", "patch", "pytest", "settings"], "enclosing_function": "test_load_with_github_pat", "extracted_code": "# Source: src/agents/github_mcp_agent/github_mcp_agent.py\nclass GitHubMCPAgent(LazyLoadingAgent):\n \"\"\"GitHub MCP Agent with async initialization.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._mcp_tools: list[BaseTool] = []\n self._mcp_client: MultiServerMCPClient | None = None\n\n async def load(self) -> None:\n \"\"\"Initialize the GitHub MCP agent by loading MCP tools.\"\"\"\n if not settings.GITHUB_PAT:\n logger.info(\"GITHUB_PAT is not 
set, GitHub MCP agent will have no tools\")\n            self._mcp_tools = []\n            self._graph = self._create_graph()\n            self._loaded = True\n            return\n\n        try:\n            # Initialize MCP client directly\n            github_pat = settings.GITHUB_PAT.get_secret_value()\n            connections = {\n                \"github\": StreamableHttpConnection(\n                    transport=\"streamable_http\",\n                    url=settings.MCP_GITHUB_SERVER_URL,\n                    headers={\n                        \"Authorization\": f\"Bearer {github_pat}\",\n                    },\n                )\n            }\n\n            self._mcp_client = MultiServerMCPClient(connections)\n            logger.info(\"MCP client initialized successfully\")\n\n            # Get tools from the client\n            self._mcp_tools = await self._mcp_client.get_tools()\n            logger.info(f\"GitHub MCP agent initialized with {len(self._mcp_tools)} tools\")\n\n        except Exception as e:\n            logger.error(f\"Failed to initialize GitHub MCP agent: {e}\")\n            self._mcp_tools = []\n            self._mcp_client = None\n\n        # Create and store the graph\n        self._graph = self._create_graph()\n        self._loaded = True\n\n    def _create_graph(self) -> CompiledStateGraph:\n        \"\"\"Create the GitHub MCP agent graph.\"\"\"\n        model = get_model(settings.DEFAULT_MODEL)\n\n        return create_agent(\n            model=model,\n            tools=self._mcp_tools,\n            name=\"github-mcp-agent\",\n            system_prompt=prompt,\n        )\n\n\n# Source: src/core/settings.py\nsettings = Settings()", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 2116}, "tests/service/test_service.py::93": {"resolved_imports": ["src/agents/agents.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AIMessage", "ChatMessage", "json"], "enclosing_function": "test_invoke_model_param", "extracted_code": "# Source: src/schema/__init__.py\n    ChatHistory,\n    ChatHistoryInput,\n    ChatMessage,\n    Feedback,\n    FeedbackResponse,\n    ServiceMetadata,\n    StreamInput,\n    UserInput,\n)\n\n__all__ = [\n    \"AgentInfo\",\n\n    \"AllModelEnum\",\n    \"UserInput\",\n    \"ChatMessage\",\n    \"ServiceMetadata\",\n    \"StreamInput\",\n    \"Feedback\",\n    \"FeedbackResponse\",\n    \"ChatHistoryInput\",\n    \"ChatHistory\",\n]", "n_imports_parsed": 10, "n_files_resolved": 3, "n_chars_extracted": 393}, "tests/service/test_service.py::372": {"resolved_imports": ["src/agents/agents.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["Agent", "OpenAIModelName", "ServiceMetadata", "json", "patch"], "enclosing_function": "test_info", "extracted_code": "# Source: src/agents/agents.py\nclass Agent:\n    description: str\n    graph_like: AgentGraphLike\n\n\n# Source: src/schema/__init__.py\n    Feedback,\n    FeedbackResponse,\n    ServiceMetadata,\n    StreamInput,\n    UserInput,\n)\n\n__all__ = [\n    \"AgentInfo\",\n    \"AllModelEnum\",\n    \"UserInput\",\n    \"ChatMessage\",\n    \"ServiceMetadata\",\n    \"StreamInput\",\n    \"Feedback\",\n    \"FeedbackResponse\",\n    \"ChatHistoryInput\",\n    \"ChatHistory\",\n]\n\n\n# Source: src/schema/models.py\nclass OpenAIModelName(StrEnum):\n    \"\"\"https://platform.openai.com/docs/models/gpt-4o\"\"\"\n\n    GPT_5_NANO = \"gpt-5-nano\"\n    GPT_5_MINI = \"gpt-5-mini\"\n    GPT_5_1 = \"gpt-5.1\"", "n_imports_parsed": 10, "n_files_resolved": 3, "n_chars_extracted": 678}, "tests/service/test_service_message_generator.py::73": {"resolved_imports": ["src/schema/__init__.py", "src/agents/langgraph_supervisor_hierarchy_agent.py", "src/service/service.py"], "used_names": ["AIMessage", "ChatMessage", "MemorySaver", "StreamInput", "ToolCall", "json", "message_generator", "patch", "pytest", "workflow"], "enclosing_function": "test_three_layer_supervisor_hierarchy_agent_with_fake_model", "extracted_code": "# Source: 
src/schema/__init__.py\n    ChatHistory,\n    ChatHistoryInput,\n    ChatMessage,\n    Feedback,\n    FeedbackResponse,\n    ServiceMetadata,\n    StreamInput,\n    UserInput,\n)\n\n__all__ = [\n    \"AgentInfo\",\n    \"AllModelEnum\",\n    \"UserInput\",\n    \"ChatMessage\",\n    \"ServiceMetadata\",\n    \"StreamInput\",\n    \"Feedback\",\n    \"FeedbackResponse\",\n    \"ChatHistoryInput\",\n    \"ChatHistory\",\n]\n\n\n# Source: src/agents/langgraph_supervisor_hierarchy_agent.py\ndef workflow(chosen_model):\n    math_agent = create_agent(\n        model=chosen_model,\n        tools=[add, multiply],\n        name=\"sub-agent-math_expert\", # Identify the graph node as a sub-agent\n        system_prompt=\"You are a math expert. Always use one tool at a time.\",\n    ).with_config(tags=[\"skip_stream\"])\n\n    research_agent = (\n        create_supervisor(\n            [math_agent],\n            model=chosen_model,\n            tools=[web_search],\n            prompt=\"You are a world class researcher with access to web search. Do not do any math, you have a math expert for that. \",\n            supervisor_name=\"supervisor-research_expert\", # Identify the graph node as a supervisor to the math agent\n        )\n        .compile(\n            name=\"sub-agent-research_expert\"\n        ) # Identify the graph node as a sub-agent to the main supervisor\n        .with_config(tags=[\"skip_stream\"])\n    ) # Stream tokens are ignored for sub-agents in the UI\n\n    # Create supervisor workflow\n    return create_supervisor(\n        [research_agent],\n        model=chosen_model,\n        prompt=(\n            \"You are a team supervisor managing a research expert with math capabilities.\"\n            \"For current events, use research_agent. 
\"\n ),\n add_handoff_back_messages=True,\n # UI now expects this to be True so we don't have to guess when a handoff back occurs\n output_mode=\"full_history\", # otherwise when reloading conversations, the sub-agents' messages are not included\n )\n\n\n# Source: src/service/service.py\nasync def message_generator(\n user_input: StreamInput, agent_id: str = DEFAULT_AGENT\n) -> AsyncGenerator[str, None]:\n \"\"\"\n Generate a stream of messages from the agent.\n\n This is the workhorse method for the /stream endpoint.\n \"\"\"\n agent: AgentGraph = get_agent(agent_id)\n kwargs, run_id = await _handle_input(user_input, agent)\n\n try:\n # Process streamed events from the graph and yield messages over the SSE stream.\n async for stream_event in agent.astream(\n **kwargs, stream_mode=[\"updates\", \"messages\", \"custom\"], subgraphs=True\n ):\n if not isinstance(stream_event, tuple):\n continue\n # Handle different stream event structures based on subgraphs\n if len(stream_event) == 3:\n # With subgraphs=True: (node_path, stream_mode, event)\n _, stream_mode, event = stream_event\n else:\n # Without subgraphs: (stream_mode, event)\n stream_mode, event = stream_event\n new_messages = []\n if stream_mode == \"updates\":\n for node, updates in event.items():\n # A simple approach to handle agent interrupts.\n # In a more sophisticated implementation, we could add\n # some structured ChatMessage type to return the interrupt value.\n if node == \"__interrupt__\":\n interrupt: Interrupt\n for interrupt in updates:\n new_messages.append(AIMessage(content=interrupt.value))\n continue\n updates = updates or {}\n update_messages = updates.get(\"messages\", [])\n # special cases for using langgraph-supervisor library\n if \"supervisor\" in node or \"sub-agent\" in node:\n # the only tools that come from the actual agent are the handoff and handback tools\n if isinstance(update_messages[-1], ToolMessage):\n if \"sub-agent\" in node and len(update_messages) > 1:\n # If this is a sub-agent, we want to keep the last 2 messages - the handback tool, and it's result\n update_messages = update_messages[-2:]\n else:\n # If this is a supervisor, we want to keep the last message only - the handoff result. The tool comes from the 'agent' node.\n update_messages = [update_messages[-1]]\n else:\n update_messages = []\n new_messages.extend(update_messages)\n\n if stream_mode == \"custom\":\n new_messages = [event]\n\n # LangGraph streaming may emit tuples: (field_name, field_value)\n # e.g. 
('content', <str>), ('tool_calls', [ToolCall,...]), ('additional_kwargs', {...}), etc.\n            # We accumulate only supported fields into `parts` and skip unsupported metadata.\n            # More info at: https://langchain-ai.github.io/langgraph/cloud/how-tos/stream_messages/\n            processed_messages = []\n            current_message: dict[str, Any] = {}\n            for message in new_messages:\n                if isinstance(message, tuple):\n                    key, value = message\n                    # Store parts in temporary dict\n                    current_message[key] = value\n                else:\n                    # Add complete message if we have one in progress\n                    if current_message:\n                        processed_messages.append(_create_ai_message(current_message))\n                        current_message = {}\n                    processed_messages.append(message)\n\n            # Add any remaining message parts\n            if current_message:\n                processed_messages.append(_create_ai_message(current_message))\n\n            for message in processed_messages:\n                try:\n                    chat_message = langchain_to_chat_message(message)\n                    chat_message.run_id = str(run_id)\n                except Exception as e:\n                    logger.error(f\"Error parsing message: {e}\")\n                    yield f\"data: {json.dumps({'type': 'error', 'content': 'Unexpected error'})}\\n\\n\"\n                    continue\n                # LangGraph re-sends the input message, which feels weird, so drop it\n                if chat_message.type == \"human\" and chat_message.content == user_input.message:\n                    continue\n                yield f\"data: {json.dumps({'type': 'message', 'content': chat_message.model_dump()})}\\n\\n\"\n\n            if stream_mode == \"messages\":\n                if not user_input.stream_tokens:\n                    continue\n                msg, metadata = event\n                if \"skip_stream\" in metadata.get(\"tags\", []):\n                    continue\n                # For some reason, astream(\"messages\") causes non-LLM nodes to send extra messages.\n                # Drop them.\n                if not isinstance(msg, AIMessageChunk):\n                    continue\n                content = remove_tool_calls(msg.content)\n                if content:\n                    # Empty content in the context of OpenAI usually means\n                    # that the model is asking for a tool to be invoked.\n                    # So we only print non-empty content.\n                    yield f\"data: {json.dumps({'type': 'token', 'content': convert_message_content_to_string(content)})}\\n\\n\"\n    except Exception as e:\n        logger.error(f\"Error in message generator: {e}\")\n        yield f\"data: {json.dumps({'type': 'error', 'content': 'Internal server error'})}\\n\\n\"\n    finally:\n        yield \"data: [DONE]\\n\\n\"", "n_imports_parsed": 9, "n_files_resolved": 3, "n_chars_extracted": 8204}, "tests/integration/test_docker_e2e.py::16": {"resolved_imports": ["src/client/__init__.py"], "used_names": ["AgentClient", "pytest"], "enclosing_function": "test_service_with_fake_model", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 239}, "tests/voice/providers/test_openai_stt.py::44": {"resolved_imports": ["src/voice/providers/openai_stt.py"], "used_names": ["OpenAISTT", "patch"], "enclosing_function": "test_transcribe_strips_whitespace", "extracted_code": "# Source: src/voice/providers/openai_stt.py\nclass OpenAISTT:\n    \"\"\"OpenAI Whisper STT provider.\"\"\"\n\n    def __init__(self, api_key: str | None = None):\n        \"\"\"Initialize OpenAI STT.\n\n        Args:\n            api_key: OpenAI API key (uses env var if not provided)\n\n        Raises:\n            Exception: If OpenAI client initialization fails\n        \"\"\"\n        # Create OpenAI client with provided key or from environment\n        self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n        logger.info(\"OpenAI 
STT initialized\")\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio using OpenAI Whisper.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n\n Note:\n Errors are logged but not raised - returns empty string instead.\n This allows graceful degradation in user-facing applications.\n \"\"\"\n try:\n # Reset file pointer to beginning (may have been read elsewhere)\n audio_file.seek(0)\n\n # Call OpenAI Whisper API for transcription\n result = self.client.audio.transcriptions.create(\n model=\"whisper-1\", file=audio_file, response_format=\"text\"\n )\n\n # Clean up whitespace from result\n transcribed = result.strip()\n logger.info(f\"OpenAI STT: transcribed {len(transcribed)} chars\")\n return transcribed\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI STT failed: {e}\", exc_info=True)\n # Return empty string to allow graceful degradation\n return \"\"", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1726}, "tests/agents/test_lazy_agent.py::34": {"resolved_imports": ["src/agents/lazy_agent.py"], "used_names": [], "enclosing_function": "test_initialization", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/agents/test_github_mcp_agent.py::122": {"resolved_imports": ["src/agents/github_mcp_agent/github_mcp_agent.py", "src/core/settings.py"], "used_names": ["GitHubMCPAgent", "pytest"], "enclosing_function": "test_get_graph_not_loaded", "extracted_code": "# Source: src/agents/github_mcp_agent/github_mcp_agent.py\nclass GitHubMCPAgent(LazyLoadingAgent):\n \"\"\"GitHub MCP Agent with async initialization.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._mcp_tools: list[BaseTool] = []\n self._mcp_client: MultiServerMCPClient | None = None\n\n async def load(self) -> None:\n \"\"\"Initialize the GitHub MCP agent by loading MCP tools.\"\"\"\n if not settings.GITHUB_PAT:\n logger.info(\"GITHUB_PAT is not set, GitHub MCP agent will have no tools\")\n self._mcp_tools = []\n self._graph = self._create_graph()\n self._loaded = True\n return\n\n try:\n # Initialize MCP client directly\n github_pat = settings.GITHUB_PAT.get_secret_value()\n connections = {\n \"github\": StreamableHttpConnection(\n transport=\"streamable_http\",\n url=settings.MCP_GITHUB_SERVER_URL,\n headers={\n \"Authorization\": f\"Bearer {github_pat}\",\n },\n )\n }\n\n self._mcp_client = MultiServerMCPClient(connections)\n logger.info(\"MCP client initialized successfully\")\n\n # Get tools from the client\n self._mcp_tools = await self._mcp_client.get_tools()\n logger.info(f\"GitHub MCP agent initialized with {len(self._mcp_tools)} tools\")\n\n except Exception as e:\n logger.error(f\"Failed to initialize GitHub MCP agent: {e}\")\n self._mcp_tools = []\n self._mcp_client = None\n\n # Create and store the graph\n self._graph = self._create_graph()\n self._loaded = True\n\n def _create_graph(self) -> CompiledStateGraph:\n \"\"\"Create the GitHub MCP agent graph.\"\"\"\n model = get_model(settings.DEFAULT_MODEL)\n\n return create_agent(\n model=model,\n tools=self._mcp_tools,\n name=\"github-mcp-agent\",\n system_prompt=prompt,\n )", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 2061}, "tests/app/test_streamlit_app.py::31": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AppTest", "AsyncMock", "ChatMessage"], "enclosing_function": 
"test_app_simple_non_streaming", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 393}, "tests/agents/test_agent_loading.py::65": {"resolved_imports": ["src/agents/agents.py", "src/agents/lazy_agent.py"], "used_names": ["LazyLoadingAgent", "Mock", "agents", "get_agent", "patch"], "enclosing_function": "test_get_agent_lazy_agent_loaded", "extracted_code": "# Source: src/agents/agents.py\ndef get_agent(agent_id: str) -> AgentGraph:\n \"\"\"Get an agent graph, loading lazy agents if needed.\"\"\"\n agent_graph = agents[agent_id].graph_like\n\n # If it's a lazy loading agent, ensure it's loaded and return its graph\n if isinstance(agent_graph, LazyLoadingAgent):\n if not agent_graph._loaded:\n raise RuntimeError(f\"Agent {agent_id} not loaded. Call load() first.\")\n return agent_graph.get_graph()\n\n # Otherwise return the graph directly\n return agent_graph\n\n\n# Source: src/agents/lazy_agent.py\nclass LazyLoadingAgent(ABC):\n \"\"\"Base class for agents that require async loading.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the agent.\"\"\"\n self._loaded = False\n self._graph: CompiledStateGraph | Pregel | None = None\n\n @abstractmethod\n async def load(self) -> None:\n \"\"\"\n Perform async loading for this agent.\n\n This method is called during service startup and should handle:\n - Setting up external connections (MCP clients, databases, etc.)\n - Loading tools or resources\n - Any other async setup required\n - Creating the agent's graph\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n def get_graph(self) -> CompiledStateGraph | Pregel:\n \"\"\"\n Get the agent's graph.\n\n Returns the graph instance that was created during load().\n\n Returns:\n The agent's graph (CompiledStateGraph or Pregel)\n \"\"\"\n if not self._loaded:\n raise RuntimeError(\"Agent not loaded. 
Call load() first.\")\n if self._graph is None:\n raise RuntimeError(\"Agent graph not created during load().\")\n return self._graph", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 1750}, "tests/service/test_utils.py::21": {"resolved_imports": ["src/service/utils.py"], "used_names": ["AIMessage", "HumanMessage", "SystemMessage", "ToolMessage", "langchain_to_chat_message"], "enclosing_function": "test_messages_from_langchain", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: {message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/service/test_utils.py::9": {"resolved_imports": ["src/service/utils.py"], "used_names": ["AIMessage", "HumanMessage", "SystemMessage", "ToolMessage", "langchain_to_chat_message"], "enclosing_function": "test_messages_from_langchain", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: {message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/agents/test_agent_loading.py::42": {"resolved_imports": ["src/agents/agents.py", "src/agents/lazy_agent.py"], "used_names": ["get_agent"], "enclosing_function": "test_get_agent_static_agent", "extracted_code": "# Source: src/agents/agents.py\ndef 
get_agent(agent_id: str) -> AgentGraph:\n \"\"\"Get an agent graph, loading lazy agents if needed.\"\"\"\n agent_graph = agents[agent_id].graph_like\n\n # If it's a lazy loading agent, ensure it's loaded and return its graph\n if isinstance(agent_graph, LazyLoadingAgent):\n if not agent_graph._loaded:\n raise RuntimeError(f\"Agent {agent_id} not loaded. Call load() first.\")\n return agent_graph.get_graph()\n\n # Otherwise return the graph directly\n return agent_graph", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 532}, "tests/voice/providers/test_openai_tts.py::21": {"resolved_imports": ["src/voice/providers/openai_tts.py"], "used_names": ["OpenAITTS", "pytest"], "enclosing_function": "test_init_with_invalid_voice", "extracted_code": "# Source: src/voice/providers/openai_tts.py\nclass OpenAITTS:\n \"\"\"OpenAI TTS provider.\"\"\"\n\n # API constraints\n MAX_TEXT_LENGTH = 4096\n MIN_TEXT_LENGTH = 3\n\n # Available configuration options\n VALID_VOICES = [\"alloy\", \"echo\", \"fable\", \"onyx\", \"nova\", \"shimmer\"]\n VALID_MODELS = [\"tts-1\", \"tts-1-hd\"]\n\n def __init__(self, api_key: str | None = None, voice: str = \"alloy\", model: str = \"tts-1\"):\n \"\"\"Initialize OpenAI TTS.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n voice: Voice name (alloy, echo, fable, onyx, nova, shimmer)\n model: Model name (tts-1 or tts-1-hd)\n\n Raises:\n ValueError: If voice or model is invalid\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Validate voice parameter\n if voice not in self.VALID_VOICES:\n raise ValueError(f\"Invalid voice '{voice}'. Must be one of {self.VALID_VOICES}\")\n\n # Validate model parameter\n if model not in self.VALID_MODELS:\n raise ValueError(f\"Invalid model '{model}'. 
Must be one of {self.VALID_MODELS}\")\n\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n self.voice = voice\n self.model = model\n\n logger.info(f\"OpenAI TTS initialized: voice={voice}, model={model}\")\n\n def _validate_and_prepare_text(self, text: str) -> str | None:\n \"\"\"Validate and prepare text for TTS generation.\n\n Args:\n text: Raw text input\n\n Returns:\n Prepared text ready for TTS, or None if text is too short\n\n Note:\n - Strips whitespace\n - Returns None if text is below minimum length\n - Truncates text if above maximum length\n \"\"\"\n # Remove leading/trailing whitespace\n text = text.strip()\n\n # Skip very short text (not worth API call)\n if len(text) < self.MIN_TEXT_LENGTH:\n logger.debug(f\"OpenAI TTS: skipping short text ({len(text)} chars)\")\n return None\n\n # Truncate to API limit if needed\n if len(text) > self.MAX_TEXT_LENGTH:\n logger.warning(\n f\"OpenAI TTS: truncating from {len(text)} to {self.MAX_TEXT_LENGTH} chars\"\n )\n text = text[: self.MAX_TEXT_LENGTH]\n\n return text\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n MP3 audio bytes, or None if text is too short or generation fails\n\n Note:\n - Text shorter than 3 chars returns None\n - Text longer than 4096 chars is truncated\n - Errors are logged but not raised - returns None instead\n \"\"\"\n # Validate and prepare text\n prepared_text = self._validate_and_prepare_text(text)\n if not prepared_text:\n return None\n\n try:\n # Call OpenAI TTS API\n response = self.client.audio.speech.create(\n model=self.model,\n voice=self.voice,\n input=prepared_text,\n response_format=\"mp3\",\n )\n\n # Extract audio bytes from response\n audio_bytes = response.content\n logger.info(f\"OpenAI TTS: generated {len(audio_bytes)} bytes\")\n return audio_bytes\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI TTS failed: {e}\", exc_info=True)\n # Return None to allow graceful degradation\n return None\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type).\n\n Returns:\n MIME type string for generated audio\n \"\"\"\n return \"audio/mp3\"", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 3890}, "tests/core/test_settings.py::86": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["AnthropicModelName", "OpenAIModelName", "SecretStr", "Settings", "os", "patch"], "enclosing_function": "test_settings_with_multiple_api_keys", "extracted_code": "# Source: src/core/settings.py\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is 
None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, BeforeValidator(check_str_is_http)] = \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n 
self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"\n\n\n# Source: src/schema/models.py\nclass OpenAIModelName(StrEnum):\n \"\"\"https://platform.openai.com/docs/models/gpt-4o\"\"\"\n\n GPT_5_NANO = \"gpt-5-nano\"\n GPT_5_MINI = \"gpt-5-mini\"\n GPT_5_1 = \"gpt-5.1\"\n\nclass AnthropicModelName(StrEnum):\n \"\"\"https://docs.anthropic.com/en/docs/about-claude/models#model-names\"\"\"\n\n HAIKU_45 = \"claude-haiku-4-5\"\n SONNET_45 = \"claude-sonnet-4-5\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 9060}, "tests/voice/providers/test_openai_tts.py::28": {"resolved_imports": ["src/voice/providers/openai_tts.py"], "used_names": ["OpenAITTS", "pytest"], "enclosing_function": "test_init_with_invalid_model", "extracted_code": "# Source: src/voice/providers/openai_tts.py\nclass OpenAITTS:\n \"\"\"OpenAI TTS provider.\"\"\"\n\n # API constraints\n MAX_TEXT_LENGTH = 4096\n MIN_TEXT_LENGTH = 3\n\n # Available configuration options\n VALID_VOICES = [\"alloy\", \"echo\", 
\"fable\", \"onyx\", \"nova\", \"shimmer\"]\n VALID_MODELS = [\"tts-1\", \"tts-1-hd\"]\n\n def __init__(self, api_key: str | None = None, voice: str = \"alloy\", model: str = \"tts-1\"):\n \"\"\"Initialize OpenAI TTS.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n voice: Voice name (alloy, echo, fable, onyx, nova, shimmer)\n model: Model name (tts-1 or tts-1-hd)\n\n Raises:\n ValueError: If voice or model is invalid\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Validate voice parameter\n if voice not in self.VALID_VOICES:\n raise ValueError(f\"Invalid voice '{voice}'. Must be one of {self.VALID_VOICES}\")\n\n # Validate model parameter\n if model not in self.VALID_MODELS:\n raise ValueError(f\"Invalid model '{model}'. Must be one of {self.VALID_MODELS}\")\n\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n self.voice = voice\n self.model = model\n\n logger.info(f\"OpenAI TTS initialized: voice={voice}, model={model}\")\n\n def _validate_and_prepare_text(self, text: str) -> str | None:\n \"\"\"Validate and prepare text for TTS generation.\n\n Args:\n text: Raw text input\n\n Returns:\n Prepared text ready for TTS, or None if text is too short\n\n Note:\n - Strips whitespace\n - Returns None if text is below minimum length\n - Truncates text if above maximum length\n \"\"\"\n # Remove leading/trailing whitespace\n text = text.strip()\n\n # Skip very short text (not worth API call)\n if len(text) < self.MIN_TEXT_LENGTH:\n logger.debug(f\"OpenAI TTS: skipping short text ({len(text)} chars)\")\n return None\n\n # Truncate to API limit if needed\n if len(text) > self.MAX_TEXT_LENGTH:\n logger.warning(\n f\"OpenAI TTS: truncating from {len(text)} to {self.MAX_TEXT_LENGTH} chars\"\n )\n text = text[: self.MAX_TEXT_LENGTH]\n\n return text\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n MP3 audio bytes, or None if text is too short or generation fails\n\n Note:\n - Text shorter than 3 chars returns None\n - Text longer than 4096 chars is truncated\n - Errors are logged but not raised - returns None instead\n \"\"\"\n # Validate and prepare text\n prepared_text = self._validate_and_prepare_text(text)\n if not prepared_text:\n return None\n\n try:\n # Call OpenAI TTS API\n response = self.client.audio.speech.create(\n model=self.model,\n voice=self.voice,\n input=prepared_text,\n response_format=\"mp3\",\n )\n\n # Extract audio bytes from response\n audio_bytes = response.content\n logger.info(f\"OpenAI TTS: generated {len(audio_bytes)} bytes\")\n return audio_bytes\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI TTS failed: {e}\", exc_info=True)\n # Return None to allow graceful degradation\n return None\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type).\n\n Returns:\n MIME type string for generated audio\n \"\"\"\n return \"audio/mp3\"", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 3890}, "tests/client/test_client.py::27": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClient"], "enclosing_function": "test_init", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, 
AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 239}, "tests/service/test_utils.py::42": {"resolved_imports": ["src/service/utils.py"], "used_names": ["AIMessage", "ToolCall", "langchain_to_chat_message"], "enclosing_function": "test_messages_tool_calls", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: {message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/service/test_service_message_generator.py::75": {"resolved_imports": ["src/schema/__init__.py", "src/agents/langgraph_supervisor_hierarchy_agent.py", "src/service/service.py"], "used_names": ["AIMessage", "ChatMessage", "MemorySaver", "StreamInput", "ToolCall", "json", "message_generator", "patch", "pytest", "workflow"], "enclosing_function": "test_three_layer_supervisor_hierarchy_agent_with_fake_model", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]\n\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]\n\n\n# Source: src/agents/langgraph_supervisor_hierarchy_agent.py\ndef workflow(chosen_model):\n math_agent = create_agent(\n model=chosen_model,\n tools=[add, multiply],\n name=\"sub-agent-math_expert\", # Identify the graph node as a sub-agent\n system_prompt=\"You are a math expert. Always use one tool at a time.\",\n ).with_config(tags=[\"skip_stream\"])\n\n research_agent = (\n create_supervisor(\n [math_agent],\n model=chosen_model,\n tools=[web_search],\n prompt=\"You are a world class researcher with access to web search. Do not do any math, you have a math expert for that. 
\",\n supervisor_name=\"supervisor-research_expert\", # Identify the graph node as a supervisor to the math agent\n )\n .compile(\n name=\"sub-agent-research_expert\"\n ) # Identify the graph node as a sub-agent to the main supervisor\n .with_config(tags=[\"skip_stream\"])\n ) # Stream tokens are ignored for sub-agents in the UI\n\n # Create supervisor workflow\n return create_supervisor(\n [research_agent],\n model=chosen_model,\n prompt=(\n \"You are a team supervisor managing a research expert with math capabilities.\"\n \"For current events, use research_agent. \"\n ),\n add_handoff_back_messages=True,\n # UI now expects this to be True so we don't have to guess when a handoff back occurs\n output_mode=\"full_history\", # otherwise when reloading conversations, the sub-agents' messages are not included\n )\n\n\n# Source: src/service/service.py\nasync def message_generator(\n user_input: StreamInput, agent_id: str = DEFAULT_AGENT\n) -> AsyncGenerator[str, None]:\n \"\"\"\n Generate a stream of messages from the agent.\n\n This is the workhorse method for the /stream endpoint.\n \"\"\"\n agent: AgentGraph = get_agent(agent_id)\n kwargs, run_id = await _handle_input(user_input, agent)\n\n try:\n # Process streamed events from the graph and yield messages over the SSE stream.\n async for stream_event in agent.astream(\n **kwargs, stream_mode=[\"updates\", \"messages\", \"custom\"], subgraphs=True\n ):\n if not isinstance(stream_event, tuple):\n continue\n # Handle different stream event structures based on subgraphs\n if len(stream_event) == 3:\n # With subgraphs=True: (node_path, stream_mode, event)\n _, stream_mode, event = stream_event\n else:\n # Without subgraphs: (stream_mode, event)\n stream_mode, event = stream_event\n new_messages = []\n if stream_mode == \"updates\":\n for node, updates in event.items():\n # A simple approach to handle agent interrupts.\n # In a more sophisticated implementation, we could add\n # some structured ChatMessage type to return the interrupt value.\n if node == \"__interrupt__\":\n interrupt: Interrupt\n for interrupt in updates:\n new_messages.append(AIMessage(content=interrupt.value))\n continue\n updates = updates or {}\n update_messages = updates.get(\"messages\", [])\n # special cases for using langgraph-supervisor library\n if \"supervisor\" in node or \"sub-agent\" in node:\n # the only tools that come from the actual agent are the handoff and handback tools\n if isinstance(update_messages[-1], ToolMessage):\n if \"sub-agent\" in node and len(update_messages) > 1:\n # If this is a sub-agent, we want to keep the last 2 messages - the handback tool, and it's result\n update_messages = update_messages[-2:]\n else:\n # If this is a supervisor, we want to keep the last message only - the handoff result. The tool comes from the 'agent' node.\n update_messages = [update_messages[-1]]\n else:\n update_messages = []\n new_messages.extend(update_messages)\n\n if stream_mode == \"custom\":\n new_messages = [event]\n\n # LangGraph streaming may emit tuples: (field_name, field_value)\n # e.g. 
('content', <str>), ('tool_calls', [ToolCall,...]), ('additional_kwargs', {...}), etc.\n # We accumulate only supported fields into `parts` and skip unsupported metadata.\n # More info at: https://langchain-ai.github.io/langgraph/cloud/how-tos/stream_messages/\n processed_messages = []\n current_message: dict[str, Any] = {}\n for message in new_messages:\n if isinstance(message, tuple):\n key, value = message\n # Store parts in temporary dict\n current_message[key] = value\n else:\n # Add complete message if we have one in progress\n if current_message:\n processed_messages.append(_create_ai_message(current_message))\n current_message = {}\n processed_messages.append(message)\n\n # Add any remaining message parts\n if current_message:\n processed_messages.append(_create_ai_message(current_message))\n\n for message in processed_messages:\n try:\n chat_message = langchain_to_chat_message(message)\n chat_message.run_id = str(run_id)\n except Exception as e:\n logger.error(f\"Error parsing message: {e}\")\n yield f\"data: {json.dumps({'type': 'error', 'content': 'Unexpected error'})}\\n\\n\"\n continue\n # LangGraph re-sends the input message, which feels weird, so drop it\n if chat_message.type == \"human\" and chat_message.content == user_input.message:\n continue\n yield f\"data: {json.dumps({'type': 'message', 'content': chat_message.model_dump()})}\\n\\n\"\n\n if stream_mode == \"messages\":\n if not user_input.stream_tokens:\n continue\n msg, metadata = event\n if \"skip_stream\" in metadata.get(\"tags\", []):\n continue\n # For some reason, astream(\"messages\") causes non-LLM nodes to send extra messages.\n # Drop them.\n if not isinstance(msg, AIMessageChunk):\n continue\n content = remove_tool_calls(msg.content)\n if content:\n # Empty content in the context of OpenAI usually means\n # that the model is asking for a tool to be invoked.\n # So we only print non-empty content.\n yield f\"data: {json.dumps({'type': 'token', 'content': convert_message_content_to_string(content)})}\\n\\n\"\n except Exception as e:\n logger.error(f\"Error in message generator: {e}\")\n yield f\"data: {json.dumps({'type': 'error', 'content': 'Internal server error'})}\\n\\n\"\n finally:\n yield \"data: [DONE]\\n\\n\"", "n_imports_parsed": 9, "n_files_resolved": 3, "n_chars_extracted": 8204}, "tests/core/test_settings.py::33": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["Settings"], "enclosing_function": "test_settings_default_values", "extracted_code": "# Source: src/core/settings.py\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: 
set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, BeforeValidator(check_str_is_http)] = \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if 
self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 8669}, "tests/client/test_client.py::256": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClientError", "Request", "Response", "patch", "pytest"], "enclosing_function": "test_acreate_feedback", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 239}, "tests/voice/test_tts.py::29": {"resolved_imports": ["src/voice/tts.py"], "used_names": ["TextToSpeech", "pytest"], "enclosing_function": "test_init_with_unimplemented_provider", "extracted_code": "# Source: src/voice/tts.py\nclass TextToSpeech:\n \"\"\"Text-to-speech factory.\n\n Loads and delegates to specific TTS provider implementations.\n\n Example:\n >>> tts = 
TextToSpeech(provider=\"openai\", voice=\"nova\")\n >>> audio = tts.generate(\"Hello world\")\n >>>\n >>> # Or from environment\n >>> tts = TextToSpeech.from_env()\n >>> if tts:\n ... audio = tts.generate(\"Hello world\")\n \"\"\"\n\n def __init__(self, provider: Provider = \"openai\", api_key: str | None = None, **config):\n \"\"\"Initialize TTS with specified provider.\n\n Args:\n provider: Provider name (\"openai\", \"elevenlabs\", etc.)\n api_key: API key (uses env var if not provided)\n **config: Provider-specific configuration\n OpenAI: voice=\"alloy\", model=\"tts-1\"\n ElevenLabs: voice_id=\"...\", model_id=\"...\"\n\n Raises:\n ValueError: If provider is unknown\n \"\"\"\n self._provider_name = provider\n\n # Resolve API key from parameter or environment\n resolved_api_key = self._get_api_key(provider, api_key)\n\n # Load and configure the provider\n self._provider = self._load_provider(provider, resolved_api_key, config)\n\n logger.info(f\"TextToSpeech created with provider={provider}\")\n\n def _get_api_key(self, provider: Provider, api_key: str | None) -> str | None:\n \"\"\"Get API key from parameter or environment.\n\n Args:\n provider: Provider name\n api_key: API key from parameter (takes precedence)\n\n Returns:\n Resolved API key or None\n \"\"\"\n # If API key provided explicitly, use it\n if api_key:\n return api_key\n\n # Otherwise, get from environment based on provider\n match provider:\n case \"openai\":\n return os.getenv(\"OPENAI_API_KEY\")\n case \"elevenlabs\":\n return os.getenv(\"ELEVENLABS_API_KEY\")\n case _:\n return None\n\n def _load_provider(self, provider: Provider, api_key: str | None, config: dict):\n \"\"\"Load the appropriate TTS provider implementation.\n\n Args:\n provider: Provider name\n api_key: Resolved API key\n config: Provider-specific configuration\n\n Returns:\n Provider instance\n\n Raises:\n ValueError: If provider is unknown\n NotImplementedError: If provider not yet implemented\n \"\"\"\n match provider:\n case \"openai\":\n from voice.providers.openai_tts import OpenAITTS\n\n # Extract OpenAI-specific config with defaults\n voice = config.get(\"voice\", \"alloy\")\n model = config.get(\"model\", \"tts-1\")\n\n return OpenAITTS(api_key=api_key, voice=voice, model=model)\n\n case \"elevenlabs\":\n # Example for future extensions: to add ElevenLabs support, implement ElevenLabsTTS provider and uncomment:\n # from voice.providers.elevenlabs_tts import ElevenLabsTTS\n # voice_id = config.get(\"voice_id\")\n # model_id = config.get(\"model_id\", \"eleven_monolingual_v1\")\n # return ElevenLabsTTS(api_key=api_key, voice_id=voice_id, model_id=model_id)\n raise NotImplementedError(\"ElevenLabs TTS provider not yet implemented\")\n\n case _:\n # Catch-all for unknown providers\n raise ValueError(f\"Unknown TTS provider: {provider}. Available providers: openai\")\n\n @property\n def provider(self) -> str:\n \"\"\"Get the provider name.\n\n Returns:\n Provider name string\n \"\"\"\n return self._provider_name\n\n @classmethod\n def from_env(cls) -> \"TextToSpeech | None\":\n \"\"\"Create TTS from environment variables.\n\n Reads VOICE_TTS_PROVIDER env var to determine which provider to use.\n Returns None if not configured.\n\n Returns:\n TextToSpeech instance or None\n\n Example:\n >>> # In .env: VOICE_TTS_PROVIDER=openai\n >>> tts = TextToSpeech.from_env()\n >>> if tts:\n ... 
audio = tts.generate(\"Hello world\")\n \"\"\"\n provider = os.getenv(\"VOICE_TTS_PROVIDER\")\n\n # If provider not set, voice features are disabled\n if not provider:\n logger.debug(\"VOICE_TTS_PROVIDER not set, TTS disabled\")\n return None\n\n try:\n # Create instance with provider from environment\n # Validates provider and raises ValueError if invalid\n return cls(provider=cast(Provider, provider))\n except Exception as e:\n # Log error but don't crash - allow app to continue without voice\n logger.error(f\"Failed to create TTS provider: {e}\", exc_info=True)\n return None\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Delegates to the underlying provider implementation.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n Audio bytes (format depends on provider), or None on failure\n \"\"\"\n return self._provider.generate(text)\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type) for this provider.\n\n Returns:\n MIME type string (e.g., \"audio/mp3\")\n \"\"\"\n return self._provider.get_format()", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 5482}, "tests/core/test_settings.py::209": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["LogLevel", "logging"], "enclosing_function": "test_log_level_enum", "extracted_code": "# Source: src/core/settings.py\nclass LogLevel(StrEnum):\n DEBUG = \"DEBUG\"\n INFO = \"INFO\"\n WARNING = \"WARNING\"\n ERROR = \"ERROR\"\n CRITICAL = \"CRITICAL\"\n\n def to_logging_level(self) -> int:\n \"\"\"Convert to Python logging level constant.\"\"\"\n import logging\n\n mapping = {\n LogLevel.DEBUG: logging.DEBUG,\n LogLevel.INFO: logging.INFO,\n LogLevel.WARNING: logging.WARNING,\n LogLevel.ERROR: logging.ERROR,\n LogLevel.CRITICAL: logging.CRITICAL,\n }\n return mapping[self]", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 565}, "tests/agents/test_agent_loading.py::36": {"resolved_imports": ["src/agents/agents.py", "src/agents/lazy_agent.py"], "used_names": ["load_agent", "pytest"], "enclosing_function": "test_load_agent_nonexistent", "extracted_code": "# Source: src/agents/agents.py\nasync def load_agent(agent_id: str) -> None:\n \"\"\"Load lazy agents if needed.\"\"\"\n graph_like = agents[agent_id].graph_like\n if isinstance(graph_like, LazyLoadingAgent):\n await graph_like.load()", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 239}, "tests/integration/test_docker_e2e.py::26": {"resolved_imports": ["src/client/__init__.py"], "used_names": ["AppTest", "pytest"], "enclosing_function": "test_service_with_app", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/service/test_auth.py::38": {"resolved_imports": [], "used_names": ["SecretStr"], "enclosing_function": "test_auth_secret_incorrect", "extracted_code": "", "n_imports_parsed": 1, "n_files_resolved": 0, "n_chars_extracted": 0}, "tests/voice/providers/test_openai_tts.py::49": {"resolved_imports": ["src/voice/providers/openai_tts.py"], "used_names": ["OpenAITTS", "patch"], "enclosing_function": "test_validate_text_too_long", "extracted_code": "# Source: src/voice/providers/openai_tts.py\nclass OpenAITTS:\n \"\"\"OpenAI TTS provider.\"\"\"\n\n # API constraints\n MAX_TEXT_LENGTH = 4096\n MIN_TEXT_LENGTH = 3\n\n # Available configuration options\n VALID_VOICES = [\"alloy\", \"echo\", \"fable\", \"onyx\", \"nova\", \"shimmer\"]\n VALID_MODELS = [\"tts-1\", \"tts-1-hd\"]\n\n def __init__(self, 
api_key: str | None = None, voice: str = \"alloy\", model: str = \"tts-1\"):\n \"\"\"Initialize OpenAI TTS.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n voice: Voice name (alloy, echo, fable, onyx, nova, shimmer)\n model: Model name (tts-1 or tts-1-hd)\n\n Raises:\n ValueError: If voice or model is invalid\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Validate voice parameter\n if voice not in self.VALID_VOICES:\n raise ValueError(f\"Invalid voice '{voice}'. Must be one of {self.VALID_VOICES}\")\n\n # Validate model parameter\n if model not in self.VALID_MODELS:\n raise ValueError(f\"Invalid model '{model}'. Must be one of {self.VALID_MODELS}\")\n\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n self.voice = voice\n self.model = model\n\n logger.info(f\"OpenAI TTS initialized: voice={voice}, model={model}\")\n\n def _validate_and_prepare_text(self, text: str) -> str | None:\n \"\"\"Validate and prepare text for TTS generation.\n\n Args:\n text: Raw text input\n\n Returns:\n Prepared text ready for TTS, or None if text is too short\n\n Note:\n - Strips whitespace\n - Returns None if text is below minimum length\n - Truncates text if above maximum length\n \"\"\"\n # Remove leading/trailing whitespace\n text = text.strip()\n\n # Skip very short text (not worth API call)\n if len(text) < self.MIN_TEXT_LENGTH:\n logger.debug(f\"OpenAI TTS: skipping short text ({len(text)} chars)\")\n return None\n\n # Truncate to API limit if needed\n if len(text) > self.MAX_TEXT_LENGTH:\n logger.warning(\n f\"OpenAI TTS: truncating from {len(text)} to {self.MAX_TEXT_LENGTH} chars\"\n )\n text = text[: self.MAX_TEXT_LENGTH]\n\n return text\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n MP3 audio bytes, or None if text is too short or generation fails\n\n Note:\n - Text shorter than 3 chars returns None\n - Text longer than 4096 chars is truncated\n - Errors are logged but not raised - returns None instead\n \"\"\"\n # Validate and prepare text\n prepared_text = self._validate_and_prepare_text(text)\n if not prepared_text:\n return None\n\n try:\n # Call OpenAI TTS API\n response = self.client.audio.speech.create(\n model=self.model,\n voice=self.voice,\n input=prepared_text,\n response_format=\"mp3\",\n )\n\n # Extract audio bytes from response\n audio_bytes = response.content\n logger.info(f\"OpenAI TTS: generated {len(audio_bytes)} bytes\")\n return audio_bytes\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI TTS failed: {e}\", exc_info=True)\n # Return None to allow graceful degradation\n return None\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type).\n\n Returns:\n MIME type string for generated audio\n \"\"\"\n return \"audio/mp3\"", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 3890}, "tests/app/test_streamlit_app.py::366": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AppTest", "Mock", "pytest"], "enclosing_function": "test_app_streaming_single_sub_agent", "extracted_code": "", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/service/test_service.py::132": {"resolved_imports": ["src/agents/agents.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AIMessage", "ChatMessage", 
"json"], "enclosing_function": "test_invoke_custom_agent_config", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 10, "n_files_resolved": 3, "n_chars_extracted": 393}, "tests/core/test_llm.py::25": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["ChatOpenAI", "OpenAIModelName", "get_model", "os", "patch"], "enclosing_function": "test_get_model_openai", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test 
response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass OpenAIModelName(StrEnum):\n \"\"\"https://platform.openai.com/docs/models/gpt-4o\"\"\"\n\n GPT_5_NANO = \"gpt-5-nano\"\n GPT_5_MINI = \"gpt-5-mini\"\n GPT_5_1 = \"gpt-5.1\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3513}, "tests/service/test_service_e2e.py::53": {"resolved_imports": ["src/agents/agents.py", "src/agents/utils.py", "src/client/__init__.py", "src/schema/schema.py", "src/service/utils.py"], "used_names": [], "enclosing_function": "test_messages_conversion", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/service/test_auth.py::12": {"resolved_imports": [], "used_names": [], "enclosing_function": "test_no_auth_secret", "extracted_code": "", "n_imports_parsed": 1, "n_files_resolved": 0, "n_chars_extracted": 0}, "tests/client/test_client.py::60": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClientError", "ChatMessage", "Request", "Response", "patch", "pytest"], "enclosing_function": "test_invoke", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\n\n# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 635}, "tests/service/test_service_e2e.py::59": {"resolved_imports": ["src/agents/agents.py", "src/agents/utils.py", "src/client/__init__.py", "src/schema/schema.py", "src/service/utils.py"], "used_names": [], "enclosing_function": "test_messages_conversion", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/app/test_streamlit_app.py::90": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AppTest", "ChatHistory", "ChatMessage"], "enclosing_function": "test_app_thread_id_history", "extracted_code": "# Source: src/schema/__init__.py\nfrom schema.schema import (\n AgentInfo,\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n\n AgentInfo,\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]\n\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 847}, "tests/service/test_utils.py::14": {"resolved_imports": ["src/service/utils.py"], "used_names": 
["AIMessage", "HumanMessage", "SystemMessage", "ToolMessage", "langchain_to_chat_message"], "enclosing_function": "test_messages_from_langchain", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: {message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/agents/test_github_mcp_agent.py::19": {"resolved_imports": ["src/agents/github_mcp_agent/github_mcp_agent.py", "src/core/settings.py"], "used_names": ["GitHubMCPAgent"], "enclosing_function": "test_initialization", "extracted_code": "# Source: src/agents/github_mcp_agent/github_mcp_agent.py\nclass GitHubMCPAgent(LazyLoadingAgent):\n \"\"\"GitHub MCP Agent with async initialization.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._mcp_tools: list[BaseTool] = []\n self._mcp_client: MultiServerMCPClient | None = None\n\n async def load(self) -> None:\n \"\"\"Initialize the GitHub MCP agent by loading MCP tools.\"\"\"\n if not settings.GITHUB_PAT:\n logger.info(\"GITHUB_PAT is not set, GitHub MCP agent will have no tools\")\n self._mcp_tools = []\n self._graph = self._create_graph()\n self._loaded = True\n return\n\n try:\n # Initialize MCP client directly\n github_pat = settings.GITHUB_PAT.get_secret_value()\n connections = {\n \"github\": StreamableHttpConnection(\n transport=\"streamable_http\",\n url=settings.MCP_GITHUB_SERVER_URL,\n headers={\n \"Authorization\": f\"Bearer {github_pat}\",\n },\n )\n }\n\n self._mcp_client = MultiServerMCPClient(connections)\n logger.info(\"MCP client initialized successfully\")\n\n # Get tools from the client\n self._mcp_tools = await self._mcp_client.get_tools()\n logger.info(f\"GitHub MCP agent initialized with {len(self._mcp_tools)} tools\")\n\n except Exception as e:\n logger.error(f\"Failed to initialize GitHub MCP agent: {e}\")\n self._mcp_tools = []\n self._mcp_client = None\n\n # Create and store the graph\n self._graph = self._create_graph()\n self._loaded = True\n\n def _create_graph(self) -> CompiledStateGraph:\n \"\"\"Create the GitHub MCP agent graph.\"\"\"\n model = get_model(settings.DEFAULT_MODEL)\n\n return create_agent(\n model=model,\n tools=self._mcp_tools,\n name=\"github-mcp-agent\",\n system_prompt=prompt,\n )", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 2061}, "tests/service/test_service_e2e.py::52": {"resolved_imports": ["src/agents/agents.py", 
"src/agents/utils.py", "src/client/__init__.py", "src/schema/schema.py", "src/service/utils.py"], "used_names": [], "enclosing_function": "test_messages_conversion", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/core/test_llm.py::51": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["ChatGroq", "GroqModelName", "get_model", "os", "patch"], "enclosing_function": "test_get_model_groq_guard", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass GroqModelName(StrEnum):\n \"\"\"https://console.groq.com/docs/models\"\"\"\n\n LLAMA_31_8B = \"llama-3.1-8b\"\n LLAMA_33_70B = \"llama-3.3-70b\"\n\n 
LLAMA_GUARD_4_12B = \"meta-llama/llama-guard-4-12b\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3541}, "tests/voice/test_tts.py::22": {"resolved_imports": ["src/voice/tts.py"], "used_names": ["TextToSpeech", "pytest"], "enclosing_function": "test_init_with_invalid_provider", "extracted_code": "# Source: src/voice/tts.py\nclass TextToSpeech:\n \"\"\"Text-to-speech factory.\n\n Loads and delegates to specific TTS provider implementations.\n\n Example:\n >>> tts = TextToSpeech(provider=\"openai\", voice=\"nova\")\n >>> audio = tts.generate(\"Hello world\")\n >>>\n >>> # Or from environment\n >>> tts = TextToSpeech.from_env()\n >>> if tts:\n ... audio = tts.generate(\"Hello world\")\n \"\"\"\n\n def __init__(self, provider: Provider = \"openai\", api_key: str | None = None, **config):\n \"\"\"Initialize TTS with specified provider.\n\n Args:\n provider: Provider name (\"openai\", \"elevenlabs\", etc.)\n api_key: API key (uses env var if not provided)\n **config: Provider-specific configuration\n OpenAI: voice=\"alloy\", model=\"tts-1\"\n ElevenLabs: voice_id=\"...\", model_id=\"...\"\n\n Raises:\n ValueError: If provider is unknown\n \"\"\"\n self._provider_name = provider\n\n # Resolve API key from parameter or environment\n resolved_api_key = self._get_api_key(provider, api_key)\n\n # Load and configure the provider\n self._provider = self._load_provider(provider, resolved_api_key, config)\n\n logger.info(f\"TextToSpeech created with provider={provider}\")\n\n def _get_api_key(self, provider: Provider, api_key: str | None) -> str | None:\n \"\"\"Get API key from parameter or environment.\n\n Args:\n provider: Provider name\n api_key: API key from parameter (takes precedence)\n\n Returns:\n Resolved API key or None\n \"\"\"\n # If API key provided explicitly, use it\n if api_key:\n return api_key\n\n # Otherwise, get from environment based on provider\n match provider:\n case \"openai\":\n return os.getenv(\"OPENAI_API_KEY\")\n case \"elevenlabs\":\n return os.getenv(\"ELEVENLABS_API_KEY\")\n case _:\n return None\n\n def _load_provider(self, provider: Provider, api_key: str | None, config: dict):\n \"\"\"Load the appropriate TTS provider implementation.\n\n Args:\n provider: Provider name\n api_key: Resolved API key\n config: Provider-specific configuration\n\n Returns:\n Provider instance\n\n Raises:\n ValueError: If provider is unknown\n NotImplementedError: If provider not yet implemented\n \"\"\"\n match provider:\n case \"openai\":\n from voice.providers.openai_tts import OpenAITTS\n\n # Extract OpenAI-specific config with defaults\n voice = config.get(\"voice\", \"alloy\")\n model = config.get(\"model\", \"tts-1\")\n\n return OpenAITTS(api_key=api_key, voice=voice, model=model)\n\n case \"elevenlabs\":\n # Example for future extensions: to add ElevenLabs support, implement ElevenLabsTTS provider and uncomment:\n # from voice.providers.elevenlabs_tts import ElevenLabsTTS\n # voice_id = config.get(\"voice_id\")\n # model_id = config.get(\"model_id\", \"eleven_monolingual_v1\")\n # return ElevenLabsTTS(api_key=api_key, voice_id=voice_id, model_id=model_id)\n raise NotImplementedError(\"ElevenLabs TTS provider not yet implemented\")\n\n case _:\n # Catch-all for unknown providers\n raise ValueError(f\"Unknown TTS provider: {provider}. 
Available providers: openai\")\n\n @property\n def provider(self) -> str:\n \"\"\"Get the provider name.\n\n Returns:\n Provider name string\n \"\"\"\n return self._provider_name\n\n @classmethod\n def from_env(cls) -> \"TextToSpeech | None\":\n \"\"\"Create TTS from environment variables.\n\n Reads VOICE_TTS_PROVIDER env var to determine which provider to use.\n Returns None if not configured.\n\n Returns:\n TextToSpeech instance or None\n\n Example:\n >>> # In .env: VOICE_TTS_PROVIDER=openai\n >>> tts = TextToSpeech.from_env()\n >>> if tts:\n ... audio = tts.generate(\"Hello world\")\n \"\"\"\n provider = os.getenv(\"VOICE_TTS_PROVIDER\")\n\n # If provider not set, voice features are disabled\n if not provider:\n logger.debug(\"VOICE_TTS_PROVIDER not set, TTS disabled\")\n return None\n\n try:\n # Create instance with provider from environment\n # Validates provider and raises ValueError if invalid\n return cls(provider=cast(Provider, provider))\n except Exception as e:\n # Log error but don't crash - allow app to continue without voice\n logger.error(f\"Failed to create TTS provider: {e}\", exc_info=True)\n return None\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Delegates to the underlying provider implementation.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n Audio bytes (format depends on provider), or None on failure\n \"\"\"\n return self._provider.generate(text)\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type) for this provider.\n\n Returns:\n MIME type string (e.g., \"audio/mp3\")\n \"\"\"\n return self._provider.get_format()", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 5482}, "tests/agents/test_agent_loading.py::50": {"resolved_imports": ["src/agents/agents.py", "src/agents/lazy_agent.py"], "used_names": ["LazyLoadingAgent", "Mock", "agents", "get_agent", "patch", "pytest"], "enclosing_function": "test_get_agent_lazy_agent_not_loaded", "extracted_code": "# Source: src/agents/agents.py\ndef get_agent(agent_id: str) -> AgentGraph:\n \"\"\"Get an agent graph, loading lazy agents if needed.\"\"\"\n agent_graph = agents[agent_id].graph_like\n\n # If it's a lazy loading agent, ensure it's loaded and return its graph\n if isinstance(agent_graph, LazyLoadingAgent):\n if not agent_graph._loaded:\n raise RuntimeError(f\"Agent {agent_id} not loaded. Call load() first.\")\n return agent_graph.get_graph()\n\n # Otherwise return the graph directly\n return agent_graph\n\n\n# Source: src/agents/lazy_agent.py\nclass LazyLoadingAgent(ABC):\n \"\"\"Base class for agents that require async loading.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the agent.\"\"\"\n self._loaded = False\n self._graph: CompiledStateGraph | Pregel | None = None\n\n @abstractmethod\n async def load(self) -> None:\n \"\"\"\n Perform async loading for this agent.\n\n This method is called during service startup and should handle:\n - Setting up external connections (MCP clients, databases, etc.)\n - Loading tools or resources\n - Any other async setup required\n - Creating the agent's graph\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n def get_graph(self) -> CompiledStateGraph | Pregel:\n \"\"\"\n Get the agent's graph.\n\n Returns the graph instance that was created during load().\n\n Returns:\n The agent's graph (CompiledStateGraph or Pregel)\n \"\"\"\n if not self._loaded:\n raise RuntimeError(\"Agent not loaded. 
Call load() first.\")\n if self._graph is None:\n raise RuntimeError(\"Agent graph not created during load().\")\n return self._graph", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 1750}, "tests/voice/providers/test_openai_tts.py::15": {"resolved_imports": ["src/voice/providers/openai_tts.py"], "used_names": ["OpenAITTS", "patch"], "enclosing_function": "test_init_with_valid_params", "extracted_code": "# Source: src/voice/providers/openai_tts.py\nclass OpenAITTS:\n \"\"\"OpenAI TTS provider.\"\"\"\n\n # API constraints\n MAX_TEXT_LENGTH = 4096\n MIN_TEXT_LENGTH = 3\n\n # Available configuration options\n VALID_VOICES = [\"alloy\", \"echo\", \"fable\", \"onyx\", \"nova\", \"shimmer\"]\n VALID_MODELS = [\"tts-1\", \"tts-1-hd\"]\n\n def __init__(self, api_key: str | None = None, voice: str = \"alloy\", model: str = \"tts-1\"):\n \"\"\"Initialize OpenAI TTS.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n voice: Voice name (alloy, echo, fable, onyx, nova, shimmer)\n model: Model name (tts-1 or tts-1-hd)\n\n Raises:\n ValueError: If voice or model is invalid\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Validate voice parameter\n if voice not in self.VALID_VOICES:\n raise ValueError(f\"Invalid voice '{voice}'. Must be one of {self.VALID_VOICES}\")\n\n # Validate model parameter\n if model not in self.VALID_MODELS:\n raise ValueError(f\"Invalid model '{model}'. Must be one of {self.VALID_MODELS}\")\n\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n self.voice = voice\n self.model = model\n\n logger.info(f\"OpenAI TTS initialized: voice={voice}, model={model}\")\n\n def _validate_and_prepare_text(self, text: str) -> str | None:\n \"\"\"Validate and prepare text for TTS generation.\n\n Args:\n text: Raw text input\n\n Returns:\n Prepared text ready for TTS, or None if text is too short\n\n Note:\n - Strips whitespace\n - Returns None if text is below minimum length\n - Truncates text if above maximum length\n \"\"\"\n # Remove leading/trailing whitespace\n text = text.strip()\n\n # Skip very short text (not worth API call)\n if len(text) < self.MIN_TEXT_LENGTH:\n logger.debug(f\"OpenAI TTS: skipping short text ({len(text)} chars)\")\n return None\n\n # Truncate to API limit if needed\n if len(text) > self.MAX_TEXT_LENGTH:\n logger.warning(\n f\"OpenAI TTS: truncating from {len(text)} to {self.MAX_TEXT_LENGTH} chars\"\n )\n text = text[: self.MAX_TEXT_LENGTH]\n\n return text\n\n def generate(self, text: str) -> bytes | None:\n \"\"\"Generate speech from text.\n\n Args:\n text: Text to convert to speech\n\n Returns:\n MP3 audio bytes, or None if text is too short or generation fails\n\n Note:\n - Text shorter than 3 chars returns None\n - Text longer than 4096 chars is truncated\n - Errors are logged but not raised - returns None instead\n \"\"\"\n # Validate and prepare text\n prepared_text = self._validate_and_prepare_text(text)\n if not prepared_text:\n return None\n\n try:\n # Call OpenAI TTS API\n response = self.client.audio.speech.create(\n model=self.model,\n voice=self.voice,\n input=prepared_text,\n response_format=\"mp3\",\n )\n\n # Extract audio bytes from response\n audio_bytes = response.content\n logger.info(f\"OpenAI TTS: generated {len(audio_bytes)} bytes\")\n return audio_bytes\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI TTS failed: {e}\", exc_info=True)\n # Return None to allow graceful 
degradation\n return None\n\n def get_format(self) -> str:\n \"\"\"Get audio format (MIME type).\n\n Returns:\n MIME type string for generated audio\n \"\"\"\n return \"audio/mp3\"", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 3890}, "tests/voice/test_stt.py::38": {"resolved_imports": ["src/voice/stt.py"], "used_names": ["SpeechToText", "os", "patch"], "enclosing_function": "test_from_env_provider_not_set", "extracted_code": "# Source: src/voice/stt.py\nclass SpeechToText:\n \"\"\"Speech-to-text factory.\n\n Loads and delegates to specific STT provider implementations.\n\n Example:\n >>> stt = SpeechToText(provider=\"openai\")\n >>> text = stt.transcribe(audio_file)\n >>>\n >>> # Or from environment\n >>> stt = SpeechToText.from_env()\n >>> if stt:\n ... text = stt.transcribe(audio_file)\n \"\"\"\n\n def __init__(self, provider: Provider = \"openai\", api_key: str | None = None, **config):\n \"\"\"Initialize STT with specified provider.\n\n Args:\n provider: Provider name (\"openai\", \"deepgram\", etc.)\n api_key: API key (uses env var if not provided)\n **config: Provider-specific configuration\n\n Raises:\n ValueError: If provider is unknown\n \"\"\"\n self._provider_name = provider\n\n # Resolve API key from parameter or environment\n resolved_api_key = self._get_api_key(provider, api_key)\n\n # Load and configure the provider\n self._provider = self._load_provider(provider, resolved_api_key, config)\n\n logger.info(f\"SpeechToText created with provider={provider}\")\n\n def _get_api_key(self, provider: Provider, api_key: str | None) -> str | None:\n \"\"\"Get API key from parameter or environment.\n\n Args:\n provider: Provider name\n api_key: API key from parameter (takes precedence)\n\n Returns:\n Resolved API key or None\n \"\"\"\n # If API key provided explicitly, use it\n if api_key:\n return api_key\n\n # Otherwise, get from environment based on provider\n match provider:\n case \"openai\":\n return os.getenv(\"OPENAI_API_KEY\")\n case \"deepgram\":\n return os.getenv(\"DEEPGRAM_API_KEY\")\n case _:\n return None\n\n def _load_provider(self, provider: Provider, api_key: str | None, config: dict):\n \"\"\"Load the appropriate STT provider implementation.\n\n Args:\n provider: Provider name\n api_key: Resolved API key\n config: Provider-specific configuration\n\n Returns:\n Provider instance\n\n Raises:\n ValueError: If provider is unknown\n NotImplementedError: If provider not yet implemented\n \"\"\"\n match provider:\n case \"openai\":\n from voice.providers.openai_stt import OpenAISTT\n\n return OpenAISTT(api_key=api_key, **config)\n\n case \"deepgram\":\n # Example for future extensions: to add Deepgram support, implement DeepgramSTT provider and uncomment:\n # from voice.providers.deepgram_stt import DeepgramSTT\n # return DeepgramSTT(api_key=api_key, **config)\n raise NotImplementedError(\"Deepgram STT provider not yet implemented\")\n\n case _:\n # Catch-all for unknown providers\n raise ValueError(f\"Unknown STT provider: {provider}. 
Available providers: openai\")\n\n @property\n def provider(self) -> str:\n \"\"\"Get the provider name.\n\n Returns:\n Provider name string\n \"\"\"\n return self._provider_name\n\n @classmethod\n def from_env(cls) -> \"SpeechToText | None\":\n \"\"\"Create STT from environment variables.\n\n Reads VOICE_STT_PROVIDER env var to determine which provider to use.\n Returns None if not configured.\n\n Returns:\n SpeechToText instance or None\n\n Example:\n >>> # In .env: VOICE_STT_PROVIDER=openai\n >>> stt = SpeechToText.from_env()\n >>> if stt:\n ... text = stt.transcribe(audio_file)\n \"\"\"\n provider = os.getenv(\"VOICE_STT_PROVIDER\")\n\n # If provider not set, voice features are disabled\n if not provider:\n logger.debug(\"VOICE_STT_PROVIDER not set, STT disabled\")\n return None\n\n try:\n # Create instance with provider from environment\n # Validates provider and raises ValueError if invalid\n return cls(provider=cast(Provider, provider))\n except Exception as e:\n # Log error but don't crash - allow app to continue without voice\n logger.error(f\"Failed to create STT provider: {e}\", exc_info=True)\n return None\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio to text.\n\n Delegates to the underlying provider implementation.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n \"\"\"\n return self._provider.transcribe(audio_file)", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 4763}, "tests/service/test_service_streaming.py::42": {"resolved_imports": ["src/service/service.py"], "used_names": ["AIMessage", "_create_ai_message", "pytest"], "enclosing_function": "test_create_ai_message_filters_and_passes_through", "extracted_code": "# Source: src/service/service.py\ndef _create_ai_message(parts: dict) -> AIMessage:\n sig = inspect.signature(AIMessage)\n valid_keys = set(sig.parameters)\n filtered = {k: v for k, v in parts.items() if k in valid_keys}\n return AIMessage(**filtered)", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 258}, "tests/voice/providers/test_openai_stt.py::22": {"resolved_imports": ["src/voice/providers/openai_stt.py"], "used_names": ["OpenAISTT", "patch"], "enclosing_function": "test_transcribe_success", "extracted_code": "# Source: src/voice/providers/openai_stt.py\nclass OpenAISTT:\n \"\"\"OpenAI Whisper STT provider.\"\"\"\n\n def __init__(self, api_key: str | None = None):\n \"\"\"Initialize OpenAI STT.\n\n Args:\n api_key: OpenAI API key (uses env var if not provided)\n\n Raises:\n Exception: If OpenAI client initialization fails\n \"\"\"\n # Create OpenAI client with provided key or from environment\n self.client = OpenAI(api_key=api_key) if api_key else OpenAI()\n logger.info(\"OpenAI STT initialized\")\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio using OpenAI Whisper.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n\n Note:\n Errors are logged but not raised - returns empty string instead.\n This allows graceful degradation in user-facing applications.\n \"\"\"\n try:\n # Reset file pointer to beginning (may have been read elsewhere)\n audio_file.seek(0)\n\n # Call OpenAI Whisper API for transcription\n result = self.client.audio.transcriptions.create(\n model=\"whisper-1\", file=audio_file, response_format=\"text\"\n )\n\n # Clean up whitespace from result\n transcribed = result.strip()\n logger.info(f\"OpenAI STT: transcribed {len(transcribed)} 
chars\")\n return transcribed\n\n except Exception as e:\n # Log error with full traceback for debugging\n logger.error(f\"OpenAI STT failed: {e}\", exc_info=True)\n # Return empty string to allow graceful degradation\n return \"\"", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1726}, "tests/app/test_streamlit_app.py::560": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AppTest", "Mock", "pytest"], "enclosing_function": "test_app_streaming_nested_sub_agents", "extracted_code": "", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/app/test_streamlit_app.py::469": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AppTest", "Mock", "pytest"], "enclosing_function": "test_app_streaming_sequential_sub_agents", "extracted_code": "", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/service/test_service_e2e.py::111": {"resolved_imports": ["src/agents/agents.py", "src/agents/utils.py", "src/client/__init__.py", "src/schema/schema.py", "src/service/utils.py"], "used_names": ["Agent", "AgentClient", "ChatMessage", "patch"], "enclosing_function": "test_agent_stream", "extracted_code": "# Source: src/agents/agents.py\nclass Agent:\n description: str\n graph_like: AgentGraphLike\n\n\n# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\n\n# Source: src/schema/schema.py\nclass ChatMessage(BaseModel):\n \"\"\"Message in a chat.\"\"\"\n\n type: Literal[\"human\", \"ai\", \"tool\", \"custom\"] = Field(\n description=\"Role of the message.\",\n examples=[\"human\", \"ai\", \"tool\", \"custom\"],\n )\n content: str = Field(\n description=\"Content of the message.\",\n examples=[\"Hello, world!\"],\n )\n tool_calls: list[ToolCall] = Field(\n description=\"Tool calls in the message.\",\n default=[],\n )\n tool_call_id: str | None = Field(\n description=\"Tool call that this message is responding to.\",\n default=None,\n examples=[\"call_Jja7J89XsjrOLA5r!MEOW!SL\"],\n )\n run_id: str | None = Field(\n description=\"Run ID of the message.\",\n default=None,\n examples=[\"847c6285-8fc9-4560-a83f-4e6285809254\"],\n )\n response_metadata: dict[str, Any] = Field(\n description=\"Response metadata. 
For example: response headers, logprobs, token counts.\",\n default={},\n )\n custom_data: dict[str, Any] = Field(\n description=\"Custom message data.\",\n default={},\n )\n\n def pretty_repr(self) -> str:\n \"\"\"Get a pretty representation of the message.\"\"\"\n base_title = self.type.title() + \" Message\"\n padded = \" \" + base_title + \" \"\n sep_len = (80 - len(padded)) // 2\n sep = \"=\" * sep_len\n second_sep = sep + \"=\" if len(padded) % 2 else sep\n title = f\"{sep}{padded}{second_sep}\"\n return f\"{title}\\n\\n{self.content}\"\n\n def pretty_print(self) -> None:\n print(self.pretty_repr())", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 1930}, "tests/core/test_llm.py::50": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["ChatGroq", "GroqModelName", "get_model", "os", "patch"], "enclosing_function": "test_get_model_groq_guard", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n 
base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass GroqModelName(StrEnum):\n \"\"\"https://console.groq.com/docs/models\"\"\"\n\n LLAMA_31_8B = \"llama-3.1-8b\"\n LLAMA_33_70B = \"llama-3.3-70b\"\n\n LLAMA_GUARD_4_12B = \"meta-llama/llama-guard-4-12b\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3541}, "tests/client/test_client.py::254": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClientError", "Request", "Response", "patch", "pytest"], "enclosing_function": "test_acreate_feedback", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 239}, "tests/core/test_llm.py::42": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["ChatGroq", "GroqModelName", "get_model", "os", "patch"], "enclosing_function": "test_get_model_groq", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in 
AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass GroqModelName(StrEnum):\n \"\"\"https://console.groq.com/docs/models\"\"\"\n\n LLAMA_31_8B = \"llama-3.1-8b\"\n LLAMA_33_70B = \"llama-3.3-70b\"\n\n LLAMA_GUARD_4_12B = \"meta-llama/llama-guard-4-12b\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3541}, "tests/agents/test_lazy_agent.py::64": {"resolved_imports": ["src/agents/lazy_agent.py"], "used_names": ["pytest"], "enclosing_function": "test_get_graph_no_graph_created", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/client/test_client.py::153": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClientError", "ChatMessage", "Mock", "Request", "Response", "json", "patch", "pytest"], "enclosing_function": "test_stream", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\n\n# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 635}, "tests/core/test_llm.py::26": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["ChatOpenAI", "OpenAIModelName", "get_model", "os", "patch"], "enclosing_function": "test_get_model_openai", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not 
settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass OpenAIModelName(StrEnum):\n \"\"\"https://platform.openai.com/docs/models/gpt-4o\"\"\"\n\n GPT_5_NANO = \"gpt-5-nano\"\n GPT_5_MINI = \"gpt-5-mini\"\n GPT_5_1 = \"gpt-5.1\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3513}, "tests/core/test_llm.py::34": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["AnthropicModelName", "ChatAnthropic", "get_model", "os", "patch"], "enclosing_function": "test_get_model_anthropic", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n 
azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass AnthropicModelName(StrEnum):\n \"\"\"https://docs.anthropic.com/en/docs/about-claude/models#model-names\"\"\"\n\n HAIKU_45 = \"claude-haiku-4-5\"\n SONNET_45 = \"claude-sonnet-4-5\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3522}, "tests/service/test_utils.py::19": {"resolved_imports": ["src/service/utils.py"], "used_names": ["AIMessage", "HumanMessage", "SystemMessage", "ToolMessage", "langchain_to_chat_message"], "enclosing_function": "test_messages_from_langchain", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: 
{message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/core/test_settings.py::203": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["Settings", "json", "os", "patch"], "enclosing_function": "test_settings_azure_openai", "extracted_code": "# Source: src/core/settings.py\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, BeforeValidator(check_str_is_http)] = \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and 
self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure 
deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 8669}, "tests/service/test_service.py::315": {"resolved_imports": ["src/agents/agents.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AIMessage", "AIMessageChunk", "json", "pytest"], "enclosing_function": "test_stream_no_tokens", "extracted_code": "", "n_imports_parsed": 10, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/service/test_utils.py::43": {"resolved_imports": ["src/service/utils.py"], "used_names": ["AIMessage", "ToolCall", "langchain_to_chat_message"], "enclosing_function": "test_messages_tool_calls", "extracted_code": "# Source: src/service/utils.py\ndef langchain_to_chat_message(message: BaseMessage) -> ChatMessage:\n \"\"\"Create a ChatMessage from a LangChain message.\"\"\"\n match message:\n case HumanMessage():\n human_message = ChatMessage(\n type=\"human\",\n content=convert_message_content_to_string(message.content),\n )\n return human_message\n case AIMessage():\n ai_message = ChatMessage(\n type=\"ai\",\n content=convert_message_content_to_string(message.content),\n )\n if message.tool_calls:\n ai_message.tool_calls = message.tool_calls\n if message.response_metadata:\n ai_message.response_metadata = message.response_metadata\n return ai_message\n case ToolMessage():\n tool_message = ChatMessage(\n type=\"tool\",\n content=convert_message_content_to_string(message.content),\n tool_call_id=message.tool_call_id,\n )\n return tool_message\n case LangchainChatMessage():\n if message.role == \"custom\":\n custom_message = ChatMessage(\n type=\"custom\",\n content=\"\",\n custom_data=message.content[0],\n )\n return custom_message\n else:\n raise ValueError(f\"Unsupported chat message role: {message.role}\")\n case _:\n raise ValueError(f\"Unsupported message type: {message.__class__.__name__}\")", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 1591}, "tests/integration/test_docker_e2e.py::34": {"resolved_imports": ["src/client/__init__.py"], "used_names": ["AppTest", "pytest"], "enclosing_function": "test_service_with_app", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/voice/test_stt.py::29": {"resolved_imports": ["src/voice/stt.py"], "used_names": ["SpeechToText", "pytest"], "enclosing_function": "test_init_with_unimplemented_provider", "extracted_code": "# Source: src/voice/stt.py\nclass SpeechToText:\n \"\"\"Speech-to-text factory.\n\n Loads and delegates to specific STT provider implementations.\n\n Example:\n >>> stt = SpeechToText(provider=\"openai\")\n >>> text = stt.transcribe(audio_file)\n >>>\n >>> # Or from environment\n >>> stt = SpeechToText.from_env()\n >>> if stt:\n ... 
text = stt.transcribe(audio_file)\n \"\"\"\n\n def __init__(self, provider: Provider = \"openai\", api_key: str | None = None, **config):\n \"\"\"Initialize STT with specified provider.\n\n Args:\n provider: Provider name (\"openai\", \"deepgram\", etc.)\n api_key: API key (uses env var if not provided)\n **config: Provider-specific configuration\n\n Raises:\n ValueError: If provider is unknown\n \"\"\"\n self._provider_name = provider\n\n # Resolve API key from parameter or environment\n resolved_api_key = self._get_api_key(provider, api_key)\n\n # Load and configure the provider\n self._provider = self._load_provider(provider, resolved_api_key, config)\n\n logger.info(f\"SpeechToText created with provider={provider}\")\n\n def _get_api_key(self, provider: Provider, api_key: str | None) -> str | None:\n \"\"\"Get API key from parameter or environment.\n\n Args:\n provider: Provider name\n api_key: API key from parameter (takes precedence)\n\n Returns:\n Resolved API key or None\n \"\"\"\n # If API key provided explicitly, use it\n if api_key:\n return api_key\n\n # Otherwise, get from environment based on provider\n match provider:\n case \"openai\":\n return os.getenv(\"OPENAI_API_KEY\")\n case \"deepgram\":\n return os.getenv(\"DEEPGRAM_API_KEY\")\n case _:\n return None\n\n def _load_provider(self, provider: Provider, api_key: str | None, config: dict):\n \"\"\"Load the appropriate STT provider implementation.\n\n Args:\n provider: Provider name\n api_key: Resolved API key\n config: Provider-specific configuration\n\n Returns:\n Provider instance\n\n Raises:\n ValueError: If provider is unknown\n NotImplementedError: If provider not yet implemented\n \"\"\"\n match provider:\n case \"openai\":\n from voice.providers.openai_stt import OpenAISTT\n\n return OpenAISTT(api_key=api_key, **config)\n\n case \"deepgram\":\n # Example for future extensions: to add Deepgram support, implement DeepgramSTT provider and uncomment:\n # from voice.providers.deepgram_stt import DeepgramSTT\n # return DeepgramSTT(api_key=api_key, **config)\n raise NotImplementedError(\"Deepgram STT provider not yet implemented\")\n\n case _:\n # Catch-all for unknown providers\n raise ValueError(f\"Unknown STT provider: {provider}. Available providers: openai\")\n\n @property\n def provider(self) -> str:\n \"\"\"Get the provider name.\n\n Returns:\n Provider name string\n \"\"\"\n return self._provider_name\n\n @classmethod\n def from_env(cls) -> \"SpeechToText | None\":\n \"\"\"Create STT from environment variables.\n\n Reads VOICE_STT_PROVIDER env var to determine which provider to use.\n Returns None if not configured.\n\n Returns:\n SpeechToText instance or None\n\n Example:\n >>> # In .env: VOICE_STT_PROVIDER=openai\n >>> stt = SpeechToText.from_env()\n >>> if stt:\n ... 
text = stt.transcribe(audio_file)\n \"\"\"\n provider = os.getenv(\"VOICE_STT_PROVIDER\")\n\n # If provider not set, voice features are disabled\n if not provider:\n logger.debug(\"VOICE_STT_PROVIDER not set, STT disabled\")\n return None\n\n try:\n # Create instance with provider from environment\n # Validates provider and raises ValueError if invalid\n return cls(provider=cast(Provider, provider))\n except Exception as e:\n # Log error but don't crash - allow app to continue without voice\n logger.error(f\"Failed to create STT provider: {e}\", exc_info=True)\n return None\n\n def transcribe(self, audio_file: BinaryIO) -> str:\n \"\"\"Transcribe audio to text.\n\n Delegates to the underlying provider implementation.\n\n Args:\n audio_file: Binary audio file\n\n Returns:\n Transcribed text (empty string on failure)\n \"\"\"\n return self._provider.transcribe(audio_file)", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 4763}, "tests/client/test_client.py::253": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AgentClientError", "Request", "Response", "patch", "pytest"], "enclosing_function": "test_acreate_feedback", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 239}, "tests/core/test_settings.py::34": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["Settings"], "enclosing_function": "test_settings_default_values", "extracted_code": "# Source: src/core/settings.py\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, 
BeforeValidator(check_str_is_http)] = \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n 
self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 8669}, "tests/service/test_service_lifespan.py::72": {"resolved_imports": ["src/schema/__init__.py", "src/service/__init__.py", "src/service/service.py"], "used_names": ["AgentInfo", "FastAPI", "asynccontextmanager", "logging", "pytest", "service"], "enclosing_function": "test_lifespan", "extracted_code": "# Source: src/schema/__init__.py\nfrom schema.models import AllModelEnum\nfrom schema.schema import (\n AgentInfo,\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n\n__all__ = [\n \"AgentInfo\",\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n\n\n# Source: src/service/__init__.py\nfrom service.service import app\n\n__all__ = [\"app\"]", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 560}, "tests/core/test_llm.py::33": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["AnthropicModelName", "ChatAnthropic", "get_model", "os", "patch"], "enclosing_function": "test_get_model_anthropic", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not 
settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass AnthropicModelName(StrEnum):\n \"\"\"https://docs.anthropic.com/en/docs/about-claude/models#model-names\"\"\"\n\n HAIKU_45 = \"claude-haiku-4-5\"\n SONNET_45 = \"claude-sonnet-4-5\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3522}, "tests/core/test_settings.py::211": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["LogLevel", "logging"], "enclosing_function": "test_log_level_enum", "extracted_code": "# Source: src/core/settings.py\nclass LogLevel(StrEnum):\n DEBUG = \"DEBUG\"\n INFO = \"INFO\"\n WARNING = \"WARNING\"\n ERROR = \"ERROR\"\n CRITICAL = \"CRITICAL\"\n\n def to_logging_level(self) -> int:\n \"\"\"Convert to Python logging level constant.\"\"\"\n import logging\n\n mapping = {\n LogLevel.DEBUG: logging.DEBUG,\n LogLevel.INFO: logging.INFO,\n LogLevel.WARNING: logging.WARNING,\n LogLevel.ERROR: logging.ERROR,\n LogLevel.CRITICAL: logging.CRITICAL,\n }\n return mapping[self]", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 565}, "tests/client/test_client.py::59": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": 
["AgentClientError", "ChatMessage", "Request", "Response", "patch", "pytest"], "enclosing_function": "test_invoke", "extracted_code": "# Source: src/client/__init__.py\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\nfrom client.client import AgentClient, AgentClientError\n\n__all__ = [\"AgentClient\", \"AgentClientError\"]\n\n\n# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 635}, "tests/core/test_llm.py::65": {"resolved_imports": ["src/core/llm.py", "src/schema/models.py"], "used_names": ["FakeListChatModel", "FakeModelName", "get_model"], "enclosing_function": "test_get_model_fake", "extracted_code": "# Source: src/core/llm.py\ndef get_model(model_name: AllModelEnum, /) -> ModelT:\n # NOTE: models with streaming=True will send tokens as they are generated\n # if the /stream endpoint is called with stream_tokens=True (the default)\n api_model_name = _MODEL_TABLE.get(model_name)\n if not api_model_name:\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n if model_name in OpenAIModelName:\n return ChatOpenAI(model=api_model_name, streaming=True)\n if model_name in OpenAICompatibleName:\n if not settings.COMPATIBLE_BASE_URL or not settings.COMPATIBLE_MODEL:\n raise ValueError(\"OpenAICompatible base url and endpoint must be configured\")\n\n return ChatOpenAI(\n model=settings.COMPATIBLE_MODEL,\n temperature=0.5,\n streaming=True,\n openai_api_base=settings.COMPATIBLE_BASE_URL,\n openai_api_key=settings.COMPATIBLE_API_KEY,\n )\n if model_name in AzureOpenAIModelName:\n if not settings.AZURE_OPENAI_API_KEY or not settings.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"Azure OpenAI API key and endpoint must be configured\")\n\n return AzureChatOpenAI(\n azure_endpoint=settings.AZURE_OPENAI_ENDPOINT,\n deployment_name=api_model_name,\n api_version=settings.AZURE_OPENAI_API_VERSION,\n temperature=0.5,\n streaming=True,\n timeout=60,\n max_retries=3,\n )\n if model_name in DeepseekModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n openai_api_base=\"https://api.deepseek.com\",\n openai_api_key=settings.DEEPSEEK_API_KEY,\n )\n if model_name in AnthropicModelName:\n return ChatAnthropic(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GoogleModelName:\n return ChatGoogleGenerativeAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in VertexAIModelName:\n return ChatVertexAI(model=api_model_name, temperature=0.5, streaming=True)\n if model_name in GroqModelName:\n if model_name == GroqModelName.LLAMA_GUARD_4_12B:\n return ChatGroq(model=api_model_name, temperature=0.0) # type: ignore[call-arg]\n return ChatGroq(model=api_model_name, temperature=0.5) # type: ignore[call-arg]\n if model_name in AWSModelName:\n return ChatBedrock(model_id=api_model_name, temperature=0.5)\n if model_name in OllamaModelName:\n if settings.OLLAMA_BASE_URL:\n chat_ollama = ChatOllama(\n model=settings.OLLAMA_MODEL, temperature=0.5, base_url=settings.OLLAMA_BASE_URL\n )\n else:\n chat_ollama = ChatOllama(model=settings.OLLAMA_MODEL, temperature=0.5)\n return 
chat_ollama\n if model_name in OpenRouterModelName:\n return ChatOpenAI(\n model=api_model_name,\n temperature=0.5,\n streaming=True,\n base_url=\"https://openrouter.ai/api/v1/\",\n api_key=settings.OPENROUTER_API_KEY,\n )\n if model_name in FakeModelName:\n return FakeToolModel(responses=[\"This is a test response from the fake model.\"])\n\n raise ValueError(f\"Unsupported model: {model_name}\")\n\n\n# Source: src/schema/models.py\nclass FakeModelName(StrEnum):\n \"\"\"Fake model for testing.\"\"\"\n\n FAKE = \"fake\"", "n_imports_parsed": 10, "n_files_resolved": 2, "n_chars_extracted": 3422}, "tests/app/test_streamlit_app.py::142": {"resolved_imports": ["src/client/__init__.py", "src/schema/__init__.py", "src/schema/models.py"], "used_names": ["AppTest", "AsyncGenerator", "ChatMessage", "Mock", "pytest"], "enclosing_function": "test_app_streaming", "extracted_code": "# Source: src/schema/__init__.py\n ChatHistory,\n ChatHistoryInput,\n ChatMessage,\n Feedback,\n FeedbackResponse,\n ServiceMetadata,\n StreamInput,\n UserInput,\n)\n\n__all__ = [\n \"AgentInfo\",\n\n \"AllModelEnum\",\n \"UserInput\",\n \"ChatMessage\",\n \"ServiceMetadata\",\n \"StreamInput\",\n \"Feedback\",\n \"FeedbackResponse\",\n \"ChatHistoryInput\",\n \"ChatHistory\",\n]", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 393}, "tests/core/test_settings.py::227": {"resolved_imports": ["src/core/settings.py", "src/schema/models.py"], "used_names": ["LogLevel", "Settings", "logging", "os", "patch"], "enclosing_function": "test_settings_log_level_from_env", "extracted_code": "# Source: src/core/settings.py\nclass LogLevel(StrEnum):\n DEBUG = \"DEBUG\"\n INFO = \"INFO\"\n WARNING = \"WARNING\"\n ERROR = \"ERROR\"\n CRITICAL = \"CRITICAL\"\n\n def to_logging_level(self) -> int:\n \"\"\"Convert to Python logging level constant.\"\"\"\n import logging\n\n mapping = {\n LogLevel.DEBUG: logging.DEBUG,\n LogLevel.INFO: logging.INFO,\n LogLevel.WARNING: logging.WARNING,\n LogLevel.ERROR: logging.ERROR,\n LogLevel.CRITICAL: logging.CRITICAL,\n }\n return mapping[self]\n\nclass Settings(BaseSettings):\n model_config = SettingsConfigDict(\n env_file=find_dotenv(),\n env_file_encoding=\"utf-8\",\n env_ignore_empty=True,\n extra=\"ignore\",\n validate_default=False,\n )\n MODE: str | None = None\n\n HOST: str = \"0.0.0.0\"\n PORT: int = 8080\n GRACEFUL_SHUTDOWN_TIMEOUT: int = 30\n LOG_LEVEL: LogLevel = LogLevel.WARNING\n\n AUTH_SECRET: SecretStr | None = None\n\n OPENAI_API_KEY: SecretStr | None = None\n DEEPSEEK_API_KEY: SecretStr | None = None\n ANTHROPIC_API_KEY: SecretStr | None = None\n GOOGLE_API_KEY: SecretStr | None = None\n GOOGLE_APPLICATION_CREDENTIALS: SecretStr | None = None\n GROQ_API_KEY: SecretStr | None = None\n USE_AWS_BEDROCK: bool = False\n OLLAMA_MODEL: str | None = None\n OLLAMA_BASE_URL: str | None = None\n USE_FAKE_MODEL: bool = False\n OPENROUTER_API_KEY: str | None = None\n\n # If DEFAULT_MODEL is None, it will be set in model_post_init\n DEFAULT_MODEL: AllModelEnum | None = None # type: ignore[assignment]\n AVAILABLE_MODELS: set[AllModelEnum] = set() # type: ignore[assignment]\n\n # Set openai compatible api, mainly used for proof of concept\n COMPATIBLE_MODEL: str | None = None\n COMPATIBLE_API_KEY: SecretStr | None = None\n COMPATIBLE_BASE_URL: str | None = None\n\n OPENWEATHERMAP_API_KEY: SecretStr | None = None\n\n # MCP Configuration\n GITHUB_PAT: SecretStr | None = None\n MCP_GITHUB_SERVER_URL: str = \"https://api.githubcopilot.com/mcp/\"\n\n LANGCHAIN_TRACING_V2: bool = False\n 
LANGCHAIN_PROJECT: str = \"default\"\n LANGCHAIN_ENDPOINT: Annotated[str, BeforeValidator(check_str_is_http)] = (\n \"https://api.smith.langchain.com\"\n )\n LANGCHAIN_API_KEY: SecretStr | None = None\n\n LANGFUSE_TRACING: bool = False\n LANGFUSE_HOST: Annotated[str, BeforeValidator(check_str_is_http)] = \"https://cloud.langfuse.com\"\n LANGFUSE_PUBLIC_KEY: SecretStr | None = None\n LANGFUSE_SECRET_KEY: SecretStr | None = None\n\n # Database Configuration\n DATABASE_TYPE: DatabaseType = (\n DatabaseType.SQLITE\n ) # Options: DatabaseType.SQLITE or DatabaseType.POSTGRES\n SQLITE_DB_PATH: str = \"checkpoints.db\"\n\n # PostgreSQL Configuration\n POSTGRES_USER: str | None = None\n POSTGRES_PASSWORD: SecretStr | None = None\n POSTGRES_HOST: str | None = None\n POSTGRES_PORT: int | None = None\n POSTGRES_DB: str | None = None\n POSTGRES_APPLICATION_NAME: str = \"agent-service-toolkit\"\n POSTGRES_MIN_CONNECTIONS_PER_POOL: int = 1\n POSTGRES_MAX_CONNECTIONS_PER_POOL: int = 1\n\n # MongoDB Configuration\n MONGO_HOST: str | None = None\n MONGO_PORT: int | None = None\n MONGO_DB: str | None = None\n MONGO_USER: str | None = None\n MONGO_PASSWORD: SecretStr | None = None\n MONGO_AUTH_SOURCE: str | None = None\n\n # Azure OpenAI Settings\n AZURE_OPENAI_API_KEY: SecretStr | None = None\n AZURE_OPENAI_ENDPOINT: str | None = None\n AZURE_OPENAI_API_VERSION: str = \"2024-02-15-preview\"\n AZURE_OPENAI_DEPLOYMENT_MAP: dict[str, str] = Field(\n default_factory=dict, description=\"Map of model names to Azure deployment IDs\"\n )\n\n def model_post_init(self, __context: Any) -> None:\n api_keys = {\n Provider.OPENAI: self.OPENAI_API_KEY,\n Provider.OPENAI_COMPATIBLE: self.COMPATIBLE_BASE_URL and self.COMPATIBLE_MODEL,\n Provider.DEEPSEEK: self.DEEPSEEK_API_KEY,\n Provider.ANTHROPIC: self.ANTHROPIC_API_KEY,\n Provider.GOOGLE: self.GOOGLE_API_KEY,\n Provider.VERTEXAI: self.GOOGLE_APPLICATION_CREDENTIALS,\n Provider.GROQ: self.GROQ_API_KEY,\n Provider.AWS: self.USE_AWS_BEDROCK,\n Provider.OLLAMA: self.OLLAMA_MODEL,\n Provider.FAKE: self.USE_FAKE_MODEL,\n Provider.AZURE_OPENAI: self.AZURE_OPENAI_API_KEY,\n Provider.OPENROUTER: self.OPENROUTER_API_KEY,\n }\n active_keys = [k for k, v in api_keys.items() if v]\n if not active_keys:\n raise ValueError(\"At least one LLM API key must be provided.\")\n\n for provider in active_keys:\n match provider:\n case Provider.OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAIModelName.GPT_5_NANO\n self.AVAILABLE_MODELS.update(set(OpenAIModelName))\n case Provider.OPENAI_COMPATIBLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenAICompatibleName.OPENAI_COMPATIBLE\n self.AVAILABLE_MODELS.update(set(OpenAICompatibleName))\n case Provider.DEEPSEEK:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = DeepseekModelName.DEEPSEEK_CHAT\n self.AVAILABLE_MODELS.update(set(DeepseekModelName))\n case Provider.ANTHROPIC:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AnthropicModelName.HAIKU_45\n self.AVAILABLE_MODELS.update(set(AnthropicModelName))\n case Provider.GOOGLE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GoogleModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(GoogleModelName))\n case Provider.VERTEXAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = VertexAIModelName.GEMINI_20_FLASH\n self.AVAILABLE_MODELS.update(set(VertexAIModelName))\n case Provider.GROQ:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = GroqModelName.LLAMA_31_8B\n self.AVAILABLE_MODELS.update(set(GroqModelName))\n case 
Provider.AWS:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AWSModelName.BEDROCK_HAIKU\n self.AVAILABLE_MODELS.update(set(AWSModelName))\n case Provider.OLLAMA:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OllamaModelName.OLLAMA_GENERIC\n self.AVAILABLE_MODELS.update(set(OllamaModelName))\n case Provider.OPENROUTER:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = OpenRouterModelName.GEMINI_25_FLASH\n self.AVAILABLE_MODELS.update(set(OpenRouterModelName))\n case Provider.FAKE:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = FakeModelName.FAKE\n self.AVAILABLE_MODELS.update(set(FakeModelName))\n case Provider.AZURE_OPENAI:\n if self.DEFAULT_MODEL is None:\n self.DEFAULT_MODEL = AzureOpenAIModelName.AZURE_GPT_4O_MINI\n self.AVAILABLE_MODELS.update(set(AzureOpenAIModelName))\n # Validate Azure OpenAI settings if Azure provider is available\n if not self.AZURE_OPENAI_API_KEY:\n raise ValueError(\"AZURE_OPENAI_API_KEY must be set\")\n if not self.AZURE_OPENAI_ENDPOINT:\n raise ValueError(\"AZURE_OPENAI_ENDPOINT must be set\")\n if not self.AZURE_OPENAI_DEPLOYMENT_MAP:\n raise ValueError(\"AZURE_OPENAI_DEPLOYMENT_MAP must be set\")\n\n # Parse deployment map if it's a string\n if isinstance(self.AZURE_OPENAI_DEPLOYMENT_MAP, str):\n try:\n self.AZURE_OPENAI_DEPLOYMENT_MAP = loads(\n self.AZURE_OPENAI_DEPLOYMENT_MAP\n )\n except Exception as e:\n raise ValueError(f\"Invalid AZURE_OPENAI_DEPLOYMENT_MAP JSON: {e}\")\n\n # Validate required deployments exist\n required_models = {\"gpt-4o\", \"gpt-4o-mini\"}\n missing_models = required_models - set(self.AZURE_OPENAI_DEPLOYMENT_MAP.keys())\n if missing_models:\n raise ValueError(f\"Missing required Azure deployments: {missing_models}\")\n case _:\n raise ValueError(f\"Unknown provider: {provider}\")\n\n @computed_field # type: ignore[prop-decorator]\n @property\n def BASE_URL(self) -> str:\n return f\"http://{self.HOST}:{self.PORT}\"\n\n def is_dev(self) -> bool:\n return self.MODE == \"dev\"", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 9205}, "tests/agents/test_github_mcp_agent.py::110": {"resolved_imports": ["src/agents/github_mcp_agent/github_mcp_agent.py", "src/core/settings.py"], "used_names": ["GitHubMCPAgent", "Mock", "patch", "prompt"], "enclosing_function": "test_create_graph", "extracted_code": "# Source: src/agents/github_mcp_agent/github_mcp_agent.py\nprompt = f\"\"\"\nYou are GitHubBot, a specialized assistant for GitHub repository management and development workflows.\nYou have access to GitHub MCP tools that allow you to interact with GitHub repositories, issues, pull requests,\nand other GitHub resources. 
Today's date is {current_date}.\n\nYour capabilities include:\n- Repository management (create, clone, browse)\n- Issue management (create, list, update, close)\n- Pull request management (create, review, merge)\n- Branch management (create, switch, merge)\n- File operations (read, write, search)\n- Commit operations (create, view history)\n\nGuidelines:\n- Always be helpful and provide clear explanations of GitHub operations\n- When creating or modifying content, ensure it follows best practices\n- Be cautious with destructive operations (deletes, force pushes, etc.)\n- Provide context about what you're doing and why\n- Use appropriate commit messages and PR descriptions\n- Respect repository permissions and access controls\n\nNOTE: You have access to GitHub MCP tools that provide direct GitHub API access.\n\"\"\"\n\nclass GitHubMCPAgent(LazyLoadingAgent):\n \"\"\"GitHub MCP Agent with async initialization.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._mcp_tools: list[BaseTool] = []\n self._mcp_client: MultiServerMCPClient | None = None\n\n async def load(self) -> None:\n \"\"\"Initialize the GitHub MCP agent by loading MCP tools.\"\"\"\n if not settings.GITHUB_PAT:\n logger.info(\"GITHUB_PAT is not set, GitHub MCP agent will have no tools\")\n self._mcp_tools = []\n self._graph = self._create_graph()\n self._loaded = True\n return\n\n try:\n # Initialize MCP client directly\n github_pat = settings.GITHUB_PAT.get_secret_value()\n connections = {\n \"github\": StreamableHttpConnection(\n transport=\"streamable_http\",\n url=settings.MCP_GITHUB_SERVER_URL,\n headers={\n \"Authorization\": f\"Bearer {github_pat}\",\n },\n )\n }\n\n self._mcp_client = MultiServerMCPClient(connections)\n logger.info(\"MCP client initialized successfully\")\n\n # Get tools from the client\n self._mcp_tools = await self._mcp_client.get_tools()\n logger.info(f\"GitHub MCP agent initialized with {len(self._mcp_tools)} tools\")\n\n except Exception as e:\n logger.error(f\"Failed to initialize GitHub MCP agent: {e}\")\n self._mcp_tools = []\n self._mcp_client = None\n\n # Create and store the graph\n self._graph = self._create_graph()\n self._loaded = True\n\n def _create_graph(self) -> CompiledStateGraph:\n \"\"\"Create the GitHub MCP agent graph.\"\"\"\n model = get_model(settings.DEFAULT_MODEL)\n\n return create_agent(\n model=model,\n tools=self._mcp_tools,\n name=\"github-mcp-agent\",\n system_prompt=prompt,\n )", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 3124}}}