Spaces:
Running
Running
| import os | |
| from openai import OpenAI | |
def get_llm_client(provider="nvidia"):
    """
    Return an OpenAI-compatible client configured for the specified provider.

    Args:
        provider (str): "nvidia" or "mistral". Any value other than
            "nvidia" falls through to the Mistral configuration.

    Returns:
        OpenAI: The configured client. If the provider's API key is not
            set in the environment, a warning is printed and the client
            is still constructed (requests will fail at call time).
    """
    # Provider -> (env var holding the API key, OpenAI-compatible base URL).
    # NVIDIA NIM and the Mistral API both expose OpenAI-compatible endpoints.
    if provider == "nvidia":
        env_var = "NVIDIA_API_KEY"
        base_url = "https://integrate.api.nvidia.com/v1"
    else:
        env_var = "MISTRAL_API_KEY"
        base_url = "https://api.mistral.ai/v1"

    api_key = os.getenv(env_var)
    if not api_key:
        # Best-effort warning only; callers may inject credentials later.
        print(f"Warning: {env_var} not found in environment variables.")

    return OpenAI(
        base_url=base_url,
        api_key=api_key,
    )
def get_model_name(provider="nvidia"):
    """
    Return the model identifier to request from the given provider.

    Args:
        provider (str): "nvidia" or "mistral". Any value other than
            "nvidia" falls through to the Mistral model ID.

    Returns:
        str: The provider-specific model name.
    """
    # Meta Llama 3.1 70B is stable and common on NVIDIA NIM.
    nvidia_model = "meta/llama-3.1-70b-instruct"
    # Mistral Large latest ID.
    mistral_model = "mistral-large-latest"
    return nvidia_model if provider == "nvidia" else mistral_model