| """LLM provider abstraction for the interactive demo. |
| |
| The demo points at any OpenAI-compatible ``/v1/chat/completions`` endpoint: |
| local Ollama, Hugging Face's Inference Providers router, OpenAI itself, |
| vLLM, OpenRouter, etc. Everything funnels through one factory so the UI |
| only has to learn one shape. |
| |
| The browser passes ``base_url``, ``model``, and (optionally) ``api_key`` |
| on every request. If ``api_key`` is missing we fall back to a per-provider |
| env var so a Hugging Face Space can ship a default working config without |
| hard-coding secrets in client bundles. |
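
Example round-trip (illustrative values; assumes a local Ollama daemon with a
pulled ``llama3.2`` tag)::

    request = LlmStepRequest(
        base_url="http://localhost:11434/v1",
        model="llama3.2",
    )
    policy = default_openai_compat_policy_factory(request)
    reply = policy([{"role": "user", "content": "Reply with a JSON object."}])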
| """ |
|
|
| from __future__ import annotations |
|
|
| import logging |
| import os |
| from collections.abc import Callable |
| from typing import Optional |
|
|
| from fastapi import HTTPException |
| from pydantic import BaseModel, ConfigDict, Field |
|
|
| _log = logging.getLogger(__name__) |
|
|
|
|
| |
| |
| |
HF_ROUTER_BASE_URL = "https://router.huggingface.co/v1"
OPENAI_BASE_URL = "https://api.openai.com/v1"
OLLAMA_OPENAI_BASE_URL = "http://localhost:11434/v1"


class LlmStepRequest(BaseModel):
    """Provider-agnostic step payload.

    The browser supplies a base URL + model + (optional) key; the server
    feeds these into an ``openai.OpenAI`` client. ``base_url`` is required
    so we never silently default to the wrong endpoint when the user
    swaps providers mid-session.
    """

    model_config = ConfigDict(extra="forbid")

    base_url: str = Field(
        description=(
            "OpenAI-compatible /v1 base URL. E.g. http://localhost:11434/v1, "
            "https://router.huggingface.co/v1, https://api.openai.com/v1."
        ),
    )
    model: str = Field(
        description=(
            "Model id understood by the chosen base URL. For HF this is the "
            "repo id (optionally suffixed with :provider, e.g. ':fastest'); "
            "for Ollama it's the local tag; for OpenAI it's the model name."
        ),
    )
    api_key: Optional[str] = Field(
        default=None,
        description=(
            "Bearer token forwarded as Authorization header. Falls back to "
            "HF_TOKEN / OPENAI_API_KEY / OLLAMA_API_KEY env vars on the "
            "server based on `base_url` if omitted."
        ),
    )
    temperature: float = Field(default=0.4, ge=0.0, le=2.0)
    max_tokens: int = Field(default=2048, ge=64, le=8192)
    request_timeout_s: float = Field(default=120.0, ge=5.0, le=600.0)


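# A "policy" maps one chat prompt (a list of {"role": ..., "content": ...}
# message dicts) to the raw assistant reply text; a factory builds a fresh
# policy from each incoming step request's provider settings.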
LlmPolicy = Callable[[list[dict[str, str]]], str]
LlmPolicyFactory = Callable[[LlmStepRequest], LlmPolicy]


def resolve_api_key(request: LlmStepRequest) -> Optional[str]:
    """Pick the bearer token to use for this request.

    Browser-supplied keys win. When the browser sends nothing we fall
    back to a server-side env var picked from the URL — this lets a
    public Hugging Face Space ship a usable default by setting
    ``HF_TOKEN`` as a Space secret while still letting power users
    bring their own.
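
    For example, with ``base_url="https://router.huggingface.co/v1"`` and no
    key in the payload, ``HF_TOKEN`` (or ``HUGGINGFACE_API_KEY``) is used
    when set.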
| """ |
|
|
| if request.api_key: |
| return request.api_key |
|
|
    base_url = (request.base_url or "").lower()
    if "huggingface" in base_url:
        return os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_API_KEY")
    if "openai.com" in base_url:
        return os.environ.get("OPENAI_API_KEY")
    if "openrouter" in base_url:
        return os.environ.get("OPENROUTER_API_KEY")
    if "localhost" in base_url or "127.0.0.1" in base_url:
        # Local daemons such as Ollama typically ignore the key, but the
        # OpenAI client still wants a non-empty string.
        return os.environ.get("OLLAMA_API_KEY", "ollama")
    return None


def default_openai_compat_policy_factory(request: LlmStepRequest) -> LlmPolicy:
    """Build a chat policy backed by any OpenAI-compatible endpoint.

    Used by the interactive router for every demo turn. Failures bubble
    up as ``HTTPException(502)`` so the UI can surface a clear "your
    provider is unhappy" banner instead of a stack trace.
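
    The returned policy is a plain callable over chat messages, e.g.::

        policy = default_openai_compat_policy_factory(request)
        text = policy([{"role": "user", "content": "..."}])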
| """ |
|
|
| try: |
| from openai import OpenAI |
| except ImportError as exc: |
| raise HTTPException( |
| status_code=503, |
| detail=( |
| "The 'openai' Python package is not installed on the server. " |
| "Install with: pip install -e '.[demo]'" |
| ), |
| ) from exc |
|
|
| api_key = resolve_api_key(request) |
| client = OpenAI( |
| base_url=request.base_url, |
| api_key=api_key or "missing", |
| timeout=request.request_timeout_s, |
| ) |
|
|
    def _policy(prompt: list[dict[str, str]]) -> str:
        try:
            response = client.chat.completions.create(
                model=request.model,
                messages=prompt,
                temperature=request.temperature,
                max_tokens=request.max_tokens,
                # Ask for JSON mode; many OpenAI-compatible endpoints honour
                # this hint.
                response_format={"type": "json_object"},
            )
        except TypeError:
            # The installed SDK may not accept response_format; retry the
            # same request without the JSON-mode hint.
            response = client.chat.completions.create(
                model=request.model,
                messages=prompt,
                temperature=request.temperature,
                max_tokens=request.max_tokens,
            )
        except Exception as exc:
            raise HTTPException(
                status_code=502,
                detail=_format_provider_error(request, exc),
            ) from exc

        choice = response.choices[0] if response.choices else None
        content = (choice.message.content if choice and choice.message else "") or ""
        return str(content)

    return _policy


def _format_provider_error(request: LlmStepRequest, exc: Exception) -> str:
    """Make the most common failure modes self-diagnosing in the UI."""

    base_msg = f"Chat completion failed via {request.base_url} for model {request.model!r}: {exc}"
    text = str(exc).lower()
    if "401" in text or "unauthorized" in text or "invalid api key" in text:
        return (
            f"{base_msg}\n\n"
            "Hint: the API key is missing or rejected. Open the connection "
            "panel and paste a valid token, or set the matching env var on "
            "the server (HF_TOKEN, OPENAI_API_KEY, etc.)."
        )
    if "404" in text or "not found" in text or "no such model" in text:
        return (
            f"{base_msg}\n\n"
            "Hint: the chosen model isn't reachable through this endpoint. "
            "For Hugging Face, verify the repo id is public and that "
            "Inference Providers is enabled for it. For Ollama, run "
            f"'ollama pull {request.model}'."
        )
    if "connection" in text or "refused" in text or "timeout" in text:
        return (
            f"{base_msg}\n\n"
            "Hint: the endpoint isn't reachable. For Ollama, make sure "
            "'ollama serve' is running on the host you pointed at."
        )
    return base_msg


class LlmModelInfo(BaseModel):
    """A single locally-pulled Ollama model tag."""

    model_config = ConfigDict(frozen=True)

    name: str
    size_bytes: Optional[int] = None
    parameter_size: Optional[str] = None
    family: Optional[str] = None


class LlmModelsResponse(BaseModel):
    models: list[LlmModelInfo] = Field(default_factory=list)
    error: Optional[str] = None


LlmModelsLister = Callable[[], LlmModelsResponse]


def default_ollama_models_lister() -> LlmModelsResponse:
    """Enumerate locally-pulled Ollama tags.

    Best-effort: failures are returned via ``LlmModelsResponse.error``
    instead of being raised.
    """

    try:
        import ollama
    except ImportError:
        return LlmModelsResponse(
            models=[],
            error=(
                "The 'ollama' Python package is not installed on the server. "
                "Install with: pip install -e '.[demo]'"
            ),
        )

    try:
        response = ollama.Client().list()
    except Exception as exc:
        return LlmModelsResponse(
            models=[],
            error=(
                f"Could not reach the local Ollama daemon ({exc}). "
                "Is 'ollama serve' running?"
            ),
        )

    # Depending on the installed ollama client version, list() may return a
    # typed response object or a plain dict; handle both shapes.
    raw_models = getattr(response, "models", None)
    if raw_models is None and isinstance(response, dict):
        raw_models = response.get("models", [])
    raw_models = raw_models or []

    out: list[LlmModelInfo] = []
    for entry in raw_models:
        name = _model_attr(entry, "model") or _model_attr(entry, "name")
        if not isinstance(name, str) or not name:
            continue
        details = _model_attr(entry, "details")
        out.append(
            LlmModelInfo(
                name=name,
                size_bytes=_coerce_int(_model_attr(entry, "size")),
                parameter_size=_model_attr(details, "parameter_size"),
                family=_model_attr(details, "family"),
            )
        )

    out.sort(key=lambda m: m.name)
    return LlmModelsResponse(models=out)


def _model_attr(obj: object, key: str) -> object:
    if obj is None:
        return None
    if isinstance(obj, dict):
        return obj.get(key)
    return getattr(obj, key, None)


def _coerce_int(value: object) -> Optional[int]:
    if value is None:
        return None
    try:
        return int(value)
    except (TypeError, ValueError):
        return None
|
|