"""Tests for the OpenAI-compatible provider abstraction.

The provider module is the seam between the browser-supplied connection
config (base URL / model / api key) and a concrete chat client. We
exercise the public surface that's hard to cover indirectly:

* `resolve_api_key` — env-var fallback rules per URL family. This is
  exactly the path a Hugging Face Space takes when the visitor leaves
  the API key field empty and the Space ships HF_TOKEN as a secret.
* `_format_provider_error` — the strings the UI shows when something
  blows up. We pin the substrings the frontend is allowed to depend on.
"""
from __future__ import annotations

import pytest

from physix.server.providers import (
    HF_ROUTER_BASE_URL,
    OLLAMA_OPENAI_BASE_URL,
    OPENAI_BASE_URL,
    LlmStepRequest,
    _format_provider_error,
    resolve_api_key,
)
def _req(*, base_url: str, api_key: str | None = None) -> LlmStepRequest:
    """Build a minimal request with a stub model; only the URL/key vary."""
    return LlmStepRequest(model="stub", base_url=base_url, api_key=api_key)
# --- resolve_api_key -------------------------------------------------------
def test_resolve_api_key_browser_supplied_wins(monkeypatch: pytest.MonkeyPatch) -> None:
    """The request-body key must win over every env-var fallback: a
    visitor's token is never silently replaced by the Space's default."""
    monkeypatch.setenv("HF_TOKEN", "server-default")
    request = _req(base_url=HF_ROUTER_BASE_URL, api_key="visitor-key")
    assert resolve_api_key(request) == "visitor-key"
def test_resolve_api_key_falls_back_to_hf_token_for_hf_router(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """With no key in the request, the HF router URL reads HF_TOKEN."""
    monkeypatch.delenv("HUGGINGFACE_API_KEY", raising=False)
    monkeypatch.setenv("HF_TOKEN", "from-env")
    resolved = resolve_api_key(_req(base_url=HF_ROUTER_BASE_URL))
    assert resolved == "from-env"
def test_resolve_api_key_uses_huggingface_api_key_if_hf_token_missing(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """HUGGINGFACE_API_KEY is the secondary fallback when HF_TOKEN is unset."""
    monkeypatch.setenv("HUGGINGFACE_API_KEY", "alt")
    monkeypatch.delenv("HF_TOKEN", raising=False)
    resolved = resolve_api_key(_req(base_url=HF_ROUTER_BASE_URL))
    assert resolved == "alt"
def test_resolve_api_key_falls_back_to_openai_env(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """The OpenAI base URL falls back to OPENAI_API_KEY from the env.

    Unlike its sibling tests, the original never cleared the HF variables,
    so a dev machine with HF_TOKEN exported could hide a bug where the
    wrong env var is consulted for this URL family. Clear them explicitly
    to keep the test hermetic.
    """
    monkeypatch.delenv("HF_TOKEN", raising=False)
    monkeypatch.delenv("HUGGINGFACE_API_KEY", raising=False)
    monkeypatch.setenv("OPENAI_API_KEY", "sk-server")
    assert resolve_api_key(_req(base_url=OPENAI_BASE_URL)) == "sk-server"
def test_resolve_api_key_for_ollama_returns_placeholder_when_no_env(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Ollama ignores the key, but the OpenAI SDK rejects empty strings —
    a literal placeholder is returned so the SDK stays happy."""
    monkeypatch.delenv("OLLAMA_API_KEY", raising=False)
    resolved = resolve_api_key(_req(base_url=OLLAMA_OPENAI_BASE_URL))
    assert resolved == "ollama"
def test_resolve_api_key_returns_none_for_unknown_url(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """An unrecognised base URL gets no free env-var lookup: the browser
    must send the key explicitly so HF_TOKEN never leaks to a third-party
    endpoint someone pasted in by mistake."""
    for var in ("HF_TOKEN", "OPENAI_API_KEY"):
        monkeypatch.setenv(var, "secret")
    request = _req(base_url="https://random-provider.example/v1")
    assert resolve_api_key(request) is None
# --- _format_provider_error ------------------------------------------------
def test_format_provider_error_pinpoints_auth_failure() -> None:
    """A 401 is reported as an API-key problem and names the endpoint."""
    err = Exception("401 Unauthorized: invalid api key")
    msg = _format_provider_error(_req(base_url=HF_ROUTER_BASE_URL), err)
    assert "API key" in msg
    assert HF_ROUTER_BASE_URL in msg
def test_format_provider_error_pinpoints_missing_model() -> None:
    """A 404 against Ollama should tell the user to `ollama pull` the model."""
    err = Exception("404 Not Found: no such model 'qwen2.5:99b'")
    msg = _format_provider_error(_req(base_url=OLLAMA_OPENAI_BASE_URL), err)
    assert "ollama pull" in msg
def test_format_provider_error_pinpoints_unreachable_endpoint() -> None:
    """A refused connection against Ollama should suggest `ollama serve`."""
    err = Exception("Connection refused")
    msg = _format_provider_error(_req(base_url=OLLAMA_OPENAI_BASE_URL), err)
    assert "ollama serve" in msg