File size: 4,012 Bytes
0e24aff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
"""Tests for the OpenAI-compatible provider abstraction.

The provider module is the seam between the browser-supplied connection
config (base URL / model / api key) and a concrete chat client. We
exercise the public surface that's hard to cover indirectly:

* `resolve_api_key` — env-var fallback rules per URL family. This is
  exactly the path a Hugging Face Space takes when the visitor leaves
  the API key field empty and the Space ships HF_TOKEN as a secret.
* `_format_provider_error` — the strings the UI shows when something
  blows up. We pin the substrings the frontend is allowed to depend on.
"""

from __future__ import annotations

import pytest

from physix.server.providers import (
    HF_ROUTER_BASE_URL,
    OLLAMA_OPENAI_BASE_URL,
    OPENAI_BASE_URL,
    LlmStepRequest,
    _format_provider_error,
    resolve_api_key,
)


def _req(*, base_url: str, api_key: str | None = None) -> LlmStepRequest:
    """Build a minimal step request against *base_url* using a stub model."""
    return LlmStepRequest(model="stub", base_url=base_url, api_key=api_key)


# --- resolve_api_key -------------------------------------------------------


def test_resolve_api_key_browser_supplied_wins(monkeypatch: pytest.MonkeyPatch) -> None:
    """A key in the request body must beat any env-var fallback so that
    visitor tokens never get silently overridden by the Space's default."""
    monkeypatch.setenv("HF_TOKEN", "server-default")
    request = _req(base_url=HF_ROUTER_BASE_URL, api_key="visitor-key")
    assert resolve_api_key(request) == "visitor-key"


def test_resolve_api_key_falls_back_to_hf_token_for_hf_router(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    monkeypatch.delenv("HUGGINGFACE_API_KEY", raising=False)
    monkeypatch.setenv("HF_TOKEN", "from-env")
    resolved = resolve_api_key(_req(base_url=HF_ROUTER_BASE_URL))
    assert resolved == "from-env"


def test_resolve_api_key_uses_huggingface_api_key_if_hf_token_missing(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    monkeypatch.setenv("HUGGINGFACE_API_KEY", "alt")
    monkeypatch.delenv("HF_TOKEN", raising=False)
    resolved = resolve_api_key(_req(base_url=HF_ROUTER_BASE_URL))
    assert resolved == "alt"


def test_resolve_api_key_falls_back_to_openai_env(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """The OpenAI URL family must fall back to OPENAI_API_KEY only.

    The HF-family variables are explicitly removed so an ambient
    HF_TOKEN/HUGGINGFACE_API_KEY in the developer's shell can't mask a
    fallback-order regression — matching the isolation style of the
    other resolve_api_key tests in this file.
    """
    monkeypatch.delenv("HF_TOKEN", raising=False)
    monkeypatch.delenv("HUGGINGFACE_API_KEY", raising=False)
    monkeypatch.setenv("OPENAI_API_KEY", "sk-server")
    assert resolve_api_key(_req(base_url=OPENAI_BASE_URL)) == "sk-server"


def test_resolve_api_key_for_ollama_returns_placeholder_when_no_env(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Ollama doesn't need a real key but the OpenAI SDK rejects empty
    strings — the placeholder keeps the SDK happy."""
    monkeypatch.delenv("OLLAMA_API_KEY", raising=False)
    resolved = resolve_api_key(_req(base_url=OLLAMA_OPENAI_BASE_URL))
    assert resolved == "ollama"


def test_resolve_api_key_returns_none_for_unknown_url(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Unknown providers don't get a free env-var lookup. The browser
    must supply the key explicitly so we don't leak HF_TOKEN to a
    third-party endpoint someone pasted in by mistake."""
    for var in ("HF_TOKEN", "OPENAI_API_KEY"):
        monkeypatch.setenv(var, "secret")
    request = _req(base_url="https://random-provider.example/v1")
    assert resolve_api_key(request) is None


# --- _format_provider_error ------------------------------------------------


def test_format_provider_error_pinpoints_auth_failure() -> None:
    """An auth error message must mention the API key and the endpoint."""
    error = Exception("401 Unauthorized: invalid api key")
    msg = _format_provider_error(_req(base_url=HF_ROUTER_BASE_URL), error)
    assert "API key" in msg and HF_ROUTER_BASE_URL in msg


def test_format_provider_error_pinpoints_missing_model() -> None:
    """A 404 on an Ollama endpoint should suggest pulling the model."""
    error = Exception("404 Not Found: no such model 'qwen2.5:99b'")
    msg = _format_provider_error(_req(base_url=OLLAMA_OPENAI_BASE_URL), error)
    assert "ollama pull" in msg


def test_format_provider_error_pinpoints_unreachable_endpoint() -> None:
    """A connection failure against Ollama should suggest starting the server."""
    error = Exception("Connection refused")
    msg = _format_provider_error(_req(base_url=OLLAMA_OPENAI_BASE_URL), error)
    assert "ollama serve" in msg