File size: 1,773 Bytes
d814291
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import os

import pytest
import requests

from osint_env.domain.models import LLMConfig
from osint_env.llm.interface import OllamaLLMClient, RuleBasedMockLLM, build_llm_client


def test_build_llm_client_mock_default():
    """The "mock" provider must resolve to the rule-based mock client."""
    cfg = LLMConfig(provider="mock")
    assert isinstance(build_llm_client(cfg), RuleBasedMockLLM)


def test_build_llm_client_openai_requires_key(monkeypatch: pytest.MonkeyPatch):
    """With no key in config and none in the environment, openai is rejected."""
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    cfg = LLMConfig(provider="openai", openai_api_key="", openai_api_key_env="OPENAI_API_KEY")
    with pytest.raises(ValueError):
        build_llm_client(cfg)


def test_build_llm_client_openai_with_key(monkeypatch: pytest.MonkeyPatch):
    """A key taken from the environment is enough to build the openai client."""
    monkeypatch.setenv("OPENAI_API_KEY", "test-key")
    client = build_llm_client(
        LLMConfig(provider="openai", model="gpt-4o-mini", openai_api_key_env="OPENAI_API_KEY")
    )
    # Construction alone must succeed; no API request is issued here.
    assert client is not None


def test_openai_key_can_come_from_config_value(monkeypatch: pytest.MonkeyPatch):
    """An inline config key works even when the env var is absent."""
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    client = build_llm_client(
        LLMConfig(provider="openai", model="gpt-4o-mini", openai_api_key="cfg-key")
    )
    assert client is not None


def test_ollama_client_gracefully_handles_request_failure(monkeypatch: pytest.MonkeyPatch):
    """A transport-level timeout degrades to an empty response, not an exception."""

    def _boom(*_args, **_kwargs):
        raise requests.exceptions.ReadTimeout("timed out")

    # Patch the requests.post used inside the interface module so every call fails.
    monkeypatch.setattr("osint_env.llm.interface.requests.post", _boom)
    client = OllamaLLMClient(model="qwen3:2b", timeout_seconds=1)
    result = client.generate([{"role": "system", "content": "ping"}], tools=[])
    assert result.content == ""
    assert result.tool_calls == []