Fix model resolution bug for non-Ollama models in easy API (v3.0.1)
Browse files- purpose_agent/unified.py +5 -19
- pyproject.toml +1 -1
purpose_agent/unified.py
CHANGED
|
@@ -178,30 +178,16 @@ class Agent:
|
|
| 178 |
@staticmethod
|
| 179 |
def _resolve_model(model: str, api_key: str | None = None) -> LLMBackend:
|
| 180 |
"""Resolve a model string to an LLMBackend."""
|
| 181 |
-
# Local Ollama models (contain ":" like "qwen3:1.7b")
|
| 182 |
-
if ":" in model and not model.startswith("http"):
|
| 183 |
-
from purpose_agent.slm_backends import OllamaBackend
|
| 184 |
-
return OllamaBackend(model=model)
|
| 185 |
-
|
| 186 |
-
# Known SLM registry keys
|
| 187 |
from purpose_agent.slm_backends import SLM_REGISTRY
|
|
|
|
|
|
|
| 188 |
if model in SLM_REGISTRY:
|
| 189 |
from purpose_agent.slm_backends import create_slm_backend
|
| 190 |
return create_slm_backend(model)
|
| 191 |
|
| 192 |
-
#
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
return OpenAICompatibleBackend(model=model, api_key=api_key)
|
| 196 |
-
|
| 197 |
-
# HuggingFace models (contain "/")
|
| 198 |
-
if "/" in model:
|
| 199 |
-
from purpose_agent.llm_backend import HFInferenceBackend
|
| 200 |
-
return HFInferenceBackend(model_id=model, api_key=api_key)
|
| 201 |
-
|
| 202 |
-
# Fallback: try Ollama
|
| 203 |
-
from purpose_agent.slm_backends import OllamaBackend
|
| 204 |
-
return OllamaBackend(model=model)
|
| 205 |
|
| 206 |
|
| 207 |
class _ToolEnvironment(Environment):
|
|
|
|
| 178 |
@staticmethod
|
| 179 |
def _resolve_model(model: str, api_key: str | None = None) -> LLMBackend:
|
| 180 |
"""Resolve a model string to an LLMBackend."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 181 |
from purpose_agent.slm_backends import SLM_REGISTRY
|
| 182 |
+
|
| 183 |
+
# Known SLM registry keys
|
| 184 |
if model in SLM_REGISTRY:
|
| 185 |
from purpose_agent.slm_backends import create_slm_backend
|
| 186 |
return create_slm_backend(model)
|
| 187 |
|
| 188 |
+
# Delegate to the centralized resolver for all other models (e.g. groq:, openai:)
|
| 189 |
+
from purpose_agent.llm_backend import resolve_backend
|
| 190 |
+
return resolve_backend(model, api_key)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 191 |
|
| 192 |
|
| 193 |
class _ToolEnvironment(Environment):
|
pyproject.toml
CHANGED
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
| 4 |
|
| 5 |
[project]
|
| 6 |
name = "purpose-agent"
|
| 7 |
-
version = "3.0.0"
|
| 8 |
description = "A local-first self-improvement kernel for agents. Turns traces into tested memory so agents improve without fine-tuning."
|
| 9 |
readme = "README.md"
|
| 10 |
license = {text = "MIT"}
|
|
|
|
| 4 |
|
| 5 |
[project]
|
| 6 |
name = "purpose-agent"
|
| 7 |
+
version = "3.0.1"
|
| 8 |
description = "A local-first self-improvement kernel for agents. Turns traces into tested memory so agents improve without fine-tuning."
|
| 9 |
readme = "README.md"
|
| 10 |
license = {text = "MIT"}
|