Integration: Added DigitalOcean Serverless Inference provider
backend/agents/agent_factory.py
CHANGED
@@ -5,6 +5,7 @@ from .amd_agent import AMDAgent
 from .groq_agent import GroqAgent
 from .gemini_agent import GeminiAgent
 from .local_agent import LocalAgent
+from .digitalocean_agent import DigitalOceanAgent
 from services.config import settings
 
 # Map of providers to their respective classes
@@ -14,7 +15,8 @@ PROVIDER_MAP: Dict[str, Type[BaseAgent]] = {
     "groq": GroqAgent,
     "gemini": GeminiAgent,
     "local": LocalAgent,
-    "ollama": LocalAgent
+    "ollama": LocalAgent,
+    "digitalocean": DigitalOceanAgent
 }
 
 class AgentFactory:
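With the new import and the "digitalocean" entry in PROVIDER_MAP, the provider key now resolves to DigitalOceanAgent. A minimal sketch of that lookup follows; the factory's own creation method is not shown in this diff, so the build_agent helper and the import path are illustrative only, assuming backend/ is the import root as in the diff's own imports:

# Sketch only: AgentFactory's creation method is not part of this diff,
# so this resolves the provider key directly through PROVIDER_MAP.
from agents.agent_factory import PROVIDER_MAP

def build_agent(provider: str, name: str, role: str, **kwargs):
    agent_cls = PROVIDER_MAP.get(provider)
    if agent_cls is None:
        raise ValueError(f"Unknown provider: {provider!r}")
    return agent_cls(name=name, role=role, **kwargs)

# "digitalocean" now maps to DigitalOceanAgent; "ollama" still reuses LocalAgent.
agent = build_agent("digitalocean", name="planner", role="planning",
                    model="llama-3-70b-instruct")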
backend/agents/digitalocean_agent.py
ADDED
@@ -0,0 +1,35 @@
+from .base import BaseAgent
+from typing import Dict, Any, List
+import openai
+from services.config import settings, config_service
+
+class DigitalOceanAgent(BaseAgent):
+    """
+    Agent provider using DigitalOcean's Gradient Inference API.
+    Compatible with OpenAI's Chat Completion format.
+    """
+    def __init__(self, name: str, role: str, model: str = "llama-3-70b-instruct", system_prompt: str = None):
+        super().__init__(name, role, model, system_prompt)
+
+        # Load dynamic config
+        self.provider_config = config_service.get_provider_config("digitalocean")
+        api_key = self.provider_config.get("api_key") or settings.DO_INFERENCE_KEY or settings.DO_API_TOKEN
+
+        # DigitalOcean Inference endpoint
+        self.client = openai.AsyncOpenAI(
+            api_key=api_key,
+            base_url="https://inference.do-ai.run/v1"
+        )
+        self.temperature = self.provider_config.get("temperature", 0.7)
+        self.max_tokens = self.provider_config.get("max_tokens", 4096)
+
+    async def run(self, task_description: str, context: List[Dict[str, Any]], use_tools: bool = False, extra_context: str = "") -> Dict[str, Any]:
+        # Use the base OpenAI-compatible runner
+        return await self._run_openai_compatible(
+            provider="digitalocean",
+            create_fn=self.client.chat.completions.create,
+            task_description=task_description,
+            context=context,
+            use_tools=use_tools,
+            extra_context=extra_context
+        )
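DigitalOceanAgent delegates the actual request to BaseAgent._run_openai_compatible, which is not part of this diff. Below is a standalone sketch of the equivalent call against the same endpoint using only the openai client; the base URL and default model name come from the new file, while the model slugs actually available depend on the DigitalOcean account:

# Standalone sketch of the request shape DigitalOceanAgent delegates to its
# base-class helper. Endpoint and default model are taken from the new file;
# the DO_INFERENCE_KEY environment variable is assumed to hold the credential.
import asyncio
import os
import openai

async def main() -> None:
    client = openai.AsyncOpenAI(
        api_key=os.environ["DO_INFERENCE_KEY"],
        base_url="https://inference.do-ai.run/v1",
    )
    resp = await client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[
            {"role": "system", "content": "You are a helpful agent."},
            {"role": "user", "content": "Summarize this pull request."},
        ],
        temperature=0.7,
        max_tokens=4096,
    )
    print(resp.choices[0].message.content)

asyncio.run(main())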
backend/services/config.py
CHANGED
@@ -18,6 +18,7 @@ class Settings(BaseSettings):
 
     # Infrastructure (DigitalOcean)
     DO_API_TOKEN: Optional[str] = None
+    DO_INFERENCE_KEY: Optional[str] = None
     DO_REGION: str = "nyc3"
 
     # App Config
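The new DO_INFERENCE_KEY field lets the inference credential be configured separately from the infrastructure token; DigitalOceanAgent falls back from the dynamic provider config to DO_INFERENCE_KEY and finally to DO_API_TOKEN. A small sketch of that precedence, assuming the standard pydantic BaseSettings mapping of field names to environment variables (or a .env file, if the Settings model is configured with one):

# Sketch of the key precedence used in DigitalOceanAgent.__init__; the
# placeholder values below are examples, not real token formats.
import os

os.environ.setdefault("DO_API_TOKEN", "example-api-token")
os.environ.setdefault("DO_INFERENCE_KEY", "example-inference-key")

from services.config import settings, config_service

provider_config = config_service.get_provider_config("digitalocean")
api_key = (
    provider_config.get("api_key")   # dynamic per-provider config first
    or settings.DO_INFERENCE_KEY     # then the dedicated inference key
    or settings.DO_API_TOKEN         # finally the general API token
)
print("resolved key:", api_key)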