cesjavi committed
Commit 327739b · 1 Parent(s): a154d73

DigitalOcean: Integrated AMD_API_KEY support and fixed default model

backend/agents/digitalocean_agent.py CHANGED
@@ -14,12 +14,13 @@ class DigitalOceanAgent(BaseAgent):
         # Load dynamic config
         self.provider_config = config_service.get_provider_config("digitalocean")
 
-        # Priority: Agent Access Key -> Inference Key -> DO Token
+        # Priority: Agent Access Key -> Inference Key -> AMD Key -> DO Token
         api_key = (
             self.provider_config.get("agent_access_key") or
             settings.DO_AGENT_ACCESS_KEY or
             self.provider_config.get("api_key") or
             settings.DO_INFERENCE_KEY or
+            settings.AMD_API_KEY or
             settings.DO_API_TOKEN
         )
 
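To illustrate the new fallback order, here is a minimal standalone sketch (not code from this commit): Python's `or` chain returns the first truthy value, so an agent access key set in the dynamic config shadows everything else, and the new AMD_API_KEY is consulted only when no agent access key or inference key is present. The FakeSettings class and the sample key values are hypothetical stand-ins for the project's real settings object.

# Hypothetical stand-in for the project's settings object; values are illustrative.
class FakeSettings:
    DO_AGENT_ACCESS_KEY = None
    DO_INFERENCE_KEY = None
    AMD_API_KEY = "amd-key-123"
    DO_API_TOKEN = "do-token-456"

settings = FakeSettings()
provider_config = {"agent_access_key": None, "api_key": None}

# Same `or` chain as the patched agent: the first truthy value wins.
api_key = (
    provider_config.get("agent_access_key") or
    settings.DO_AGENT_ACCESS_KEY or
    provider_config.get("api_key") or
    settings.DO_INFERENCE_KEY or
    settings.AMD_API_KEY or
    settings.DO_API_TOKEN
)
print(api_key)  # -> "amd-key-123": the AMD key now takes precedence over the plain DO token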
backend/services/config.py CHANGED
@@ -60,7 +60,7 @@ class ConfigService:
             "openai": {"enabled": True, "default_model": "gpt-4o", "temperature": 0.7, "max_tokens": 4096},
             "openrouter": {"enabled": True, "default_model": "google/gemini-2.0-flash", "temperature": 0.7, "max_tokens": 8192},
             "gemini": {"enabled": True, "default_model": "gemini-2.0-flash", "temperature": 0.7, "max_tokens": 8192},
-            "amd": {"enabled": True, "default_model": "gpt-4o", "temperature": 0.7, "max_tokens": 4096, "base_url": "https://inference.do-ai.run/v1"},
+            "amd": {"enabled": True, "default_model": "llama-3.3-70b-instruct", "temperature": 0.7, "max_tokens": 4096, "base_url": "https://inference.do-ai.run/v1"},
             "ollama": {"enabled": True, "default_model": "llama3.1:8b", "temperature": 0.7, "base_url": "http://localhost:11434"},
         }
 
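Why the default-model fix matters: the "amd" provider targets DigitalOcean's inference endpoint, which serves open models such as llama-3.3-70b-instruct rather than gpt-4o. Below is a minimal sketch of exercising the new defaults, assuming the base_url is OpenAI-compatible (suggested by its /v1 path) and that the key is exported as AMD_API_KEY; using the openai client library here is an assumption, not a dependency confirmed by this commit.

# Sketch under stated assumptions: OpenAI-compatible endpoint, AMD_API_KEY set in the environment.
import os
from openai import OpenAI

amd_defaults = {
    "default_model": "llama-3.3-70b-instruct",  # the corrected default from this commit
    "temperature": 0.7,
    "max_tokens": 4096,
    "base_url": "https://inference.do-ai.run/v1",
}

client = OpenAI(base_url=amd_defaults["base_url"], api_key=os.environ["AMD_API_KEY"])
response = client.chat.completions.create(
    model=amd_defaults["default_model"],
    temperature=amd_defaults["temperature"],
    max_tokens=amd_defaults["max_tokens"],
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(response.choices[0].message.content)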