cesjavi committed on
Commit e079f6a · 1 Parent(s): 00470f6

Fix: Updated Groq model to llama-3.3-70b-versatile due to deprecation (Phase 9)

Files changed (1)
  1. backend/services/config.py +1 -1
backend/services/config.py CHANGED
@@ -56,7 +56,7 @@ class ConfigService:
 
     # Defaults used when DB has no config entry for a provider
     _DEFAULTS: Dict[str, Any] = {
-        "groq": {"enabled": True, "default_model": "llama3-70b-8192", "temperature": 0.7, "max_tokens": 4096},
+        "groq": {"enabled": True, "default_model": "llama-3.3-70b-versatile", "temperature": 0.7, "max_tokens": 4096},
         "openai": {"enabled": True, "default_model": "gpt-4o", "temperature": 0.7, "max_tokens": 4096},
         "openrouter": {"enabled": True, "default_model": "google/gemini-2.0-flash", "temperature": 0.7, "max_tokens": 8192},
         "gemini": {"enabled": True, "default_model": "gemini-2.0-flash", "temperature": 0.7, "max_tokens": 8192},