Fix: Updated Groq model to llama-3.3-70b-versatile due to deprecation (Phase 9)
Browse files
backend/services/config.py
CHANGED
|
@@ -56,7 +56,7 @@ class ConfigService:
|
|
| 56 |
|
| 57 |
# Defaults used when DB has no config entry for a provider
|
| 58 |
_DEFAULTS: Dict[str, Any] = {
|
| 59 |
-
"groq": {"enabled": True, "default_model": "llama-3.1-70b-versatile", "temperature": 0.7, "max_tokens": 4096},
|
| 60 |
"openai": {"enabled": True, "default_model": "gpt-4o", "temperature": 0.7, "max_tokens": 4096},
|
| 61 |
"openrouter": {"enabled": True, "default_model": "google/gemini-2.0-flash", "temperature": 0.7, "max_tokens": 8192},
|
| 62 |
"gemini": {"enabled": True, "default_model": "gemini-2.0-flash", "temperature": 0.7, "max_tokens": 8192},
|
|
|
|
| 56 |
|
| 57 |
# Defaults used when DB has no config entry for a provider
|
| 58 |
_DEFAULTS: Dict[str, Any] = {
|
| 59 |
+
"groq": {"enabled": True, "default_model": "llama-3.3-70b-versatile", "temperature": 0.7, "max_tokens": 4096},
|
| 60 |
"openai": {"enabled": True, "default_model": "gpt-4o", "temperature": 0.7, "max_tokens": 4096},
|
| 61 |
"openrouter": {"enabled": True, "default_model": "google/gemini-2.0-flash", "temperature": 0.7, "max_tokens": 8192},
|
| 62 |
"gemini": {"enabled": True, "default_model": "gemini-2.0-flash", "temperature": 0.7, "max_tokens": 8192},
|