cesjavi committed on
Commit
00470f6
·
1 Parent(s): d0998b9

Fix: Reverted generator to llama-3.3-70b-versatile for consistency (Phase 9)

Browse files
Files changed (1) hide show
  1. backend/routers/generator.py +4 -3
backend/routers/generator.py CHANGED
@@ -81,8 +81,8 @@ async def generate_project(
81
 
82
  client = groq.AsyncGroq(api_key=api_key)
83
 
84
- # Use llama3-70b-8192 as a more stable fallback/default
85
- model_name = provider_config.get("default_model") or "llama3-70b-8192"
86
  logger.info("Calling Groq with model: %s (Key: %s...)", model_name, api_key[:8] if api_key else "None")
87
 
88
  response = await client.chat.completions.create(
@@ -93,7 +93,6 @@ async def generate_project(
93
  ],
94
  temperature=0.3,
95
  max_tokens=2048
96
- # response_format={"type": "json_object"} # Disabled for stability testing
97
  )
98
 
99
  response_text = response.choices[0].message.content
@@ -105,4 +104,6 @@ async def generate_project(
105
  logger.exception("Project generation failed")
106
  error_type = type(e).__name__
107
  error_msg = str(e)
 
 
108
  raise HTTPException(status_code=500, detail=f"AI Error ({error_type}): {error_msg}")
 
81
 
82
  client = groq.AsyncGroq(api_key=api_key)
83
 
84
+ # Use llama-3.3-70b-versatile to match GroqAgent.py
85
+ model_name = provider_config.get("default_model") or "llama-3.3-70b-versatile"
86
  logger.info("Calling Groq with model: %s (Key: %s...)", model_name, api_key[:8] if api_key else "None")
87
 
88
  response = await client.chat.completions.create(
 
93
  ],
94
  temperature=0.3,
95
  max_tokens=2048
 
96
  )
97
 
98
  response_text = response.choices[0].message.content
 
104
  logger.exception("Project generation failed")
105
  error_type = type(e).__name__
106
  error_msg = str(e)
107
+ if "401" in error_msg:
108
+ error_msg = "Invalid API Key - Please check your Groq Dashboard and .env"
109
  raise HTTPException(status_code=500, detail=f"AI Error ({error_type}): {error_msg}")