Fix v3: max_tokens=32000, shorter prompt, JSON repair for truncated responses
interface/app_gradio.py (+30 -14)
@@ -80,21 +80,12 @@ def llm_chat(prompt, system=None, max_tokens=16000, temperature=0.4):
 def generate_outline(subject, num_slides, style, language, audience, image_mode):
     """Step 1: Ask LLM to generate the slide outline."""
 
-    prompt = f"""Generate a {num_slides}-slide presentation
-
+    prompt = f"""Generate a {num_slides}-slide presentation about "{subject}". Style: {style}. Language: {language}. Audience: {audience}.
+Reply ONLY raw JSON: {{"title":"...","slides":[{{"number":1,"title":"...","content":["point1","point2","point3"],"image_prompt":"english cinematic description","layout":"cover"}}]}}
+Layouts: cover(1st), content, comparison, timeline, closing(last).
+IMPORTANT: Keep each content point SHORT (max 20 words). Keep image_prompt under 30 words. No text in images."""
 
-
-{{"title":"Presentation title","slides":[{{"number":1,"title":"Slide title","content":["Rich detailed point 1","Rich detailed point 2","Rich detailed point 3","Rich detailed point 4"],"image_prompt":"cinematic english image description for AI generation, {style} style, no text","layout":"cover"}}]}}
-
-Rules:
-- Exactly {num_slides} slides
-- Each slide has 3-5 detailed content points with facts/data when possible
-- image_prompt: English, descriptive, cinematic, adapted to "{style}" style
-- layout values: cover (slide 1), content, comparison, timeline, quote, closing (last slide)
-- Content language: {language}
-- Make it rich, informative, with specific facts and details"""
-
-    result = llm_chat(prompt, max_tokens=16000)
+    result = llm_chat(prompt, max_tokens=32000)
 
     # Extract JSON from response
     try:
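A note on how the two numbers in this hunk interact: capping each content point at roughly 20 words makes the outline's size grow predictably with num_slides, so even large decks stay far below the raised 32000-token ceiling. A minimal back-of-envelope check, assuming a deliberately generous 300 tokens per slide (an illustrative figure, not one measured by this commit):

    def outline_fits_budget(num_slides: int, max_tokens: int = 32000) -> bool:
        # Assumed overestimate: 3-5 capped points, a short image_prompt,
        # and the JSON scaffolding come to well under 300 tokens per slide.
        TOKENS_PER_SLIDE = 300   # assumption for illustration
        ENVELOPE_OVERHEAD = 200  # assumed top-level JSON overhead
        return num_slides * TOKENS_PER_SLIDE + ENVELOPE_OVERHEAD < max_tokens

    # Even a 40-slide deck budgets only 40 * 300 + 200 = 12200 tokens.
    assert outline_fits_budget(40)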
@@ -105,6 +96,31 @@ Rules:
         else:
             outline = json.loads(result)
     except json.JSONDecodeError as e:
+        # Try to repair truncated JSON by closing brackets
+        repaired = result.rstrip()
+        # Remove trailing incomplete values
+        for _ in range(10):
+            try:
+                # Try adding closing brackets
+                for fix in ['}]}', '"}]}', '"]},{"number":99,"title":"FIN","content":[""],"image_prompt":"","layout":"closing"}]}']:
+                    try:
+                        test = repaired + fix
+                        json_match = re.search(r'\{[\s\S]*\}', test)
+                        if json_match:
+                            outline = json.loads(json_match.group())
+                            # Remove dummy slides if added
+                            outline["slides"] = [s for s in outline.get("slides",[]) if s.get("number") != 99]
+                            return outline
+                    except:
+                        continue
+                # Cut last incomplete entry and retry
+                last_brace = repaired.rfind('{')
+                if last_brace > 0:
+                    repaired = repaired[:last_brace].rstrip().rstrip(',')
+                else:
+                    break
+            except:
+                break
         raise RuntimeError(f"The LLM did not return valid JSON.\n\nResponse:\n{result[:2000]}")
 
     return outline
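The repair strategy above is deliberately schema-specific: it appends suffixes shaped like this outline's JSON, including a throwaway slide numbered 99 that is filtered back out before returning. To test the idea in isolation, a schema-agnostic variant can instead track open brackets and unterminated strings and append whatever closers are pending. The helper below is a minimal sketch of that generalization, not code from this commit, and close_truncated_json is a hypothetical name:

    import json

    def close_truncated_json(text: str) -> dict:
        """Best-effort parse of truncated JSON: scan the prefix, then
        append the string terminator and closing brackets still owed."""
        stack = []          # closers we still owe, innermost last
        in_string = False   # currently inside a string literal?
        escaped = False     # previous char was a backslash in a string
        for ch in text:
            if in_string:
                if escaped:
                    escaped = False
                elif ch == '\\':
                    escaped = True
                elif ch == '"':
                    in_string = False
            elif ch == '"':
                in_string = True
            elif ch in '{[':
                stack.append('}' if ch == '{' else ']')
            elif ch in '}]' and stack:
                stack.pop()
        candidate = text + ('"' if in_string else '') + ''.join(reversed(stack))
        return json.loads(candidate)  # still raises if the cut was mid-key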
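For example, a response cut off in the middle of a slide title round-trips cleanly:

    truncated = '{"title":"Demo","slides":[{"number":1,"title":"Intro'
    print(close_truncated_json(truncated))
    # -> {'title': 'Demo', 'slides': [{'number': 1, 'title': 'Intro'}]}

Like the committed loop, this still fails when the text stops right after a key or a colon, in which case generate_outline falls through to the RuntimeError and surfaces the raw response.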