V6: Multi-model — HY3 for Strategist + GPT-OSS-120B for Executor SVG (2-3x faster, higher quality)
Browse files — interface/app_gradio.py (+20 −14)
interface/app_gradio.py
CHANGED
|
@@ -33,10 +33,10 @@ load_prefixed_env_file(("OPENROUTER_", "COMFYUI_", "IMAGE_", "PEXELS_", "PIXABAY
|
|
| 33 |
# LLM Client
|
| 34 |
# ============================================================
|
| 35 |
|
| 36 |
-
def llm_chat(prompt, system=None, max_tokens=32000, temperature=0.3):
|
| 37 |
import requests
|
| 38 |
api_key = os.environ.get("OPENROUTER_API_KEY", "")
|
| 39 |
-
model = os.environ.get("OPENROUTER_MODEL", "tencent/hy3-preview:free")
|
| 40 |
base_url = os.environ.get("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1").rstrip("/")
|
| 41 |
if not api_key:
|
| 42 |
raise RuntimeError("OPENROUTER_API_KEY non configuré dans .env")
|
|
@@ -44,12 +44,14 @@ def llm_chat(prompt, system=None, max_tokens=32000, temperature=0.3):
|
|
| 44 |
if system:
|
| 45 |
messages.append({"role": "system", "content": system})
|
| 46 |
messages.append({"role": "user", "content": prompt})
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
r = requests.post(f"{base_url}/chat/completions",
|
| 48 |
headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json",
|
| 49 |
"HTTP-Referer": "http://localhost/ppt-master", "X-Title": "ppt-master-gradio"},
|
| 50 |
-
json={"model": model, "messages": messages, "temperature": temperature,
|
| 51 |
-
"max_tokens": max_tokens, "reasoning": {"exclude": True}},
|
| 52 |
-
timeout=600)
|
| 53 |
if r.status_code >= 400:
|
| 54 |
raise RuntimeError(f"OpenRouter erreur {r.status_code}: {r.text[:500]}")
|
| 55 |
data = r.json()
|
|
@@ -58,6 +60,10 @@ def llm_chat(prompt, system=None, max_tokens=32000, temperature=0.3):
|
|
| 58 |
raise RuntimeError(f"Pas de contenu retourné par le LLM")
|
| 59 |
return content
|
| 60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
|
| 62 |
# ============================================================
|
| 63 |
# STEP 1: STRATEGIST — Generate design_spec + spec_lock + outline
|
|
@@ -117,7 +123,7 @@ Rules:
|
|
| 117 |
- page_rhythm: "anchor" for cover/closing, "dense" for data-heavy, "breathing" for impact pages
|
| 118 |
- Reply ONLY with raw JSON, no markdown code blocks, no explanation"""
|
| 119 |
|
| 120 |
-
result = llm_chat(prompt, max_tokens=32000)
|
| 121 |
|
| 122 |
# Parse JSON
|
| 123 |
try:
|
|
@@ -317,7 +323,7 @@ DESIGN THIS SLIDE:
|
|
| 317 |
Reply with ONLY the SVG. Start <svg, end </svg>. No explanation."""
|
| 318 |
|
| 319 |
try:
|
| 320 |
-
svg_content = llm_chat(prompt, max_tokens=10000, temperature=0.3)
|
| 321 |
|
| 322 |
# Extract SVG
|
| 323 |
svg_start = svg_content.find('<svg')
|
|
@@ -439,8 +445,8 @@ def step2_handler(data_json, style, image_mode, progress=gr.Progress()):
|
|
| 439 |
|
| 440 |
# Phase 2: Executor (SVG generation via LLM)
|
| 441 |
progress(0.2, desc=f"Génération SVG par LLM (0/{num_slides})...")
|
| 442 |
-
log.append("━━━ PHASE 2: Executor (SVG via LLM) ━━━")
|
| 443 |
-
log.append(f" ⏱️ Estimation: ~
|
| 444 |
|
| 445 |
slide_count = [0]
|
| 446 |
def svg_cb(msg):
|
|
@@ -505,7 +511,7 @@ with gr.Blocks(title="PPT Master — Générateur IA") as app:
|
|
| 505 |
|
| 506 |
with gr.Tab("2️⃣ Générer le PPTX"):
|
| 507 |
gr.Markdown("### 🚀 Pipeline complet: Images + SVG (LLM) + Export")
|
| 508 |
-
gr.Markdown("⏱️ **Temps estimé**: ~2 min (images) + ~
|
| 509 |
btn_gen = gr.Button("⚡ Lancer la génération complète", variant="primary", size="lg")
|
| 510 |
status2 = gr.Textbox(label="Statut", interactive=False)
|
| 511 |
with gr.Row():
|
|
@@ -516,16 +522,16 @@ with gr.Blocks(title="PPT Master — Générateur IA") as app:
|
|
| 516 |
gr.Markdown("""
|
| 517 |
## Workflow PPT Master complet
|
| 518 |
|
| 519 |
-
1. **Strategist** → design_spec + spec_lock (palette, typo, icônes, rythme)
|
| 520 |
2. **Image Acquisition** → ComfyUI GPU ou recherche web
|
| 521 |
-
3. **Executor** → Génère chaque SVG individuellement, qualité premium
|
| 522 |
4. **Post-processing** → finalize_svg.py (icônes, images, rounded rects)
|
| 523 |
5. **Export** → svg_to_pptx.py → PPTX natif éditable
|
| 524 |
|
| 525 |
-
**
|
| 526 |
Le spec_lock garantit la cohérence couleurs/fonts sur l'ensemble du deck.
|
| 527 |
|
| 528 |
-
**Stack**: OpenRouter (modèle gratuit) + ComfyUI local (RTX 5070 Ti)
|
| 529 |
""")
|
| 530 |
|
| 531 |
btn_plan.click(fn=step1_handler, inputs=[subject, num_slides, style, language, audience, image_mode],
|
|
|
|
| 33 |
# LLM Client
|
| 34 |
# ============================================================
|
| 35 |
|
| 36 |
+
def llm_chat(prompt, system=None, max_tokens=32000, temperature=0.3, model_override=None):
|
| 37 |
import requests
|
| 38 |
api_key = os.environ.get("OPENROUTER_API_KEY", "")
|
| 39 |
+
model = model_override or os.environ.get("OPENROUTER_MODEL", "tencent/hy3-preview:free")
|
| 40 |
base_url = os.environ.get("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1").rstrip("/")
|
| 41 |
if not api_key:
|
| 42 |
raise RuntimeError("OPENROUTER_API_KEY non configuré dans .env")
|
|
|
|
| 44 |
if system:
|
| 45 |
messages.append({"role": "system", "content": system})
|
| 46 |
messages.append({"role": "user", "content": prompt})
|
| 47 |
+
payload = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
|
| 48 |
+
# For reasoning models, exclude internal reasoning from output
|
| 49 |
+
if "hy3" in model or "qwen3" in model:
|
| 50 |
+
payload["reasoning"] = {"exclude": True}
|
| 51 |
r = requests.post(f"{base_url}/chat/completions",
|
| 52 |
headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json",
|
| 53 |
"HTTP-Referer": "http://localhost/ppt-master", "X-Title": "ppt-master-gradio"},
|
| 54 |
+
json=payload, timeout=600)
|
|
|
|
|
|
|
| 55 |
if r.status_code >= 400:
|
| 56 |
raise RuntimeError(f"OpenRouter erreur {r.status_code}: {r.text[:500]}")
|
| 57 |
data = r.json()
|
|
|
|
| 60 |
raise RuntimeError(f"Pas de contenu retourné par le LLM")
|
| 61 |
return content
|
| 62 |
|
| 63 |
+
# Model selection: use the best model for each task
|
| 64 |
+
MODEL_STRATEGIST = "tencent/hy3-preview:free" # Good at planning/reasoning
|
| 65 |
+
MODEL_EXECUTOR = "openai/gpt-oss-120b:free" # Best at SVG code generation (fast + high quality)
|
| 66 |
+
|
| 67 |
|
| 68 |
# ============================================================
|
| 69 |
# STEP 1: STRATEGIST — Generate design_spec + spec_lock + outline
|
|
|
|
| 123 |
- page_rhythm: "anchor" for cover/closing, "dense" for data-heavy, "breathing" for impact pages
|
| 124 |
- Reply ONLY with raw JSON, no markdown code blocks, no explanation"""
|
| 125 |
|
| 126 |
+
result = llm_chat(prompt, max_tokens=32000, model_override=MODEL_STRATEGIST)
|
| 127 |
|
| 128 |
# Parse JSON
|
| 129 |
try:
|
|
|
|
| 323 |
Reply with ONLY the SVG. Start <svg, end </svg>. No explanation."""
|
| 324 |
|
| 325 |
try:
|
| 326 |
+
svg_content = llm_chat(prompt, max_tokens=10000, temperature=0.3, model_override=MODEL_EXECUTOR)
|
| 327 |
|
| 328 |
# Extract SVG
|
| 329 |
svg_start = svg_content.find('<svg')
|
|
|
|
| 445 |
|
| 446 |
# Phase 2: Executor (SVG generation via LLM)
|
| 447 |
progress(0.2, desc=f"Génération SVG par LLM (0/{num_slides})...")
|
| 448 |
+
log.append("━━━ PHASE 2: Executor (SVG via GPT-OSS-120B) ━━━")
|
| 449 |
+
log.append(f" ⏱️ Estimation: ~30-45s par slide, {num_slides} slides\n")
|
| 450 |
|
| 451 |
slide_count = [0]
|
| 452 |
def svg_cb(msg):
|
|
|
|
| 511 |
|
| 512 |
with gr.Tab("2️⃣ Générer le PPTX"):
|
| 513 |
gr.Markdown("### 🚀 Pipeline complet: Images + SVG (LLM) + Export")
|
| 514 |
+
gr.Markdown("⏱️ **Temps estimé**: ~2 min (images) + ~5-7 min (SVG via GPT-OSS-120B) + ~30s (export)")
|
| 515 |
btn_gen = gr.Button("⚡ Lancer la génération complète", variant="primary", size="lg")
|
| 516 |
status2 = gr.Textbox(label="Statut", interactive=False)
|
| 517 |
with gr.Row():
|
|
|
|
| 522 |
gr.Markdown("""
|
| 523 |
## Workflow PPT Master complet
|
| 524 |
|
| 525 |
+
1. **Strategist** (HY3-preview) → design_spec + spec_lock (palette, typo, icônes, rythme)
|
| 526 |
2. **Image Acquisition** → ComfyUI GPU ou recherche web
|
| 527 |
+
3. **Executor** (GPT-OSS-120B) → Génère chaque SVG individuellement, qualité premium
|
| 528 |
4. **Post-processing** → finalize_svg.py (icônes, images, rounded rects)
|
| 529 |
5. **Export** → svg_to_pptx.py → PPTX natif éditable
|
| 530 |
|
| 531 |
+
**Approche multi-modèles**: HY3 pour la planification (raisonnement), GPT-OSS-120B pour le code SVG (rapide + précis).
|
| 532 |
Le spec_lock garantit la cohérence couleurs/fonts sur l'ensemble du deck.
|
| 533 |
|
| 534 |
+
**Stack**: OpenRouter (2 modèles gratuits) + ComfyUI local (RTX 5070 Ti)
|
| 535 |
""")
|
| 536 |
|
| 537 |
btn_plan.click(fn=step1_handler, inputs=[subject, num_slides, style, language, audience, image_mode],
|