#!/usr/bin/env python3
"""
PPT Master — Interface Gradio V5
Full PPT Master workflow: Strategist → spec_lock → Image Acquisition → Executor (LLM SVG) → Quality Check → Finalize → Export
"""
import gradio as gr
import json
import os
import sys
import time
import glob
import shutil
import subprocess
import re
import urllib.parse
import urllib.request
import base64
import mimetypes
from pathlib import Path
from datetime import datetime
# Setup paths: the ppt-master skill ships its own scripts/ and references/ dirs
# next to this launcher; generated decks land under projects/.
SCRIPT_DIR = Path(__file__).resolve().parent
SKILL_DIR = SCRIPT_DIR / "skills" / "ppt-master"
SCRIPTS_DIR = SKILL_DIR / "scripts"
PROJECTS_DIR = SCRIPT_DIR / "projects"
REFS_DIR = SKILL_DIR / "references"
# Make the skill's helper modules importable (provides the `config` module below).
sys.path.insert(0, str(SCRIPTS_DIR))
from config import load_prefixed_env_file
# Load env: only variables carrying one of these prefixes are read
# (note both PIXABY_ and PIXABAY_ are accepted, presumably for a legacy typo).
load_prefixed_env_file(("OPENROUTER_", "COMFYUI_", "IMAGE_", "PEXELS_", "PIXABY_", "PIXABAY_", "GROQ_"))
# ============================================================
# LLM Client
# ============================================================
def llm_chat(prompt, system=None, max_tokens=32000, temperature=0.3, model_override=None):
    """Send one chat-completion request to OpenRouter and return the reply text.

    Args:
        prompt: User message content.
        system: Optional system message prepended to the conversation.
        max_tokens: Completion token budget forwarded to the API.
        temperature: Sampling temperature forwarded to the API.
        model_override: Explicit model id; otherwise OPENROUTER_MODEL env var
            (default "tencent/hy3-preview:free").

    Returns:
        The assistant message content as a string.

    Raises:
        RuntimeError: If the API key is missing, the API answers with an HTTP
            error, or the response carries no message content.
    """
    api_key = os.environ.get("OPENROUTER_API_KEY", "")
    if not api_key:
        # Fail fast before importing/using requests so misconfiguration surfaces clearly.
        raise RuntimeError("OPENROUTER_API_KEY non configuré dans .env")
    import requests  # local import: keeps app startup cheap when the LLM is never called
    model = model_override or os.environ.get("OPENROUTER_MODEL", "tencent/hy3-preview:free")
    base_url = os.environ.get("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1").rstrip("/")
    messages = []
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": prompt})
    payload = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
    # For reasoning models, exclude internal reasoning from output
    if "hy3" in model or "qwen3" in model:
        payload["reasoning"] = {"exclude": True}
    r = requests.post(
        f"{base_url}/chat/completions",
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
            "HTTP-Referer": "http://localhost/ppt-master",
            "X-Title": "ppt-master-gradio",
        },
        json=payload,
        timeout=600,
    )
    if r.status_code >= 400:
        raise RuntimeError(f"OpenRouter erreur {r.status_code}: {r.text[:500]}")
    data = r.json()
    content = data.get("choices", [{}])[0].get("message", {}).get("content")
    if not content:
        raise RuntimeError("Pas de contenu retourné par le LLM")
    return content
# Model selection: one free OpenRouter model per workflow phase,
# chosen for that phase's dominant skill (planning vs. code generation vs. critique).
MODEL_STRATEGIST = "tencent/hy3-preview:free"  # Good at planning/reasoning
MODEL_EXECUTOR = "openai/gpt-oss-120b:free"  # Best tested free model for SVG code generation
MODEL_CRITIC = "tencent/hy3-preview:free"  # Good visual/design critic and instruction follower
MODEL_FALLBACK_EXECUTOR = "tencent/hy3-preview:free"  # Fallback if GPT-OSS is rate-limited
# ============================================================
# WEB RESEARCH (optional, no API key)
# ============================================================
def web_research(subject, max_results=6):
    """Fetch lightweight recent/contextual info from the web using no-key sources.

    Uses DuckDuckGo HTML snippets + Wikipedia summaries as fallback/context.
    Best-effort: every network step is wrapped in try/except, so failures only
    reduce the amount of context returned.

    Args:
        subject: Search query; blank/whitespace-only input short-circuits to "".
        max_results: Maximum number of DuckDuckGo snippets to keep.

    Returns:
        A "WEB RESEARCH CONTEXT"-prefixed string, or "" when nothing was found.
    """
    snippets = []
    q = subject.strip()
    if not q:
        return ""
    # DuckDuckGo HTML snippets
    try:
        url = "https://duckduckgo.com/html/?" + urllib.parse.urlencode({"q": q})
        req = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
        html = urllib.request.urlopen(req, timeout=12).read().decode("utf-8", errors="ignore")
        # Extract result title/snippet pairs (rough but dependency-free).
        # NOTE(review): the original regex and entity literals were corrupted
        # (markup/entities stripped by an earlier tool); reconstructed against
        # DuckDuckGo's result__a / result__snippet markup — confirm against a
        # live response.
        blocks = re.findall(
            r'<a[^>]*class="result__a"[^>]*>(.*?)</a>'
            r'.*?<a[^>]*class="result__snippet"[^>]*>(.*?)</a>',
            html, flags=re.S)
        for title, snip in blocks[:max_results]:
            clean_title = re.sub('<.*?>', '', title).strip()
            clean_snip = re.sub('<.*?>', '', snip).strip()
            # Decode the HTML entities DuckDuckGo commonly leaves in snippets.
            clean_snip = clean_snip.replace('&quot;', '"').replace('&amp;', '&').replace('&#x27;', "'")
            if clean_title or clean_snip:
                snippets.append(f"- {clean_title}: {clean_snip}")
    except Exception:
        pass  # best-effort source; fall through to Wikipedia
    # Wikipedia summary fallback / enrichment
    try:
        search_url = "https://en.wikipedia.org/w/api.php?" + urllib.parse.urlencode({
            "action": "opensearch", "search": q, "limit": 3, "namespace": 0, "format": "json"
        })
        req = urllib.request.Request(search_url, headers={"User-Agent": "Mozilla/5.0"})
        data = json.loads(urllib.request.urlopen(req, timeout=10).read().decode("utf-8"))
        titles = data[1] if len(data) > 1 else []
        for title in titles[:3]:
            summary_url = "https://en.wikipedia.org/api/rest_v1/page/summary/" + urllib.parse.quote(title)
            req2 = urllib.request.Request(summary_url, headers={"User-Agent": "Mozilla/5.0"})
            summary = json.loads(urllib.request.urlopen(req2, timeout=10).read().decode("utf-8"))
            extract = summary.get("extract")
            if extract:
                snippets.append(f"- Wikipedia — {title}: {extract[:600]}")
    except Exception:
        pass  # best-effort enrichment only
    if not snippets:
        return ""
    return "WEB RESEARCH CONTEXT (use for factual accuracy and recent/contextual details):\n" + "\n".join(snippets[:max_results+3])
# ============================================================
# STEP 1: STRATEGIST — Generate design_spec + spec_lock + outline
# ============================================================
def strategist_phase(subject, num_slides, style, language, audience, use_web=False):
    """The Strategist generates the full design specification.

    Args:
        subject: Presentation topic.
        num_slides: Requested slide count.
        style: One of the named style presets listed in the prompt.
        language: Output language for slide content.
        audience: Target audience description.
        use_web: When True, prepend no-key web research context to the prompt.

    Returns:
        Parsed dict with keys "title", "spec_lock", "slides".

    Raises:
        RuntimeError: If the LLM reply cannot be parsed (or repaired) as JSON.
    """
    research_context = web_research(subject) if use_web else ""
    prompt = f"""You are the Strategist for PPT Master. Generate a complete design specification for a presentation.
Subject: {subject}
Number of slides: {num_slides}
Style: {style}
Language: {language}
Target audience: {audience}
Canvas: PPT 16:9 (1280x720, viewBox="0 0 1280 720")
{research_context}
Produce a JSON response with this EXACT structure:
{{
"title": "Presentation title",
"spec_lock": {{
"canvas": {{"viewBox": "0 0 1280 720", "format": "PPT 16:9"}},
"colors": {{"bg": "#...", "bg_alt": "#...", "primary": "#...", "accent": "#...", "secondary_accent": "#...", "text": "#...", "text_secondary": "#...", "border": "#..."}},
"typography": {{"title_family": "...", "body_family": "...", "body": 20, "title": 40, "subtitle": 26, "annotation": 14}},
"icons": {{"library": "chunk-filled", "inventory": ["icon1", "icon2", "..."]}},
"page_rhythm": {{"P01": "anchor", "P02": "dense", "...": "..."}}
}},
"slides": [
{{
"number": 1,
"title": "Slide title",
"layout": "cover",
"rhythm": "anchor",
"design_pattern": "cinematic_full_bleed",
"content": ["Point 1", "Point 2", "Point 3"],
"image_prompt": "English cinematic image description, no text",
"image_source": "ai",
"notes": "Speaker notes for this slide (conversational tone)"
}}
]
}}
Style guidelines:
- "Dark Fantasy": dark backgrounds (#0B0F17), gold (#C8A45D), red (#8B1E2D), ice blue (#6FA8B8)
- "Corporate": white, professional blue (#1A56DB), clean
- "Tech / Startup": dark navy (#0F172A), cyan (#06B6D4), purple (#8B5CF6)
- "Nature / Zen": warm white (#FEFDF8), green (#2D5016), amber (#B45309)
- "Académique": cream (#FFFBF5), burgundy (#7C2D12), blue (#1E40AF)
- "Minimaliste": pure white, black (#18181B), red accent (#DC2626)
- "Luxury Editorial": deep charcoal/cream, champagne gold, magazine typography, full-bleed imagery
- "McKinsey Consulting": white/ink blue, precise grids, executive charts, numbered insights
- "Cinematic Documentary": dark cinematic overlays, frame bars, photography-first, caption labels
- "Neo Futuristic": black, electric cyan/magenta, glow grids, sci-fi panels
- "Japanese Minimal Zen": warm paper, ink black, muted green, asymmetry, generous whitespace
- "Swiss Modern": white, red/black, strong grid, huge typography, brutal clarity
- "Vintage Scientific": parchment/cream, sepia, blueprint lines, diagrams, annotations
- "Premium Data Story": dark/white hybrid, hero metrics, dashboards, elegant charts
Rules:
- Content points: max 20 words each, factual and specific
- image_prompt: English, cinematic, under 30 words, NO text in image
- image_source: "ai" for generated, "web" for stock photo search
- design_pattern: choose one of cinematic_full_bleed, editorial_split, consulting_dashboard, hero_metric, timeline_ribbon, comparison_duel, image_mosaic, map_pins, quote_breathing, process_flow, card_grid, blueprint_diagram, luxury_catalog, swiss_poster
- notes: conversational tone, 2-3 sentences, like talking to audience
- icons inventory: pick from chunk-filled library (crown, shield, sword, fire, users, chart-bar, lightbulb, target, bolt, map, castle, skull, book-open, globe, rocket, heart, star, trophy, flag, clock)
- First slide layout="cover", last="closing", others mix of "content"/"comparison"/"timeline"/"quote"
- page_rhythm: "anchor" for cover/closing, "dense" for data-heavy, "breathing" for impact pages
- Reply ONLY with raw JSON, no markdown code blocks, no explanation"""
    result = llm_chat(prompt, max_tokens=32000, model_override=MODEL_STRATEGIST)
    # Parse JSON: grab the outermost {...} span in case the model wrapped it in prose.
    try:
        json_match = re.search(r'\{[\s\S]*\}', result)
        if json_match:
            data = json.loads(json_match.group())
        else:
            data = json.loads(result)
    except json.JSONDecodeError:
        # The model sometimes truncates the closing brackets; try appending
        # plausible closing suffixes until one parses.
        for suffix in ['}]}', '"}]}', '"]}]}']:
            try:
                repaired = result.rstrip() + suffix
                json_match = re.search(r'\{[\s\S]*\}', repaired)
                if json_match:
                    data = json.loads(json_match.group())
                    break
            except json.JSONDecodeError:
                continue
        else:
            raise RuntimeError(f"JSON invalide du Strategist.\n\nRéponse:\n{result[:3000]}")
    return data
def format_strategist_preview(data):
    """Format strategist output as readable markdown.

    Args:
        data: Dict produced by strategist_phase ("title", "spec_lock", "slides").
            Each slide must carry "number" and "title"; everything else is optional.

    Returns:
        A Markdown string summarizing palette, typography, icons and the
        per-slide plan (layout, rhythm, pattern, content, image, notes).
    """
    lines = [f"# {data.get('title', 'Présentation')}\n"]
    spec = data.get("spec_lock", {})
    colors = spec.get("colors", {})
    typo = spec.get("typography", {})
    lines.append("## 🎨 Design Spec\n")
    lines.append(f"**Palette**: {' | '.join(f'{k}: `{v}`' for k, v in colors.items())}\n")
    lines.append(f"**Typo**: titre={typo.get('title_family','?')}, corps={typo.get('body_family','?')}, taille={typo.get('body',20)}px\n")
    lines.append(f"**Icônes**: {', '.join(spec.get('icons',{}).get('inventory',[]))}\n")
    lines.append("\n## 📋 Plan des slides\n")
    for slide in data.get("slides", []):
        lines.append(f"### Slide {slide['number']}: {slide['title']}")
        lines.append(f"*Layout: {slide.get('layout','content')} | Rythme: {slide.get('rhythm','dense')} | Pattern: {slide.get('design_pattern','auto')}*\n")
        for p in slide.get("content", []):
            lines.append(f"- {p}")
        if slide.get("image_prompt"):
            lines.append(f"\n🖼️ Image ({slide.get('image_source','ai')}): *{slide['image_prompt']}*")
        if slide.get("notes"):
            # Notes are truncated to 100 chars for the preview.
            lines.append(f"\n🎙️ Notes: *{slide['notes'][:100]}...*")
        lines.append("")
    return "\n".join(lines)
def write_project_specs(data, project_path):
"""Write design_spec.md and spec_lock.md like original PPT Master so quality checker can enforce drift."""
project = Path(project_path)
spec = data.get("spec_lock", {})
colors = spec.get("colors", {})
typo = spec.get("typography", {})
icons = spec.get("icons", {})
rhythms = spec.get("page_rhythm", {})
design_lines = [f"# {data.get('title','Presentation')} - Design Spec", "", "## I. Project Information", "", f"- Canvas Format: PPT 16:9 (1280×720)", f"- Page Count: {len(data.get('slides', []))}", "- Design Style: AI-generated via PPT Master full workflow", "", "## III. Visual Theme", ""]
for k,v in colors.items():
design_lines.append(f"- {k}: `{v}`")
design_lines += ["", "## IV. Typography", ""]
for k,v in typo.items():
design_lines.append(f"- {k}: {v}")
design_lines += ["", "## IX. Content Outline", ""]
for sl in data.get('slides', []):
design_lines.append(f"### Slide {sl.get('number'):02d} - {sl.get('title')}")
for pt in sl.get('content', []):
design_lines.append(f"- {pt}")
design_lines.append("")
(project / "design_spec.md").write_text("\n".join(design_lines), encoding="utf-8")
lock = ["# Execution Lock", "", "## canvas", "- viewBox: 0 0 1280 720", "- format: PPT 16:9", "", "## colors"]
for k,v in colors.items():
lock.append(f"- {k}: {v}")
# Add common colors used by gradients/overlays if not present
for k,v in {"black":"#000000", "white":"#FFFFFF"}.items():
if k not in colors:
lock.append(f"- {k}: {v}")
lock += ["", "## typography"]
if "font_family" not in typo:
lock.append(f"- font_family: {typo.get('body_family','Arial, sans-serif')}")
for k,v in typo.items():
lock.append(f"- {k}: {v}")
lock += ["", "## icons", f"- library: {icons.get('library','chunk-filled')}", f"- inventory: {', '.join(icons.get('inventory', []))}", "", "## images"]
for sl in data.get('slides', []):
n = int(sl.get('number', 0))
lock.append(f"- slide_{n:02d}: images/slide_{n:02d}.png")
lock.append(f"- slide_{n:02d}_jpg: images/slide_{n:02d}.jpg")
lock += ["", "## page_rhythm"]
for sl in data.get('slides', []):
key=f"P{int(sl.get('number',0)):02d}"
lock.append(f"- {key}: {sl.get('rhythm') or rhythms.get(key,'dense')}")
lock += ["", "## forbidden", "- Mixing icon libraries", "- rgba()", "-