Albator2570 committed on
Commit
9101b28
·
verified ·
1 Parent(s): 1a547a7

V9 Exceptional: more design styles/patterns + web research + editable plan + Groq Llama-4-Scout vision image verification

Browse files
Files changed (1) hide show
  1. interface/app_gradio.py +128 -7
interface/app_gradio.py CHANGED
@@ -15,6 +15,8 @@ import subprocess
15
  import re
16
  import urllib.parse
17
  import urllib.request
 
 
18
  from pathlib import Path
19
  from datetime import datetime
20
 
@@ -29,7 +31,7 @@ sys.path.insert(0, str(SCRIPTS_DIR))
29
  from config import load_prefixed_env_file
30
 
31
  # Load env
32
- load_prefixed_env_file(("OPENROUTER_", "COMFYUI_", "IMAGE_", "PEXELS_", "PIXABAY_"))
33
 
34
  # ============================================================
35
  # LLM Client
@@ -157,6 +159,7 @@ Produce a JSON response with this EXACT structure:
157
  "title": "Slide title",
158
  "layout": "cover",
159
  "rhythm": "anchor",
 
160
  "content": ["Point 1", "Point 2", "Point 3"],
161
  "image_prompt": "English cinematic image description, no text",
162
  "image_source": "ai",
@@ -172,11 +175,20 @@ Style guidelines:
172
  - "Nature / Zen": warm white (#FEFDF8), green (#2D5016), amber (#B45309)
173
  - "Académique": cream (#FFFBF5), burgundy (#7C2D12), blue (#1E40AF)
174
  - "Minimaliste": pure white, black (#18181B), red accent (#DC2626)
 
 
 
 
 
 
 
 
175
 
176
  Rules:
177
  - Content points: max 20 words each, factual and specific
178
  - image_prompt: English, cinematic, under 30 words, NO text in image
179
  - image_source: "ai" for generated, "web" for stock photo search
 
180
  - notes: conversational tone, 2-3 sentences, like talking to audience
181
  - icons inventory: pick from chunk-filled library (crown, shield, sword, fire, users, chart-bar, lightbulb, target, bolt, map, castle, skull, book-open, globe, rocket, heart, star, trophy, flag, clock)
182
  - First slide layout="cover", last="closing", others mix of "content"/"comparison"/"timeline"/"quote"
@@ -225,7 +237,7 @@ def format_strategist_preview(data):
225
  lines.append("\n## 📋 Plan des slides\n")
226
  for slide in data.get("slides", []):
227
  lines.append(f"### Slide {slide['number']}: {slide['title']}")
228
- lines.append(f"*Layout: {slide.get('layout','content')} | Rythme: {slide.get('rhythm','dense')}*\n")
229
  for p in slide.get("content", []):
230
  lines.append(f"- {p}")
231
  if slide.get("image_prompt"):
@@ -470,6 +482,89 @@ SVG:
470
  fixed = llm_chat(prompt, max_tokens=12000, temperature=0.2, model_override=MODEL_EXECUTOR)
471
  return _extract_svg(fixed)
472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
473
  # ============================================================
474
  # STEP 3: EXECUTOR — LLM generates each SVG sequentially
475
  # ============================================================
@@ -521,7 +616,23 @@ VISUAL QUALITY RULES (for premium output):
521
  - Page number: bottom-right, annotation size, muted color
522
  - Add decorative circles/dots at low opacity (0.06-0.15) for visual texture
523
  - Text must be readable: ensure contrast between text color and background
524
- - For breathing pages: big whitespace, one dominant element, dramatic"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
525
 
526
  total_notes = []
527
 
@@ -670,7 +781,7 @@ def step1_handler(subject, num_slides, style, language, audience, image_mode, us
670
  return f"❌ Erreur Strategist: {str(e)[:300]}", "", "{}"
671
 
672
 
673
- def step2_handler(data_json, style, image_mode, progress=gr.Progress()):
674
  """Full generation pipeline."""
675
  if not data_json or data_json == "{}":
676
  return "❌ Génère d'abord le plan (étape 1)", None, ""
@@ -699,7 +810,12 @@ def step2_handler(data_json, style, image_mode, progress=gr.Progress()):
699
 
700
  def img_cb(msg): log.append(f" {msg}")
701
  acquire_images(data, project_path, image_mode, progress_cb=img_cb)
702
- log.append(" ✅ Images acquises\n")
 
 
 
 
 
703
 
704
  # Phase 2: Executor (SVG generation via LLM)
705
  progress(0.2, desc=f"Génération SVG par LLM (0/{num_slides})...")
@@ -748,7 +864,11 @@ def refresh_preview_from_json(data_json):
748
  # GRADIO UI
749
  # ============================================================
750
 
751
- STYLES = ["Dark Fantasy", "Corporate", "Tech / Startup", "Nature / Zen", "Académique", "Minimaliste"]
 
 
 
 
752
  LANGUAGES = ["Français", "English", "Español", "Deutsch", "中文", "日本語"]
753
  IMAGE_MODES = ["ComfyUI (local GPU)", "Recherche web (Wikimedia)", "Les deux (ComfyUI + web)", "Aucune image"]
754
 
@@ -772,6 +892,7 @@ with gr.Blocks(title="PPT Master — Générateur IA") as app:
772
  audience = gr.Textbox(label="👥 Public", value="Grand public", placeholder="Ex: professionnels, étudiants...")
773
  image_mode = gr.Dropdown(IMAGE_MODES, value="ComfyUI (local GPU)", label="🖼️ Images")
774
  use_web = gr.Checkbox(label="🌐 Recherche web pour données récentes", value=True)
 
775
 
776
  btn_plan = gr.Button("🧠 Générer le design spec (Strategist)", variant="primary", size="lg")
777
  status1 = gr.Textbox(label="Statut", interactive=False)
@@ -810,7 +931,7 @@ Le spec_lock garantit la cohérence couleurs/fonts sur l'ensemble du deck.
810
  btn_plan.click(fn=step1_handler, inputs=[subject, num_slides, style, language, audience, image_mode, use_web],
811
  outputs=[status1, preview, outline_editor])
812
  btn_refresh.click(fn=refresh_preview_from_json, inputs=[outline_editor], outputs=[status1, preview, outline_editor])
813
- btn_gen.click(fn=step2_handler, inputs=[outline_editor, style, image_mode],
814
  outputs=[status2, pptx_out, log_box])
815
 
816
 
 
15
  import re
16
  import urllib.parse
17
  import urllib.request
18
+ import base64
19
+ import mimetypes
20
  from pathlib import Path
21
  from datetime import datetime
22
 
 
31
  from config import load_prefixed_env_file
32
 
33
  # Load env
34
# Load environment variables for every external service used by the app.
# Fixed: dropped the misspelled "PIXABY_" prefix that duplicated "PIXABAY_".
load_prefixed_env_file(("OPENROUTER_", "COMFYUI_", "IMAGE_", "PEXELS_", "PIXABAY_", "GROQ_"))
35
 
36
  # ============================================================
37
  # LLM Client
 
159
  "title": "Slide title",
160
  "layout": "cover",
161
  "rhythm": "anchor",
162
+ "design_pattern": "cinematic_full_bleed",
163
  "content": ["Point 1", "Point 2", "Point 3"],
164
  "image_prompt": "English cinematic image description, no text",
165
  "image_source": "ai",
 
175
  - "Nature / Zen": warm white (#FEFDF8), green (#2D5016), amber (#B45309)
176
  - "Académique": cream (#FFFBF5), burgundy (#7C2D12), blue (#1E40AF)
177
  - "Minimaliste": pure white, black (#18181B), red accent (#DC2626)
178
+ - "Luxury Editorial": deep charcoal/cream, champagne gold, magazine typography, full-bleed imagery
179
+ - "McKinsey Consulting": white/ink blue, precise grids, executive charts, numbered insights
180
+ - "Cinematic Documentary": dark cinematic overlays, frame bars, photography-first, caption labels
181
+ - "Neo Futuristic": black, electric cyan/magenta, glow grids, sci-fi panels
182
+ - "Japanese Minimal Zen": warm paper, ink black, muted green, asymmetry, generous whitespace
183
+ - "Swiss Modern": white, red/black, strong grid, huge typography, brutal clarity
184
+ - "Vintage Scientific": parchment/cream, sepia, blueprint lines, diagrams, annotations
185
+ - "Premium Data Story": dark/white hybrid, hero metrics, dashboards, elegant charts
186
 
187
  Rules:
188
  - Content points: max 20 words each, factual and specific
189
  - image_prompt: English, cinematic, under 30 words, NO text in image
190
  - image_source: "ai" for generated, "web" for stock photo search
191
+ - design_pattern: choose one of cinematic_full_bleed, editorial_split, consulting_dashboard, hero_metric, timeline_ribbon, comparison_duel, image_mosaic, map_pins, quote_breathing, process_flow, card_grid, blueprint_diagram, luxury_catalog, swiss_poster
192
  - notes: conversational tone, 2-3 sentences, like talking to audience
193
  - icons inventory: pick from chunk-filled library (crown, shield, sword, fire, users, chart-bar, lightbulb, target, bolt, map, castle, skull, book-open, globe, rocket, heart, star, trophy, flag, clock)
194
  - First slide layout="cover", last="closing", others mix of "content"/"comparison"/"timeline"/"quote"
 
237
  lines.append("\n## 📋 Plan des slides\n")
238
  for slide in data.get("slides", []):
239
  lines.append(f"### Slide {slide['number']}: {slide['title']}")
240
+ lines.append(f"*Layout: {slide.get('layout','content')} | Rythme: {slide.get('rhythm','dense')} | Pattern: {slide.get('design_pattern','auto')}*\n")
241
  for p in slide.get("content", []):
242
  lines.append(f"- {p}")
243
  if slide.get("image_prompt"):
 
482
  fixed = llm_chat(prompt, max_tokens=12000, temperature=0.2, model_override=MODEL_EXECUTOR)
483
  return _extract_svg(fixed)
484
 
485
+
486
# ============================================================
# VISION QUALITY GATE — verify image relevance
# ============================================================

# SECURITY: never hard-code API keys in source. The Groq key previously
# committed here was exposed in version control and must be revoked.
# Supply the key via the GROQ_API_KEY environment variable (the "GROQ_"
# prefix is loaded by load_prefixed_env_file at startup).
GROQ_API_KEY_DEFAULT = ""
GROQ_VISION_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
492
+
493
+ def _find_slide_image(images_dir: Path, slide_number: int):
494
+ name = f"slide_{slide_number:02d}"
495
+ for ext in ('.png', '.jpg', '.jpeg', '.webp'):
496
+ p = images_dir / f"{name}{ext}"
497
+ if p.exists():
498
+ return p
499
+ return None
500
+
501
def vision_score_image(image_path: Path, expected_prompt: str, slide_title: str, slide_content: list):
    """Use Groq Llama-4-Scout vision to score whether image matches the slide needs.

    Sends the image as a base64 data URL together with the slide's title,
    content and expected prompt to Groq's OpenAI-compatible chat endpoint,
    asking the model for a strict-JSON evaluation.

    Returns dict: {score:int, verdict:str, issues:list, suggestion:str}.
    Never raises: any failure (missing key, HTTP error, bad JSON, unreadable
    file) degrades to an "error"/"unknown" result so the pipeline continues.
    """
    try:
        import requests  # local import: optional dependency, only needed for the vision gate
        api_key = os.environ.get("GROQ_API_KEY", GROQ_API_KEY_DEFAULT)
        if not api_key:
            # Fail fast rather than sending an unauthenticated request.
            return {"score": 0, "verdict": "error", "issues": ["GROQ_API_KEY not set"], "suggestion": expected_prompt}
        mime = mimetypes.guess_type(str(image_path))[0] or "image/png"
        b64 = base64.b64encode(image_path.read_bytes()).decode('utf-8')
        text = f"""Evaluate if this image matches a PowerPoint slide.
Slide title: {slide_title}
Slide content: {slide_content}
Expected image prompt: {expected_prompt}

Return ONLY JSON:
{{"score":0-10,"verdict":"good|acceptable|bad","issues":["..."],"suggestion":"better image prompt if bad"}}
Criteria: relevance to subject, visual quality, no unwanted text/watermark, fits professional presentation."""
        payload = {
            "model": GROQ_VISION_MODEL,
            "messages": [{"role": "user", "content": [
                {"type": "text", "text": text},
                {"type": "image_url", "image_url": {"url": f"data:{mime};base64,{b64}"}}
            ]}],
            "temperature": 0.1,
            "max_tokens": 800,
        }
        r = requests.post("https://api.groq.com/openai/v1/chat/completions",
                          headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
                          json=payload, timeout=60)
        if r.status_code >= 400:
            return {"score": 0, "verdict": "error", "issues": [r.text[:200]], "suggestion": expected_prompt}
        content = r.json().get("choices", [{}])[0].get("message", {}).get("content", "")
        # The model sometimes wraps the JSON in prose; extract the outermost braces.
        m = re.search(r'\{[\s\S]*\}', content)
        if m:
            return json.loads(m.group())
        return {"score": 5, "verdict": "acceptable", "issues": ["No JSON from vision model"], "suggestion": expected_prompt}
    except Exception as e:
        # Network errors, missing `requests`, malformed JSON, unreadable file:
        # degrade gracefully — callers treat "unknown" (score 5) as acceptable.
        return {"score": 5, "verdict": "unknown", "issues": [str(e)[:200]], "suggestion": expected_prompt}
539
+
540
def verify_images_with_vision(data, project_path, image_mode, progress_cb=None, min_score=6):
    """Analyze generated/web images. If clearly bad and ComfyUI is available, regenerate once with improved prompt."""
    images_dir = Path(project_path) / "images"
    comfy_modes = ("ComfyUI (local GPU)", "Les deux (ComfyUI + web)")

    def _log(message):
        # Progress reporting is optional.
        if progress_cb:
            progress_cb(message)

    for slide in data.get("slides", []):
        img = _find_slide_image(images_dir, int(slide.get("number", 0)))
        if not img:
            continue
        _log(f"👁️ Vision check slide {slide.get('number')}: {img.name}")
        result = vision_score_image(img, slide.get("image_prompt", ""), slide.get("title", ""), slide.get("content", []))
        score = int(result.get("score", 5) or 5)
        verdict = result.get("verdict", "?")
        _log(f" score={score}/10 verdict={verdict}")
        # Only retry when the score is poor AND a local backend can regenerate.
        if score >= min_score or image_mode not in comfy_modes:
            continue
        suggestion = result.get("suggestion") or slide.get("image_prompt", "")
        improved_prompt = suggestion + ", professional presentation image, cinematic, no text, no watermark, high quality"
        _log(f" 🔁 Regeneration image avec prompt amélioré")
        try:
            subprocess.run([sys.executable, str(SCRIPTS_DIR / "image_gen.py"), improved_prompt,
                            "--backend", "comfyui", "--aspect_ratio", "16:9", "--image_size", "1K",
                            "-o", str(images_dir), "--filename", f"slide_{int(slide.get('number')):02d}"],
                           capture_output=True, text=True, timeout=180, cwd=str(SCRIPTS_DIR))
        except Exception as e:
            _log(f" ⚠️ regeneration failed: {str(e)[:80]}")
567
+
568
  # ============================================================
569
  # STEP 3: EXECUTOR — LLM generates each SVG sequentially
570
  # ============================================================
 
616
  - Page number: bottom-right, annotation size, muted color
617
  - Add decorative circles/dots at low opacity (0.06-0.15) for visual texture
618
  - Text must be readable: ensure contrast between text color and background
619
+ - For breathing pages: big whitespace, one dominant element, dramatic
620
+
621
+ DESIGN PATTERN LIBRARY (pick/adapt according to slide.design_pattern):
622
+ - cinematic_full_bleed: image full canvas + multi-stop dark overlay + title near bottom third + hairline accents
623
+ - editorial_split: 55/45 asymmetry, image crop on one side, text column with small caps label
624
+ - consulting_dashboard: KPI cards, microcharts, numbered insights, strict 12-column grid
625
+ - hero_metric: one huge number/word (90-140px) with glow + two supporting facts
626
+ - timeline_ribbon: horizontal or vertical progression with nodes, date tags, connector line
627
+ - comparison_duel: two opposing panels separated by thin divider, mirrored structure
628
+ - image_mosaic: 3-5 image tiles with consistent gutters + captions, magazine layout
629
+ - map_pins: map/image background with pins, labels, legend panel
630
+ - quote_breathing: huge quote, single image/texture, extreme whitespace
631
+ - process_flow: chevrons/arrows/steps with numbered circles, clear directionality
632
+ - card_grid: 3/4/6 cards with icons, accent bars, equal spacing
633
+ - blueprint_diagram: thin strokes, dashed lines, labels, technical schematic feel
634
+ - luxury_catalog: product/image hero, serif typography, gold dividers, editorial footer
635
+ - swiss_poster: huge typography, strict grid, red/black/white, minimal but striking"""
636
 
637
  total_notes = []
638
 
 
781
  return f"❌ Erreur Strategist: {str(e)[:300]}", "", "{}"
782
 
783
 
784
+ def step2_handler(data_json, style, image_mode, verify_vision=True, progress=gr.Progress()):
785
  """Full generation pipeline."""
786
  if not data_json or data_json == "{}":
787
  return "❌ Génère d'abord le plan (étape 1)", None, ""
 
810
 
811
  def img_cb(msg): log.append(f" {msg}")
812
  acquire_images(data, project_path, image_mode, progress_cb=img_cb)
813
+ log.append(" ✅ Images acquises")
814
+ if verify_vision and image_mode != "Aucune image":
815
+ log.append(" 👁️ Vérification vision des images...")
816
+ verify_images_with_vision(data, project_path, image_mode, progress_cb=img_cb)
817
+ log.append(" ✅ Vérification vision terminée")
818
+ log.append("")
819
 
820
  # Phase 2: Executor (SVG generation via LLM)
821
  progress(0.2, desc=f"Génération SVG par LLM (0/{num_slides})...")
 
864
  # GRADIO UI
865
  # ============================================================
866
 
867
# Preset design-style names offered to the user (order defines display order).
STYLES = [
    "Dark Fantasy",
    "Corporate",
    "Tech / Startup",
    "Nature / Zen",
    "Académique",
    "Minimaliste",
    "Luxury Editorial",
    "McKinsey Consulting",
    "Cinematic Documentary",
    "Neo Futuristic",
    "Japanese Minimal Zen",
    "Swiss Modern",
    "Vintage Scientific",
    "Premium Data Story",
]
872
  LANGUAGES = ["Français", "English", "Español", "Deutsch", "中文", "日本語"]
873
  IMAGE_MODES = ["ComfyUI (local GPU)", "Recherche web (Wikimedia)", "Les deux (ComfyUI + web)", "Aucune image"]
874
 
 
892
  audience = gr.Textbox(label="👥 Public", value="Grand public", placeholder="Ex: professionnels, étudiants...")
893
  image_mode = gr.Dropdown(IMAGE_MODES, value="ComfyUI (local GPU)", label="🖼️ Images")
894
  use_web = gr.Checkbox(label="🌐 Recherche web pour données récentes", value=True)
895
+ verify_vision = gr.Checkbox(label="👁️ Vérifier les images avec Llama-4-Scout Vision", value=True)
896
 
897
  btn_plan = gr.Button("🧠 Générer le design spec (Strategist)", variant="primary", size="lg")
898
  status1 = gr.Textbox(label="Statut", interactive=False)
 
931
  btn_plan.click(fn=step1_handler, inputs=[subject, num_slides, style, language, audience, image_mode, use_web],
932
  outputs=[status1, preview, outline_editor])
933
  btn_refresh.click(fn=refresh_preview_from_json, inputs=[outline_editor], outputs=[status1, preview, outline_editor])
934
+ btn_gen.click(fn=step2_handler, inputs=[outline_editor, style, image_mode, verify_vision],
935
  outputs=[status2, pptx_out, log_box])
936
 
937