| |
| """ |
| ComfyUI local image generation backend. |
| |
| This backend talks to a local ComfyUI HTTP server and is designed to be the |
| PPT Master default replacement for cloud image providers. |
| |
| Configuration keys: |
| COMFYUI_SERVER Optional, default http://127.0.0.1:8188 |
| COMFYUI_WORKFLOW Optional path to a ComfyUI API workflow JSON |
| COMFYUI_MODEL Optional checkpoint name for the fallback workflow |
| COMFYUI_AUTO_START Optional true/false, default true |
| COMFYUI_DIR Optional path to ComfyUI checkout, default /app/ComfyUI |
| COMFYUI_PYTHON Optional python executable for ComfyUI server |
| |
| If ComfyUI is unavailable or has no usable model/workflow in this CPU sandbox, |
| set COMFYUI_FALLBACK=placeholder (default) to create a deterministic local |
| cinematic placeholder image instead of failing the PPT pipeline. |
| """ |
|
|
from __future__ import annotations

import hashlib
import io
import json
import os
import random
import subprocess
import sys
import time
import uuid
from pathlib import Path
from typing import Any
from urllib.parse import urlparse

import requests
from PIL import Image, ImageDraw, ImageFilter, ImageFont

from image_backends.backend_common import MAX_RETRIES, normalize_image_size, resolve_output_path, save_image_bytes
|
|
# Base URL of the local ComfyUI HTTP server; override with COMFYUI_SERVER.
DEFAULT_SERVER = "http://127.0.0.1:8188"
# Default model label (read once at import time); the actual checkpoint used
# for generation is resolved again inside _load_workflow.
DEFAULT_MODEL = os.environ.get("COMFYUI_MODEL", "local-comfyui")
# Aspect ratio strings accepted by generate().
VALID_ASPECT_RATIOS = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]
# Size labels accepted by generate(); see _dimensions for the pixel mapping.
VALID_IMAGE_SIZES = ["512px", "1K", "2K", "4K", "0.5K"]
|
|
|
|
| def _bool_env(name: str, default: bool) -> bool: |
| value = os.environ.get(name) |
| if value is None: |
| return default |
| return value.strip().lower() in {"1", "true", "yes", "y", "on"} |
|
|
|
|
def _server() -> str:
    """Return the configured ComfyUI base URL, without a trailing slash."""
    configured = os.environ.get("COMFYUI_SERVER", DEFAULT_SERVER)
    return configured.rstrip("/")
|
|
|
|
def _ping(server: str) -> bool:
    """Return True when *server* answers /system_stats with HTTP 200 within 2s."""
    try:
        status = requests.get(f"{server}/system_stats", timeout=2).status_code
    except requests.RequestException:
        return False
    return status == 200
|
|
|
|
def _start_server_if_needed(server: str) -> bool:
    """Ensure a ComfyUI server is reachable, auto-starting one if allowed.

    Returns True when *server* responds to a ping, either immediately or
    within 60 seconds of spawning a local ComfyUI process. Returns False when
    auto-start is disabled, the checkout is missing, or startup times out.
    """
    if _ping(server):
        return True
    if not _bool_env("COMFYUI_AUTO_START", True):
        return False

    comfy_dir = Path(os.environ.get("COMFYUI_DIR", "/app/ComfyUI"))
    main_py = comfy_dir / "main.py"
    if not main_py.exists():
        return False

    # Bind the spawned server to the host/port we are actually going to ping.
    # Previously this always launched on 127.0.0.1:8188, so a non-default
    # COMFYUI_SERVER could never be satisfied by auto-start.
    parsed = urlparse(server)
    listen_host = parsed.hostname or "127.0.0.1"
    listen_port = str(parsed.port or 8188)

    python_exe = os.environ.get("COMFYUI_PYTHON", sys.executable)
    log_path = Path(os.environ.get("COMFYUI_LOG", "/app/comfyui.log"))
    extra_args = os.environ.get("COMFYUI_EXTRA_ARGS", "").split()
    # Close the parent's copy of the log handle once Popen has inherited it;
    # the old code leaked one file descriptor per start attempt.
    with log_path.open("ab") as log_fh:
        subprocess.Popen(
            [python_exe, str(main_py), "--listen", listen_host, "--port", listen_port, *extra_args],
            cwd=str(comfy_dir),
            stdout=log_fh,
            stderr=subprocess.STDOUT,
            start_new_session=True,
        )

    # Poll for up to ~60 seconds while the server boots.
    for _ in range(60):
        if _ping(server):
            return True
        time.sleep(1)
    return False
|
|
|
|
def _dimensions(aspect_ratio: str, image_size: str) -> tuple[int, int]:
    """Translate an aspect-ratio string and size label into pixel dimensions.

    The size label fixes the long edge; the short edge follows the ratio.
    Both edges are rounded to a multiple of 8 with a 256px floor.
    """
    size_label = normalize_image_size(image_size)
    long_edge_by_label = {"0.5K": 512, "512px": 512, "1K": 1024, "2K": 1536, "4K": 2048}
    long_edge = long_edge_by_label.get(size_label, 1024)

    w_part, h_part = (int(part) for part in aspect_ratio.split(":"))
    if w_part >= h_part:
        width, height = long_edge, round(long_edge * h_part / w_part)
    else:
        width, height = round(long_edge * w_part / h_part), long_edge

    def _snap(edge: float) -> int:
        # Round to the nearest multiple of 8, never below 256.
        return max(256, int(round(edge / 8) * 8))

    return _snap(width), _snap(height)
|
|
|
|
| def _load_workflow(prompt: str, negative_prompt: str, width: int, height: int, model: str | None) -> dict[str, Any]: |
| workflow_path = os.environ.get("COMFYUI_WORKFLOW") |
| if workflow_path: |
| data = json.loads(Path(workflow_path).read_text(encoding="utf-8")) |
| raw = json.dumps(data) |
| replacements = { |
| "{{prompt}}": prompt, |
| "{{negative_prompt}}": negative_prompt, |
| "{{width}}": str(width), |
| "{{height}}": str(height), |
| "{{seed}}": str(random.randint(1, 2**31 - 1)), |
| "{{model}}": model or os.environ.get("COMFYUI_MODEL", ""), |
| } |
| for k, v in replacements.items(): |
| raw = raw.replace(k, v) |
| return json.loads(raw) |
|
|
| ckpt = model or os.environ.get("COMFYUI_MODEL") |
| if not ckpt: |
| raise RuntimeError("No COMFYUI_WORKFLOW or COMFYUI_MODEL configured for real ComfyUI generation.") |
|
|
| return { |
| "1": {"class_type": "CheckpointLoaderSimple", "inputs": {"ckpt_name": ckpt}}, |
| "2": {"class_type": "CLIPTextEncode", "inputs": {"text": prompt, "clip": ["1", 1]}}, |
| "3": {"class_type": "CLIPTextEncode", "inputs": {"text": negative_prompt, "clip": ["1", 1]}}, |
| "4": {"class_type": "EmptyLatentImage", "inputs": {"width": width, "height": height, "batch_size": 1}}, |
| "5": {"class_type": "KSampler", "inputs": {"seed": random.randint(1, 2**31 - 1), "steps": 24, "cfg": 7.0, "sampler_name": "euler", "scheduler": "normal", "denoise": 1.0, "model": ["1", 0], "positive": ["2", 0], "negative": ["3", 0], "latent_image": ["4", 0]}}, |
| "6": {"class_type": "VAEDecode", "inputs": {"samples": ["5", 0], "vae": ["1", 2]}}, |
| "7": {"class_type": "SaveImage", "inputs": {"filename_prefix": "pptmaster", "images": ["6", 0]}}, |
| } |
|
|
|
|
def _queue_and_fetch(server: str, workflow: dict[str, Any]) -> bytes:
    """Submit *workflow* to ComfyUI and poll history until an image is ready.

    Returns the raw bytes of the first output image found. Raises RuntimeError
    when queueing fails or after a 600-second polling timeout.
    """
    client_id = str(uuid.uuid4())
    payload = {"prompt": workflow, "client_id": client_id}
    queue_response = requests.post(f"{server}/prompt", json=payload, timeout=30)
    if queue_response.status_code >= 400:
        raise RuntimeError(f"ComfyUI /prompt failed {queue_response.status_code}: {queue_response.text[:1000]}")
    prompt_id = queue_response.json()["prompt_id"]

    deadline = time.time() + 600
    while time.time() < deadline:
        history_response = requests.get(f"{server}/history/{prompt_id}", timeout=10)
        if history_response.status_code == 200:
            history = history_response.json()
            if prompt_id in history:
                node_outputs = history[prompt_id].get("outputs", {})
                for node_output in node_outputs.values():
                    for image_info in node_output.get("images", []):
                        params = {
                            "filename": image_info["filename"],
                            "subfolder": image_info.get("subfolder", ""),
                            "type": image_info.get("type", "output"),
                        }
                        image_response = requests.get(f"{server}/view", params=params, timeout=60)
                        image_response.raise_for_status()
                        return image_response.content
        time.sleep(1)
    raise RuntimeError("Timed out waiting for ComfyUI image generation.")
|
|
|
|
def _placeholder(prompt: str, width: int, height: int) -> bytes:
    """Render a deterministic cinematic-style placeholder PNG for *prompt*.

    Used when real ComfyUI generation is unavailable so the PPT pipeline
    remains testable. The same prompt always yields the same image.
    """
    # hash() is salted per process (PYTHONHASHSEED), which silently broke the
    # determinism promised by the module docstring; seed from a stable
    # SHA-256 digest of the prompt instead.
    seed = int.from_bytes(hashlib.sha256(prompt.encode("utf-8")).digest()[:4], "big")
    rng = random.Random(seed)

    img = Image.new("RGB", (width, height), (9, 12, 24))
    draw = ImageDraw.Draw(img, "RGBA")
    # Dark blue / crimson / gold palette for the "cinematic" mood.
    palette = [(28, 38, 70), (52, 67, 105), (121, 33, 48), (186, 137, 64), (30, 90, 96)]
    # Scatter translucent blurred blobs as the background texture.
    for i in range(26):
        color = palette[i % len(palette)] + (rng.randint(40, 120),)
        x0 = rng.randint(-width // 4, width)
        y0 = rng.randint(-height // 4, height)
        r = rng.randint(max(60, width // 12), max(120, width // 3))
        draw.ellipse((x0 - r, y0 - r, x0 + r, y0 + r), fill=color)
    img = img.filter(ImageFilter.GaussianBlur(radius=max(6, width // 160)))
    draw = ImageDraw.Draw(img, "RGBA")  # re-bind: the blur returned a new image

    # Vignette: concentric rectangles fading from the border inward.
    for i in range(80):
        alpha = int(2.2 * i)
        draw.rectangle((i, i, width - i, height - i), outline=(0, 0, 0, max(0, 120 - alpha)), width=2)

    title = "ComfyUI local"
    subtitle = prompt[:110] + ("..." if len(prompt) > 110 else "")
    try:
        font_big = ImageFont.truetype("DejaVuSans-Bold.ttf", max(32, width // 18))
        font_small = ImageFont.truetype("DejaVuSans.ttf", max(16, width // 42))
    except OSError:
        # DejaVu fonts not installed; fall back to PIL's built-in bitmap font.
        font_big = ImageFont.load_default()
        font_small = ImageFont.load_default()

    # Caption box along the bottom edge with title and truncated prompt.
    pad = max(28, width // 28)
    box_h = max(120, height // 4)
    draw.rounded_rectangle((pad, height - box_h - pad, width - pad, height - pad), radius=24, fill=(0, 0, 0, 120), outline=(212, 171, 92, 180), width=2)
    draw.text((pad * 1.5, height - box_h - pad + 26), title, fill=(245, 226, 180, 255), font=font_big)
    draw.text((pad * 1.5, height - box_h - pad + 86), subtitle, fill=(235, 238, 245, 230), font=font_small)

    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return buf.getvalue()
|
|
|
|
def generate(prompt: str, aspect_ratio: str = "1:1", image_size: str = "1K", output_dir: str | None = None, filename: str | None = None, model: str | None = None, max_retries: int = MAX_RETRIES) -> str:
    """Generate one image for *prompt* and return the saved file path.

    Tries real ComfyUI generation first (auto-starting the server when
    configured). If that fails and COMFYUI_FALLBACK is "placeholder" (the
    default), a deterministic local placeholder PNG is written instead so the
    PPT pipeline keeps working.

    Args:
        prompt: Text prompt for the image.
        aspect_ratio: One of VALID_ASPECT_RATIOS.
        image_size: One of VALID_IMAGE_SIZES (normalized first).
        output_dir: Optional directory for the output file.
        filename: Optional output filename.
        model: Optional checkpoint name overriding COMFYUI_MODEL.
        max_retries: Unused here; accepted for interface parity with the
            other image backends — TODO confirm against backend_common.

    Returns:
        Path of the saved PNG file.

    Raises:
        ValueError: for an unsupported aspect ratio or image size.
        RuntimeError: when generation fails and the placeholder fallback is disabled.
    """
    if aspect_ratio not in VALID_ASPECT_RATIOS:
        raise ValueError(f"Invalid aspect ratio '{aspect_ratio}'. Valid: {VALID_ASPECT_RATIOS}")
    image_size = normalize_image_size(image_size)
    if image_size not in VALID_IMAGE_SIZES:
        raise ValueError(f"Invalid image size '{image_size}'. Valid: {VALID_IMAGE_SIZES}")
    width, height = _dimensions(aspect_ratio, image_size)
    path = resolve_output_path(prompt, output_dir, filename, ".png")
    server = _server()
    negative = os.environ.get("COMFYUI_NEGATIVE_PROMPT", "low quality, blurry, watermark, distorted text")

    # Console summary of the effective configuration for this request.
    print("[ComfyUI]")
    print(f" Server: {server}")
    print(f" Model: {model or os.environ.get('COMFYUI_MODEL') or DEFAULT_MODEL}")
    print(f" Prompt: {prompt[:120]}{'...' if len(prompt) > 120 else ''}")
    print(f" Aspect Ratio: {aspect_ratio}")
    print(f" Size: {width}x{height}")

    try:
        # Happy path: real server reachable (or successfully auto-started).
        if _start_server_if_needed(server):
            workflow = _load_workflow(prompt, negative, width, height, model)
            image_bytes = _queue_and_fetch(server, workflow)
            return save_image_bytes(image_bytes, path)
        raise RuntimeError("ComfyUI server is not reachable.")
    except Exception as exc:
        # Any failure (unreachable server, bad workflow, timeout) falls back
        # to a local placeholder unless the operator disabled it.
        if os.environ.get("COMFYUI_FALLBACK", "placeholder").strip().lower() == "placeholder":
            print(f" [WARN] Real ComfyUI generation unavailable: {exc}")
            print(" [WARN] Writing local placeholder image so the PPT workflow remains testable.")
            return save_image_bytes(_placeholder(prompt, width, height), path)
        raise RuntimeError(str(exc)) from exc
|
|