#!/usr/bin/env python3
"""
ComfyUI local image generation backend.
This backend talks to a local ComfyUI HTTP server and is designed to be the
PPT Master default replacement for cloud image providers.
Configuration keys:
    COMFYUI_SERVER           Optional, default http://127.0.0.1:8188
    COMFYUI_WORKFLOW         Optional path to a ComfyUI API workflow JSON
    COMFYUI_MODEL            Optional checkpoint name for the fallback workflow
    COMFYUI_AUTO_START       Optional true/false, default true
    COMFYUI_DIR              Optional path to a ComfyUI checkout, default /app/ComfyUI
    COMFYUI_PYTHON           Optional python executable for the ComfyUI server
    COMFYUI_EXTRA_ARGS       Optional extra CLI args for the ComfyUI server (e.g. --cpu)
    COMFYUI_LOG              Optional server log path, default /app/comfyui.log
    COMFYUI_NEGATIVE_PROMPT  Optional negative prompt override
If ComfyUI is unavailable or has no usable model/workflow (for example, in a
CPU-only sandbox), set COMFYUI_FALLBACK=placeholder (the default) to create a
deterministic local cinematic placeholder image instead of failing the PPT pipeline.
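
Illustrative usage sketch (the module/import path below is an assumption; adjust
it to wherever this file lives inside the image_backends package):

    from image_backends import comfyui_backend
    path = comfyui_backend.generate(
        "a misty mountain valley at dawn, cinematic lighting",
        aspect_ratio="16:9",
        image_size="1K",
    )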
"""
from __future__ import annotations
import hashlib
import io
import json
import os
import random
import subprocess
import sys
import time
import uuid
from pathlib import Path
from typing import Any
import requests
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from image_backends.backend_common import MAX_RETRIES, normalize_image_size, resolve_output_path, save_image_bytes
DEFAULT_SERVER = "http://127.0.0.1:8188"
DEFAULT_MODEL = os.environ.get("COMFYUI_MODEL", "local-comfyui")
VALID_ASPECT_RATIOS = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]
VALID_IMAGE_SIZES = ["512px", "1K", "2K", "4K", "0.5K"]
def _bool_env(name: str, default: bool) -> bool:
value = os.environ.get(name)
if value is None:
return default
return value.strip().lower() in {"1", "true", "yes", "y", "on"}
def _server() -> str:
return os.environ.get("COMFYUI_SERVER", DEFAULT_SERVER).rstrip("/")
def _ping(server: str) -> bool:
try:
response = requests.get(f"{server}/system_stats", timeout=2)
return response.status_code == 200
except requests.RequestException:
return False
def _start_server_if_needed(server: str) -> bool:
if _ping(server):
return True
if not _bool_env("COMFYUI_AUTO_START", True):
return False
comfy_dir = Path(os.environ.get("COMFYUI_DIR", "/app/ComfyUI"))
main_py = comfy_dir / "main.py"
if not main_py.exists():
return False
python_exe = os.environ.get("COMFYUI_PYTHON", sys.executable)
log_path = Path(os.environ.get("COMFYUI_LOG", "/app/comfyui.log"))
log_fh = log_path.open("ab")
# GPU by default: do NOT force --cpu. This lets ComfyUI use CUDA on
# machines with an NVIDIA GPU such as RTX 5070 Ti. If CPU mode is desired,
# set COMFYUI_EXTRA_ARGS=--cpu explicitly.
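    # Note: the auto-started process always binds 127.0.0.1:8188, so keep
    # COMFYUI_SERVER consistent with that address when relying on auto-start.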
extra_args = os.environ.get("COMFYUI_EXTRA_ARGS", "").split()
subprocess.Popen(
[python_exe, str(main_py), "--listen", "127.0.0.1", "--port", "8188", *extra_args],
cwd=str(comfy_dir),
stdout=log_fh,
stderr=subprocess.STDOUT,
start_new_session=True,
)
for _ in range(60):
if _ping(server):
return True
time.sleep(1)
return False
def _dimensions(aspect_ratio: str, image_size: str) -> tuple[int, int]:
image_size = normalize_image_size(image_size)
long_edge = {"0.5K": 512, "512px": 512, "1K": 1024, "2K": 1536, "4K": 2048}.get(image_size, 1024)
w_ratio, h_ratio = [int(x) for x in aspect_ratio.split(":")]
if w_ratio >= h_ratio:
width = long_edge
height = round(long_edge * h_ratio / w_ratio)
else:
height = long_edge
width = round(long_edge * w_ratio / h_ratio)
# Stable Diffusion checkpoints usually want multiples of 8.
width = max(256, int(round(width / 8) * 8))
height = max(256, int(round(height / 8) * 8))
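    # Worked example: aspect_ratio="16:9", image_size="1K" -> long_edge=1024,
    # height=round(1024*9/16)=576 (already a multiple of 8) -> (1024, 576).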
return width, height
def _load_workflow(prompt: str, negative_prompt: str, width: int, height: int, model: str | None) -> dict[str, Any]:
workflow_path = os.environ.get("COMFYUI_WORKFLOW")
if workflow_path:
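        # The workflow JSON may contain {{prompt}}, {{negative_prompt}}, {{width}},
        # {{height}}, {{seed}} and {{model}} placeholders; each is substituted as
        # plain text before the graph is queued.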
data = json.loads(Path(workflow_path).read_text(encoding="utf-8"))
raw = json.dumps(data)
replacements = {
"{{prompt}}": prompt,
"{{negative_prompt}}": negative_prompt,
"{{width}}": str(width),
"{{height}}": str(height),
"{{seed}}": str(random.randint(1, 2**31 - 1)),
"{{model}}": model or os.environ.get("COMFYUI_MODEL", ""),
}
for k, v in replacements.items():
raw = raw.replace(k, v)
return json.loads(raw)
ckpt = model or os.environ.get("COMFYUI_MODEL")
if not ckpt:
raise RuntimeError("No COMFYUI_WORKFLOW or COMFYUI_MODEL configured for real ComfyUI generation.")
return {
"1": {"class_type": "CheckpointLoaderSimple", "inputs": {"ckpt_name": ckpt}},
"2": {"class_type": "CLIPTextEncode", "inputs": {"text": prompt, "clip": ["1", 1]}},
"3": {"class_type": "CLIPTextEncode", "inputs": {"text": negative_prompt, "clip": ["1", 1]}},
"4": {"class_type": "EmptyLatentImage", "inputs": {"width": width, "height": height, "batch_size": 1}},
"5": {"class_type": "KSampler", "inputs": {"seed": random.randint(1, 2**31 - 1), "steps": 24, "cfg": 7.0, "sampler_name": "euler", "scheduler": "normal", "denoise": 1.0, "model": ["1", 0], "positive": ["2", 0], "negative": ["3", 0], "latent_image": ["4", 0]}},
"6": {"class_type": "VAEDecode", "inputs": {"samples": ["5", 0], "vae": ["1", 2]}},
"7": {"class_type": "SaveImage", "inputs": {"filename_prefix": "pptmaster", "images": ["6", 0]}},
}
def _queue_and_fetch(server: str, workflow: dict[str, Any]) -> bytes:
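    # ComfyUI HTTP flow: POST the workflow to /prompt, poll /history/<prompt_id>
    # until outputs appear, then download each produced image via /view.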
client_id = str(uuid.uuid4())
queue_response = requests.post(f"{server}/prompt", json={"prompt": workflow, "client_id": client_id}, timeout=30)
if queue_response.status_code >= 400:
raise RuntimeError(f"ComfyUI /prompt failed {queue_response.status_code}: {queue_response.text[:1000]}")
prompt_id = queue_response.json()["prompt_id"]
deadline = time.time() + 600
while time.time() < deadline:
hist = requests.get(f"{server}/history/{prompt_id}", timeout=10)
if hist.status_code == 200:
data = hist.json()
if prompt_id in data:
outputs = data[prompt_id].get("outputs", {})
for output in outputs.values():
for image in output.get("images", []):
params = {"filename": image["filename"], "subfolder": image.get("subfolder", ""), "type": image.get("type", "output")}
img = requests.get(f"{server}/view", params=params, timeout=60)
img.raise_for_status()
return img.content
time.sleep(1)
raise RuntimeError("Timed out waiting for ComfyUI image generation.")
def _placeholder(prompt: str, width: int, height: int) -> bytes:
# Local deterministic fallback keeps the full PPT Master pipeline testable in
# CPU-only/headless environments where model weights cannot be installed.
    # hash() is salted per process (PYTHONHASHSEED); seed from a stable digest instead.
    rng = random.Random(int.from_bytes(hashlib.sha256(prompt.encode("utf-8")).digest()[:4], "big"))
img = Image.new("RGB", (width, height), (9, 12, 24))
draw = ImageDraw.Draw(img, "RGBA")
palette = [(28, 38, 70), (52, 67, 105), (121, 33, 48), (186, 137, 64), (30, 90, 96)]
for i in range(26):
color = palette[i % len(palette)] + (rng.randint(40, 120),)
x0 = rng.randint(-width // 4, width)
y0 = rng.randint(-height // 4, height)
r = rng.randint(max(60, width // 12), max(120, width // 3))
draw.ellipse((x0 - r, y0 - r, x0 + r, y0 + r), fill=color)
img = img.filter(ImageFilter.GaussianBlur(radius=max(6, width // 160)))
draw = ImageDraw.Draw(img, "RGBA")
# subtle vignette
for i in range(80):
alpha = int(2.2 * i)
draw.rectangle((i, i, width - i, height - i), outline=(0, 0, 0, max(0, 120 - alpha)), width=2)
title = "ComfyUI local"
subtitle = prompt[:110] + ("..." if len(prompt) > 110 else "")
try:
font_big = ImageFont.truetype("DejaVuSans-Bold.ttf", max(32, width // 18))
font_small = ImageFont.truetype("DejaVuSans.ttf", max(16, width // 42))
except OSError:
font_big = ImageFont.load_default()
font_small = ImageFont.load_default()
pad = max(28, width // 28)
box_h = max(120, height // 4)
draw.rounded_rectangle((pad, height - box_h - pad, width - pad, height - pad), radius=24, fill=(0, 0, 0, 120), outline=(212, 171, 92, 180), width=2)
draw.text((pad * 1.5, height - box_h - pad + 26), title, fill=(245, 226, 180, 255), font=font_big)
draw.text((pad * 1.5, height - box_h - pad + 86), subtitle, fill=(235, 238, 245, 230), font=font_small)
buf = io.BytesIO()
img.save(buf, format="PNG")
return buf.getvalue()
def generate(prompt: str, aspect_ratio: str = "1:1", image_size: str = "1K", output_dir: str | None = None, filename: str | None = None, model: str | None = None, max_retries: int = MAX_RETRIES) -> str:
if aspect_ratio not in VALID_ASPECT_RATIOS:
raise ValueError(f"Invalid aspect ratio '{aspect_ratio}'. Valid: {VALID_ASPECT_RATIOS}")
image_size = normalize_image_size(image_size)
if image_size not in VALID_IMAGE_SIZES:
raise ValueError(f"Invalid image size '{image_size}'. Valid: {VALID_IMAGE_SIZES}")
width, height = _dimensions(aspect_ratio, image_size)
path = resolve_output_path(prompt, output_dir, filename, ".png")
server = _server()
negative = os.environ.get("COMFYUI_NEGATIVE_PROMPT", "low quality, blurry, watermark, distorted text")
print("[ComfyUI]")
print(f" Server: {server}")
print(f" Model: {model or os.environ.get('COMFYUI_MODEL') or DEFAULT_MODEL}")
print(f" Prompt: {prompt[:120]}{'...' if len(prompt) > 120 else ''}")
print(f" Aspect Ratio: {aspect_ratio}")
print(f" Size: {width}x{height}")
try:
if _start_server_if_needed(server):
workflow = _load_workflow(prompt, negative, width, height, model)
image_bytes = _queue_and_fetch(server, workflow)
return save_image_bytes(image_bytes, path)
raise RuntimeError("ComfyUI server is not reachable.")
except Exception as exc:
if os.environ.get("COMFYUI_FALLBACK", "placeholder").strip().lower() == "placeholder":
print(f" [WARN] Real ComfyUI generation unavailable: {exc}")
print(" [WARN] Writing local placeholder image so the PPT workflow remains testable.")
return save_image_bytes(_placeholder(prompt, width, height), path)
raise RuntimeError(str(exc)) from exc
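

# Minimal manual smoke test (illustrative; the demo prompt is an arbitrary example).
# Running this file directly generates one image through ComfyUI, or the local
# placeholder fallback when no server/model is available.
if __name__ == "__main__":
    demo_path = generate(
        "futuristic city skyline at dusk, volumetric light",
        aspect_ratio="16:9",
        image_size="1K",
    )
    print(f"Saved demo image to {demo_path}")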