#!/usr/bin/env python3
"""
ComfyUI local image generation backend for PPT Master.

Connects to a running ComfyUI server and generates real images via the API.
Entry point is :func:`generate`, called by image_gen.py.
"""
import io, json, os, random, subprocess, sys, time, uuid, threading, zlib
from pathlib import Path

import requests
from PIL import Image, ImageDraw, ImageFont, ImageFilter

from image_backends.backend_common import (
    MAX_RETRIES, normalize_image_size, resolve_output_path, save_image_bytes
)

DEFAULT_SERVER = "http://127.0.0.1:8188"
VALID_ASPECT_RATIOS = ["1:1","2:3","3:2","3:4","4:3","4:5","5:4","9:16","16:9","21:9"]
VALID_IMAGE_SIZES = ["512px","1K","2K","4K","0.5K"]


def _bool_env(name, default=False):
    """Read env var *name* as a boolean ("1"/"true"/"yes"/"y"/"on" are truthy)."""
    v = os.environ.get(name)
    if v is None:
        return default
    return v.strip().lower() in ("1","true","yes","y","on")


def _server():
    """Return the ComfyUI server URL (COMFYUI_SERVER env or default), no trailing slash."""
    return os.environ.get("COMFYUI_SERVER", DEFAULT_SERVER).rstrip("/")


def _ping(server):
    """Return True if the ComfyUI server answers /system_stats within 3s."""
    try:
        r = requests.get(f"{server}/system_stats", timeout=3)
        return r.status_code == 200
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP failures mean "offline";
        # KeyboardInterrupt etc. must propagate.
        return False


def _dimensions(aspect_ratio, image_size):
    """Map (aspect_ratio, image_size) to pixel (width, height).

    The long edge is fixed by image_size; the short edge follows the ratio.
    Both are rounded down to multiples of 8 (SD checkpoint requirement) with
    a floor of 256px.
    """
    image_size = normalize_image_size(image_size)
    long_edge = {"0.5K":512,"512px":512,"1K":1024,"2K":1536,"4K":2048}.get(image_size, 1024)
    w_r, h_r = [int(x) for x in aspect_ratio.split(":")]
    if w_r >= h_r:
        width, height = long_edge, round(long_edge * h_r / w_r)
    else:
        height, width = long_edge, round(long_edge * w_r / h_r)
    # SD checkpoints want multiples of 8
    width = max(256, (width // 8) * 8)
    height = max(256, (height // 8) * 8)
    return width, height


def _build_workflow(prompt, negative, width, height, model):
    """Build a standard txt2img workflow for ComfyUI API.

    If COMFYUI_WORKFLOW points at a JSON file, it is used as a template with
    {{prompt}}/{{negative_prompt}}/{{width}}/{{height}}/{{seed}}/{{model}}
    placeholders substituted. Otherwise a default checkpoint workflow is
    built, which requires *model* (COMFYUI_MODEL) to be set.

    Raises:
        RuntimeError: if neither a custom workflow nor a model is configured.
    """
    seed = random.randint(1, 2**31 - 1)

    # Check if user has a custom workflow JSON
    workflow_path = os.environ.get("COMFYUI_WORKFLOW")
    if workflow_path and Path(workflow_path).exists():
        data = json.loads(Path(workflow_path).read_text(encoding="utf-8"))
        # Substitute placeholders textually on the serialized JSON so they
        # work anywhere in the template (keys, nested values, ...).
        raw = json.dumps(data)
        for k, v in {"{{prompt}}": prompt, "{{negative_prompt}}": negative,
                     "{{width}}": str(width), "{{height}}": str(height),
                     "{{seed}}": str(seed), "{{model}}": model or ""}.items():
            raw = raw.replace(k, v)
        return json.loads(raw)

    # Default workflow: CheckpointLoader -> CLIP -> KSampler -> VAEDecode -> SaveImage
    if not model:
        raise RuntimeError(
            "No COMFYUI_MODEL or COMFYUI_WORKFLOW set. "
            "Set COMFYUI_MODEL to the checkpoint filename in ComfyUI/models/checkpoints/."
        )
    return {
        "1": {
            "class_type": "CheckpointLoaderSimple",
            "inputs": {"ckpt_name": model}
        },
        "2": {
            "class_type": "CLIPTextEncode",
            "inputs": {"text": prompt, "clip": ["1", 1]}
        },
        "3": {
            "class_type": "CLIPTextEncode",
            "inputs": {"text": negative, "clip": ["1", 1]}
        },
        "4": {
            "class_type": "EmptyLatentImage",
            "inputs": {"width": width, "height": height, "batch_size": 1}
        },
        "5": {
            "class_type": "KSampler",
            "inputs": {
                "seed": seed,
                "steps": 25,
                "cfg": 7.0,
                "sampler_name": "euler_ancestral",
                "scheduler": "normal",
                "denoise": 1.0,
                "model": ["1", 0],
                "positive": ["2", 0],
                "negative": ["3", 0],
                "latent_image": ["4", 0]
            }
        },
        "6": {
            "class_type": "VAEDecode",
            "inputs": {"samples": ["5", 0], "vae": ["1", 2]}
        },
        "7": {
            "class_type": "SaveImage",
            "inputs": {"filename_prefix": "pptmaster", "images": ["6", 0]}
        }
    }


def _queue_and_wait(server, workflow, timeout=600):
    """Queue prompt on ComfyUI and wait for the result image.

    Polls /history every 1.5s until the prompt's outputs appear, then
    downloads and returns the first image's bytes via /view.

    Raises:
        RuntimeError: on a /prompt HTTP error, on completion without images,
            or after *timeout* seconds without a result.
    """
    client_id = str(uuid.uuid4())

    # Queue the prompt
    resp = requests.post(
        f"{server}/prompt",
        json={"prompt": workflow, "client_id": client_id},
        timeout=30
    )
    if resp.status_code >= 400:
        raise RuntimeError(f"ComfyUI /prompt error {resp.status_code}: {resp.text[:500]}")
    prompt_id = resp.json()["prompt_id"]
    print(f"  Queued prompt_id: {prompt_id}")

    # Poll history until done
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            hist = requests.get(f"{server}/history/{prompt_id}", timeout=10)
            if hist.status_code == 200:
                data = hist.json()
                if prompt_id in data:
                    outputs = data[prompt_id].get("outputs", {})
                    for node_output in outputs.values():
                        images = node_output.get("images", [])
                        for img_info in images:
                            # Download the generated image
                            params = {
                                "filename": img_info["filename"],
                                "subfolder": img_info.get("subfolder", ""),
                                "type": img_info.get("type", "output")
                            }
                            img_resp = requests.get(f"{server}/view", params=params, timeout=60)
                            img_resp.raise_for_status()
                            return img_resp.content
                    # If outputs exist but no images, there was an error
                    if outputs:
                        raise RuntimeError(f"ComfyUI completed but no images in output: {json.dumps(outputs)[:500]}")
        except requests.RequestException:
            # Transient poll failure (server busy / brief network hiccup):
            # keep polling until the deadline.
            pass
        time.sleep(1.5)

    raise RuntimeError(f"Timeout ({timeout}s) waiting for ComfyUI generation.")


def _placeholder(prompt, width, height):
    """Fallback: generate a deterministic placeholder image.

    Returns PNG bytes. Seeded from crc32(prompt) — NOT hash(prompt), which is
    salted per process (PYTHONHASHSEED) and would make the "deterministic"
    placeholder differ between runs.
    """
    rng = random.Random(zlib.crc32(prompt.encode("utf-8")) & 0xFFFFFFFF)
    img = Image.new("RGB", (width, height), (9, 12, 24))
    d = ImageDraw.Draw(img, "RGBA")
    pal = [(28,38,70),(52,67,105),(121,33,48),(186,137,64),(30,90,96),(88,20,60)]
    # Soft translucent blobs for an abstract background
    for i in range(30):
        c = pal[i % len(pal)] + (rng.randint(50, 140),)
        x0, y0 = rng.randint(-width//4, width), rng.randint(-height//4, height)
        r = rng.randint(max(60, width//10), max(160, width//3))
        d.ellipse((x0-r, y0-r, x0+r, y0+r), fill=c)
    img = img.filter(ImageFilter.GaussianBlur(radius=max(8, width//100)))
    d = ImageDraw.Draw(img, "RGBA")
    # Darkening vignette: concentric rectangles fading toward the center
    for i in range(60):
        d.rectangle((i, i, width-i, height-i), outline=(0,0,0,max(0,140-int(2.5*i))), width=2)
    try:
        fb = ImageFont.truetype("DejaVuSans-Bold.ttf", max(28, width//22))
        fs = ImageFont.truetype("DejaVuSans.ttf", max(14, width//50))
    except OSError:
        # Font files not installed on this system — fall back to PIL's builtin.
        fb = fs = ImageFont.load_default()
    pad = max(24, width//30)
    bh = max(100, height//5)
    d.rounded_rectangle((pad, height-bh-pad, width-pad, height-pad), radius=18,
                        fill=(0,0,0,130), outline=(200,160,80,200), width=2)
    d.text((pad*1.6, height-bh-pad+18), "PLACEHOLDER (ComfyUI offline)",
           fill=(240,220,170,255), font=fb)
    d.text((pad*1.6, height-bh-pad+60), prompt[:120],
           fill=(230,235,245,220), font=fs)
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return buf.getvalue()


def generate(prompt, aspect_ratio="1:1", image_size="1K",
             output_dir=None, filename=None, model=None, max_retries=MAX_RETRIES):
    """Main entry point called by image_gen.py.

    Generates an image via a running ComfyUI server and saves it as PNG.

    Args:
        prompt: text prompt for the image.
        aspect_ratio: one of VALID_ASPECT_RATIOS.
        image_size: one of VALID_IMAGE_SIZES (after normalization).
        output_dir / filename: forwarded to resolve_output_path.
        model: checkpoint filename; falls back to COMFYUI_MODEL env.
        max_retries: accepted for backend-interface compatibility
            (currently unused by this backend).

    Returns:
        The saved image path (whatever save_image_bytes returns).

    Raises:
        ValueError: invalid aspect_ratio or image_size.
        RuntimeError: server unreachable or generation failed, unless
            COMFYUI_FALLBACK=placeholder, in which case a placeholder
            image is saved instead.
    """
    if aspect_ratio not in VALID_ASPECT_RATIOS:
        raise ValueError(f"Invalid aspect ratio '{aspect_ratio}'. Valid: {VALID_ASPECT_RATIOS}")
    image_size = normalize_image_size(image_size)
    if image_size not in VALID_IMAGE_SIZES:
        raise ValueError(f"Invalid image size '{image_size}'. Valid: {VALID_IMAGE_SIZES}")

    width, height = _dimensions(aspect_ratio, image_size)
    path = resolve_output_path(prompt, output_dir, filename, ".png")
    server = _server()
    model = model or os.environ.get("COMFYUI_MODEL")
    negative = os.environ.get("COMFYUI_NEGATIVE_PROMPT",
                              "low quality, blurry, watermark, text, distorted, deformed, ugly, nsfw")
    fallback = os.environ.get("COMFYUI_FALLBACK", "error").strip().lower()

    print("[ComfyUI]")  # plain string: no placeholders, f-prefix removed
    print(f"  Server: {server}")
    print(f"  Model: {model or '(not set)'}")
    print(f"  Prompt: {prompt[:100]}{'...' if len(prompt)>100 else ''}")
    print(f"  Size: {width}x{height}")
    print(f"  Aspect Ratio: {aspect_ratio}")
    print()

    # Check if server is reachable
    if not _ping(server):
        msg = (f"ComfyUI server not reachable at {server}.\n"
               f"  Launch ComfyUI first: python main.py --listen 127.0.0.1 --port 8188")
        if fallback == "placeholder":
            print(f"  [WARN] {msg}")
            print(f"  [WARN] Using placeholder image.")
            return save_image_bytes(_placeholder(prompt, width, height), path)
        raise RuntimeError(msg)

    # Build and queue workflow
    try:
        workflow = _build_workflow(prompt, negative, width, height, model)
        start = time.time()
        print(f"  Generating...", end="", flush=True)

        # Heartbeat: print elapsed time every 5s while we block on the server
        stop_evt = threading.Event()

        def heartbeat():
            while not stop_evt.is_set():
                stop_evt.wait(5)
                if not stop_evt.is_set():
                    print(f" {time.time()-start:.0f}s...", end="", flush=True)

        hb = threading.Thread(target=heartbeat, daemon=True)
        hb.start()
        try:
            image_bytes = _queue_and_wait(server, workflow)
        finally:
            stop_evt.set()
            hb.join(timeout=1)

        elapsed = time.time() - start
        print(f"\n  [DONE] Generated in {elapsed:.1f}s")
        return save_image_bytes(image_bytes, path)
    except Exception as e:
        if fallback == "placeholder":
            print(f"\n  [WARN] Generation failed: {e}")
            print(f"  [WARN] Using placeholder image.")
            return save_image_bytes(_placeholder(prompt, width, height), path)
        raise