Albator2570 committed on
Commit
4eb549a
·
verified ·
1 Parent(s): f6eeb4a

Upload backend_comfyui.py

Browse files
Files changed (1) hide show
  1. backend_comfyui.py +234 -0
backend_comfyui.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ ComfyUI local image generation backend.
4
+
5
+ This backend talks to a local ComfyUI HTTP server and is designed to be the
6
+ PPT Master default replacement for cloud image providers.
7
+
8
+ Configuration keys:
9
+ COMFYUI_SERVER Optional, default http://127.0.0.1:8188
10
+ COMFYUI_WORKFLOW Optional path to a ComfyUI API workflow JSON
11
+ COMFYUI_MODEL Optional checkpoint name for the fallback workflow
12
+ COMFYUI_AUTO_START Optional true/false, default true
13
+ COMFYUI_DIR Optional path to ComfyUI checkout, default /app/ComfyUI
14
+ COMFYUI_PYTHON Optional python executable for ComfyUI server
15
+
16
+ If ComfyUI is unavailable or has no usable model/workflow in this CPU sandbox,
17
+ set COMFYUI_FALLBACK=placeholder (default) to create a deterministic local
18
+ cinematic placeholder image instead of failing the PPT pipeline.
19
+ """
20
+
21
from __future__ import annotations

import hashlib
import io
import json
import os
import random
import subprocess
import sys
import time
import uuid
from pathlib import Path
from typing import Any

import requests
from PIL import Image, ImageDraw, ImageFont, ImageFilter

from image_backends.backend_common import MAX_RETRIES, normalize_image_size, resolve_output_path, save_image_bytes
38
+
39
+ DEFAULT_SERVER = "http://127.0.0.1:8188"
40
+ DEFAULT_MODEL = os.environ.get("COMFYUI_MODEL", "local-comfyui")
41
+ VALID_ASPECT_RATIOS = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]
42
+ VALID_IMAGE_SIZES = ["512px", "1K", "2K", "4K", "0.5K"]
43
+
44
+
45
+ def _bool_env(name: str, default: bool) -> bool:
46
+ value = os.environ.get(name)
47
+ if value is None:
48
+ return default
49
+ return value.strip().lower() in {"1", "true", "yes", "y", "on"}
50
+
51
+
52
def _server() -> str:
    """Return the ComfyUI base URL, honouring COMFYUI_SERVER, without a trailing slash."""
    configured = os.environ.get("COMFYUI_SERVER", DEFAULT_SERVER)
    return configured.rstrip("/")
54
+
55
+
56
def _ping(server: str) -> bool:
    """Return True when the ComfyUI HTTP API at *server* answers /system_stats."""
    try:
        status = requests.get(f"{server}/system_stats", timeout=2).status_code
    except requests.RequestException:
        # Connection refused / timeout / DNS failure all mean "not reachable".
        return False
    return status == 200
62
+
63
+
64
def _start_server_if_needed(server: str) -> bool:
    """Ensure a ComfyUI server is reachable, auto-starting a local one if allowed.

    Returns True when the server answers a ping (possibly after launching it
    and waiting up to 60 seconds), False when it cannot be reached and
    auto-start is disabled, unconfigured, or fails.
    """
    if _ping(server):
        return True
    if not _bool_env("COMFYUI_AUTO_START", True):
        return False

    comfy_dir = Path(os.environ.get("COMFYUI_DIR", "/app/ComfyUI"))
    main_py = comfy_dir / "main.py"
    if not main_py.exists():
        return False

    python_exe = os.environ.get("COMFYUI_PYTHON", sys.executable)
    log_path = Path(os.environ.get("COMFYUI_LOG", "/app/comfyui.log"))
    # GPU by default: do NOT force --cpu. This lets ComfyUI use CUDA on
    # machines with an NVIDIA GPU such as RTX 5070 Ti. If CPU mode is desired,
    # set COMFYUI_EXTRA_ARGS=--cpu explicitly.
    extra_args = os.environ.get("COMFYUI_EXTRA_ARGS", "").split()
    # NOTE(review): the child always binds 127.0.0.1:8188 even if
    # COMFYUI_SERVER points elsewhere — confirm this is intentional.
    #
    # Open the log via a context manager: the child inherits its own duplicate
    # of the handle at Popen time, so the parent can (and should) close its
    # copy immediately. The original leaked one fd per start attempt.
    with log_path.open("ab") as log_fh:
        subprocess.Popen(
            [python_exe, str(main_py), "--listen", "127.0.0.1", "--port", "8188", *extra_args],
            cwd=str(comfy_dir),
            stdout=log_fh,
            stderr=subprocess.STDOUT,
            start_new_session=True,
        )
    # Poll for up to a minute while the server boots.
    for _ in range(60):
        if _ping(server):
            return True
        time.sleep(1)
    return False
94
+
95
+
96
def _dimensions(aspect_ratio: str, image_size: str) -> tuple[int, int]:
    """Translate an aspect-ratio string and size label into pixel dimensions.

    The long edge is fixed by the (normalized) size label and the short edge
    is derived from the ratio. Both edges are clamped to at least 256 and
    snapped to multiples of 8, the granularity Stable Diffusion checkpoints
    usually expect.
    """
    image_size = normalize_image_size(image_size)
    long_edges = {"0.5K": 512, "512px": 512, "1K": 1024, "2K": 1536, "4K": 2048}
    long_edge = long_edges.get(image_size, 1024)
    ratio_w, ratio_h = (int(part) for part in aspect_ratio.split(":"))
    if ratio_w >= ratio_h:
        width, height = long_edge, round(long_edge * ratio_h / ratio_w)
    else:
        width, height = round(long_edge * ratio_w / ratio_h), long_edge

    def snap(px: int) -> int:
        # Round to the nearest multiple of 8, never dropping below 256.
        return max(256, int(round(px / 8) * 8))

    return snap(width), snap(height)
110
+
111
+
112
+ def _load_workflow(prompt: str, negative_prompt: str, width: int, height: int, model: str | None) -> dict[str, Any]:
113
+ workflow_path = os.environ.get("COMFYUI_WORKFLOW")
114
+ if workflow_path:
115
+ data = json.loads(Path(workflow_path).read_text(encoding="utf-8"))
116
+ raw = json.dumps(data)
117
+ replacements = {
118
+ "{{prompt}}": prompt,
119
+ "{{negative_prompt}}": negative_prompt,
120
+ "{{width}}": str(width),
121
+ "{{height}}": str(height),
122
+ "{{seed}}": str(random.randint(1, 2**31 - 1)),
123
+ "{{model}}": model or os.environ.get("COMFYUI_MODEL", ""),
124
+ }
125
+ for k, v in replacements.items():
126
+ raw = raw.replace(k, v)
127
+ return json.loads(raw)
128
+
129
+ ckpt = model or os.environ.get("COMFYUI_MODEL")
130
+ if not ckpt:
131
+ raise RuntimeError("No COMFYUI_WORKFLOW or COMFYUI_MODEL configured for real ComfyUI generation.")
132
+
133
+ return {
134
+ "1": {"class_type": "CheckpointLoaderSimple", "inputs": {"ckpt_name": ckpt}},
135
+ "2": {"class_type": "CLIPTextEncode", "inputs": {"text": prompt, "clip": ["1", 1]}},
136
+ "3": {"class_type": "CLIPTextEncode", "inputs": {"text": negative_prompt, "clip": ["1", 1]}},
137
+ "4": {"class_type": "EmptyLatentImage", "inputs": {"width": width, "height": height, "batch_size": 1}},
138
+ "5": {"class_type": "KSampler", "inputs": {"seed": random.randint(1, 2**31 - 1), "steps": 24, "cfg": 7.0, "sampler_name": "euler", "scheduler": "normal", "denoise": 1.0, "model": ["1", 0], "positive": ["2", 0], "negative": ["3", 0], "latent_image": ["4", 0]}},
139
+ "6": {"class_type": "VAEDecode", "inputs": {"samples": ["5", 0], "vae": ["1", 2]}},
140
+ "7": {"class_type": "SaveImage", "inputs": {"filename_prefix": "pptmaster", "images": ["6", 0]}},
141
+ }
142
+
143
+
144
def _queue_and_fetch(server: str, workflow: dict[str, Any]) -> bytes:
    """Submit *workflow* to ComfyUI and return the first generated image's bytes.

    Queues the workflow via POST /prompt, then polls GET /history/<id> once a
    second for up to ten minutes, downloading the first image listed in the
    prompt's outputs through GET /view.

    Raises:
        RuntimeError: if the queue call fails or no image appears in time.
    """
    client_id = str(uuid.uuid4())
    submit = requests.post(
        f"{server}/prompt",
        json={"prompt": workflow, "client_id": client_id},
        timeout=30,
    )
    if submit.status_code >= 400:
        raise RuntimeError(f"ComfyUI /prompt failed {submit.status_code}: {submit.text[:1000]}")
    prompt_id = submit.json()["prompt_id"]

    deadline = time.time() + 600
    while time.time() < deadline:
        history = requests.get(f"{server}/history/{prompt_id}", timeout=10)
        if history.status_code == 200:
            payload = history.json()
            if prompt_id in payload:
                for node_output in payload[prompt_id].get("outputs", {}).values():
                    for image_info in node_output.get("images", []):
                        params = {
                            "filename": image_info["filename"],
                            "subfolder": image_info.get("subfolder", ""),
                            "type": image_info.get("type", "output"),
                        }
                        view = requests.get(f"{server}/view", params=params, timeout=60)
                        view.raise_for_status()
                        return view.content
        time.sleep(1)
    raise RuntimeError("Timed out waiting for ComfyUI image generation.")
166
+
167
+
168
def _placeholder(prompt: str, width: int, height: int) -> bytes:
    """Render a deterministic "cinematic" placeholder PNG and return its bytes.

    Local fallback that keeps the full PPT Master pipeline testable in
    CPU-only/headless environments where model weights cannot be installed.
    The image is derived solely from *prompt*, so the same prompt always
    yields the same picture.
    """
    # Seed from a stable digest of the prompt. The original used hash(prompt),
    # but Python salts str hashes per process (PYTHONHASHSEED), so the
    # "deterministic" placeholder actually changed on every run.
    seed = int.from_bytes(hashlib.sha256(prompt.encode("utf-8")).digest()[:8], "big")
    rng = random.Random(seed)
    img = Image.new("RGB", (width, height), (9, 12, 24))
    draw = ImageDraw.Draw(img, "RGBA")
    palette = [(28, 38, 70), (52, 67, 105), (121, 33, 48), (186, 137, 64), (30, 90, 96)]
    # Scatter translucent discs that the blur below turns into an abstract backdrop.
    for i in range(26):
        color = palette[i % len(palette)] + (rng.randint(40, 120),)
        x0 = rng.randint(-width // 4, width)
        y0 = rng.randint(-height // 4, height)
        r = rng.randint(max(60, width // 12), max(120, width // 3))
        draw.ellipse((x0 - r, y0 - r, x0 + r, y0 + r), fill=color)
    img = img.filter(ImageFilter.GaussianBlur(radius=max(6, width // 160)))
    draw = ImageDraw.Draw(img, "RGBA")
    # subtle vignette: concentric darkening frames fading toward the centre
    for i in range(80):
        alpha = int(2.2 * i)
        draw.rectangle((i, i, width - i, height - i), outline=(0, 0, 0, max(0, 120 - alpha)), width=2)
    title = "ComfyUI local"
    subtitle = prompt[:110] + ("..." if len(prompt) > 110 else "")
    try:
        font_big = ImageFont.truetype("DejaVuSans-Bold.ttf", max(32, width // 18))
        font_small = ImageFont.truetype("DejaVuSans.ttf", max(16, width // 42))
    except OSError:
        # DejaVu fonts are common on Linux but not guaranteed; degrade gracefully.
        font_big = ImageFont.load_default()
        font_small = ImageFont.load_default()
    pad = max(28, width // 28)
    box_h = max(120, height // 4)
    # Caption card with the prompt excerpt, so placeholders are identifiable in decks.
    draw.rounded_rectangle((pad, height - box_h - pad, width - pad, height - pad), radius=24, fill=(0, 0, 0, 120), outline=(212, 171, 92, 180), width=2)
    draw.text((pad * 1.5, height - box_h - pad + 26), title, fill=(245, 226, 180, 255), font=font_big)
    draw.text((pad * 1.5, height - box_h - pad + 86), subtitle, fill=(235, 238, 245, 230), font=font_small)
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return buf.getvalue()
203
+
204
+
205
def generate(prompt: str, aspect_ratio: str = "1:1", image_size: str = "1K", output_dir: str | None = None, filename: str | None = None, model: str | None = None, max_retries: int = MAX_RETRIES) -> str:
    """Generate an image through a local ComfyUI server and return the saved path.

    Parameters:
        prompt: text prompt for the image.
        aspect_ratio: one of VALID_ASPECT_RATIOS, e.g. "16:9".
        image_size: one of VALID_IMAGE_SIZES (after normalize_image_size).
        output_dir, filename: forwarded to resolve_output_path to pick the .png path.
        model: checkpoint override; falls back to COMFYUI_MODEL / DEFAULT_MODEL.
        max_retries: accepted for interface parity with other backends; not
            used by the body here.

    Raises:
        ValueError: invalid aspect_ratio or image_size.
        RuntimeError: generation failed and COMFYUI_FALLBACK is not "placeholder".
    """
    if aspect_ratio not in VALID_ASPECT_RATIOS:
        raise ValueError(f"Invalid aspect ratio '{aspect_ratio}'. Valid: {VALID_ASPECT_RATIOS}")
    image_size = normalize_image_size(image_size)
    if image_size not in VALID_IMAGE_SIZES:
        raise ValueError(f"Invalid image size '{image_size}'. Valid: {VALID_IMAGE_SIZES}")
    width, height = _dimensions(aspect_ratio, image_size)
    path = resolve_output_path(prompt, output_dir, filename, ".png")
    server = _server()
    negative = os.environ.get("COMFYUI_NEGATIVE_PROMPT", "low quality, blurry, watermark, distorted text")

    print("[ComfyUI]")
    print(f" Server: {server}")
    print(f" Model: {model or os.environ.get('COMFYUI_MODEL') or DEFAULT_MODEL}")
    print(f" Prompt: {prompt[:120]}{'...' if len(prompt) > 120 else ''}")
    print(f" Aspect Ratio: {aspect_ratio}")
    print(f" Size: {width}x{height}")

    try:
        # Real path: make sure the server is up (auto-starting it if allowed),
        # queue a workflow, and download the resulting image.
        if _start_server_if_needed(server):
            workflow = _load_workflow(prompt, negative, width, height, model)
            image_bytes = _queue_and_fetch(server, workflow)
            return save_image_bytes(image_bytes, path)
        raise RuntimeError("ComfyUI server is not reachable.")
    except Exception as exc:
        # Graceful degradation: with COMFYUI_FALLBACK=placeholder (the default)
        # a locally rendered placeholder keeps the PPT pipeline alive instead
        # of failing outright.
        if os.environ.get("COMFYUI_FALLBACK", "placeholder").strip().lower() == "placeholder":
            print(f" [WARN] Real ComfyUI generation unavailable: {exc}")
            print(" [WARN] Writing local placeholder image so the PPT workflow remains testable.")
            return save_image_bytes(_placeholder(prompt, width, height), path)
        raise RuntimeError(str(exc)) from exc