Spaces:
Running
Running
File size: 19,818 Bytes
173c19f 1cb4402 9ea44fd 1cb4402 9ea44fd e7a4ed3 d014e57 c223ae8 1cb4402 e7a4ed3 1cb4402 e7a4ed3 c223ae8 e90ff32 c223ae8 e90ff32 c223ae8 e90ff32 c223ae8 e90ff32 f7908e9 e90ff32 2cdf689 e90ff32 c223ae8 1cb4402 9ea44fd 4e29acc c223ae8 1cb4402 d014e57 c223ae8 d014e57 1cb4402 d014e57 173c19f 1cb4402 cc269fd 1cb4402 9ea44fd 4f7f42e cc269fd 4f7f42e 7101a66 4f7f42e 1cb4402 9ea44fd d014e57 cc269fd d014e57 cc269fd d014e57 946f702 c223ae8 d014e57 922f0a4 9ea44fd cc269fd 9ea44fd 2078ac3 9ea44fd d014e57 9ea44fd d014e57 8fb17dc d014e57 8fb17dc 1cb4402 c223ae8 1cb4402 8fb17dc c2d5e9a 8fb17dc c2d5e9a 8fb17dc c2d5e9a 8fb17dc c052244 8fb17dc c2d5e9a 1585b71 c052244 1585b71 8fb17dc c2d5e9a 8fb17dc c2d5e9a ef0c181 c2d5e9a 8fb17dc c2d5e9a 8fb17dc c052244 8fb17dc 1cb4402 e7a4ed3 173c19f 1cb4402 922f0a4 1cb4402 922f0a4 9ea44fd 8fb17dc 9ea44fd 1cb4402 9ea44fd c223ae8 8fb17dc c2d5e9a 8fb17dc 1cb4402 8fb17dc 9ea44fd 8fb17dc 9ea44fd 8fb17dc c223ae8 8fb17dc 9ea44fd c223ae8 9ea44fd c223ae8 9ea44fd c223ae8 9ea44fd e7a4ed3 2db7255 9ea44fd 2db7255 8fb17dc 2db7255 d880ef2 2db7255 c223ae8 2db7255 c2d5e9a c223ae8 c2d5e9a c223ae8 c2d5e9a c223ae8 c2d5e9a 922f0a4 c2d5e9a c223ae8 c2d5e9a c223ae8 c2d5e9a 922f0a4 1cb4402 c2d5e9a 1cb4402 c2d5e9a 411c354 2bce5ed c1cbd58 2bce5ed c1cbd58 2bce5ed c1cbd58 2bce5ed 922f0a4 c223ae8 ec9719b c223ae8 ec9719b 922f0a4 848c819 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 
191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 | import os
import sys
import subprocess
import time
import requests
import json
import gradio as gr
from PIL import Image
import spaces
from huggingface_hub import hf_hub_download
# Enable accelerated Hub downloads (assumes the `hf_transfer` package is
# installed in the Space image — TODO confirm against requirements).
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
# Suppress unnecessary warnings
os.environ["PYTHONWARNINGS"] = "ignore"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ log spam
# MIG GPU compatibility - disable problematic CUDA optimizations
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["CUDA_LAUNCH_BLOCKING"] = "0"  # keep kernel launches asynchronous
# Force stable CUDA kernels for MIG compatibility
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
# Configuration: repositories cloned by setup() and the directory layout.
REPO_URL = "https://github.com/00quebec/Synthid-Bypass"  # reference workflow repo
COMFYUI_URL = "https://github.com/comfyanonymous/ComfyUI"
PYTHON_EXTENSION_URL = "https://github.com/pydn/ComfyUI-to-Python-Extension"
ROOT_DIR = os.getcwd()  # Space working directory at boot
COMFYUI_DIR = os.path.join(ROOT_DIR, "ComfyUI")  # ComfyUI checkout location
BYPASS_REPO_DIR = os.path.join(ROOT_DIR, "reference_repo")
def _apply_inline_patch(path, marker, matches, build_lines):
    """Rewrite the first matching line of ``path`` with generated fallback code.

    Shared helper for the two MIG patches below: reads the file, bails out when
    it is missing or already contains ``marker`` (idempotency guard so repeated
    boots don't stack patches), and replaces the first line for which
    ``matches(lines, i)`` is true with ``build_lines(indent)``, preserving the
    matched line's indentation.

    Args:
        path: absolute path of the source file to patch.
        marker: sentinel text proving the patch was already applied.
        matches: predicate ``(lines, index) -> bool`` selecting the target line.
        build_lines: callable taking the indent string, returning the
            newline-terminated replacement lines.

    Returns:
        True when the file was rewritten, False otherwise.
    """
    if not os.path.exists(path):
        return False
    with open(path, 'r') as f:
        lines = f.readlines()
    if marker in ''.join(lines):
        return False  # already patched on a previous boot
    for i, line in enumerate(lines):
        if matches(lines, i):
            indent = ' ' * (len(line) - len(line.lstrip()))
            lines[i:i + 1] = build_lines(indent)
            with open(path, 'w') as f:
                f.writelines(lines)
            return True
    return False


def _patch_qwen_for_mig_gpu():
    """Patch Qwen text encoder for MIG GPU - force CPU fallback on CUBLAS errors.

    MIG GPU slices can fail certain cuBLAS matmul kernels at runtime; both
    patches wrap the failing call in try/except, retry it on CPU when the
    error message mentions CUBLAS, and move the result back to the original
    device/dtype.
    """
    # Patch 1: llama.py - rotary-embedding frequency computation
    freqs_stmt = 'freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)'

    def llama_fallback(space):
        # Text written into llama.py; must stay functionally identical to the
        # original inline patch.
        return [
            f'{space}# MIG GPU: force CPU for matmul\n',
            f'{space}try:\n',
            f'{space}    freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)\n',
            f'{space}except RuntimeError as e:\n',
            f'{space}    if "CUBLAS" in str(e):\n',
            f'{space}        device = inv_freq_expanded.device\n',
            f'{space}        freqs = (inv_freq_expanded.float().cpu() @ position_ids_expanded.float().cpu()).transpose(1, 2).to(device)\n',
            f'{space}    else:\n',
            f'{space}        raise\n',
        ]

    if _apply_inline_patch(
            os.path.join(COMFYUI_DIR, "comfy/text_encoders/llama.py"),
            'MIG GPU: force CPU',
            lambda lines, i: freqs_stmt in lines[i],
            llama_fallback):
        print("[OK] Patched llama.py freqs computation")

    # Patch 2: ops.py - linear ops inside forward_comfy_cast_weights only
    # (the 10-line lookback keeps us from patching unrelated linear calls).
    linear_stmt = 'x = torch.nn.functional.linear(input, weight, bias)'

    def ops_fallback(space):
        return [
            f'{space}# MIG GPU CUBLAS fix\n',
            f'{space}try:\n',
            f'{space}    x = torch.nn.functional.linear(input, weight, bias)\n',
            f'{space}except RuntimeError as e:\n',
            f'{space}    if "CUBLAS" in str(e):\n',
            f'{space}        device = input.device\n',
            f'{space}        x = torch.nn.functional.linear(input.cpu().float(), weight.cpu().float(), bias.cpu().float() if bias is not None else None)\n',
            f'{space}        x = x.to(device).to(input.dtype)\n',
            f'{space}    else:\n',
            f'{space}        raise\n',
        ]

    if _apply_inline_patch(
            os.path.join(COMFYUI_DIR, "comfy/ops.py"),
            'MIG GPU CUBLAS',
            lambda lines, i: (linear_stmt in lines[i]
                             and 'forward_comfy_cast_weights' in ''.join(lines[max(0, i - 10):i])),
            ops_fallback):
        print("[OK] Patched ops.py linear operations")
def _install_custom_nodes():
    """Clone the custom node packs and pin the version-sensitive ones."""
    nodes = [
        "https://github.com/ltdrdata/ComfyUI-Impact-Pack",
        "https://github.com/ltdrdata/ComfyUI-Impact-Subpack",
        "https://github.com/wildminder/ComfyUI-dype",
        "https://github.com/rgthree/rgthree-comfy",
        "https://github.com/BadCafeCode/masquerade-nodes-comfyui",
        "https://github.com/lquesada/ComfyUI-Inpaint-CropAndStitch",
        PYTHON_EXTENSION_URL,
    ]
    custom_nodes_path = os.path.join(COMFYUI_DIR, "custom_nodes")
    os.makedirs(custom_nodes_path, exist_ok=True)
    # Pin Impact packs to exact versions used in reference workflow
    IMPACT_PACK_COMMIT = "61bd8397a18e7e7668e6a24e95168967768c2bed"
    IMPACT_SUBPACK_VERSION = "1.3.4"  # latest available; ref workflow's "1.3.5" doesn't exist as a tag
    for url in nodes:
        name = url.split("/")[-1]
        node_dest = os.path.join(custom_nodes_path, name)
        if os.path.exists(node_dest):
            continue  # already cloned on a previous (possibly partial) setup run
        subprocess.run(["git", "clone", url, node_dest], check=True, capture_output=True)
        # Checkout specific versions for Impact packs to match reference workflow
        if name == "ComfyUI-Impact-Pack":
            subprocess.run(["git", "checkout", IMPACT_PACK_COMMIT], cwd=node_dest, check=True, capture_output=True)
        elif name == "ComfyUI-Impact-Subpack":
            subprocess.run(["git", "checkout", IMPACT_SUBPACK_VERSION], cwd=node_dest, check=True, capture_output=True)
        elif name == "ComfyUI-dype":
            # changed to v2.1.0 (Nov 29, 2025) before the update introduces new parameters
            subprocess.run(["git", "checkout", "db90fe5012f94c6aef406d743292daf76b4477a6"], cwd=node_dest, check=True, capture_output=True)
    print("[OK] Custom nodes installed")


def _download_models():
    """Download model weights into ComfyUI's model directories (via hf_transfer)."""
    model_configs = [
        {"repo": "Comfy-Org/z_image_turbo", "file": "split_files/vae/ae.safetensors", "dest": "models/vae/ae.safetensors"},
        {"repo": "Comfy-Org/z_image_turbo", "file": "split_files/diffusion_models/z_image_turbo_bf16.safetensors", "dest": "models/diffusion_models/z_image_turbo_bf16.safetensors"},
        {"repo": "Comfy-Org/z_image_turbo", "file": "split_files/text_encoders/qwen_3_4b.safetensors", "dest": "models/text_encoders/qwen_3_4b.safetensors"},
        {"repo": "alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union", "file": "Z-Image-Turbo-Fun-Controlnet-Union.safetensors", "dest": "models/model_patches/Z-Image-Turbo-Fun-Controlnet-Union.safetensors"},
        {"repo": "deepghs/yolo-face", "file": "yolov8n-face/model.pt", "dest": "models/ultralytics/bbox/yolov8n-face.pt"},
        {"repo": "YouLiXiya/YL-SAM", "file": "sam_vit_b_01ec64.pth", "dest": "models/sams/sam_vit_b_01ec64.pth"},
    ]
    print("Downloading models (fast with HF_TRANSFER)...")
    for i, cfg in enumerate(model_configs, 1):
        out_path = os.path.join(COMFYUI_DIR, cfg['dest'])
        if os.path.exists(out_path):
            continue
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        print(f" [{i}/{len(model_configs)}] {cfg['file'].split('/')[-1]}")
        # Trust the path hf_hub_download actually wrote rather than
        # reconstructing it by hand, then move it to the destination name.
        downloaded = hf_hub_download(
            repo_id=cfg['repo'],
            filename=cfg['file'],
            local_dir=COMFYUI_DIR,
            local_dir_use_symlinks=False
        )
        if os.path.abspath(downloaded) != os.path.abspath(out_path) and os.path.exists(downloaded):
            os.rename(downloaded, out_path)


def setup():
    """One-time environment setup for the Hugging Face Space.

    Idempotent: when a sentinel model file from a previous boot exists, only
    the source patches are (re)applied and everything else is skipped.
    """
    # Check if a known model file exists to skip setup
    already_initialized = os.path.exists(os.path.join(COMFYUI_DIR, "models/vae/ae.safetensors"))
    if already_initialized:
        print("=== ENVIRONMENT ALREADY INITIALIZED ===")
        # Still apply critical patches even if already initialized
        _patch_qwen_for_mig_gpu()
        return
    print("=== FIRST TIME SETUP ===")
    # 1. Clone repos. Guard against directories left over from a partial
    # first run — `git clone` into an existing dir would fail with check=True.
    if not os.path.exists(COMFYUI_DIR):
        subprocess.run(["git", "clone", COMFYUI_URL, COMFYUI_DIR], check=True, capture_output=True)
    if not os.path.exists(BYPASS_REPO_DIR):
        subprocess.run(["git", "clone", REPO_URL, BYPASS_REPO_DIR], check=True, capture_output=True)
    # 2. Setup custom nodes
    _install_custom_nodes()
    # Apply MIG GPU compatibility patches
    _patch_qwen_for_mig_gpu()
    # Install performance optimizations (SageAttention, Flash Attention);
    # best-effort — don't fail setup if these wheels can't install.
    print("Installing performance optimizations...")
    subprocess.run([
        sys.executable, "-m", "pip", "install",
        "sageattention", "flash-attn", "--no-cache-dir"
    ], capture_output=True, check=False)
    # 3. Model downloads
    _download_models()
    print("[OK] Setup complete")
def convert_to_api(web_workflow):
    """Convert a ComfyUI *web* workflow JSON (UI format) to the API prompt format.

    The web format stores connections in a flat ``links`` table and widget
    values as a positional list per node; the API format wants per-node named
    inputs. Connections are resolved through the links table; widget values
    are mapped to named inputs only for the node types this app's workflow
    uses (other nodes keep just their connection inputs and rely on
    server-side defaults — the positional order depends on each node's
    INPUT_TYPES registration, so a generic mapping isn't possible here).

    Args:
        web_workflow: dict parsed from a ComfyUI web-format JSON export, with
            "nodes" and "links" keys.

    Returns:
        dict mapping node-id strings to ``{"class_type", "inputs"}`` entries.
    """
    nodes = web_workflow.get("nodes", [])
    links = web_workflow.get("links", [])
    # Map link_id -> [origin_node_id, origin_slot_index]. Index instead of
    # unpacking so link rows with extra trailing fields don't blow up.
    link_map = {}
    for link in links:
        if link:
            link_map[link[0]] = [str(link[1]), link[2]]
    api_prompt = {}
    skipped_nodes = []
    # UI-only nodes and primitive value nodes with no API execution counterpart.
    skip_types = ["Note", "Group", "Reroute", "Float", "Int", "String", "Boolean"]
    # Loaders that must survive the generic "Loader" skip below.
    kept_loaders = ["UNETLoader", "VAELoader", "CLIPLoader"]
    for node in nodes:
        node_id = str(node["id"])
        class_type = node["type"]
        # Skip UI-only nodes, provider/config nodes, and any loader that is
        # not whitelisted. (Parentheses make the precedence explicit.)
        if (class_type in skip_types
                or "Provider" in class_type
                or ("Loader" in class_type and class_type not in kept_loaders)):
            skipped_nodes.append(f"{node_id}:{class_type}")
            continue
        inputs = {}
        # 1. Connections: resolve each input's link id to [origin_node, slot].
        # Compare against None explicitly — a link id of 0 is falsy but valid.
        for inp in node.get("inputs", []):
            l_id = inp.get("link")
            if l_id is not None and l_id in link_map:
                inputs[inp["name"]] = link_map[l_id]
        # 2. Widgets: map positional widgets_values to named inputs for the
        # node types known to appear in this workflow.
        w_values = node.get("widgets_values", [])
        if class_type == "CLIPTextEncode" and w_values:
            inputs["text"] = w_values[0]
        elif class_type == "KSampler" and len(w_values) >= 7:
            # w_values[1] is the UI-only "control_after_generate", hence skipped.
            inputs["seed"] = w_values[0]
            inputs["steps"] = w_values[2]
            inputs["cfg"] = w_values[3]
            inputs["sampler_name"] = w_values[4]
            inputs["scheduler"] = w_values[5]
            inputs["denoise"] = w_values[6]
        elif class_type == "VAELoader" and w_values:
            inputs["vae_name"] = w_values[0]
        elif class_type == "UNETLoader" and w_values:
            inputs["unet_name"] = w_values[0]
        elif class_type == "LoadImage" and w_values:
            inputs["image"] = w_values[0]
            inputs["upload"] = w_values[1] if len(w_values) > 1 else "image"
        elif class_type == "ModelSamplingAuraFlow" and w_values:
            inputs["shift"] = w_values[0]
        elif class_type == "DyPE_FLUX" and len(w_values) >= 4:
            inputs["width"] = w_values[0]
            inputs["height"] = w_values[1]
            inputs["preset"] = w_values[2]
            inputs["pe_type"] = w_values[3]
            # Remaining advanced parameters fall back to server defaults.
        api_prompt[node_id] = {
            "class_type": class_type,
            "inputs": inputs
        }
    print(f"Converted {len(api_prompt)} nodes, skipped {len(skipped_nodes)} nodes: {', '.join(skipped_nodes[:10])}")
    return api_prompt
# Execute setup on boot: runs at import time so the clone/download work
# finishes before Gradio starts serving (no-op after the first successful run).
setup()
@spaces.GPU(duration=120)
def remove_watermark(input_image):
    """Run the watermark-removal workflow on a single PIL image.

    Boots a headless ComfyUI server, queues the pre-converted API workflow
    against the saved input image, polls the history endpoint until the
    SaveImage node reports an output, and returns the resulting image. The
    server process is always torn down in ``finally``.

    Args:
        input_image: PIL image to process, or None (returns None immediately).

    Returns:
        The processed PIL image, or None when no input was given.

    Raises:
        RuntimeError: when the server fails to start, the workflow is
            rejected, or processing times out.
    """
    if input_image is None:
        return None
    # 1. Prepare paths
    input_dir = os.path.join(COMFYUI_DIR, "input")
    output_dir = os.path.join(COMFYUI_DIR, "output")
    os.makedirs(input_dir, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)
    # Save input image with a fixed name for the workflow
    input_filename = "input.png"
    input_path = os.path.join(input_dir, input_filename)
    input_image.save(input_path)
    # 2. Launch ComfyUI (headless). CWD must be the ComfyUI checkout so it
    # finds its models and custom nodes.
    print("Starting ComfyUI server...")
    cmd = [sys.executable, "main.py", "--listen", "127.0.0.1", "--port", "8188", "--disable-auto-launch"]
    proc = subprocess.Popen(cmd, cwd=COMFYUI_DIR)
    # Wait for the server to come up: 45 polls x 2s = 90 seconds max.
    server_ready = False
    for i in range(45):
        try:
            resp = requests.get("http://127.0.0.1:8188/history", timeout=2)
            if resp.status_code == 200:
                server_ready = True
                print("Server ready")
                break
        except requests.RequestException:
            # Connection refused / timeout while the server is still booting.
            if i % 5 == 0:
                print(f"Waiting for server... ({i*2}s)")
        time.sleep(2)
    if not server_ready:
        proc.terminate()
        raise RuntimeError("ComfyUI server failed to start within 90 seconds")
    try:
        # 3. Load pre-converted API workflow
        workflow_path = os.path.join(ROOT_DIR, "simple_api_workflow.json")
        with open(workflow_path, 'r') as f:
            api_prompt = json.load(f)
        # Update node 11 (LoadImage) to point to our input.png
        if "11" in api_prompt:
            api_prompt["11"]["inputs"]["image"] = input_filename
        # Send to ComfyUI
        print(f"Processing workflow ({len(api_prompt)} nodes)...")
        prompt_data = {"prompt": api_prompt}
        resp = requests.post("http://127.0.0.1:8188/prompt", json=prompt_data)
        if resp.status_code != 200:
            # Content-type may carry a charset suffix ("application/json;
            # charset=utf-8"), so match the prefix rather than the whole value.
            if resp.headers.get('content-type', '').startswith('application/json'):
                error_data = resp.json()
            else:
                error_data = {"error": resp.text}
            err = error_data.get('error', {})
            # The "error" field may be a plain string rather than a dict.
            error_msg = err.get('message', str(error_data)) if isinstance(err, dict) else str(err)
            raise RuntimeError(f"Workflow validation failed: {error_msg}")
        prompt_id = resp.json().get("prompt_id")
        print(f"Workflow queued (ID: {prompt_id[:8]}...)")
        # 4. Wait for completion: poll the history endpoint until the
        # prompt_id appears with a SaveImage output.
        max_poll = 120  # seconds of polling for processing
        finished = False
        output_filename = None
        for p in range(max_poll):
            history_resp = requests.get(f"http://127.0.0.1:8188/history/{prompt_id}")
            if history_resp.status_code == 200:
                history = history_resp.json()
                if prompt_id in history:
                    # Extract output filename from the SaveImage node (ID 62)
                    output_data = history[prompt_id]['outputs'].get('62')
                    if output_data and 'images' in output_data:
                        output_filename = output_data['images'][0]['filename']
                        finished = True
                        print("Processing complete!")
                        break
            if p % 10 == 0 and p > 0:
                print(f"Still processing... ({p}s)")
            time.sleep(1)
        if not finished:
            raise RuntimeError("Processing timed out or failed to save image.")
        # 5. Return result; copy so the library can close the file handle.
        output_path = os.path.join(output_dir, output_filename)
        return Image.open(output_path).copy()
    finally:
        print("Shutting down ComfyUI server...")
        proc.terminate()
        try:
            proc.wait(timeout=5)
        except subprocess.TimeoutExpired:
            proc.kill()
        # Cleanup input file
        if os.path.exists(input_path):
            os.remove(input_path)
# Premium UI with Fixed Height and No Share Buttons.
# NOTE(review): `.image-preview` is not an elem_class set by any component in
# the visible Blocks UI — presumably it targets Gradio's internal image
# preview class; confirm against the installed Gradio version.
css = """
#container {
max-width: 1200px;
margin: 0 auto;
}
.image-preview {
max-height: 512px !important;
}
footer {display: none !important;}
"""
# Build the Gradio UI at import time; the submit button wires the input image
# straight into remove_watermark().
with gr.Blocks(title="SynthID Remover") as demo:
    with gr.Column(elem_id="container"):
        gr.Markdown("# SynthID Remover")
        gr.Markdown("This tool removes SynthID watermarks by re-rendering images through a high-fidelity diffusion reconstruction pipeline. It is specifically designed to bypass SynthID detection while maintaining the original image structure.")
        # Side-by-side before/after images, fixed to 512px preview height.
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(type="pil", label="Input Image", height=512)
            with gr.Column():
                output_img = gr.Image(type="pil", label="Cleaned Image", height=512, interactive=False)
        submit_btn = gr.Button("Remove Watermark", variant="primary")
        # Single handler: one image in, one image out (GPU-decorated above).
        submit_btn.click(
            fn=remove_watermark,
            inputs=[input_img],
            outputs=[output_img]
        )
        # Static explanatory text, collapsed by default.
        with gr.Accordion("How it works", open=False):
            gr.Markdown("""
### Technical Breakdown
This tool removes SynthID watermarks by re-processing images through a specialized diffusion pipeline:
1. **Image Scaling**: Input is scaled to 2.5 megapixels for optimal processing
2. **Edge Detection**: Canny edge detection extracts the structural outline
3. **ControlNet Guidance**: Z-Image-Turbo ControlNet uses edges to maintain composition
4. **Multi-Pass Denoising**: Three sequential KSampler passes with 0.2 denoise strength:
- Each pass subtly replaces pixel noise while preserving structure
- Low denoise (0.2) ensures minimal visual changes
- 9 sampling steps per pass for efficiency
5. **Face Refinement**: FaceDetailer with YOLOv8 detection enhances facial details
6. **Output**: Final image maintains visual fidelity while removing watermark artifacts
**Models Used:**
- Z-Image-Turbo (12.3GB) - Main diffusion model
- Qwen 3 4B (8GB) - Text encoder
- ControlNet Union (3.1GB) - Structural guidance
- YOLOv8-Face - Face detection for refinement
""")
if __name__ == "__main__":
    # In Gradio 6.0+, css moved to launch(), but title remains in Blocks().
    # NOTE(review): verify the installed Gradio version actually accepts
    # `css` in launch(); earlier releases take it as a gr.Blocks() argument.
    demo.launch(css=css)
|