Spaces:
Sleeping
Sleeping
Kyle Pearson Qwen-Coder committed on
Commit ·
459ac47
1
Parent(s): b1e7bdb
Remove Python cache files and update .gitignore
Browse files
- Delete all __pycache__/*.pyc files from src/ and src/ui/
- Add .huggingface/ to .gitignore to prevent committing cached tokens
- Update .gitignore with new patterns
Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
- .gitignore +36 -0
- README.md +34 -1
- app.py +12 -11
- requirements.txt +4 -0
- src/__pycache__/__init__.cpython-311.pyc +0 -0
- src/__pycache__/__init__.cpython-313.pyc +0 -0
- src/__pycache__/config.cpython-311.pyc +0 -0
- src/__pycache__/config.cpython-313.pyc +0 -0
- src/__pycache__/downloader.cpython-311.pyc +0 -0
- src/__pycache__/downloader.cpython-313.pyc +0 -0
- src/__pycache__/exporter.cpython-311.pyc +0 -0
- src/__pycache__/exporter.cpython-313.pyc +0 -0
- src/__pycache__/generator.cpython-311.pyc +0 -0
- src/__pycache__/generator.cpython-313.pyc +0 -0
- src/__pycache__/pipeline.cpython-311.pyc +0 -0
- src/__pycache__/pipeline.cpython-313.pyc +0 -0
- src/config.py +76 -11
- src/downloader.py +9 -2
- src/exporter.py +7 -1
- src/generator.py +16 -2
- src/pipeline.py +22 -5
- src/ui/__pycache__/__init__.cpython-311.pyc +0 -0
- src/ui/__pycache__/__init__.cpython-313.pyc +0 -0
- src/ui/__pycache__/exporter_tab.cpython-311.pyc +0 -0
- src/ui/__pycache__/exporter_tab.cpython-313.pyc +0 -0
- src/ui/__pycache__/generator_tab.cpython-311.pyc +0 -0
- src/ui/__pycache__/generator_tab.cpython-313.pyc +0 -0
- src/ui/__pycache__/header.cpython-311.pyc +0 -0
- src/ui/__pycache__/header.cpython-313.pyc +0 -0
- src/ui/__pycache__/loader_tab.cpython-311.pyc +0 -0
- src/ui/__pycache__/loader_tab.cpython-313.pyc +0 -0
.gitignore
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python cache
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# Virtual environment
|
| 7 |
+
venv/
|
| 8 |
+
.venv/
|
| 9 |
+
env/
|
| 10 |
+
|
| 11 |
+
# IDE files
|
| 12 |
+
.vscode/
|
| 13 |
+
.idea/
|
| 14 |
+
*.swp
|
| 15 |
+
*.swo
|
| 16 |
+
|
| 17 |
+
# Environment variables (contains sensitive data)
|
| 18 |
+
.env
|
| 19 |
+
.env.local
|
| 20 |
+
.env.*.local
|
| 21 |
+
|
| 22 |
+
# Model cache (large files)
|
| 23 |
+
.cache/
|
| 24 |
+
|
| 25 |
+
# HuggingFace cache (contains tokens and configs)
|
| 26 |
+
.huggingface/
|
| 27 |
+
*.safetensors
|
| 28 |
+
*.bin
|
| 29 |
+
|
| 30 |
+
# Logs
|
| 31 |
+
*.log
|
| 32 |
+
logs/
|
| 33 |
+
|
| 34 |
+
# OS files
|
| 35 |
+
.DS_Store
|
| 36 |
+
Thumbs.db
|
README.md
CHANGED
|
@@ -12,4 +12,37 @@ license: mit
|
|
| 12 |
short_description: Merge SDXL checkpoints & LoRA and export with quantization
|
| 13 |
---
|
| 14 |
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
short_description: Merge SDXL checkpoints & LoRA and export with quantization
|
| 13 |
---
|
| 14 |
|
| 15 |
+
# SDXL Model Merger
|
| 16 |
+
|
| 17 |
+
A Gradio-based web application for merging, generating with, and exporting Stable Diffusion XL (SDXL) checkpoints.
|
| 18 |
+
|
| 19 |
+
## Features
|
| 20 |
+
|
| 21 |
+
- **Load pipelines** from HuggingFace URLs with optional VAE and multiple LoRAs
|
| 22 |
+
- **Generate images** with seamless tiling support for panoramic/360° outputs
|
| 23 |
+
- **Export merged models** with quantization options (int8, int4, float8)
|
| 24 |
+
|
| 25 |
+
## Usage on HuggingFace Spaces
|
| 26 |
+
|
| 27 |
+
This app is optimized for both local and Space deployments:
|
| 28 |
+
|
| 29 |
+
```bash
|
| 30 |
+
# Local deployment
|
| 31 |
+
python app.py
|
| 32 |
+
|
| 33 |
+
# Space deployment with CPU fallback
|
| 34 |
+
export DEPLOYMENT_ENV=spaces
|
| 35 |
+
python app.py
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
For best results:
|
| 39 |
+
- Use **GPU** (NVIDIA) for fast generation - ~8GB VRAM recommended
|
| 40 |
+
- CPU mode is available but will be slower and use more RAM (~16GB+)
|
| 41 |
+
|
| 42 |
+
## Requirements
|
| 43 |
+
|
| 44 |
+
- Python 3.10+
|
| 45 |
+
- PyTorch 2.0+
|
| 46 |
+
- 4GB+ VRAM (GPU) or 16GB+ RAM (CPU)
|
| 47 |
+
- ~2GB disk space for cached models
|
| 48 |
+
|
app.py
CHANGED
|
@@ -361,6 +361,8 @@ def create_app():
|
|
| 361 |
return (
|
| 362 |
'<div class="status-warning">⏳ Loading started...</div>',
|
| 363 |
"Starting download...",
|
|
|
|
|
|
|
| 364 |
)
|
| 365 |
|
| 366 |
def on_load_pipeline_complete(status_msg, progress_text):
|
|
@@ -369,19 +371,22 @@ def create_app():
|
|
| 369 |
return (
|
| 370 |
'<div class="status-success">✅ Pipeline loaded successfully!</div>',
|
| 371 |
progress_text,
|
| 372 |
-
gr.update(interactive=True)
|
|
|
|
| 373 |
)
|
| 374 |
elif "⚠️" in status_msg or "cancelled" in status_msg.lower():
|
| 375 |
return (
|
| 376 |
'<div class="status-warning">⚠️ Download cancelled</div>',
|
| 377 |
progress_text,
|
| 378 |
-
gr.update(interactive=True)
|
|
|
|
| 379 |
)
|
| 380 |
else:
|
| 381 |
return (
|
| 382 |
f'<div class="status-error">{status_msg}</div>',
|
| 383 |
progress_text,
|
| 384 |
-
gr.update(interactive=True)
|
|
|
|
| 385 |
)
|
| 386 |
|
| 387 |
# Cancel button for pipeline loading
|
|
@@ -390,24 +395,20 @@ def create_app():
|
|
| 390 |
load_btn.click(
|
| 391 |
fn=on_load_pipeline_start,
|
| 392 |
inputs=[],
|
| 393 |
-
outputs=[load_status, load_progress],
|
| 394 |
-
).then(
|
| 395 |
-
fn=lambda: (gr.update(visible=True), gr.update(interactive=False)),
|
| 396 |
-
inputs=[],
|
| 397 |
-
outputs=[cancel_load_btn, load_btn],
|
| 398 |
).then(
|
| 399 |
fn=load_pipeline,
|
| 400 |
inputs=[checkpoint_url, vae_url, lora_urls, lora_strengths],
|
| 401 |
outputs=[load_status, load_progress],
|
| 402 |
show_progress="full",
|
| 403 |
).then(
|
| 404 |
-
fn=lambda: (gr.update(
|
| 405 |
inputs=[],
|
| 406 |
-
outputs=[cancel_load_btn,
|
| 407 |
).then(
|
| 408 |
fn=on_load_pipeline_complete,
|
| 409 |
inputs=[load_status, load_progress],
|
| 410 |
-
outputs=[load_status, load_progress, load_btn],
|
| 411 |
).then(
|
| 412 |
fn=lambda: (
|
| 413 |
gr.update(choices=["(None found)"] + get_cached_checkpoints()),
|
|
|
|
| 361 |
return (
|
| 362 |
'<div class="status-warning">⏳ Loading started...</div>',
|
| 363 |
"Starting download...",
|
| 364 |
+
gr.update(interactive=False),
|
| 365 |
+
gr.update(visible=True)
|
| 366 |
)
|
| 367 |
|
| 368 |
def on_load_pipeline_complete(status_msg, progress_text):
|
|
|
|
| 371 |
return (
|
| 372 |
'<div class="status-success">✅ Pipeline loaded successfully!</div>',
|
| 373 |
progress_text,
|
| 374 |
+
gr.update(interactive=True),
|
| 375 |
+
gr.update(visible=False)
|
| 376 |
)
|
| 377 |
elif "⚠️" in status_msg or "cancelled" in status_msg.lower():
|
| 378 |
return (
|
| 379 |
'<div class="status-warning">⚠️ Download cancelled</div>',
|
| 380 |
progress_text,
|
| 381 |
+
gr.update(interactive=True),
|
| 382 |
+
gr.update(visible=False)
|
| 383 |
)
|
| 384 |
else:
|
| 385 |
return (
|
| 386 |
f'<div class="status-error">{status_msg}</div>',
|
| 387 |
progress_text,
|
| 388 |
+
gr.update(interactive=True),
|
| 389 |
+
gr.update(visible=False)
|
| 390 |
)
|
| 391 |
|
| 392 |
# Cancel button for pipeline loading
|
|
|
|
| 395 |
load_btn.click(
|
| 396 |
fn=on_load_pipeline_start,
|
| 397 |
inputs=[],
|
| 398 |
+
outputs=[load_status, load_progress, load_btn, cancel_load_btn],
|
|
|
|
|
|
|
|
|
|
|
|
|
| 399 |
).then(
|
| 400 |
fn=load_pipeline,
|
| 401 |
inputs=[checkpoint_url, vae_url, lora_urls, lora_strengths],
|
| 402 |
outputs=[load_status, load_progress],
|
| 403 |
show_progress="full",
|
| 404 |
).then(
|
| 405 |
+
fn=lambda: (gr.update(interactive=True), gr.update(visible=False)),
|
| 406 |
inputs=[],
|
| 407 |
+
outputs=[cancel_load_btn, cancel_load_btn], # Just to hide it
|
| 408 |
).then(
|
| 409 |
fn=on_load_pipeline_complete,
|
| 410 |
inputs=[load_status, load_progress],
|
| 411 |
+
outputs=[load_status, load_progress, load_btn, cancel_load_btn],
|
| 412 |
).then(
|
| 413 |
fn=lambda: (
|
| 414 |
gr.update(choices=["(None found)"] + get_cached_checkpoints()),
|
requirements.txt
CHANGED
|
@@ -19,6 +19,10 @@ gradio>=6.9.0
|
|
| 19 |
# Download utilities
|
| 20 |
tqdm>=4.65.0
|
| 21 |
requests>=2.31.0
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
# Optional: quantization support
|
| 24 |
optimum-quanto>=0.2.0
|
|
|
|
| 19 |
# Download utilities
|
| 20 |
tqdm>=4.65.0
|
| 21 |
requests>=2.31.0
|
| 22 |
+
huggingface-hub>=0.23.0
|
| 23 |
+
|
| 24 |
+
# System monitoring (memory detection)
|
| 25 |
+
psutil>=5.9.0
|
| 26 |
|
| 27 |
# Optional: quantization support
|
| 28 |
optimum-quanto>=0.2.0
|
src/__pycache__/__init__.cpython-311.pyc
DELETED
|
Binary file (233 Bytes)
|
|
|
src/__pycache__/__init__.cpython-313.pyc
DELETED
|
Binary file (270 Bytes)
|
|
|
src/__pycache__/config.cpython-311.pyc
DELETED
|
Binary file (5.32 kB)
|
|
|
src/__pycache__/config.cpython-313.pyc
DELETED
|
Binary file (1.44 kB)
|
|
|
src/__pycache__/downloader.cpython-311.pyc
DELETED
|
Binary file (11.1 kB)
|
|
|
src/__pycache__/downloader.cpython-313.pyc
DELETED
|
Binary file (5.4 kB)
|
|
|
src/__pycache__/exporter.cpython-311.pyc
DELETED
|
Binary file (6.9 kB)
|
|
|
src/__pycache__/exporter.cpython-313.pyc
DELETED
|
Binary file (5.67 kB)
|
|
|
src/__pycache__/generator.cpython-311.pyc
DELETED
|
Binary file (2.65 kB)
|
|
|
src/__pycache__/generator.cpython-313.pyc
DELETED
|
Binary file (2.24 kB)
|
|
|
src/__pycache__/pipeline.cpython-311.pyc
DELETED
|
Binary file (10.2 kB)
|
|
|
src/__pycache__/pipeline.cpython-313.pyc
DELETED
|
Binary file (8.11 kB)
|
|
|
src/config.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
"""Configuration constants and global settings for SDXL Model Merger."""
|
| 2 |
|
| 3 |
import os
|
|
|
|
| 4 |
from pathlib import Path
|
| 5 |
|
| 6 |
# ──────────────────────────────────────────────
|
|
@@ -11,11 +12,37 @@ CACHE_DIR = SCRIPT_DIR / ".cache"
|
|
| 11 |
CACHE_DIR.mkdir(exist_ok=True)
|
| 12 |
|
| 13 |
# ──────────────────────────────────────────────
|
| 14 |
-
#
|
| 15 |
# ──────────────────────────────────────────────
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
# ──────────────────────────────────────────────
|
| 21 |
# PyTorch & Device Settings
|
|
@@ -26,27 +53,65 @@ import torch
|
|
| 26 |
def get_device_info() -> tuple[str, str]:
|
| 27 |
"""
|
| 28 |
Detect and return the optimal device for ML inference.
|
| 29 |
-
|
| 30 |
Returns:
|
| 31 |
Tuple of (device_name, device_description)
|
| 32 |
"""
|
| 33 |
if torch.cuda.is_available():
|
| 34 |
device_name = "cuda"
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
elif torch.backends.mps.is_available():
|
| 37 |
device_name = "mps"
|
| 38 |
-
|
| 39 |
-
else:
|
| 40 |
-
device_name = "cpu"
|
| 41 |
-
device_desc = "CPU (no GPU available)"
|
| 42 |
|
| 43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
|
| 45 |
|
| 46 |
device, device_description = get_device_info()
|
| 47 |
dtype = torch.float16
|
| 48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
print(f"🚀 Using device: {device_description}")
|
|
|
|
| 50 |
|
| 51 |
# ──────────────────────────────────────────────
|
| 52 |
# Global State
|
|
|
|
| 1 |
"""Configuration constants and global settings for SDXL Model Merger."""
|
| 2 |
|
| 3 |
import os
|
| 4 |
+
import sys
|
| 5 |
from pathlib import Path
|
| 6 |
|
| 7 |
# ──────────────────────────────────────────────
|
|
|
|
| 12 |
CACHE_DIR.mkdir(exist_ok=True)
|
| 13 |
|
| 14 |
# ──────────────────────────────────────────────
|
| 15 |
+
# Deployment Environment Detection
|
| 16 |
# ──────────────────────────────────────────────
|
| 17 |
+
DEPLOYMENT_ENV = os.environ.get("DEPLOYMENT_ENV", "local").lower()
|
| 18 |
+
|
| 19 |
+
if DEPLOYMENT_ENV not in ("local", "spaces"):
|
| 20 |
+
print(f"⚠️ Unknown DEPLOYMENT_ENV '{DEPLOYMENT_ENV}', defaulting to 'local'")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def is_running_on_spaces() -> bool:
|
| 24 |
+
"""Check if running on HuggingFace Spaces."""
|
| 25 |
+
return DEPLOYMENT_ENV == "spaces"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# ──────────────────────────────────────────────
|
| 29 |
+
# Default URLs - Use HF models for Spaces compatibility
|
| 30 |
+
# ──────────────────────────────────────────────
|
| 31 |
+
DEFAULT_CHECKPOINT_URL = os.environ.get(
|
| 32 |
+
"DEFAULT_CHECKPOINT_URL",
|
| 33 |
+
"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors?download=true"
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
DEFAULT_VAE_URL = os.environ.get(
|
| 37 |
+
"DEFAULT_VAE_URL",
|
| 38 |
+
"https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl.vae.safetensors?download=true"
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
# Default LoRA - using HF instead of CivitAI
|
| 42 |
+
DEFAULT_LORA_URLS = os.environ.get(
|
| 43 |
+
"DEFAULT_LORA_URLS",
|
| 44 |
+
"https://huggingface.co/nerijs/pixel-art-xl/resolve/main/pixel-art-xl.safetensors?download=true"
|
| 45 |
+
)
|
| 46 |
|
| 47 |
# ──────────────────────────────────────────────
|
| 48 |
# PyTorch & Device Settings
|
|
|
|
| 53 |
def get_device_info() -> tuple[str, str]:
|
| 54 |
"""
|
| 55 |
Detect and return the optimal device for ML inference.
|
| 56 |
+
|
| 57 |
Returns:
|
| 58 |
Tuple of (device_name, device_description)
|
| 59 |
"""
|
| 60 |
if torch.cuda.is_available():
|
| 61 |
device_name = "cuda"
|
| 62 |
+
gpu_name = torch.cuda.get_device_name(0)
|
| 63 |
+
# Check available VRAM
|
| 64 |
+
try:
|
| 65 |
+
vram_total = torch.cuda.get_device_properties(0).total_memory / (1024**3)
|
| 66 |
+
if vram_total < 8.0:
|
| 67 |
+
return device_name, f"CUDA (GPU: {gpu_name}, {vram_total:.1f}GB VRAM - low memory)"
|
| 68 |
+
except Exception:
|
| 69 |
+
pass
|
| 70 |
+
return device_name, f"CUDA (GPU: {gpu_name})"
|
| 71 |
+
|
| 72 |
elif torch.backends.mps.is_available():
|
| 73 |
device_name = "mps"
|
| 74 |
+
return device_name, "Apple Silicon MPS"
|
|
|
|
|
|
|
|
|
|
| 75 |
|
| 76 |
+
else:
|
| 77 |
+
# CPU fallback - check available RAM
|
| 78 |
+
try:
|
| 79 |
+
import psutil
|
| 80 |
+
ram_gb = psutil.virtual_memory().total / (1024**3)
|
| 81 |
+
if ram_gb < 16.0:
|
| 82 |
+
return "cpu", f"CPU (WARNING: {ram_gb:.1f}GB RAM - may be insufficient)"
|
| 83 |
+
return "cpu", f"CPU ({ram_gb:.1f}GB RAM)"
|
| 84 |
+
except Exception:
|
| 85 |
+
return "cpu", "CPU (no GPU available)"
|
| 86 |
|
| 87 |
|
| 88 |
device, device_description = get_device_info()
|
| 89 |
dtype = torch.float16
|
| 90 |
|
| 91 |
+
# Check if we're on low-memory hardware and warn
|
| 92 |
+
def check_memory_requirements() -> bool:
|
| 93 |
+
"""Check if system meets minimum requirements. Returns True if OK."""
|
| 94 |
+
min_ram_gb = 8.0 if device == "cpu" else 4.0
|
| 95 |
+
|
| 96 |
+
try:
|
| 97 |
+
import psutil
|
| 98 |
+
total_ram = psutil.virtual_memory().total / (1024**3)
|
| 99 |
+
|
| 100 |
+
# On Spaces with CPU, RAM is limited - use float32 for safety
|
| 101 |
+
if is_running_on_spaces() and device == "cpu":
|
| 102 |
+
print(f"ℹ️ Spaces CPU mode detected: using float32 for stability")
|
| 103 |
+
return True
|
| 104 |
+
|
| 105 |
+
if total_ram < min_ram_gb:
|
| 106 |
+
print(f"⚠️ Warning: Low memory ({total_ram:.1f}GB < {min_ram_gb}GB required)")
|
| 107 |
+
return False
|
| 108 |
+
except Exception:
|
| 109 |
+
pass
|
| 110 |
+
|
| 111 |
+
return True
|
| 112 |
+
|
| 113 |
print(f"🚀 Using device: {device_description}")
|
| 114 |
+
check_memory_requirements()
|
| 115 |
|
| 116 |
# ──────────────────────────────────────────────
|
| 117 |
# Global State
|
src/downloader.py
CHANGED
|
@@ -14,6 +14,11 @@ def extract_model_id(url: str) -> str | None:
|
|
| 14 |
return match.group(1) if match else None
|
| 15 |
|
| 16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
def get_safe_filename_from_url(
|
| 18 |
url: str,
|
| 19 |
default_name: str = "model.safetensors",
|
|
@@ -198,6 +203,8 @@ def download_file_with_progress(url: str, output_path: Path, progress_bar=None)
|
|
| 198 |
|
| 199 |
Checks for existing cached files before downloading. If a valid cache
|
| 200 |
exists (file exists with matching expected size), skips re-download.
|
|
|
|
|
|
|
| 201 |
|
| 202 |
Args:
|
| 203 |
url: File URL to download (http/https/file)
|
|
@@ -219,9 +226,9 @@ def download_file_with_progress(url: str, output_path: Path, progress_bar=None)
|
|
| 219 |
if local_path.exists():
|
| 220 |
import shutil
|
| 221 |
output_path.parent.mkdir(parents=True, exist_ok=True)
|
| 222 |
-
|
| 223 |
print(f" 📁 Copying from cache: {local_path.name} → {output_path.name}")
|
| 224 |
-
|
| 225 |
# Copy the file to cache location
|
| 226 |
shutil.copy2(str(local_path), str(output_path))
|
| 227 |
|
|
|
|
| 14 |
return match.group(1) if match else None
|
| 15 |
|
| 16 |
|
| 17 |
+
def is_huggingface_url(url: str) -> bool:
|
| 18 |
+
"""Check if URL is a HuggingFace model download URL."""
|
| 19 |
+
return "huggingface.co" in url.lower()
|
| 20 |
+
|
| 21 |
+
|
| 22 |
def get_safe_filename_from_url(
|
| 23 |
url: str,
|
| 24 |
default_name: str = "model.safetensors",
|
|
|
|
| 203 |
|
| 204 |
Checks for existing cached files before downloading. If a valid cache
|
| 205 |
exists (file exists with matching expected size), skips re-download.
|
| 206 |
+
|
| 207 |
+
Supports both HTTP(S) and HuggingFace Hub URLs.
|
| 208 |
|
| 209 |
Args:
|
| 210 |
url: File URL to download (http/https/file)
|
|
|
|
| 226 |
if local_path.exists():
|
| 227 |
import shutil
|
| 228 |
output_path.parent.mkdir(parents=True, exist_ok=True)
|
| 229 |
+
|
| 230 |
print(f" 📁 Copying from cache: {local_path.name} → {output_path.name}")
|
| 231 |
+
|
| 232 |
# Copy the file to cache location
|
| 233 |
shutil.copy2(str(local_path), str(output_path))
|
| 234 |
|
src/exporter.py
CHANGED
|
@@ -31,12 +31,18 @@ def export_merged_model(
|
|
| 31 |
return None, "⚠️ Please load a pipeline first."
|
| 32 |
|
| 33 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
# Step 1: Unload LoRAs
|
| 35 |
yield "💾 Exporting model...", "Unloading LoRAs..."
|
| 36 |
if include_lora:
|
| 37 |
try:
|
| 38 |
global_pipe.unload_lora_weights()
|
| 39 |
-
except Exception:
|
|
|
|
| 40 |
pass
|
| 41 |
|
| 42 |
merged_state_dict = {}
|
|
|
|
| 31 |
return None, "⚠️ Please load a pipeline first."
|
| 32 |
|
| 33 |
try:
|
| 34 |
+
# Validate quantization type
|
| 35 |
+
valid_qtypes = ("none", "int8", "int4", "float8")
|
| 36 |
+
if qtype not in valid_qtypes:
|
| 37 |
+
return None, f"❌ Invalid quantization type: {qtype}. Must be one of: {valid_qtypes}"
|
| 38 |
+
|
| 39 |
# Step 1: Unload LoRAs
|
| 40 |
yield "💾 Exporting model...", "Unloading LoRAs..."
|
| 41 |
if include_lora:
|
| 42 |
try:
|
| 43 |
global_pipe.unload_lora_weights()
|
| 44 |
+
except Exception as e:
|
| 45 |
+
print(f" ℹ️ Could not unload LoRAs: {e}")
|
| 46 |
pass
|
| 47 |
|
| 48 |
merged_state_dict = {}
|
src/generator.py
CHANGED
|
@@ -2,7 +2,7 @@
|
|
| 2 |
|
| 3 |
import torch
|
| 4 |
|
| 5 |
-
from .config import device, dtype, pipe as global_pipe
|
| 6 |
from .pipeline import enable_seamless_tiling
|
| 7 |
|
| 8 |
|
|
@@ -35,13 +35,27 @@ def generate_image(
|
|
| 35 |
if not global_pipe:
|
| 36 |
return None, "⚠️ Please load a pipeline first."
|
| 37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
# Enable seamless tiling on UNet & VAE decoder
|
| 39 |
enable_seamless_tiling(global_pipe.unet, tile_x=tile_x, tile_y=tile_y)
|
| 40 |
enable_seamless_tiling(global_pipe.vae.decoder, tile_x=tile_x, tile_y=tile_y)
|
| 41 |
|
| 42 |
yield "🎨 Generating image...", f"Steps: 0/{steps} | CFG: {cfg}"
|
| 43 |
|
| 44 |
-
generator = torch.Generator(device=
|
| 45 |
result = global_pipe(
|
| 46 |
prompt=prompt,
|
| 47 |
negative_prompt=negative_prompt,
|
|
|
|
| 2 |
|
| 3 |
import torch
|
| 4 |
|
| 5 |
+
from .config import device, dtype, pipe as global_pipe, is_running_on_spaces
|
| 6 |
from .pipeline import enable_seamless_tiling
|
| 7 |
|
| 8 |
|
|
|
|
| 35 |
if not global_pipe:
|
| 36 |
return None, "⚠️ Please load a pipeline first."
|
| 37 |
|
| 38 |
+
# For CPU mode, use float32 and warn about slow generation
|
| 39 |
+
effective_dtype = dtype
|
| 40 |
+
effective_device = device
|
| 41 |
+
|
| 42 |
+
if is_running_on_spaces() and device == "cpu":
|
| 43 |
+
print(" ℹ️ CPU mode: using float32 for stability (generation will be slower)")
|
| 44 |
+
effective_dtype = torch.float32
|
| 45 |
+
# Update pipeline to use float32
|
| 46 |
+
global_pipe.unet.to(dtype=torch.float32)
|
| 47 |
+
global_pipe.text_encoder.to(dtype=torch.float32)
|
| 48 |
+
global_pipe.text_encoder_2.to(dtype=torch.float32)
|
| 49 |
+
if global_pipe.vae:
|
| 50 |
+
global_pipe.vae.to(dtype=torch.float32)
|
| 51 |
+
|
| 52 |
# Enable seamless tiling on UNet & VAE decoder
|
| 53 |
enable_seamless_tiling(global_pipe.unet, tile_x=tile_x, tile_y=tile_y)
|
| 54 |
enable_seamless_tiling(global_pipe.vae.decoder, tile_x=tile_x, tile_y=tile_y)
|
| 55 |
|
| 56 |
yield "🎨 Generating image...", f"Steps: 0/{steps} | CFG: {cfg}"
|
| 57 |
|
| 58 |
+
generator = torch.Generator(device=effective_device).manual_seed(42) # Fixed seed for reproducibility
|
| 59 |
result = global_pipe(
|
| 60 |
prompt=prompt,
|
| 61 |
negative_prompt=negative_prompt,
|
src/pipeline.py
CHANGED
|
@@ -9,8 +9,7 @@ from diffusers import (
|
|
| 9 |
DPMSolverSDEScheduler,
|
| 10 |
)
|
| 11 |
|
| 12 |
-
from .config import device, dtype, pipe as global_pipe, CACHE_DIR, device_description
|
| 13 |
-
from .downloader import download_file_with_progress, get_safe_filename_from_url, set_download_cancelled
|
| 14 |
|
| 15 |
|
| 16 |
def _make_asymmetric_forward(module, pad_h: int, pad_w: int, tile_x: bool, tile_y: bool):
|
|
@@ -163,11 +162,23 @@ def load_pipeline(
|
|
| 163 |
if progress:
|
| 164 |
progress(0.3, desc="Loading text encoders...")
|
| 165 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 166 |
global_pipe = StableDiffusionXLPipeline.from_single_file(
|
| 167 |
str(checkpoint_path),
|
| 168 |
-
|
| 169 |
-
use_safetensors=True,
|
| 170 |
-
safety_checker=None,
|
| 171 |
)
|
| 172 |
print(" ✅ Text encoders loaded")
|
| 173 |
|
|
@@ -175,6 +186,12 @@ def load_pipeline(
|
|
| 175 |
progress(0.5, desc="Loading UNet...")
|
| 176 |
|
| 177 |
print(" ✅ UNet loaded")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 178 |
yield "⚙️ Pipeline loaded, setting up components...", f"Using device: {device_description}"
|
| 179 |
|
| 180 |
# Load VAE into pipeline if provided
|
|
|
|
| 9 |
DPMSolverSDEScheduler,
|
| 10 |
)
|
| 11 |
|
| 12 |
+
from .config import device, dtype, pipe as global_pipe, CACHE_DIR, device_description, is_running_on_spaces
|
|
|
|
| 13 |
|
| 14 |
|
| 15 |
def _make_asymmetric_forward(module, pad_h: int, pad_w: int, tile_x: bool, tile_y: bool):
|
|
|
|
| 162 |
if progress:
|
| 163 |
progress(0.3, desc="Loading text encoders...")
|
| 164 |
|
| 165 |
+
# For CPU/low-memory environments on Spaces, use device_map for better RAM management
|
| 166 |
+
load_kwargs = {
|
| 167 |
+
"torch_dtype": dtype,
|
| 168 |
+
"use_safetensors": True,
|
| 169 |
+
"safety_checker": None,
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
if is_running_on_spaces() and device == "cpu":
|
| 173 |
+
print(" ℹ️ CPU mode detected: enabling device_map='auto' for better RAM management")
|
| 174 |
+
load_kwargs["device_map"] = "auto"
|
| 175 |
+
else:
|
| 176 |
+
# For GPU, we'll move to device after loading
|
| 177 |
+
load_kwargs["variant"] = "fp16" if dtype == torch.float16 else None
|
| 178 |
+
|
| 179 |
global_pipe = StableDiffusionXLPipeline.from_single_file(
|
| 180 |
str(checkpoint_path),
|
| 181 |
+
**load_kwargs,
|
|
|
|
|
|
|
| 182 |
)
|
| 183 |
print(" ✅ Text encoders loaded")
|
| 184 |
|
|
|
|
| 186 |
progress(0.5, desc="Loading UNet...")
|
| 187 |
|
| 188 |
print(" ✅ UNet loaded")
|
| 189 |
+
|
| 190 |
+
# Move to device (unless using device_map='auto' which handles this automatically)
|
| 191 |
+
if not is_running_on_spaces() or device != "cpu":
|
| 192 |
+
print(f" ⚙️ Moving pipeline to device: {device_description}...")
|
| 193 |
+
global_pipe = global_pipe.to(device=device, dtype=dtype)
|
| 194 |
+
|
| 195 |
yield "⚙️ Pipeline loaded, setting up components...", f"Using device: {device_description}"
|
| 196 |
|
| 197 |
# Load VAE into pipeline if provided
|
src/ui/__pycache__/__init__.cpython-311.pyc
DELETED
|
Binary file (510 Bytes)
|
|
|
src/ui/__pycache__/__init__.cpython-313.pyc
DELETED
|
Binary file (477 Bytes)
|
|
|
src/ui/__pycache__/exporter_tab.cpython-311.pyc
DELETED
|
Binary file (5.23 kB)
|
|
|
src/ui/__pycache__/exporter_tab.cpython-313.pyc
DELETED
|
Binary file (4.41 kB)
|
|
|
src/ui/__pycache__/generator_tab.cpython-311.pyc
DELETED
|
Binary file (5.4 kB)
|
|
|
src/ui/__pycache__/generator_tab.cpython-313.pyc
DELETED
|
Binary file (4.51 kB)
|
|
|
src/ui/__pycache__/header.cpython-311.pyc
DELETED
|
Binary file (5.51 kB)
|
|
|
src/ui/__pycache__/header.cpython-313.pyc
DELETED
|
Binary file (5.09 kB)
|
|
|
src/ui/__pycache__/loader_tab.cpython-311.pyc
DELETED
|
Binary file (3.68 kB)
|
|
|
src/ui/__pycache__/loader_tab.cpython-313.pyc
DELETED
|
Binary file (3.07 kB)
|
|
|