Spaces:
Running on Zero
Running on Zero
Commit ·
7ed8c57
1
Parent(s): a203745
Fix: Load Juggernaut-Z weights via safetensors instead of broken pipeline format
Browse files
RunDiffusion/Juggernaut-Z-Image only provides safetensors files, not a complete diffusers pipeline.
This fix loads the base Z-Image pipeline from Tongyi-MAI/Z-Image and then loads Juggernaut-Z
safetensors weights into the transformer component.
app.py
CHANGED
|
@@ -11,7 +11,8 @@ import gradio as gr
|
|
| 11 |
from diffusers import ZImagePipeline
|
| 12 |
|
| 13 |
# ==================== Configuration ====================
|
| 14 |
-
|
|
|
|
| 15 |
|
| 16 |
# ==================== Resolution Choices ====================
|
| 17 |
RES_CHOICES = {
|
|
@@ -74,14 +75,38 @@ def get_resolution(resolution: str) -> tuple[int, int]:
|
|
| 74 |
|
| 75 |
|
| 76 |
# ==================== Model Loading (Global Context) ====================
|
| 77 |
-
print(f"Loading
|
| 78 |
pipe = ZImagePipeline.from_pretrained(
|
| 79 |
-
|
| 80 |
torch_dtype=torch.bfloat16,
|
| 81 |
-
low_cpu_mem_usage=False,
|
| 82 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
pipe.to("cuda")
|
| 84 |
-
print("Pipeline loaded successfully!")
|
| 85 |
|
| 86 |
|
| 87 |
# ==================== Generation Function ====================
|
|
@@ -113,7 +138,9 @@ def generate(
|
|
| 113 |
).images[0]
|
| 114 |
|
| 115 |
meta = {
|
| 116 |
-
"model":
|
|
|
|
|
|
|
| 117 |
"prompt": prompt,
|
| 118 |
"negative_prompt": negative_prompt,
|
| 119 |
"resolution": f"{width} x {height}",
|
|
|
|
| 11 |
from diffusers import ZImagePipeline
|
| 12 |
|
| 13 |
# ==================== Configuration ====================
|
| 14 |
+
BASE_MODEL = "Tongyi-MAI/Z-Image"
|
| 15 |
+
WEIGHTS_FILE = "RunDiffusion/Juggernaut-Z-Image/Juggernaut_Z_V1_by_RunDiffusion.safetensors"
|
| 16 |
|
| 17 |
# ==================== Resolution Choices ====================
|
| 18 |
RES_CHOICES = {
|
|
|
|
| 75 |
|
| 76 |
|
| 77 |
# ==================== Model Loading (Global Context) ====================
|
| 78 |
+
print(f"Loading base Z-Image pipeline from {BASE_MODEL}...")
|
| 79 |
pipe = ZImagePipeline.from_pretrained(
|
| 80 |
+
BASE_MODEL,
|
| 81 |
torch_dtype=torch.bfloat16,
|
|
|
|
| 82 |
)
|
| 83 |
+
|
| 84 |
+
print(f"Loading Juggernaut-Z weights from {WEIGHTS_FILE}...")
|
| 85 |
+
from huggingface_hub import hf_hub_download
|
| 86 |
+
from safetensors.torch import load_file
|
| 87 |
+
|
| 88 |
+
weights_path = hf_hub_download(
|
| 89 |
+
repo_id="RunDiffusion/Juggernaut-Z-Image",
|
| 90 |
+
filename="Juggernaut_Z_V1_by_RunDiffusion.safetensors",
|
| 91 |
+
)
|
| 92 |
+
state_dict = load_file(weights_path)
|
| 93 |
+
|
| 94 |
+
# Load transformer weights (key component for Z-Image)
|
| 95 |
+
if hasattr(pipe, 'transformer'):
|
| 96 |
+
pipe.transformer.load_state_dict(state_dict, strict=False)
|
| 97 |
+
else:
|
| 98 |
+
# Alternative: load into the main unet/transformer component
|
| 99 |
+
for name, component in pipe.components.items():
|
| 100 |
+
if hasattr(component, 'load_state_dict'):
|
| 101 |
+
try:
|
| 102 |
+
component.load_state_dict(state_dict, strict=False)
|
| 103 |
+
print(f"Loaded weights into {name}")
|
| 104 |
+
break
|
| 105 |
+
except Exception as e:
|
| 106 |
+
print(f"Could not load into {name}: {e}")
|
| 107 |
+
|
| 108 |
pipe.to("cuda")
|
| 109 |
+
print("Pipeline loaded successfully with Juggernaut-Z weights!")
|
| 110 |
|
| 111 |
|
| 112 |
# ==================== Generation Function ====================
|
|
|
|
| 138 |
).images[0]
|
| 139 |
|
| 140 |
meta = {
|
| 141 |
+
"model": "Juggernaut-Z (RunDiffusion)",
|
| 142 |
+
"base_model": BASE_MODEL,
|
| 143 |
+
"weights": "Juggernaut_Z_V1_by_RunDiffusion.safetensors",
|
| 144 |
"prompt": prompt,
|
| 145 |
"negative_prompt": negative_prompt,
|
| 146 |
"resolution": f"{width} x {height}",
|