Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See raw diff.
- logs/ltx2_cache_gpu0.log +0 -0
- logs/ltx2_cache_gpu1.log +0 -0
- logs/ltx2_cache_gpu3.log +0 -0
- logs/ltx2_cache_gpu4.log +0 -0
- logs/ltx2_cache_gpu5.log +0 -0
- logs/ltx2_cache_gpu6.log +0 -0
- src/musubi_tuner/__init__.py +0 -0
- src/musubi_tuner/dataset/__init__.py +0 -0
- src/musubi_tuner/dataset/audio_quota_sampler.py +318 -0
- src/musubi_tuner/dataset/config_utils.py +703 -0
- src/musubi_tuner/dataset/dataset_config.md +3 -0
- src/musubi_tuner/dataset/image_video_dataset.py +0 -0
- src/musubi_tuner/flux/flux_models.py +1065 -0
- src/musubi_tuner/flux/flux_utils.py +408 -0
- src/musubi_tuner/flux_2/flux2_models.py +1020 -0
- src/musubi_tuner/flux_2/flux2_utils.py +816 -0
- src/musubi_tuner/flux_kontext_cache_latents.py +135 -0
- src/musubi_tuner/flux_kontext_cache_text_encoder_outputs.py +123 -0
- src/musubi_tuner/flux_kontext_generate_image.py +1183 -0
- src/musubi_tuner/flux_kontext_train_network.py +405 -0
- src/musubi_tuner/fpack_cache_latents.py +501 -0
- src/musubi_tuner/frame_pack/__init__.py +0 -0
- src/musubi_tuner/frame_pack/bucket_tools.py +29 -0
- src/musubi_tuner/frame_pack/clip_vision.py +13 -0
- src/musubi_tuner/frame_pack/framepack_utils.py +274 -0
- src/musubi_tuner/frame_pack/hunyuan.py +134 -0
- src/musubi_tuner/frame_pack/hunyuan_video_packed.py +2141 -0
- src/musubi_tuner/frame_pack/hunyuan_video_packed_inference.py +340 -0
- src/musubi_tuner/frame_pack/k_diffusion_hunyuan.py +128 -0
- src/musubi_tuner/frame_pack/uni_pc_fm.py +144 -0
- src/musubi_tuner/frame_pack/utils.py +616 -0
- src/musubi_tuner/frame_pack/wrapper.py +55 -0
- src/musubi_tuner/gui/config_manager.py +105 -0
- src/musubi_tuner/gui/gui.ja.md +444 -0
- src/musubi_tuner/gui/gui.md +444 -0
- src/musubi_tuner/gui/gui.py +1134 -0
- src/musubi_tuner/gui/gui_implementation_plan.md +97 -0
- src/musubi_tuner/gui/i18n_data.py +186 -0
- src/musubi_tuner/gui_dashboard/__init__.py +10 -0
- src/musubi_tuner/gui_dashboard/command_builder.py +881 -0
- src/musubi_tuner/gui_dashboard/management_server.py +146 -0
- src/musubi_tuner/gui_dashboard/metrics_writer.py +144 -0
- src/musubi_tuner/gui_dashboard/process_manager.py +165 -0
- src/musubi_tuner/gui_dashboard/project_schema.py +481 -0
- src/musubi_tuner/gui_dashboard/server.py +111 -0
- src/musubi_tuner/gui_dashboard/toml_export.py +176 -0
- src/musubi_tuner/hunyuan_model/__init__.py +0 -0
- src/musubi_tuner/hunyuan_model/activation_layers.py +23 -0
- src/musubi_tuner/hunyuan_model/attention.py +309 -0
- src/musubi_tuner/hunyuan_model/embed_layers.py +131 -0
logs/ltx2_cache_gpu0.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs/ltx2_cache_gpu1.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs/ltx2_cache_gpu3.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs/ltx2_cache_gpu4.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs/ltx2_cache_gpu5.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
logs/ltx2_cache_gpu6.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
src/musubi_tuner/__init__.py
ADDED
|
File without changes
|
src/musubi_tuner/dataset/__init__.py
ADDED
|
File without changes
|
src/musubi_tuner/dataset/audio_quota_sampler.py
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import random
|
| 5 |
+
from typing import Any, Dict, List, Optional
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class AudioQuotaIndexSampler(torch.utils.data.Sampler[int]):
    """Opt-in sampler that guarantees a minimum number of audio-bearing batches
    inside every gradient-accumulation window.

    Indices come from two disjoint pools (audio / non-audio). Each window of
    ``accumulation_steps`` indices contains at least
    ``min_audio_batches_per_accum`` audio indices while the audio pool lasts;
    once either pool is exhausted, the other fills the remaining slots.
    """

    def __init__(
        self,
        *,
        audio_indices: List[int],
        non_audio_indices: List[int],
        accumulation_steps: int,
        min_audio_batches_per_accum: int,
        seed: int,
    ) -> None:
        if accumulation_steps < 1:
            raise ValueError(f"accumulation_steps must be >= 1, got {accumulation_steps}")
        if min_audio_batches_per_accum < 0:
            raise ValueError(
                f"min_audio_batches_per_accum must be >= 0, got {min_audio_batches_per_accum}"
            )
        if min_audio_batches_per_accum > accumulation_steps:
            raise ValueError(
                "min_audio_batches_per_accum must be <= gradient_accumulation_steps "
                f"(got {min_audio_batches_per_accum} > {accumulation_steps})"
            )

        self.audio_indices = list(audio_indices)
        self.non_audio_indices = list(non_audio_indices)
        self.accumulation_steps = int(accumulation_steps)
        self.min_audio_batches_per_accum = int(min_audio_batches_per_accum)
        self.seed = int(seed)
        self.epoch = 0

    def set_epoch(self, epoch: int) -> None:
        """Pin the epoch used to derive the shuffle seed for the next iteration."""
        self.epoch = int(epoch)

    def update_groups(self, audio_indices: List[int], non_audio_indices: List[int]) -> None:
        """Replace both index pools (e.g. after the dataset re-buckets)."""
        self.audio_indices = list(audio_indices)
        self.non_audio_indices = list(non_audio_indices)

    def __len__(self) -> int:
        return len(self.audio_indices) + len(self.non_audio_indices)

    def __iter__(self):
        # Seed from (seed, epoch) for a fresh but reproducible order each epoch.
        # NOTE: epoch auto-increments per iteration so successive passes differ
        # even when the caller never invokes set_epoch().
        rng = random.Random(self.seed + self.epoch)
        self.epoch += 1

        audio_pool = self.audio_indices.copy()
        other_pool = self.non_audio_indices.copy()
        rng.shuffle(audio_pool)
        rng.shuffle(other_pool)

        audio_pos = 0
        other_pos = 0
        grand_total = len(audio_pool) + len(other_pool)
        schedule: List[int] = []

        while (audio_pos + other_pos) < grand_total:
            slots = min(self.accumulation_steps, grand_total - (audio_pos + other_pos))
            # Reserve the audio quota first, bounded by slot count and availability.
            audio_take = min(self.min_audio_batches_per_accum, slots, len(audio_pool) - audio_pos)

            # If the non-audio pool cannot fill the remaining slots, top up with
            # extra audio indices instead.
            shortfall = (slots - audio_take) - (len(other_pool) - other_pos)
            if shortfall > 0:
                audio_take = min(slots, audio_take + shortfall, len(audio_pool) - audio_pos)

            other_take = slots - audio_take
            window = audio_pool[audio_pos : audio_pos + audio_take] + other_pool[other_pos : other_pos + other_take]
            audio_pos += audio_take
            other_pos += other_take

            # Backfill still-empty slots from whichever pool has leftovers.
            deficit = slots - len(window)
            if deficit > 0 and audio_pos < len(audio_pool):
                extra = min(deficit, len(audio_pool) - audio_pos)
                window.extend(audio_pool[audio_pos : audio_pos + extra])
                audio_pos += extra
                deficit -= extra
            if deficit > 0 and other_pos < len(other_pool):
                extra = min(deficit, len(other_pool) - other_pos)
                window.extend(other_pool[other_pos : other_pos + extra])
                other_pos += extra

            # Shuffle within the window so audio batches are not always first.
            rng.shuffle(window)
            schedule.extend(window)

        return iter(schedule)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class AudioProbabilityIndexSampler(torch.utils.data.Sampler[int]):
    """Opt-in sampler that interleaves audio/non-audio pools by a biased coin flip.

    While both pools still have items, each slot is filled from the audio pool
    with probability ``audio_batch_probability``; once one pool runs dry, the
    remainder of the other pool is appended in its shuffled order.
    """

    def __init__(
        self,
        *,
        audio_indices: List[int],
        non_audio_indices: List[int],
        audio_batch_probability: float,
        seed: int,
    ) -> None:
        if not (0.0 <= audio_batch_probability <= 1.0):
            raise ValueError(
                f"audio_batch_probability must be in [0, 1], got {audio_batch_probability}"
            )

        self.audio_indices = list(audio_indices)
        self.non_audio_indices = list(non_audio_indices)
        self.audio_batch_probability = float(audio_batch_probability)
        self.seed = int(seed)
        self.epoch = 0

    def set_epoch(self, epoch: int) -> None:
        """Pin the epoch used to derive the shuffle seed for the next iteration."""
        self.epoch = int(epoch)

    def update_groups(self, audio_indices: List[int], non_audio_indices: List[int]) -> None:
        """Replace both index pools (e.g. after the dataset re-buckets)."""
        self.audio_indices = list(audio_indices)
        self.non_audio_indices = list(non_audio_indices)

    def __len__(self) -> int:
        return len(self.audio_indices) + len(self.non_audio_indices)

    def __iter__(self):
        # Seed from (seed, epoch); epoch auto-increments so repeated passes
        # differ even without set_epoch().
        rng = random.Random(self.seed + self.epoch)
        self.epoch += 1

        audio_pool = self.audio_indices.copy()
        other_pool = self.non_audio_indices.copy()
        rng.shuffle(audio_pool)
        rng.shuffle(other_pool)

        schedule: List[int] = []
        audio_pos = 0
        other_pos = 0

        # Biased coin flip per slot while both pools have items left.
        while audio_pos < len(audio_pool) and other_pos < len(other_pool):
            if rng.random() < self.audio_batch_probability:
                schedule.append(audio_pool[audio_pos])
                audio_pos += 1
            else:
                schedule.append(other_pool[other_pos])
                other_pos += 1

        # One pool is exhausted: drain the other verbatim (no further RNG draws).
        schedule.extend(audio_pool[audio_pos:])
        schedule.extend(other_pool[other_pos:])
        return iter(schedule)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def sync_dataset_group_epoch_without_loading(dataset_group, target_epoch: int, logger=None) -> None:
    """Advance each child dataset's epoch counter to *target_epoch*, replaying
    one bucket shuffle per skipped epoch, without loading any sample data.

    Datasets lacking a ``current_epoch`` attribute or a callable
    ``shuffle_buckets`` are skipped. Moving the epoch backwards only logs a
    warning (when *logger* is given) and snaps the counter.
    """
    children = getattr(dataset_group, "datasets", None)
    if children is None:
        return

    for ds in children:
        epoch_now = getattr(ds, "current_epoch", None)
        shuffler = getattr(ds, "shuffle_buckets", None)
        if epoch_now is None or not callable(shuffler):
            continue

        if target_epoch > epoch_now:
            # Replay one shuffle per skipped epoch so the bucket order matches
            # a normal epoch-by-epoch run.
            for _ in range(target_epoch - epoch_now):
                ds.current_epoch += 1
                shuffler()
        elif target_epoch < epoch_now:
            if logger is not None:
                logger.warning(
                    "epoch is not incremented. current_epoch: %s, epoch: %s",
                    epoch_now,
                    target_epoch,
                )
            ds.current_epoch = target_epoch
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def _batch_has_audio_from_batch_manager(
|
| 193 |
+
batch_manager: Any,
|
| 194 |
+
local_idx: int,
|
| 195 |
+
exists_cache: Dict[str, bool],
|
| 196 |
+
) -> bool:
|
| 197 |
+
bucket_reso, batch_idx = batch_manager.bucket_batch_indices[local_idx]
|
| 198 |
+
bucket = batch_manager.buckets[bucket_reso]
|
| 199 |
+
start = batch_idx * batch_manager.batch_size
|
| 200 |
+
end = min(start + batch_manager.batch_size, len(bucket))
|
| 201 |
+
|
| 202 |
+
for item_info in bucket[start:end]:
|
| 203 |
+
audio_cache_path = getattr(item_info, "audio_latent_cache_path", None)
|
| 204 |
+
if not audio_cache_path:
|
| 205 |
+
continue
|
| 206 |
+
|
| 207 |
+
exists = exists_cache.get(audio_cache_path)
|
| 208 |
+
if exists is None:
|
| 209 |
+
exists = os.path.exists(audio_cache_path)
|
| 210 |
+
exists_cache[audio_cache_path] = exists
|
| 211 |
+
if exists:
|
| 212 |
+
return True
|
| 213 |
+
|
| 214 |
+
return False
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def split_concat_indices_by_audio(dataset_group) -> tuple[List[int], List[int]]:
    """Partition the global batch indices of a concatenated dataset group into
    ``(audio_indices, non_audio_indices)``.

    Datasets without a ``batch_manager`` contribute all of their indices to the
    non-audio list; a group without a ``datasets`` attribute is treated as
    entirely non-audio.
    """
    children = getattr(dataset_group, "datasets", None)
    if children is None:
        return [], list(range(len(dataset_group)))

    with_audio: List[int] = []
    without_audio: List[int] = []
    offset = 0
    # Shared across all datasets so repeated cache paths are stat'ed once.
    stat_cache: Dict[str, bool] = {}

    for ds in children:
        count = len(ds)
        manager = getattr(ds, "batch_manager", None)

        if manager is None:
            # Cannot inspect items without a batch manager; treat as non-audio.
            without_audio.extend(range(offset, offset + count))
        else:
            for local in range(count):
                if _batch_has_audio_from_batch_manager(manager, local, stat_cache):
                    with_audio.append(offset + local)
                else:
                    without_audio.append(offset + local)

        offset += count

    return with_audio, without_audio
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def build_audio_sampler(
    *,
    dataset_group,
    gradient_accumulation_steps: int,
    min_audio_batches_per_accum: int = 0,
    audio_batch_probability: Optional[float] = None,
    seed: int = 0,
) -> tuple[Optional[torch.utils.data.Sampler[int]], Optional[str], Dict[str, Any]]:
    """Build an opt-in audio-aware sampler.

    Returns:
        (sampler, mode, stats)
        - sampler: None when both controls are disabled
        - mode: "quota", "probability", or None
        - stats: includes audio/non-audio counts and effective control values
    """
    stats: Dict[str, Any] = {}
    quota = int(min_audio_batches_per_accum or 0)

    if audio_batch_probability is not None:
        audio_batch_probability = float(audio_batch_probability)
        if not (0.0 <= audio_batch_probability <= 1.0):
            raise ValueError(f"audio_batch_probability must be in [0, 1], got {audio_batch_probability}")

    # The two controls drive mutually exclusive samplers.
    if quota > 0 and audio_batch_probability is not None:
        raise ValueError(
            "--min_audio_batches_per_accum and --audio_batch_probability are mutually exclusive. "
            "Set only one of them."
        )

    # Neither control enabled: fall back to the default DataLoader behavior.
    if quota <= 0 and audio_batch_probability is None:
        return None, None, stats

    if quota > 0 and quota > int(gradient_accumulation_steps):
        raise ValueError(
            "min_audio_batches_per_accum must be <= gradient_accumulation_steps "
            f"(got {quota} > {gradient_accumulation_steps})"
        )

    audio_idx, other_idx = split_concat_indices_by_audio(dataset_group)
    if len(audio_idx) == 0:
        if quota > 0:
            raise ValueError(
                "--min_audio_batches_per_accum is set, but no audio-bearing batches were found in the training dataset."
            )
        raise ValueError("--audio_batch_probability is set, but no audio-bearing batches were found in the training dataset.")

    stats["audio_batches"] = len(audio_idx)
    stats["non_audio_batches"] = len(other_idx)

    if quota > 0:
        quota_sampler = AudioQuotaIndexSampler(
            audio_indices=audio_idx,
            non_audio_indices=other_idx,
            accumulation_steps=int(gradient_accumulation_steps),
            min_audio_batches_per_accum=quota,
            seed=int(seed),
        )
        stats["min_audio_batches_per_accum"] = quota
        stats["accumulation_steps"] = int(gradient_accumulation_steps)
        return quota_sampler, "quota", stats

    prob_sampler = AudioProbabilityIndexSampler(
        audio_indices=audio_idx,
        non_audio_indices=other_idx,
        audio_batch_probability=float(audio_batch_probability),
        seed=int(seed),
    )
    stats["audio_batch_probability"] = float(audio_batch_probability)
    return prob_sampler, "probability", stats
|
src/musubi_tuner/dataset/config_utils.py
ADDED
|
@@ -0,0 +1,703 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
from dataclasses import (
|
| 3 |
+
asdict,
|
| 4 |
+
dataclass,
|
| 5 |
+
fields,
|
| 6 |
+
)
|
| 7 |
+
import functools
|
| 8 |
+
import random
|
| 9 |
+
from textwrap import dedent, indent
|
| 10 |
+
import json
|
| 11 |
+
from datetime import datetime, timezone
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
|
| 14 |
+
from typing import List, Optional, Sequence, Tuple, Union, TYPE_CHECKING
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from multiprocessing.sharedctypes import Synchronized
|
| 18 |
+
|
| 19 |
+
SharedEpoch = Optional["Synchronized[int]"]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
import toml
|
| 23 |
+
import voluptuous
|
| 24 |
+
from voluptuous import Any, ExactSequence, MultipleInvalid, Object, Optional as VOptional, Schema
|
| 25 |
+
|
| 26 |
+
from musubi_tuner.dataset.image_video_dataset import DatasetGroup, ImageDataset, VideoDataset, AudioDataset
|
| 27 |
+
|
| 28 |
+
import logging
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)
|
| 31 |
+
logging.basicConfig(level=logging.INFO)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
class BaseDatasetParams:
    # Parameters shared by image / video / audio dataset configurations.
    resolution: Tuple[int, int] = (960, 544)  # target resolution as (width, height) — TODO confirm axis order against consumers
    enable_bucket: bool = False  # enable aspect-ratio bucketing
    bucket_no_upscale: bool = False
    caption_extension: Optional[str] = None  # file extension of caption sidecar files
    batch_size: int = 1
    num_repeats: int = 1  # how many times each item is repeated per epoch
    video_loss_weight: Optional[float] = None
    audio_loss_weight: Optional[float] = None
    cache_directory: Optional[str] = None  # where latent/text caches are stored
    reference_cache_directory: Optional[str] = None
    reference_audio_cache_directory: Optional[str] = None
    separate_audio_buckets: bool = False
    cache_only: bool = False
    debug_dataset: bool = False  # set from argparse, not from the user TOML
    architecture: str = "no_default"  # short style like "hv" or "wan"
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@dataclass
class ImageDatasetParams(BaseDatasetParams):
    # Image dataset source: exactly one of directory or JSONL listing is
    # expected by the blueprint generator — TODO confirm against caller.
    image_directory: Optional[str] = None
    image_jsonl_file: Optional[str] = None
    control_directory: Optional[str] = None
    multiple_target: Optional[bool] = False

    # FramePack dependent parameters
    fp_latent_window_size: Optional[int] = 9
    fp_1f_clean_indices: Optional[Sequence[int]] = None
    fp_1f_target_index: Optional[int] = None
    fp_1f_no_post: Optional[bool] = False

    no_resize_control: Optional[bool] = False  # if True, control images are not resized to target resolution
    control_resolution: Optional[Tuple[int, int]] = None  # if set, control images are resized to this resolution
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@dataclass
class VideoDatasetParams(BaseDatasetParams):
    # Video dataset source: directory or JSONL listing.
    video_directory: Optional[str] = None
    video_jsonl_file: Optional[str] = None
    control_directory: Optional[str] = None
    reference_directory: Optional[str] = None
    reference_audio_directory: Optional[str] = None
    target_frames: Sequence[int] = (1,)  # frame counts to extract per clip
    frame_extraction: Optional[str] = "head"  # extraction strategy name — see dataset implementation for valid values
    frame_stride: Optional[int] = 1
    frame_sample: Optional[int] = 1
    max_frames: Optional[int] = 129
    source_fps: Optional[float] = None  # fps of the source video; None presumably means "use as-is" — TODO confirm
    target_fps: Optional[float] = None

    # FramePack dependent parameters
    fp_latent_window_size: Optional[int] = 9
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@dataclass
class AudioDatasetParams(BaseDatasetParams):
    # Audio dataset source: directory or JSONL listing.
    audio_directory: Optional[str] = None
    audio_jsonl_file: Optional[str] = None
    audio_bucket_strategy: str = "pad"  # "pad" (default) or "truncate"
    audio_bucket_interval: float = 2.0  # bucket step in seconds
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@dataclass
class DatasetBlueprint:
    # A single dataset's resolved configuration, tagged with its kind.
    dataset_type: str  # "image", "video", "audio"
    params: Union[ImageDatasetParams, VideoDatasetParams, AudioDatasetParams]
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@dataclass
class DatasetGroupBlueprint:
    # Ordered collection of dataset blueprints forming one training group.
    datasets: Sequence[DatasetBlueprint]
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
@dataclass
class Blueprint:
    # Top-level result of config generation: a single dataset group.
    dataset_group: DatasetGroupBlueprint
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class ConfigSanitizer:
    """Validate and normalize user TOML config and argparse results via
    voluptuous schemas.

    Dataset entries are dispatched to the audio / video / image schema based
    on which source keys ("audio_directory", "video_directory", ...) they
    contain.
    """

    # @curry
    @staticmethod
    def __validate_and_convert_twodim(klass, value: Sequence) -> Tuple:
        """Validate *value* as a 2-element sequence of *klass* and return it as a tuple."""
        Schema(ExactSequence([klass, klass]))(value)
        return tuple(value)

    # @curry
    @staticmethod
    def __validate_and_convert_scalar_or_twodim(klass, value: Union[float, Sequence]) -> Tuple:
        """Accept a scalar (broadcast to a pair) or a 2-element sequence; return a tuple."""
        Schema(Any(klass, ExactSequence([klass, klass])))(value)
        try:
            Schema(klass)(value)
            return (value, value)
        except voluptuous.Invalid:
            # FIX: was a bare `except:` which would also swallow KeyboardInterrupt
            # and unrelated bugs; only a validation failure means "not a scalar".
            return ConfigSanitizer.__validate_and_convert_twodim(klass, value)

    # datasets schema
    DATASET_ASCENDABLE_SCHEMA = {
        "caption_extension": str,
        "batch_size": int,
        "num_repeats": int,
        "resolution": functools.partial(__validate_and_convert_scalar_or_twodim.__func__, int),
        "enable_bucket": bool,
        "bucket_no_upscale": bool,
        "video_loss_weight": float,
        "audio_loss_weight": float,
        "cache_directory": str,
        "reference_cache_directory": str,
        "reference_audio_cache_directory": str,
        "separate_audio_buckets": bool,
        "cache_only": bool,
    }
    IMAGE_DATASET_DISTINCT_SCHEMA = {
        "image_directory": str,
        "image_jsonl_file": str,
        "control_directory": str,
        "multiple_target": bool,
        "fp_latent_window_size": int,
        "fp_1f_clean_indices": [int],
        "fp_1f_target_index": int,
        "fp_1f_no_post": bool,
        "no_resize_control": bool,
        "control_resolution": functools.partial(__validate_and_convert_scalar_or_twodim.__func__, int),
    }
    AUDIO_DATASET_DISTINCT_SCHEMA = {
        "audio_directory": str,
        "audio_jsonl_file": str,
        "audio_bucket_strategy": str,
        "audio_bucket_interval": float,
    }
    VIDEO_DATASET_DISTINCT_SCHEMA = {
        "video_directory": str,
        "video_jsonl_file": str,
        "control_directory": str,
        "reference_directory": str,
        "reference_audio_directory": str,
        "target_frames": [int],
        "frame_extraction": str,
        "frame_stride": int,
        "frame_sample": int,
        "max_frames": int,
        "source_fps": float,
        "target_fps": float,
        "fp_latent_window_size": int,
    }

    # options handled by argparse but not handled by user config
    ARGPARSE_SPECIFIC_SCHEMA = {
        "debug_dataset": bool,
    }

    def __init__(self) -> None:
        self.image_dataset_schema = self.__merge_dict(
            self.DATASET_ASCENDABLE_SCHEMA,
            self.IMAGE_DATASET_DISTINCT_SCHEMA,
        )
        self.audio_dataset_schema = self.__merge_dict(
            self.DATASET_ASCENDABLE_SCHEMA,
            self.AUDIO_DATASET_DISTINCT_SCHEMA,
        )
        self.video_dataset_schema = self.__merge_dict(
            self.DATASET_ASCENDABLE_SCHEMA,
            self.VIDEO_DATASET_DISTINCT_SCHEMA,
        )

        def validate_flex_dataset(dataset_config: dict):
            # Dispatch on the source keys present: audio wins over video,
            # and image is the fallback when neither is present.
            if "audio_directory" in dataset_config or "audio_jsonl_file" in dataset_config:
                return Schema(self.audio_dataset_schema)(dataset_config)
            if "video_directory" in dataset_config or "video_jsonl_file" in dataset_config:
                return Schema(self.video_dataset_schema)(dataset_config)
            else:
                return Schema(self.image_dataset_schema)(dataset_config)

        self.dataset_schema = validate_flex_dataset

        self.general_schema = self.__merge_dict(
            self.DATASET_ASCENDABLE_SCHEMA,
        )
        self.user_config_validator = Schema(
            {
                "general": self.general_schema,
                "datasets": [self.dataset_schema],
                VOptional("validation_datasets"): [self.dataset_schema],
            }
        )
        self.argparse_schema = self.__merge_dict(
            self.ARGPARSE_SPECIFIC_SCHEMA,
        )
        self.argparse_config_validator = Schema(Object(self.argparse_schema), extra=voluptuous.ALLOW_EXTRA)

    def sanitize_user_config(self, user_config: dict) -> dict:
        """Validate the parsed TOML user config; log and re-raise on failure."""
        try:
            return self.user_config_validator(user_config)
        except MultipleInvalid:
            # TODO: clarify the error message
            logger.error("Invalid user config / ユーザ設定の形式が正しくないようです")
            raise

    # NOTE: In nature, argument parser result is not needed to be sanitize
    # However this will help us to detect program bug
    def sanitize_argparse_namespace(self, argparse_namespace: argparse.Namespace) -> argparse.Namespace:
        """Validate the argparse namespace; a failure here indicates a program bug."""
        try:
            return self.argparse_config_validator(argparse_namespace)
        except MultipleInvalid:
            # XXX: this should be a bug
            logger.error(
                "Invalid cmdline parsed arguments. This should be a bug. / コマンドラインのパース結果が正しくないようです。プログラムのバグの可能性が高いです。"
            )
            raise

    # NOTE: value would be overwritten by latter dict if there is already the same key
    @staticmethod
    def __merge_dict(*dict_list: dict) -> dict:
        """Shallow-merge the given dicts left to right into a new dict."""
        merged = {}
        for schema in dict_list:
            # merged |= schema
            for k, v in schema.items():
                merged[k] = v
        return merged
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
class BlueprintGenerator:
    """Turns a sanitized user config + CLI args into a ``Blueprint`` of dataset params.

    For every dataset entry, each parameter value is resolved through a fallback
    chain: dataset config -> general config -> CLI args -> runtime params ->
    dataclass default.
    """

    # Maps a blueprint dataclass field name to a differently-named config option.
    # Empty here: field names and config option names coincide.
    BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME = {}

    def __init__(self, sanitizer: ConfigSanitizer):
        self.sanitizer = sanitizer

    # runtime_params is for parameters which is only configurable on runtime, such as tokenizer
    def generate(self, user_config: dict, argparse_namespace: argparse.Namespace, **runtime_params) -> Blueprint:
        """Sanitize inputs and build one DatasetBlueprint per configured dataset."""
        sanitized_user_config = self.sanitizer.sanitize_user_config(user_config)
        sanitized_argparse_namespace = self.sanitizer.sanitize_argparse_namespace(argparse_namespace)

        # Drop None-valued CLI args so they don't shadow config values in the fallback chain.
        argparse_config = {k: v for k, v in vars(sanitized_argparse_namespace).items() if v is not None}
        general_config = sanitized_user_config.get("general", {})

        dataset_blueprints = []
        for dataset_config in sanitized_user_config.get("datasets", []):
            # Dataset kind is inferred from which source keys are present;
            # audio wins over image, anything else is treated as video.
            is_audio_dataset = "audio_directory" in dataset_config or "audio_jsonl_file" in dataset_config
            is_image_dataset = "image_directory" in dataset_config or "image_jsonl_file" in dataset_config
            if is_audio_dataset:
                dataset_params_klass = AudioDatasetParams
                dataset_type = "audio"
            elif is_image_dataset:
                dataset_params_klass = ImageDatasetParams
                dataset_type = "image"
            else:
                dataset_params_klass = VideoDatasetParams
                dataset_type = "video"

            params = self.generate_params_by_fallbacks(
                dataset_params_klass, [dataset_config, general_config, argparse_config, runtime_params]
            )
            dataset_blueprints.append(DatasetBlueprint(dataset_type, params))

        dataset_group_blueprint = DatasetGroupBlueprint(dataset_blueprints)

        return Blueprint(dataset_group_blueprint)

    @staticmethod
    def generate_params_by_fallbacks(param_klass, fallbacks: Sequence[dict]):
        """Instantiate *param_klass*, resolving each field through *fallbacks* in order.

        A field's value is the first non-None hit across the fallback dicts,
        else the dataclass default.
        """
        name_map = BlueprintGenerator.BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME
        search_value = BlueprintGenerator.search_value
        default_params = asdict(param_klass())
        param_names = default_params.keys()

        params = {name: search_value(name_map.get(name, name), fallbacks, default_params.get(name)) for name in param_names}

        return param_klass(**params)

    @staticmethod
    def search_value(key: str, fallbacks: Sequence[dict], default_value=None):
        """Return the first non-None value of *key* found across *fallbacks*, else *default_value*."""
        for cand in fallbacks:
            value = cand.get(key)
            if value is not None:
                return value

        return default_value
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
# if training is True, it will return a dataset group for training, otherwise for caching
def generate_dataset_group_by_blueprint(
    dataset_group_blueprint: DatasetGroupBlueprint,
    training: bool = False,
    num_timestep_buckets: Optional[int] = None,
    shared_epoch: SharedEpoch = None,
) -> DatasetGroup:
    """Instantiate the concrete datasets described by *dataset_group_blueprint*.

    Instantiates Audio/Image/VideoDataset objects, verifies cache-directory
    uniqueness, logs a per-dataset summary, seeds all datasets identically,
    and (when *training*) prepares each dataset for training.

    Raises:
        ValueError: if two datasets resolve to the same cache directory.
    """
    datasets: List[Union[ImageDataset, VideoDataset, AudioDataset]] = []

    for dataset_blueprint in dataset_group_blueprint.datasets:
        if dataset_blueprint.dataset_type == "audio":
            dataset_klass = AudioDataset
        elif dataset_blueprint.dataset_type == "image":
            dataset_klass = ImageDataset
        else:
            dataset_klass = VideoDataset

        dataset = dataset_klass(**asdict(dataset_blueprint.params))
        datasets.append(dataset)

    # assertion: cache dirs must be unique, since latents/text caches are keyed by path
    cache_directories = [dataset.cache_directory for dataset in datasets]
    num_of_unique_cache_directories = len(set(cache_directories))
    if num_of_unique_cache_directories != len(cache_directories):
        raise ValueError(
            "cache directory should be unique for each dataset (note that cache directory is image/video directory if not specified)"
            + " / cache directory は各データセットごとに異なる必要があります(指定されていない場合はimage/video directoryが使われるので注意)"
        )

    # print info
    info = ""
    for i, dataset in enumerate(datasets):
        is_image_dataset = isinstance(dataset, ImageDataset)
        is_audio_dataset = isinstance(dataset, AudioDataset)
        # Common fields first; getattr is used for fields that only exist on some dataset types.
        info += dedent(
            f"""\
            [Dataset {i}]
              dataset_type: {"audio" if is_audio_dataset else "image" if is_image_dataset else "video"}
              resolution: {dataset.resolution}
              batch_size: {dataset.batch_size}
              num_repeats: {dataset.num_repeats}
              video_loss_weight: {getattr(dataset, "video_loss_weight", None)}
              audio_loss_weight: {getattr(dataset, "audio_loss_weight", None)}
              caption_extension: "{dataset.caption_extension}"
              enable_bucket: {dataset.enable_bucket}
              bucket_no_upscale: {dataset.bucket_no_upscale}
              separate_audio_buckets: {getattr(dataset, "separate_audio_buckets", False)}
              cache_only: {getattr(dataset, "cache_only", False)}
              cache_directory: "{dataset.cache_directory}"
              debug_dataset: {dataset.debug_dataset}
        """
        )

        # Type-specific fields, indented under the common block.
        if is_audio_dataset:
            info += indent(
                dedent(
                    f"""\
                    audio_directory: "{dataset.audio_directory}"
                    audio_jsonl_file: "{dataset.audio_jsonl_file}"
                    audio_bucket_strategy: {getattr(dataset, "audio_bucket_strategy", "pad")}
                    audio_bucket_interval: {getattr(dataset, "audio_bucket_interval", 2.0)}
                \n"""
                ),
                "    ",
            )
        elif is_image_dataset:
            info += indent(
                dedent(
                    f"""\
                    image_directory: "{dataset.image_directory}"
                    image_jsonl_file: "{dataset.image_jsonl_file}"
                    control_directory: "{dataset.control_directory}"
                    multiple_target: {dataset.multiple_target}
                    fp_latent_window_size: {dataset.fp_latent_window_size}
                    fp_1f_clean_indices: {dataset.fp_1f_clean_indices}
                    fp_1f_target_index: {dataset.fp_1f_target_index}
                    fp_1f_no_post: {dataset.fp_1f_no_post}
                    no_resize_control: {dataset.no_resize_control}
                    control_resolution: {dataset.control_resolution}
                \n"""
                ),
                "    ",
            )
        else:
            info += indent(
                dedent(
                    f"""\
                    video_directory: "{dataset.video_directory}"
                    video_jsonl_file: "{dataset.video_jsonl_file}"
                    control_directory: "{dataset.control_directory}"
                    reference_directory: "{getattr(dataset, 'reference_directory', None)}"
                    reference_audio_directory: "{getattr(dataset, 'reference_audio_directory', None)}"
                    reference_audio_cache_directory: "{getattr(dataset, 'reference_audio_cache_directory', None)}"
                    target_frames: {dataset.target_frames}
                    frame_extraction: {dataset.frame_extraction}
                    frame_stride: {dataset.frame_stride}
                    frame_sample: {dataset.frame_sample}
                    max_frames: {dataset.max_frames}
                    source_fps: {dataset.source_fps}
                    target_fps: {getattr(dataset, "target_fps", None)}
                    fp_latent_window_size: {dataset.fp_latent_window_size}
                \n"""
                ),
                "    ",
            )
    logger.info(f"{info}")

    # make buckets first because it determines the length of dataset
    # and set the same seed for all datasets
    seed = random.randint(0, 2**31)  # actual seed is seed + epoch_no
    for i, dataset in enumerate(datasets):
        # logger.info(f"[Dataset {i}]")
        dataset.set_seed(seed, shared_epoch)
        if training:
            dataset.prepare_for_training(num_timestep_buckets=num_timestep_buckets)

    return DatasetGroup(datasets)
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def _manifest_params_with_cache_only(dataset_type: str, params: dict) -> dict:
|
| 433 |
+
params = dict(params)
|
| 434 |
+
|
| 435 |
+
if not params.get("cache_directory"):
|
| 436 |
+
if dataset_type == "audio":
|
| 437 |
+
params["cache_directory"] = params.get("audio_directory")
|
| 438 |
+
elif dataset_type == "image":
|
| 439 |
+
params["cache_directory"] = params.get("image_directory")
|
| 440 |
+
else:
|
| 441 |
+
params["cache_directory"] = params.get("video_directory")
|
| 442 |
+
|
| 443 |
+
if not params.get("cache_directory"):
|
| 444 |
+
raise ValueError(
|
| 445 |
+
f"cache_directory is required to create a cache-only manifest for {dataset_type} datasets. "
|
| 446 |
+
"Set cache_directory in dataset config."
|
| 447 |
+
)
|
| 448 |
+
|
| 449 |
+
params["cache_only"] = True
|
| 450 |
+
|
| 451 |
+
# Strip source references to guarantee source-free training from manifest.
|
| 452 |
+
if dataset_type == "audio":
|
| 453 |
+
params["audio_directory"] = None
|
| 454 |
+
params["audio_jsonl_file"] = None
|
| 455 |
+
elif dataset_type == "image":
|
| 456 |
+
params["image_directory"] = None
|
| 457 |
+
params["image_jsonl_file"] = None
|
| 458 |
+
params["control_directory"] = None
|
| 459 |
+
params["multiple_target"] = False
|
| 460 |
+
else:
|
| 461 |
+
params["video_directory"] = None
|
| 462 |
+
params["video_jsonl_file"] = None
|
| 463 |
+
params["control_directory"] = None
|
| 464 |
+
params["reference_directory"] = None
|
| 465 |
+
params["reference_audio_directory"] = None
|
| 466 |
+
|
| 467 |
+
return params
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def _blueprint_to_manifest_entries(dataset_group_blueprint: DatasetGroupBlueprint) -> list[dict]:
    """Convert every dataset blueprint into a cache-only manifest entry dict."""
    return [
        {
            "dataset_type": bp.dataset_type,
            "params": _manifest_params_with_cache_only(bp.dataset_type, asdict(bp.params)),
        }
        for bp in dataset_group_blueprint.datasets
    ]
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
def create_cache_only_dataset_manifest(
    user_config: dict,
    argparse_namespace: argparse.Namespace,
    architecture: str,
    source_dataset_config: Optional[Union[str, Path]] = None,
) -> dict:
    """Build a cache-only dataset manifest dict from a user config.

    Generates blueprints for the training (and optional validation) datasets,
    converts them into source-free, cache-only entries, and wraps them with
    format/version/architecture metadata plus a UTC timestamp.

    Args:
        user_config: raw user config dict (as returned by load_user_config).
        argparse_namespace: parsed CLI arguments used for parameter fallbacks.
        architecture: architecture tag injected into blueprint generation and
            recorded in the manifest.
        source_dataset_config: optional path of the originating config file,
            recorded verbatim for provenance.
    """
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    blueprint = blueprint_generator.generate(user_config, argparse_namespace, architecture=architecture)

    manifest: dict = {
        "format": "musubi_tuner_dataset_manifest",
        "version": 1,
        "architecture": architecture,
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "datasets": _blueprint_to_manifest_entries(blueprint.dataset_group),
    }

    if source_dataset_config is not None:
        manifest["source_dataset_config"] = str(source_dataset_config)

    # Validation datasets are re-run through the same pipeline as a standalone config.
    if user_config.get("validation_datasets"):
        validation_user_config = {
            "general": user_config.get("general", {}),
            "datasets": user_config.get("validation_datasets", []),
        }
        validation_blueprint = blueprint_generator.generate(
            validation_user_config,
            argparse_namespace,
            architecture=architecture,
        )
        manifest["validation_datasets"] = _blueprint_to_manifest_entries(validation_blueprint.dataset_group)

    return manifest
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
def save_dataset_manifest(manifest: dict, manifest_path: Union[str, Path]) -> Path:
    """Write *manifest* as pretty-printed JSON to *manifest_path*.

    Parent directories are created as needed. Returns the target as a Path.
    """
    path = Path(manifest_path)
    # mkdir(exist_ok=True) is race-free on its own; the previous
    # `if path.parent and not path.parent.exists()` pre-check was redundant
    # (path.parent is always truthy) and a TOCTOU hazard.
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        json.dump(manifest, f, indent=2)
    return path
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def load_dataset_manifest(manifest_path: Union[str, Path]) -> dict:
    """Read a dataset manifest JSON file, validate its structure, and return it.

    Raises:
        ValueError: if the file is missing, unparseable, not a dict,
            has an unsupported version, or lacks a ``datasets`` list.
    """
    path = Path(manifest_path)
    if not path.is_file():
        raise ValueError(f"dataset manifest not found: {path}")

    try:
        with open(path, "r", encoding="utf-8") as f:
            manifest = json.load(f)
    except Exception as e:
        raise ValueError(f"failed to load dataset manifest: {path}") from e

    # Structural sanity checks before anything consumes the manifest.
    if not isinstance(manifest, dict):
        raise ValueError(f"invalid dataset manifest format: {path}")
    version = manifest.get("version")
    if version != 1:
        raise ValueError(f"unsupported dataset manifest version: {version}")
    if not isinstance(manifest.get("datasets"), list):
        raise ValueError(f"dataset manifest must contain a datasets array: {path}")

    return manifest
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
def _normalize_manifest_params(params: dict) -> dict:
|
| 549 |
+
normalized = dict(params)
|
| 550 |
+
if isinstance(normalized.get("resolution"), list):
|
| 551 |
+
normalized["resolution"] = tuple(normalized["resolution"])
|
| 552 |
+
if isinstance(normalized.get("control_resolution"), list):
|
| 553 |
+
normalized["control_resolution"] = tuple(normalized["control_resolution"])
|
| 554 |
+
if isinstance(normalized.get("target_frames"), list):
|
| 555 |
+
normalized["target_frames"] = tuple(normalized["target_frames"])
|
| 556 |
+
return normalized
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def _manifest_entries_to_blueprint(entries: Sequence[dict], default_architecture: Optional[str] = None) -> DatasetGroupBlueprint:
    """Reconstruct a DatasetGroupBlueprint from manifest entry dicts.

    Each entry must carry a valid ``dataset_type`` and a ``params`` dict.
    Params are normalized (list -> tuple), forced to cache-only, given the
    manifest's architecture when unset, and filtered down to the fields the
    matching params dataclass actually declares.

    Raises:
        ValueError: on an unknown dataset_type or non-dict params.
    """
    dataset_blueprints: list[DatasetBlueprint] = []
    for i, entry in enumerate(entries):
        dataset_type = entry.get("dataset_type")
        params = entry.get("params")
        if dataset_type not in {"audio", "image", "video"}:
            raise ValueError(f"invalid dataset_type in manifest entry {i}: {dataset_type}")
        if not isinstance(params, dict):
            raise ValueError(f"manifest entry {i} has invalid params")

        if dataset_type == "audio":
            dataset_params_klass = AudioDatasetParams
        elif dataset_type == "image":
            dataset_params_klass = ImageDatasetParams
        else:
            dataset_params_klass = VideoDatasetParams

        normalized_params = _normalize_manifest_params(params)
        normalized_params["cache_only"] = True
        # "no_default" appears to be a sentinel for an unset architecture — TODO confirm against the params dataclass default.
        if default_architecture and normalized_params.get("architecture") in {None, "no_default"}:
            normalized_params["architecture"] = default_architecture

        # Drop keys the dataclass doesn't declare so old/forward manifests still load.
        valid_fields = {f.name for f in fields(dataset_params_klass)}
        filtered_params = {k: v for k, v in normalized_params.items() if k in valid_fields}
        dataset_params = dataset_params_klass(**filtered_params)
        dataset_blueprints.append(DatasetBlueprint(dataset_type, dataset_params))

    return DatasetGroupBlueprint(dataset_blueprints)
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
def generate_dataset_group_by_manifest(
    manifest: dict,
    split: str = "train",
    training: bool = False,
    num_timestep_buckets: Optional[int] = None,
    shared_epoch: SharedEpoch = None,
) -> Optional[DatasetGroup]:
    """Build a DatasetGroup from a loaded manifest for the requested split.

    Returns None when the split has no entries.

    Raises:
        ValueError: if *split* is neither "train" nor "validation".
    """
    if split not in {"train", "validation"}:
        raise ValueError(f"invalid manifest split: {split}")

    entries = manifest.get("datasets" if split == "train" else "validation_datasets", [])
    if not entries:
        return None

    dataset_group_blueprint = _manifest_entries_to_blueprint(
        entries, default_architecture=manifest.get("architecture")
    )
    return generate_dataset_group_by_blueprint(
        dataset_group_blueprint,
        training=training,
        num_timestep_buckets=num_timestep_buckets,
        shared_epoch=shared_epoch,
    )
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
def load_user_config(file: str) -> dict:
    """Load a dataset config from a ``.json`` or ``.toml`` file.

    Also migrates deprecated per-model config keys to their generic names in
    both the ``general`` section and each ``datasets`` entry (mutating the
    loaded dict in place), warning on every migration.

    Raises:
        ValueError: if the file is missing or has an unsupported extension.
    """
    file: Path = Path(file)
    if not file.is_file():
        raise ValueError(f"file not found / ファイルが見つかりません: {file}")

    if file.name.lower().endswith(".json"):
        try:
            with open(file, "r", encoding="utf-8") as f:
                config = json.load(f)
        except Exception:
            logger.error(
                f"Error on parsing JSON config file. Please check the format. / JSON 形式の設定ファイルの読み込みに失敗しました。文法が正しいか確認してください。: {file}"
            )
            raise
    elif file.name.lower().endswith(".toml"):
        try:
            config = toml.load(file)
        except Exception:
            logger.error(
                f"Error on parsing TOML config file. Please check the format. / TOML 形式の設定ファイルの読み込みに失敗しました。文法が正しいか確認してください。: {file}"
            )
            raise
    else:
        raise ValueError(f"not supported config file format / 対応していない設定ファイルの形式です: {file}")

    # Old model-specific key -> new generic key.
    deprecated_key_map = {
        "flux_kontext_no_resize_control": "no_resize_control",
        "qwen_image_edit_no_resize_control": "no_resize_control",
        "qwen_image_edit_control_resolution": "control_resolution",
    }

    def normalize_deprecated_keys(section: dict, section_name: str) -> None:
        # Rename deprecated keys in place; the new key wins if both are present.
        for old_key, new_key in deprecated_key_map.items():
            if old_key not in section:
                continue
            if new_key in section:
                logger.warning(
                    f"Deprecated config key '{old_key}' is ignored because '{new_key}' is already set in {section_name}."
                )
            else:
                section[new_key] = section[old_key]
                logger.warning(f"Deprecated config key '{old_key}' found in {section_name}; use '{new_key}' instead.")
            del section[old_key]

    general_config = config.get("general")
    if isinstance(general_config, dict):
        normalize_deprecated_keys(general_config, "general")

    datasets_config = config.get("datasets", [])
    if isinstance(datasets_config, list):
        for idx, dataset_config in enumerate(datasets_config):
            if isinstance(dataset_config, dict):
                normalize_deprecated_keys(dataset_config, f"datasets[{idx}]")

    return config
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
# for config test
if __name__ == "__main__":
    # First pass: pull out the positional dataset_config path, leaving any
    # remaining flags for the second parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_config")
    config_args, remain = parser.parse_known_args()

    # Second pass: parse the leftover flags relevant to dataset debugging.
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug_dataset", action="store_true")
    argparse_namespace = parser.parse_args(remain)

    logger.info("[argparse_namespace]")
    logger.info(f"{vars(argparse_namespace)}")

    user_config = load_user_config(config_args.dataset_config)

    logger.info("")
    logger.info("[user_config]")
    logger.info(f"{user_config}")

    sanitizer = ConfigSanitizer()
    sanitized_user_config = sanitizer.sanitize_user_config(user_config)

    logger.info("")
    logger.info("[sanitized_user_config]")
    logger.info(f"{sanitized_user_config}")

    blueprint = BlueprintGenerator(sanitizer).generate(user_config, argparse_namespace)

    logger.info("")
    logger.info("[blueprint]")
    logger.info(f"{blueprint}")

    # Exercise the full pipeline end-to-end: config -> blueprint -> datasets.
    dataset_group = generate_dataset_group_by_blueprint(blueprint.dataset_group)
|
src/musubi_tuner/dataset/dataset_config.md
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
This document has been moved to [`docs/dataset_config.md`](../../../docs/dataset_config.md).
|
| 2 |
+
|
| 3 |
+
このドキュメントは[`docs/dataset_config.md`](../../../docs/dataset_config.md)に移動しました。
|
src/musubi_tuner/dataset/image_video_dataset.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
src/musubi_tuner/flux/flux_models.py
ADDED
|
@@ -0,0 +1,1065 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# copy from FLUX repo: https://github.com/black-forest-labs/flux
|
| 2 |
+
# license: Apache-2.0 License
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import os
|
| 7 |
+
import time
|
| 8 |
+
from concurrent.futures import Future, ThreadPoolExecutor
|
| 9 |
+
from dataclasses import dataclass
|
| 10 |
+
from typing import Dict, List, Optional, Union
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from einops import rearrange
|
| 14 |
+
from torch import Tensor, nn
|
| 15 |
+
from torch.utils.checkpoint import checkpoint
|
| 16 |
+
|
| 17 |
+
from musubi_tuner.modules.custom_offloading_utils import ModelOffloader
|
| 18 |
+
from musubi_tuner.hunyuan_model.attention import attention as hunyuan_attention
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
import logging
|
| 22 |
+
|
| 23 |
+
from musubi_tuner.utils.model_utils import create_cpu_offloading_wrapper
|
| 24 |
+
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
logging.basicConfig(level=logging.INFO)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# USE_REENTRANT = True
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@dataclass
class FluxParams:
    """Hyperparameters describing a FLUX transformer model."""

    in_channels: int  # latent input channels
    vec_in_dim: int  # dimension of the pooled conditioning vector
    context_in_dim: int  # dimension of the text-context embeddings
    hidden_size: int  # transformer hidden width
    mlp_ratio: float  # MLP width as a multiple of hidden_size
    num_heads: int  # attention heads
    depth: int  # number of double-stream blocks
    depth_single_blocks: int  # number of single-stream blocks
    axes_dim: list[int]  # RoPE dims per positional axis; presumably sums to hidden_size // num_heads — TODO confirm
    theta: int  # RoPE frequency base
    qkv_bias: bool  # whether QKV projections use bias
    guidance_embed: bool  # whether the model embeds a guidance scale (dev variant)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# region autoencoder
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@dataclass
class AutoEncoderParams:
    """Hyperparameters describing the FLUX VAE (autoencoder)."""

    resolution: int  # training image resolution
    in_channels: int  # image input channels
    ch: int  # base channel count
    out_ch: int  # decoder output channels
    ch_mult: list[int]  # per-resolution channel multipliers
    num_res_blocks: int  # ResNet blocks per resolution level
    z_channels: int  # latent channels
    scale_factor: float  # latent scaling applied after shift
    shift_factor: float  # latent shift applied before scaling
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def swish(x: Tensor) -> Tensor:
    """Swish/SiLU activation: x * sigmoid(x).

    Delegates to torch's fused SiLU, which computes exactly
    x * torch.sigmoid(x) but in a single kernel.
    """
    return nn.functional.silu(x)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class AttnBlock(nn.Module):
    """VAE self-attention block over 2D feature maps, with residual connection.

    Single attention head: spatial positions (h*w) are the sequence dimension.
    """

    def __init__(self, in_channels: int):
        super().__init__()
        self.in_channels = in_channels

        self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)

        # 1x1 convs act as per-pixel linear projections for q/k/v and the output.
        self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1)

    def attention(self, h_: Tensor) -> Tensor:
        """Normalize, project to q/k/v, and run SDPA over flattened spatial positions."""
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # Flatten (h, w) into a sequence and add a singleton head dim for SDPA.
        b, c, h, w = q.shape
        q = rearrange(q, "b c h w -> b 1 (h w) c").contiguous()
        k = rearrange(k, "b c h w -> b 1 (h w) c").contiguous()
        v = rearrange(v, "b c h w -> b 1 (h w) c").contiguous()
        h_ = nn.functional.scaled_dot_product_attention(q, k, v)

        # Restore the (b, c, h, w) layout.
        return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)

    def forward(self, x: Tensor) -> Tensor:
        """Residual: x + proj_out(attention(x))."""
        return x + self.proj_out(self.attention(x))
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class ResnetBlock(nn.Module):
    """VAE residual block: two GroupNorm+swish+conv stages with a skip connection.

    When in/out channel counts differ, the skip path goes through a 1x1 conv.
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.in_channels = in_channels
        # out_channels=None means "same as input".
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if self.in_channels != self.out_channels:
            # 1x1 conv to match channel counts on the skip path.
            self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        h = x
        h = self.norm1(h)
        h = swish(h)
        h = self.conv1(h)

        h = self.norm2(h)
        h = swish(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            x = self.nin_shortcut(x)

        return x + h
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class Downsample(nn.Module):
|
| 129 |
+
def __init__(self, in_channels: int):
|
| 130 |
+
super().__init__()
|
| 131 |
+
# no asymmetric padding in torch conv, must do it ourselves
|
| 132 |
+
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
|
| 133 |
+
|
| 134 |
+
def forward(self, x: Tensor):
|
| 135 |
+
pad = (0, 1, 0, 1)
|
| 136 |
+
x = nn.functional.pad(x, pad, mode="constant", value=0)
|
| 137 |
+
x = self.conv(x)
|
| 138 |
+
return x
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class Upsample(nn.Module):
    """Double spatial resolution via nearest-neighbor interpolation followed by a 3x3 conv."""

    def __init__(self, in_channels: int):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Tensor):
        upsampled = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        return self.conv(upsampled)
+
|
| 151 |
+
|
| 152 |
+
class Encoder(nn.Module):
    """VAE encoder: conv stem, a res-block/downsample pyramid, an attention-equipped
    mid block, then a conv head emitting 2 * z_channels channels (consumed downstream
    as mean/log-variance by DiagonalGaussian).
    """

    def __init__(
        self,
        resolution: int,
        in_channels: int,
        ch: int,
        ch_mult: list[int],
        num_res_blocks: int,
        z_channels: int,
    ):
        super().__init__()
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # downsampling
        self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)

        curr_res = resolution
        # prepend 1 so level i reads ch * in_ch_mult[i] and writes ch * ch_mult[i]
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        block_in = self.ch
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()  # never populated here; kept for state-dict layout compatibility
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
                block_in = block_out
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                # downsample between all levels except the last (deepest) one
                down.downsample = Downsample(block_in)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)

        # end
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Tensor) -> Tensor:
        # downsampling: hs collects intermediate activations, last entry feeds the next stage
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1])
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
        # end
        h = self.norm_out(h)
        h = swish(h)
        h = self.conv_out(h)
        return h
+
|
| 225 |
+
|
| 226 |
+
class Decoder(nn.Module):
    """VAE decoder: conv-in from latent, attention-equipped mid block, then a
    res-block/upsample pyramid back to image space (out_ch channels).
    """

    def __init__(
        self,
        ch: int,
        out_ch: int,
        ch_mult: list[int],
        num_res_blocks: int,
        in_channels: int,
        resolution: int,
        z_channels: int,
    ):
        super().__init__()
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # total spatial downscale factor of the matching encoder
        self.ffactor = 2 ** (self.num_resolutions - 1)

        # compute in_ch_mult, block_in and curr_res at lowest res
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)

        # z to block_in
        self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)

        # upsampling: built deepest-level-first, inserted at index 0 so that
        # self.up[i_level] matches the encoder's level ordering
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()  # never populated here; kept for state-dict layout compatibility
            block_out = ch * ch_mult[i_level]
            # one extra res block per level compared to the encoder
            for _ in range(self.num_res_blocks + 1):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
                block_in = block_out
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)

    def forward(self, z: Tensor) -> Tensor:
        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)

        # upsampling (deepest level first)
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = swish(h)
        h = self.conv_out(h)
        return h
+
|
| 304 |
+
|
| 305 |
+
class DiagonalGaussian(nn.Module):
    """Split the input into (mean, logvar) along `chunk_dim` and either sample
    via the reparameterization trick or return the mean deterministically."""

    def __init__(self, sample: bool = True, chunk_dim: int = 1):
        super().__init__()
        self.sample = sample
        self.chunk_dim = chunk_dim

    def forward(self, z: Tensor) -> Tensor:
        mean, logvar = torch.chunk(z, 2, dim=self.chunk_dim)
        if not self.sample:
            return mean
        # reparameterization: mean + sigma * eps, eps ~ N(0, I)
        std = torch.exp(0.5 * logvar)
        return mean + std * torch.randn_like(mean)
+
|
| 319 |
+
|
| 320 |
+
class AutoEncoder(nn.Module):
    """VAE wrapper tying together the Encoder, Decoder and a diagonal-Gaussian
    bottleneck, with latent scale/shift normalization."""

    def __init__(self, params: AutoEncoderParams):
        super().__init__()
        self.encoder = Encoder(
            resolution=params.resolution,
            in_channels=params.in_channels,
            ch=params.ch,
            ch_mult=params.ch_mult,
            num_res_blocks=params.num_res_blocks,
            z_channels=params.z_channels,
        )
        self.decoder = Decoder(
            resolution=params.resolution,
            in_channels=params.in_channels,
            ch=params.ch,
            out_ch=params.out_ch,
            ch_mult=params.ch_mult,
            num_res_blocks=params.num_res_blocks,
            z_channels=params.z_channels,
        )
        self.reg = DiagonalGaussian()

        self.scale_factor = params.scale_factor
        self.shift_factor = params.shift_factor

    @property
    def device(self) -> torch.device:
        return next(self.parameters()).device

    @property
    def dtype(self) -> torch.dtype:
        return next(self.parameters()).dtype

    def encode(self, x: Tensor) -> Tensor:
        """Encode an image to a sampled latent, then shift and scale it."""
        sampled = self.reg(self.encoder(x))
        return self.scale_factor * (sampled - self.shift_factor)

    def decode(self, z: Tensor) -> Tensor:
        """Invert the latent scale/shift and decode back to image space."""
        return self.decoder(z / self.scale_factor + self.shift_factor)

    def forward(self, x: Tensor) -> Tensor:
        return self.decode(self.encode(x))
+
|
| 365 |
+
|
| 366 |
+
# endregion
|
| 367 |
+
# region config
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
@dataclass
class ModelSpec:
    """Hyperparameter bundle for one model variant: transformer params + autoencoder params."""

    params: FluxParams
    ae_params: AutoEncoderParams
    # Upstream fields not used here, kept as comments for reference:
    # ckpt_path: str | None
    # ae_path: str | None
    # repo_id: str | None
    # repo_flow: str | None
    # repo_ae: str | None
| 380 |
+
|
| 381 |
+
# Model spec for FLUX.1 Kontext dev (see commented repo ids below).
configs_flux_dev_context = ModelSpec(
    # repo_id="black-forest-labs/FLUX.1-Kontext-dev",
    # repo_flow="flux1-kontext-dev.safetensors",
    # repo_ae="ae.safetensors",
    params=FluxParams(
        in_channels=64,
        # out_channels=64,
        vec_in_dim=768,
        context_in_dim=4096,
        hidden_size=3072,
        mlp_ratio=4.0,
        num_heads=24,
        depth=19,  # number of double-stream blocks
        depth_single_blocks=38,  # number of single-stream blocks
        axes_dim=[16, 56, 56],  # rotary-embedding dims per position axis; sums to hidden_size // num_heads
        theta=10_000,
        qkv_bias=True,
        guidance_embed=True,
    ),
    ae_params=AutoEncoderParams(
        resolution=256,
        in_channels=3,
        ch=128,
        out_ch=3,
        ch_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        z_channels=16,
        scale_factor=0.3611,
        shift_factor=0.1159,
    ),
)

# Maximum token length for the T5-XXL text encoder.
T5XXL_MAX_LENGTH = 512
|
| 414 |
+
|
| 415 |
+
# endregion
|
| 416 |
+
|
| 417 |
+
# region math
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def attention(
    q: Tensor,
    k: Tensor,
    v: Tensor,
    pe: Tensor,
    attn_mask: Optional[Tensor] = None,
    attn_mode: str = "torch",
    split_attn: bool = False,
    control_lengths: Optional[list[int]] = None,
) -> Tensor:
    """Rotary-embedded attention over (B, H, L, D) q/k/v, returning (B, L, H*D).

    Fix: the trimming branch redundantly recomputed ``max(control_lengths)``
    although it was already computed above; the duplicate line is removed.

    :param pe: precomputed rotary-embedding matrices applied to q and k.
    :param attn_mask: unsupported; must be None.
    :param attn_mode: backend selector forwarded to hunyuan_attention.
    :param split_attn: force per-sample attention even when lengths match.
    :param control_lengths: per-sample control token lengths; when they differ,
        each sample is trimmed to its own effective length via ``total_len``.
    :raises AssertionError: if attn_mask is not None.
    """
    assert attn_mask is None, "attn_mask is not supported in flux attention"

    q, k = apply_rope(q, k, pe)

    if control_lengths is not None:
        max_control_length = max(control_lengths)
        min_control_length = min(control_lengths)
    else:
        max_control_length = 0
        min_control_length = 0

    if split_attn or max_control_length != min_control_length:
        if control_lengths is None or max_control_length == min_control_length:
            # normal split attention, no trimming
            total_len = torch.tensor([q.shape[-2]] * q.shape[0], dtype=torch.long)
        else:
            # split attention with different control lengths, trim to each control length
            total_len = torch.tensor([q.shape[-2] - max_control_length + cl for cl in control_lengths], dtype=torch.long)
    else:
        # inference time or same length for all controls
        total_len = None

    q = q.transpose(1, 2)  # B, H, L, D -> B, L, H, D
    k = k.transpose(1, 2)  # B, H, L, D -> B, L, H, D
    v = v.transpose(1, 2)  # B, H, L, D -> B, L, H, D
    x = hunyuan_attention([q, k, v], mode=attn_mode, total_len=total_len)  # B, L, D

    return x
+
|
| 464 |
+
|
| 465 |
+
def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
    """Build rotary-embedding 2x2 rotation matrices for positions ``pos``.

    Returns a float32 tensor of shape (*pos.shape, dim // 2, 2, 2).
    """
    assert dim % 2 == 0
    exponents = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
    omega = 1.0 / (theta**exponents)
    angles = torch.einsum("...n,d->...nd", pos, omega)
    rot = torch.stack([torch.cos(angles), -torch.sin(angles), torch.sin(angles), torch.cos(angles)], dim=-1)
    # unflatten the trailing 4-vector into a 2x2 rotation matrix
    rot = rot.reshape(*rot.shape[:-1], 2, 2)
    return rot.float()
+
|
| 474 |
+
|
| 475 |
+
def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
    """Rotate query/key vectors by the precomputed rotary matrices ``freqs_cis``."""

    def _rotate(x: Tensor) -> Tensor:
        # view the last dim as consecutive pairs: (..., D) -> (..., D/2, 1, 2)
        pairs = x.float().reshape(*x.shape[:-1], -1, 1, 2)
        rotated = freqs_cis[..., 0] * pairs[..., 0] + freqs_cis[..., 1] * pairs[..., 1]
        return rotated.reshape(*x.shape).type_as(x)

    return _rotate(xq), _rotate(xk)
+
|
| 482 |
+
|
| 483 |
+
# endregion
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
# region layers
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
class EmbedND(nn.Module):
    """Concatenate per-axis rotary embeddings for multi-axis position ids."""

    def __init__(self, dim: int, theta: int, axes_dim: list[int]):
        super().__init__()
        self.dim = dim
        self.theta = theta
        self.axes_dim = axes_dim

    def forward(self, ids: Tensor) -> Tensor:
        n_axes = ids.shape[-1]
        per_axis = [rope(ids[..., axis], self.axes_dim[axis], self.theta) for axis in range(n_axes)]
        # concatenate along the frequency dim and insert a broadcastable head axis
        return torch.cat(per_axis, dim=-3).unsqueeze(1)
+
|
| 505 |
+
|
| 506 |
+
def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
    """
    Create sinusoidal timestep embeddings.
    :param t: a 1-D Tensor of N indices, one per batch element. These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param time_factor: scale applied to t before embedding.
    :return: an (N, D) Tensor of positional embeddings.
    """
    t = time_factor * t
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(t.device)

    args = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        # pad with a zero column when dim is odd
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    if torch.is_floating_point(t):
        # match the input dtype/device for floating-point timesteps
        embedding = embedding.to(t)
    return embedding
+
|
| 527 |
+
|
| 528 |
+
class MLPEmbedder(nn.Module):
    """Two-layer MLP (Linear -> SiLU -> Linear) with optional gradient checkpointing."""

    def __init__(self, in_dim: int, hidden_dim: int):
        super().__init__()
        self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
        self.silu = nn.SiLU()
        self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)

        self.gradient_checkpointing = False

    def enable_gradient_checkpointing(self):
        self.gradient_checkpointing = True

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False

    def _forward(self, x: Tensor) -> Tensor:
        hidden = self.in_layer(x)
        return self.out_layer(self.silu(hidden))

    def forward(self, *args, **kwargs):
        # checkpoint only while training, trading recomputation for activation memory
        if not (self.training and self.gradient_checkpointing):
            return self._forward(*args, **kwargs)
        return checkpoint(self._forward, *args, use_reentrant=False, **kwargs)
+
|
| 562 |
+
|
| 563 |
+
class RMSNorm(torch.nn.Module):
    """Root-mean-square normalization over the last dim with a learned per-channel scale."""

    def __init__(self, dim: int):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x: Tensor):
        input_dtype = x.dtype
        x = x.float()
        # normalize by the RMS of the last dimension (eps for numerical stability)
        inv_rms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
        # scale in float32 before casting back to the input dtype
        return (x * inv_rms * self.scale.float()).to(dtype=input_dtype)
+
|
| 575 |
+
|
| 576 |
+
class QKNorm(torch.nn.Module):
    """Apply independent RMS normalization to queries and keys."""

    def __init__(self, dim: int):
        super().__init__()
        self.query_norm = RMSNorm(dim)
        self.key_norm = RMSNorm(dim)

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]:
        # v only supplies the target dtype/device for the outputs
        normed_q = self.query_norm(q).to(v)
        normed_k = self.key_norm(k).to(v)
        return normed_q, normed_k
+
|
| 587 |
+
|
| 588 |
+
class SelfAttention(nn.Module):
    """QKV self-attention with RMS-normalized queries/keys and rotary embeddings."""

    def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.norm = QKNorm(head_dim)
        self.proj = nn.Linear(dim, dim)

    # this is not called from DoubleStreamBlock/SingleStreamBlock because they use the attention function directly
    def forward(self, x: Tensor, pe: Tensor) -> Tensor:
        batch, seq_len, _ = x.shape
        qkv = self.qkv(x)
        # (B, L, 3*H*D) -> 3 x (B, H, L, D)
        qkv = qkv.view(batch, seq_len, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        q, k = self.norm(q, k, v)
        out = attention(q, k, v, pe=pe)
        return self.proj(out)
+
|
| 607 |
+
|
| 608 |
+
@dataclass
class ModulationOut:
    """One set of adaptive-modulation tensors produced by Modulation."""

    shift: Tensor  # additive shift applied after normalization
    scale: Tensor  # multiplicative scale (used as 1 + scale)
    gate: Tensor  # gate multiplying the residual branch
+
|
| 614 |
+
|
| 615 |
+
class Modulation(nn.Module):
    """Project a conditioning vector into shift/scale/gate modulation parameters."""

    def __init__(self, dim: int, double: bool):
        super().__init__()
        self.is_double = double
        # two modulation sets (e.g. attention + MLP) when double, otherwise one
        self.multiplier = 6 if double else 3
        self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)

    def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut | None]:
        # (B, dim) -> (B, 1, multiplier*dim) -> multiplier chunks of (B, 1, dim)
        chunks = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1)
        first = ModulationOut(*chunks[:3])
        second = ModulationOut(*chunks[3:]) if self.is_double else None
        return first, second
+
|
| 630 |
+
|
| 631 |
+
class DoubleStreamBlock(nn.Module):
    """Dual-stream DiT block: image and text tokens keep separate modulation,
    attention projections and MLPs, but attend jointly in a single attention call
    (text tokens concatenated before image tokens).
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float,
        qkv_bias: bool = False,
        attn_mode: str = "torch",
        split_attn: bool = False,
    ):
        super().__init__()

        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.attn_mode = attn_mode
        self.split_attn = split_attn

        # image-stream components (double=True -> one modulation set for attention, one for the MLP)
        self.img_mod = Modulation(hidden_size, double=True)
        self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

        # text-stream components (mirror of the image stream)
        self.txt_mod = Modulation(hidden_size, double=True)
        self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False

    def enable_gradient_checkpointing(self, activation_cpu_offloading: bool = False):
        self.gradient_checkpointing = True
        self.activation_cpu_offloading = activation_cpu_offloading

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False

    def _forward(
        self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, control_lengths: Optional[list[int]] = None
    ) -> tuple[Tensor, Tensor]:
        img_mod1, img_mod2 = self.img_mod(vec)
        txt_mod1, txt_mod2 = self.txt_mod(vec)

        # prepare image for attention
        img_modulated = self.img_norm1(img)
        img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
        img_qkv = self.img_attn.qkv(img_modulated)
        img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # prepare txt for attention
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # run actual attention: text tokens first, image tokens after
        q = torch.cat((txt_q, img_q), dim=2)
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

        # attention mask is not supported in flux
        attn = attention(q, k, v, pe=pe, attn_mode=self.attn_mode, split_attn=self.split_attn, control_lengths=control_lengths)
        # split the joint result back into the two streams
        txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]

        # calculate the img blocks (gated residual adds via addcmul)
        img = torch.addcmul(img, img_mod1.gate, self.img_attn.proj(img_attn))
        img = torch.addcmul(img, img_mod2.gate, self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift))
        # calculate the txt blocks
        txt = torch.addcmul(txt, txt_mod1.gate, self.txt_attn.proj(txt_attn))
        txt = torch.addcmul(txt, txt_mod2.gate, self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift))
        return img, txt

    def forward(
        self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, control_lengths: Optional[list[int]] = None
    ) -> tuple[Tensor, Tensor]:
        if self.training and self.gradient_checkpointing:
            forward_fn = self._forward
            if self.activation_cpu_offloading:
                # stash checkpointed activations on CPU; device taken from an MLP weight
                forward_fn = create_cpu_offloading_wrapper(forward_fn, self.img_mlp[0].weight.device)
            return checkpoint(forward_fn, img, txt, vec, pe, control_lengths, use_reentrant=False)
        else:
            return self._forward(img, txt, vec, pe, control_lengths)
+
|
| 730 |
+
|
| 731 |
+
class SingleStreamBlock(nn.Module):
    """
    A DiT block with parallel linear layers as described in
    https://arxiv.org/abs/2302.05442 and adapted modulation interface.

    QKV projection and the MLP input share one fused linear (linear1); the
    attention output and MLP output are fused back through linear2.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qk_scale: float | None = None,
        attn_mode: str = "torch",
        split_attn: bool = False,
    ):
        super().__init__()
        self.hidden_dim = hidden_size
        self.num_heads = num_heads
        head_dim = hidden_size // num_heads
        self.scale = qk_scale or head_dim**-0.5
        self.attn_mode = attn_mode
        self.split_attn = split_attn

        self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
        # qkv and mlp_in
        self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
        # proj and mlp_out
        self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)

        self.norm = QKNorm(head_dim)

        self.hidden_size = hidden_size
        self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)

        self.mlp_act = nn.GELU(approximate="tanh")
        # double=False -> a single shift/scale/gate set
        self.modulation = Modulation(hidden_size, double=False)

        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False

    def enable_gradient_checkpointing(self, activation_cpu_offloading: bool = False):
        self.gradient_checkpointing = True
        self.activation_cpu_offloading = activation_cpu_offloading

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False

    def _forward(self, x: Tensor, vec: Tensor, pe: Tensor, control_lengths: Optional[list[int]] = None) -> Tensor:
        mod, _ = self.modulation(vec)
        x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
        # one fused projection, then split into the attention and MLP branches
        qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)

        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)

        # compute attention
        attn = attention(q, k, v, pe=pe, attn_mode=self.attn_mode, split_attn=self.split_attn, control_lengths=control_lengths)

        # compute activation in mlp stream, cat again and run second linear layer
        output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
        # gated residual add
        return torch.addcmul(x, mod.gate, output)

    def forward(self, x: Tensor, vec: Tensor, pe: Tensor, control_lengths: Optional[list[int]] = None) -> Tensor:
        if self.training and self.gradient_checkpointing:
            forward_fn = self._forward
            if self.activation_cpu_offloading:
                # stash checkpointed activations on CPU; device taken from linear1's weight
                forward_fn = create_cpu_offloading_wrapper(forward_fn, self.linear1.weight.device)
            return checkpoint(forward_fn, x, vec, pe, control_lengths, use_reentrant=False)
        else:
            return self._forward(x, vec, pe, control_lengths)
+
|
| 803 |
+
|
| 804 |
+
class LastLayer(nn.Module):
    """Final adaLN-modulated projection from hidden states to per-patch outputs."""

    def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))

    def forward(self, x: Tensor, vec: Tensor) -> Tensor:
        # conditioning vector -> per-sample shift and scale
        shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1)
        modulated = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :]
        return self.linear(modulated)
+
|
| 817 |
+
|
| 818 |
+
# endregion
|
| 819 |
+
|
| 820 |
+
|
| 821 |
+
class Flux(nn.Module):
|
| 822 |
+
"""
|
| 823 |
+
Transformer model for flow matching on sequences.
|
| 824 |
+
"""
|
| 825 |
+
|
| 826 |
+
def __init__(self, params: FluxParams, attn_mode: str = "flash", split_attn: bool = False) -> None:
|
| 827 |
+
super().__init__()
|
| 828 |
+
|
| 829 |
+
self.params = params
|
| 830 |
+
self.in_channels = params.in_channels
|
| 831 |
+
self.out_channels = self.in_channels
|
| 832 |
+
if params.hidden_size % params.num_heads != 0:
|
| 833 |
+
raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
|
| 834 |
+
pe_dim = params.hidden_size // params.num_heads
|
| 835 |
+
if sum(params.axes_dim) != pe_dim:
|
| 836 |
+
raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
|
| 837 |
+
self.hidden_size = params.hidden_size
|
| 838 |
+
self.num_heads = params.num_heads
|
| 839 |
+
|
| 840 |
+
self.attn_mode = attn_mode
|
| 841 |
+
self.split_attn = split_attn
|
| 842 |
+
|
| 843 |
+
self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
|
| 844 |
+
self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
|
| 845 |
+
self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
|
| 846 |
+
self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
|
| 847 |
+
self.guidance_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity()
|
| 848 |
+
self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)
|
| 849 |
+
|
| 850 |
+
self.double_blocks = nn.ModuleList(
|
| 851 |
+
[
|
| 852 |
+
DoubleStreamBlock(
|
| 853 |
+
self.hidden_size,
|
| 854 |
+
self.num_heads,
|
| 855 |
+
mlp_ratio=params.mlp_ratio,
|
| 856 |
+
qkv_bias=params.qkv_bias,
|
| 857 |
+
attn_mode=self.attn_mode,
|
| 858 |
+
split_attn=self.split_attn,
|
| 859 |
+
)
|
| 860 |
+
for _ in range(params.depth)
|
| 861 |
+
]
|
| 862 |
+
)
|
| 863 |
+
|
| 864 |
+
self.single_blocks = nn.ModuleList(
|
| 865 |
+
[
|
| 866 |
+
SingleStreamBlock(
|
| 867 |
+
self.hidden_size,
|
| 868 |
+
self.num_heads,
|
| 869 |
+
mlp_ratio=params.mlp_ratio,
|
| 870 |
+
attn_mode=self.attn_mode,
|
| 871 |
+
split_attn=self.split_attn,
|
| 872 |
+
)
|
| 873 |
+
for _ in range(params.depth_single_blocks)
|
| 874 |
+
]
|
| 875 |
+
)
|
| 876 |
+
|
| 877 |
+
self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)
|
| 878 |
+
|
| 879 |
+
self.gradient_checkpointing = False
|
| 880 |
+
self.activation_cpu_offloading = False
|
| 881 |
+
self.blocks_to_swap = None
|
| 882 |
+
|
| 883 |
+
self.offloader_double = None
|
| 884 |
+
self.offloader_single = None
|
| 885 |
+
self.num_double_blocks = len(self.double_blocks)
|
| 886 |
+
self.num_single_blocks = len(self.single_blocks)
|
| 887 |
+
|
| 888 |
+
def get_model_type(self) -> str:
|
| 889 |
+
return "flux"
|
| 890 |
+
|
| 891 |
+
@property
def device(self):
    """Device of the first model parameter (assumed uniform across the model)."""
    first_param = next(self.parameters())
    return first_param.device
|
| 894 |
+
|
| 895 |
+
@property
def dtype(self):
    """Dtype of the first model parameter (assumed uniform across the model)."""
    first_param = next(self.parameters())
    return first_param.dtype
|
| 898 |
+
|
| 899 |
+
def fp8_optimization(
    self, state_dict: dict[str, torch.Tensor], device: torch.device, move_to_device: bool, use_scaled_mm: bool = False
) -> dict[str, torch.Tensor]:
    """
    Optimize the model state_dict with fp8.

    Args:
        state_dict (dict[str, torch.Tensor]):
            The state_dict of the model.
        device (torch.device):
            The device to calculate the weight.
        move_to_device (bool):
            Whether to move the weight to the device after optimization.
        use_scaled_mm (bool):
            Passed through to the monkey patch; presumably enables
            torch._scaled_mm in the patched forward — TODO confirm.

    Returns:
        dict[str, torch.Tensor]: The optimized state_dict (the function also
        mutates it in place and patches this model's modules).
    """
    # only transformer blocks are quantized; norm/modulation weights are excluded
    TARGET_KEYS = ["single_blocks", "double_blocks"]
    EXCLUDE_KEYS = [
        "norm",
        "mod",
    ]

    # local import keeps fp8 utilities optional at module import time
    from musubi_tuner.modules.fp8_optimization_utils import apply_fp8_monkey_patch, optimize_state_dict_with_fp8

    # inplace optimization
    state_dict = optimize_state_dict_with_fp8(state_dict, device, TARGET_KEYS, EXCLUDE_KEYS, move_to_device=move_to_device)

    # apply monkey patching
    apply_fp8_monkey_patch(self, state_dict, use_scaled_mm=use_scaled_mm)

    return state_dict
|
| 928 |
+
|
| 929 |
+
def enable_gradient_checkpointing(self, activation_cpu_offloading: bool = False):
    """Turn on gradient checkpointing for all sub-modules that support it.

    Args:
        activation_cpu_offloading: also offload checkpointed activations to CPU.
    """
    self.gradient_checkpointing = True
    self.activation_cpu_offloading = activation_cpu_offloading

    self.time_in.enable_gradient_checkpointing()
    self.vector_in.enable_gradient_checkpointing()
    # guidance_in is a plain nn.Identity when the model has no guidance embedder
    if self.guidance_in.__class__ != nn.Identity:
        self.guidance_in.enable_gradient_checkpointing()

    for blk in list(self.double_blocks) + list(self.single_blocks):
        blk.enable_gradient_checkpointing(activation_cpu_offloading)

    print(f"FLUX: Gradient checkpointing enabled. Activation CPU offloading: {activation_cpu_offloading}")
|
| 942 |
+
|
| 943 |
+
def disable_gradient_checkpointing(self):
    """Turn gradient checkpointing back off on all sub-modules."""
    self.gradient_checkpointing = False

    self.time_in.disable_gradient_checkpointing()
    self.vector_in.disable_gradient_checkpointing()
    # guidance_in is a plain nn.Identity when the model has no guidance embedder
    if self.guidance_in.__class__ != nn.Identity:
        self.guidance_in.disable_gradient_checkpointing()

    for blk in list(self.double_blocks) + list(self.single_blocks):
        blk.disable_gradient_checkpointing()

    print("FLUX: Gradient checkpointing disabled.")
|
| 955 |
+
|
| 956 |
+
def enable_block_swap(self, num_blocks: int, device: torch.device, supports_backward: bool, use_pinned_memory: bool = False):
    """Enable streaming of transformer blocks between CPU and `device`.

    Args:
        num_blocks: total swap budget; split between double and single blocks below.
        device: compute device the offloaders stage blocks onto.
        supports_backward: whether offloaders must also handle the backward pass.
        use_pinned_memory: use pinned host memory for faster transfers.
    """
    self.blocks_to_swap = num_blocks
    # budget split: half the count as double blocks, the remainder doubled (+1)
    # as single blocks — presumably one double block ≈ two single blocks in
    # size, TODO confirm against the block parameter counts.
    double_blocks_to_swap = num_blocks // 2
    single_blocks_to_swap = (num_blocks - double_blocks_to_swap) * 2 + 1

    # keep at least two blocks of each kind resident on the compute device
    assert double_blocks_to_swap <= self.num_double_blocks - 2 and single_blocks_to_swap <= self.num_single_blocks - 2, (
        f"Cannot swap more than {self.num_double_blocks - 2} double blocks and {self.num_single_blocks - 2} single blocks. "
        f"Requested {double_blocks_to_swap} double blocks and {single_blocks_to_swap} single blocks."
    )

    self.offloader_double = ModelOffloader(
        "double", self.double_blocks, self.num_double_blocks, double_blocks_to_swap, supports_backward, device, use_pinned_memory  # , debug=True
    )
    self.offloader_single = ModelOffloader(
        "single", self.single_blocks, self.num_single_blocks, single_blocks_to_swap, supports_backward, device, use_pinned_memory  # , debug=True
    )
    print(
        f"FLUX: Block swap enabled. Swapping {num_blocks} blocks, double blocks: {double_blocks_to_swap}, single blocks: {single_blocks_to_swap}."
    )
|
| 975 |
+
|
| 976 |
+
def switch_block_swap_for_inference(self):
    """Switch the offloaders to forward-only mode (used during sampling)."""
    if not self.blocks_to_swap:
        return
    self.offloader_double.set_forward_only(True)
    self.offloader_single.set_forward_only(True)
    self.prepare_block_swap_before_forward()
    print("FLUX: Block swap set to forward only.")
|
| 982 |
+
|
| 983 |
+
def switch_block_swap_for_training(self):
    """Switch the offloaders back to forward+backward mode (used during training)."""
    if not self.blocks_to_swap:
        return
    self.offloader_double.set_forward_only(False)
    self.offloader_single.set_forward_only(False)
    self.prepare_block_swap_before_forward()
    print("FLUX: Block swap set to forward and backward.")
|
| 989 |
+
|
| 990 |
+
def move_to_device_except_swap_blocks(self, device: torch.device):
    """Move the model to `device`, leaving swappable blocks where they are.

    Assumes the model is on CPU; temporarily detaching the block lists before
    `.to()` avoids a transient full-model copy on the target device.
    """
    saved_double = saved_single = None
    if self.blocks_to_swap:
        saved_double, saved_single = self.double_blocks, self.single_blocks
        self.double_blocks = None
        self.single_blocks = None

    self.to(device)

    if self.blocks_to_swap:
        self.double_blocks = saved_double
        self.single_blocks = saved_single
|
| 1003 |
+
|
| 1004 |
+
def prepare_block_swap_before_forward(self):
    """Stage block devices for the upcoming forward pass (no-op when swap is off)."""
    if not self.blocks_to_swap:
        return
    self.offloader_double.prepare_block_devices_before_forward(self.double_blocks)
    self.offloader_single.prepare_block_devices_before_forward(self.single_blocks)
|
| 1009 |
+
|
| 1010 |
+
def forward(
    self,
    img: Tensor,
    img_ids: Tensor,
    txt: Tensor,
    txt_ids: Tensor,
    timesteps: Tensor,
    y: Tensor,
    guidance: Tensor | None = None,
    control_lengths: Optional[list[int]] = None,
) -> Tensor:
    """Run the FLUX transformer on packed image tokens conditioned on text.

    Args:
        img: packed image latents, 3-D (batch, img_seq, in_channels).
        img_ids: positional ids for image tokens (fed to the RoPE embedder).
        txt: text-encoder hidden states, 3-D (batch, txt_seq, context_in_dim).
        txt_ids: positional ids for text tokens.
        timesteps: diffusion timesteps, embedded via timestep_embedding(., 256).
        y: pooled conditioning vector for vector_in.
        guidance: guidance strength; required when params.guidance_embed is set.
        control_lengths: per-sample control lengths, forwarded to every block.

    Returns:
        Tensor of shape (N, T, patch_size ** 2 * out_channels) from final_layer.
    """
    if img.ndim != 3 or txt.ndim != 3:
        raise ValueError("Input img and txt tensors must have 3 dimensions.")

    # running on sequences img
    img = self.img_in(img)
    vec = self.time_in(timestep_embedding(timesteps, 256))
    if self.params.guidance_embed:
        if guidance is None:
            raise ValueError("Didn't get guidance strength for guidance distilled model.")
        vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
    vec = vec + self.vector_in(y)
    txt = self.txt_in(txt)

    # text ids come first; the joint positions feed the RoPE embedder
    ids = torch.cat((txt_ids, img_ids), dim=1)
    pe = self.pe_embedder(ids)

    # remember the caller's device: block swap may leave activations elsewhere
    input_device = img.device
    for block_idx, block in enumerate(self.double_blocks):
        if self.blocks_to_swap:
            # wait until this block's weights have arrived on the compute device
            self.offloader_double.wait_for_block(block_idx)

        img, txt = block(img=img, txt=txt, vec=vec, pe=pe, control_lengths=control_lengths)

        if self.blocks_to_swap:
            # schedule eviction/prefetch around the block just executed
            self.offloader_double.submit_move_blocks_forward(self.double_blocks, block_idx)

    # single-stream blocks operate on the concatenated [txt, img] sequence
    img = torch.cat((txt, img), 1)

    for block_idx, block in enumerate(self.single_blocks):
        if self.blocks_to_swap:
            self.offloader_single.wait_for_block(block_idx)

        img = block(img, vec=vec, pe=pe, control_lengths=control_lengths)

        if self.blocks_to_swap:
            self.offloader_single.submit_move_blocks_forward(self.single_blocks, block_idx)

    # drop the text tokens, keeping only the image part of the sequence
    img = img[:, txt.shape[1] :, ...]

    if img.device != input_device:
        img = img.to(input_device)

    img = self.final_layer(img, vec)  # (N, T, patch_size ** 2 * out_channels)

    return img
|
src/musubi_tuner/flux/flux_utils.py
ADDED
|
@@ -0,0 +1,408 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import math
|
| 3 |
+
from PIL import Image
|
| 4 |
+
from typing import Optional, Union
|
| 5 |
+
import einops
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
from transformers import CLIPConfig, CLIPTextModel, T5Config, T5EncoderModel, CLIPTokenizer, T5Tokenizer
|
| 9 |
+
from accelerate import init_empty_weights
|
| 10 |
+
|
| 11 |
+
from musubi_tuner.flux import flux_models
|
| 12 |
+
from musubi_tuner.utils import image_utils
|
| 13 |
+
from musubi_tuner.utils.safetensors_utils import load_safetensors
|
| 14 |
+
from musubi_tuner.utils.train_utils import get_lin_function
|
| 15 |
+
|
| 16 |
+
import logging
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
# NOTE(review): basicConfig at import time configures the root logger as a
# module side effect; libraries usually leave this to the application — confirm
# this is intended.
logging.basicConfig(level=logging.INFO)


# Hugging Face hub IDs used only to fetch tokenizers (model weights are loaded
# from local safetensors files).
CLIP_L_TOKENIZER_ID = "openai/clip-vit-large-patch14"
T5_XXL_TOKENIZER_ID = "google/t5-v1_1-xxl"

# copy from https://github.com/black-forest-labs/flux/blob/main/src/flux/util.py
# (width, height) buckets; every dimension is divisible by 16.
# ("PREFERED" spelling kept as-is: it is a module-level constant other files may import.)
PREFERED_KONTEXT_RESOLUTIONS = [
    (672, 1568),
    (688, 1504),
    (720, 1456),
    (752, 1392),
    (800, 1328),
    (832, 1248),
    (880, 1184),
    (944, 1104),
    (1024, 1024),
    (1104, 944),
    (1184, 880),
    (1248, 832),
    (1328, 800),
    (1392, 752),
    (1456, 720),
    (1504, 688),
    (1568, 672),
]
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def preprocess_control_image(
    control_image_path: str, resize_to_prefered: bool = True
) -> tuple[torch.Tensor, np.ndarray, Optional[np.ndarray]]:
    """
    Preprocess the control image for the model. See `preprocess_image` for details.
    Args:
        control_image_path (str): Path to the control image.
        resize_to_prefered (bool): If True, pick the trained Kontext resolution
            whose aspect ratio is closest to the input; otherwise keep the
            original size floored to a multiple of 16 (cropping the
            right/bottom edge).
    Returns:
        Tuple[torch.Tensor, np.ndarray, Optional[np.ndarray]]: same as `preprocess_image`.
    """
    # find appropriate bucket for the image size. reference: https://github.com/black-forest-labs/flux/blob/main/src/flux/sampling.py
    control_image = Image.open(control_image_path)
    width, height = control_image.size
    aspect_ratio = width / height

    if resize_to_prefered:
        # Kontext is trained on specific resolutions, using one of them is recommended
        # min() over (aspect-ratio distance, w, h) picks the closest bucket
        _, bucket_width, bucket_height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS)
        control_latent_width = int(bucket_width / 16)
        control_latent_height = int(bucket_height / 16)
        bucket_width = control_latent_width * 16
        bucket_height = control_latent_height * 16
        # NOTE(review): no explicit resize happens in this branch — presumably
        # image_utils.preprocess_image rescales to (bucket_width, bucket_height);
        # confirm against its implementation.
    else:
        # use the original image size, but make sure it's divisible by 16
        control_latent_width = int(math.floor(width / 16))
        control_latent_height = int(math.floor(height / 16))
        bucket_width = control_latent_width * 16
        bucket_height = control_latent_height * 16
        control_image = control_image.crop((0, 0, bucket_width, bucket_height))

    return image_utils.preprocess_image(control_image, bucket_width, bucket_height)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def is_fp8(dt):
|
| 81 |
+
return dt in [torch.float8_e4m3fn, torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz]
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def prepare_img_ids(batch_size: int, packed_latent_height: int, packed_latent_width: int, is_ctrl: bool = False) -> torch.Tensor:
    """Build (batch, h*w, 3) positional ids for packed latents.

    Channel 0 marks control tokens (1) vs. target tokens (0); channels 1 and 2
    hold the row and column index of each packed-latent position.

    Args:
        batch_size: number of samples to tile the ids over.
        packed_latent_height: latent grid height after packing.
        packed_latent_width: latent grid width after packing.
        is_ctrl: mark these ids as belonging to a control image.

    Returns:
        Float tensor of shape (batch_size, h * w, 3).
    """
    img_ids = torch.zeros(packed_latent_height, packed_latent_width, 3)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(packed_latent_height)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(packed_latent_width)[None, :]
    if is_ctrl:
        img_ids[..., 0] = 1
    # flatten the spatial grid row-major and tile over the batch.
    # plain torch replaces the previous einops.repeat("h w c -> b (h w) c"):
    # identical values, one fewer third-party dependency.
    img_ids = img_ids.reshape(1, packed_latent_height * packed_latent_width, 3).repeat(batch_size, 1, 1)
    return img_ids
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def time_shift(mu: float, sigma: float, t: torch.Tensor):
    """Shift timesteps t in (0, 1] toward 1 according to mu; sigma bends the curve."""
    e_mu = math.exp(mu)
    return e_mu / (e_mu + (1 / t - 1) ** sigma)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def get_schedule(
    num_steps: int,
    image_seq_len: int,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
    shift_value: Optional[float] = None,
) -> list[float]:
    """Return num_steps + 1 sigmas descending from 1.0 to 0.0.

    The schedule is shifted to favor high timesteps for higher-signal images.
    When shift_value is None, the shift mu is estimated linearly from the image
    sequence length (FLUX's resolution-dependent schedule); otherwise the fixed
    discrete shift ``s' = shift_value * s / (1 + (shift_value - 1) * s)`` is
    applied (shift_value == 1.0 leaves the schedule unchanged).

    Args:
        num_steps: number of sampling steps; the list has num_steps + 1 entries
            (extra entry for zero).
        image_seq_len: packed image sequence length used to estimate mu.
        base_shift / max_shift: endpoints of the linear mu estimate.
        shift_value: optional fixed shift overriding the mu-based schedule.
    """
    timesteps = torch.linspace(1, 0, num_steps + 1)

    if shift_value is None:
        # estimate mu by linear interpolation between two reference points
        mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
        timesteps = time_shift(mu, 1.0, timesteps)
    else:
        # fixed discrete shift (previously recomputed linspace redundantly and
        # shuffled sigmas/timesteps aliases; values are identical)
        timesteps = shift_value * timesteps / (1 + (shift_value - 1) * timesteps)

    return timesteps.tolist()
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def load_flow_model(
    ckpt_path: str,
    dtype: Optional[torch.dtype],
    device: Union[str, torch.device],
    disable_mmap: bool = False,
    attn_mode: str = "torch",
    split_attn: bool = False,
    loading_device: Optional[Union[str, torch.device]] = None,
    fp8_scaled: bool = False,
) -> flux_models.Flux:
    """Build the FLUX transformer on the meta device and load its weights.

    Args:
        ckpt_path: path to the .safetensors checkpoint.
        dtype: target weight dtype (None keeps the checkpoint's dtype).
        device: compute device (used for the fp8 weight conversion).
        disable_mmap: load the file without memory mapping.
        attn_mode: attention backend identifier passed to the model.
        split_attn: whether to enable split attention in the blocks.
        loading_device: where the loaded weights should live; defaults to device.
        fp8_scaled: quantize transformer block weights to scaled fp8.
    """
    if loading_device is None:
        loading_device = device

    device = torch.device(device)
    loading_device = torch.device(loading_device) if loading_device is not None else device
    # fp8 path: keep the raw state dict on CPU; the conversion runs on `device`
    flux_kontext_loading_device = loading_device if not fp8_scaled else torch.device("cpu")

    # build model
    with init_empty_weights():
        params = flux_models.configs_flux_dev_context.params

        model = flux_models.Flux(params, attn_mode, split_attn)
        if dtype is not None:
            model = model.to(dtype)

    # load_sft doesn't support torch.device
    logger.info(f"Loading state dict from {ckpt_path} to {flux_kontext_loading_device}")
    sd = load_safetensors(ckpt_path, device=flux_kontext_loading_device, disable_mmap=disable_mmap, dtype=dtype)

    # if the key has annoying prefix, remove it
    # NOTE(review): only the first non-matching key stops the loop — assumes the
    # "model.diffusion_model." prefix is all-or-nothing across the checkpoint;
    # confirm for mixed checkpoints.
    for key in list(sd.keys()):
        new_key = key.replace("model.diffusion_model.", "")
        if new_key == key:
            break  # the model doesn't have annoying prefix
        sd[new_key] = sd.pop(key)

    # if fp8_scaled is True, convert the model to fp8
    if fp8_scaled:
        # fp8 optimization: calculate on CUDA, move back to CPU if loading_device is CPU (block swap)
        logger.info("Optimizing model weights to fp8. This may take a while.")
        sd = model.fp8_optimization(sd, device, move_to_device=loading_device.type == "cpu")

        if loading_device.type != "cpu":
            # make sure all the model weights are on the loading_device
            logger.info(f"Moving weights to {loading_device}")
            for key in sd.keys():
                sd[key] = sd[key].to(loading_device)

    # assign=True adopts the loaded tensors directly (model was built on meta)
    info = model.load_state_dict(sd, strict=True, assign=True)
    logger.info(f"Loaded Flux: {info}")
    return model
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def load_ae(
    ckpt_path: str, dtype: torch.dtype, device: Union[str, torch.device], disable_mmap: bool = False
) -> flux_models.AutoEncoder:
    """Build the FLUX AutoEncoder on the meta device and load its weights.

    Args:
        ckpt_path: path to the AE .safetensors checkpoint.
        dtype: dtype to load the weights as.
        device: device the weights are loaded onto.
        disable_mmap: load the file without memory mapping.
    """
    logger.info("Building AutoEncoder")
    with init_empty_weights():
        # dev and schnell have the same AE params
        ae = flux_models.AutoEncoder(flux_models.configs_flux_dev_context.ae_params).to(dtype)

    logger.info(f"Loading state dict from {ckpt_path}")
    state_dict = load_safetensors(ckpt_path, device=str(device), disable_mmap=disable_mmap, dtype=dtype)
    load_info = ae.load_state_dict(state_dict, strict=True, assign=True)
    logger.info(f"Loaded AE: {load_info}")
    return ae
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def load_clip_l(
    ckpt_path: Optional[str],
    dtype: torch.dtype,
    device: Union[str, torch.device],
    disable_mmap: bool = False,
    state_dict: Optional[dict] = None,
) -> tuple[CLIPTokenizer, CLIPTextModel]:
    """Build the CLIP-L text encoder from a hard-coded config and load weights.

    Args:
        ckpt_path: path to a .safetensors checkpoint; ignored when state_dict is given.
        dtype: target dtype; fp8 dtypes keep the embeddings in bfloat16.
        device: device the weights are loaded onto.
        disable_mmap: load the file without memory mapping.
        state_dict: pre-loaded state dict to use instead of reading ckpt_path.

    Returns:
        (tokenizer, text_model) pair.
    """
    logger.info("Building CLIP-L")
    # Inline CLIP config with the text_config flattened in (see the commented
    # "# text_config" markers). Python dict literals keep only the LAST
    # occurrence of a duplicated key, so the later 768/3072/12/12 entries win.
    # NOTE(review): "hidden_act" is first "quick_gelu" and later overridden to
    # "gelu" with no second override — confirm "gelu" is intended for CLIP-L.
    CLIPL_CONFIG = {
        "_name_or_path": "clip-vit-large-patch14/",
        "architectures": ["CLIPModel"],
        "initializer_factor": 1.0,
        "logit_scale_init_value": 2.6592,
        "model_type": "clip",
        "projection_dim": 768,
        # "text_config": {
        "_name_or_path": "",
        "add_cross_attention": False,
        "architectures": None,
        "attention_dropout": 0.0,
        "bad_words_ids": None,
        "bos_token_id": 0,
        "chunk_size_feed_forward": 0,
        "cross_attention_hidden_size": None,
        "decoder_start_token_id": None,
        "diversity_penalty": 0.0,
        "do_sample": False,
        "dropout": 0.0,
        "early_stopping": False,
        "encoder_no_repeat_ngram_size": 0,
        "eos_token_id": 2,
        "finetuning_task": None,
        "forced_bos_token_id": None,
        "forced_eos_token_id": None,
        "hidden_act": "quick_gelu",
        "hidden_size": 768,
        "id2label": {"0": "LABEL_0", "1": "LABEL_1"},
        "initializer_factor": 1.0,
        "initializer_range": 0.02,
        "intermediate_size": 3072,
        "is_decoder": False,
        "is_encoder_decoder": False,
        "label2id": {"LABEL_0": 0, "LABEL_1": 1},
        "layer_norm_eps": 1e-05,
        "length_penalty": 1.0,
        "max_length": 20,
        "max_position_embeddings": 77,
        "min_length": 0,
        "model_type": "clip_text_model",
        "no_repeat_ngram_size": 0,
        "num_attention_heads": 12,
        "num_beam_groups": 1,
        "num_beams": 1,
        "num_hidden_layers": 12,
        "num_return_sequences": 1,
        "output_attentions": False,
        "output_hidden_states": False,
        "output_scores": False,
        "pad_token_id": 1,
        "prefix": None,
        "problem_type": None,
        "projection_dim": 768,
        "pruned_heads": {},
        "remove_invalid_values": False,
        "repetition_penalty": 1.0,
        "return_dict": True,
        "return_dict_in_generate": False,
        "sep_token_id": None,
        "task_specific_params": None,
        "temperature": 1.0,
        "tie_encoder_decoder": False,
        "tie_word_embeddings": True,
        "tokenizer_class": None,
        "top_k": 50,
        "top_p": 1.0,
        "torch_dtype": None,
        "torchscript": False,
        "transformers_version": "4.16.0.dev0",
        "use_bfloat16": False,
        "vocab_size": 49408,
        "hidden_act": "gelu",
        "hidden_size": 1280,
        "intermediate_size": 5120,
        "num_attention_heads": 20,
        "num_hidden_layers": 32,
        # },
        # "text_config_dict": {
        "hidden_size": 768,
        "intermediate_size": 3072,
        "num_attention_heads": 12,
        "num_hidden_layers": 12,
        "projection_dim": 768,
        # },
        # "torch_dtype": "float32",
        # "transformers_version": None,
    }
    config = CLIPConfig(**CLIPL_CONFIG)
    with init_empty_weights():
        clip = CLIPTextModel._from_config(config)

    if state_dict is not None:
        sd = state_dict
    else:
        logger.info(f"Loading state dict from {ckpt_path}")
        sd = load_safetensors(ckpt_path, device=str(device), disable_mmap=disable_mmap, dtype=dtype)
    # assign=True adopts the loaded tensors (model was built on the meta device)
    info = clip.load_state_dict(sd, strict=True, assign=True)
    logger.info(f"Loaded CLIP-L: {info}")
    clip.to(device)

    if dtype is not None:
        if is_fp8(dtype):
            # embeddings stay in bfloat16; only the transformer goes fp8
            logger.info(f"prepare CLIP-L for fp8: set to {dtype}, set embeddings to {torch.bfloat16}")
            clip.to(dtype)  # fp8
            clip.text_model.embeddings.to(dtype=torch.bfloat16)
        else:
            logger.info(f"Setting CLIP-L to dtype: {dtype}")
            clip.to(dtype)

    tokenizer = CLIPTokenizer.from_pretrained(CLIP_L_TOKENIZER_ID)
    return tokenizer, clip
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def load_t5xxl(
    ckpt_path: str,
    dtype: Optional[torch.dtype],
    device: Union[str, torch.device],
    disable_mmap: bool = False,
    state_dict: Optional[dict] = None,
) -> tuple[T5Tokenizer, T5EncoderModel]:
    """Build the T5-XXL text encoder from an inline config and load weights.

    Args:
        ckpt_path: path to a .safetensors checkpoint; ignored when state_dict is given.
        dtype: target dtype; fp8 dtypes trigger the mixed-precision preparation below.
        device: device the weights are loaded onto.
        disable_mmap: load the file without memory mapping.
        state_dict: pre-loaded state dict to use instead of reading ckpt_path.

    Returns:
        (tokenizer, t5xxl_encoder) pair.
    """
    # encoder-only T5 v1.1 XXL configuration, kept inline to avoid hub downloads
    T5_CONFIG_JSON = """
{
    "architectures": [
        "T5EncoderModel"
    ],
    "classifier_dropout": 0.0,
    "d_ff": 10240,
    "d_kv": 64,
    "d_model": 4096,
    "decoder_start_token_id": 0,
    "dense_act_fn": "gelu_new",
    "dropout_rate": 0.1,
    "eos_token_id": 1,
    "feed_forward_proj": "gated-gelu",
    "initializer_factor": 1.0,
    "is_encoder_decoder": true,
    "is_gated_act": true,
    "layer_norm_epsilon": 1e-06,
    "model_type": "t5",
    "num_decoder_layers": 24,
    "num_heads": 64,
    "num_layers": 24,
    "output_past": true,
    "pad_token_id": 0,
    "relative_attention_max_distance": 128,
    "relative_attention_num_buckets": 32,
    "tie_word_embeddings": false,
    "torch_dtype": "float16",
    "transformers_version": "4.41.2",
    "use_cache": true,
    "vocab_size": 32128
}
"""
    config = json.loads(T5_CONFIG_JSON)
    config = T5Config(**config)
    with init_empty_weights():
        t5xxl = T5EncoderModel._from_config(config)

    if state_dict is not None:
        sd = state_dict
    else:
        logger.info(f"Loading state dict from {ckpt_path}")
        sd = load_safetensors(ckpt_path, device=str(device), disable_mmap=disable_mmap, dtype=dtype)
    # assign=True adopts the loaded tensors (model was built on the meta device)
    info = t5xxl.load_state_dict(sd, strict=True, assign=True)
    logger.info(f"Loaded T5xxl: {info}")
    t5xxl.to(device)

    if dtype is not None:
        if is_fp8(dtype):
            logger.info(f"prepare T5xxl for fp8: set to {dtype}")

            def prepare_fp8(text_encoder, target_dtype):
                # Replacement forward for T5DenseGatedActDense.
                # NOTE(review): mirrors the stock implementation but without its
                # cast of hidden_states to wo.weight.dtype — presumably to avoid
                # casting activations to fp8; confirm against transformers' source.
                def forward_hook(module):
                    def forward(hidden_states):
                        hidden_gelu = module.act(module.wi_0(hidden_states))
                        hidden_linear = module.wi_1(hidden_states)
                        hidden_states = hidden_gelu * hidden_linear
                        hidden_states = module.dropout(hidden_states)

                        hidden_states = module.wo(hidden_states)
                        return hidden_states

                    return forward

                for module in text_encoder.modules():
                    if module.__class__.__name__ in ["T5LayerNorm", "Embedding"]:
                        # keep norms and embeddings in the higher-precision target_dtype
                        # print("set", module.__class__.__name__, "to", target_dtype)
                        module.to(target_dtype)
                    if module.__class__.__name__ in ["T5DenseGatedActDense"]:
                        # print("set", module.__class__.__name__, "hooks")
                        module.forward = forward_hook(module)

            t5xxl.to(dtype)
            prepare_fp8(t5xxl.encoder, torch.bfloat16)
        else:
            logger.info(f"Setting T5xxl to dtype: {dtype}")
            t5xxl.to(dtype)

    # Load tokenizer
    tokenizer = T5Tokenizer.from_pretrained(T5_XXL_TOKENIZER_ID)
    return tokenizer, t5xxl
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
def get_t5xxl_actual_dtype(t5xxl: T5EncoderModel) -> torch.dtype:
    """Dtype of a representative transformer weight.

    nn.Embedding is the first layer, but it could be casted to bfloat16 or
    float32, so probe an attention projection weight instead.
    """
    first_block = t5xxl.encoder.block[0]
    return first_block.layer[0].SelfAttention.q.weight.dtype
|
src/musubi_tuner/flux_2/flux2_models.py
ADDED
|
@@ -0,0 +1,1020 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# # copy from FLUX repo: https://github.com/black-forest-labs/flux
|
| 2 |
+
# # license: Apache-2.0 License
|
| 3 |
+
import math
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from einops import rearrange
|
| 8 |
+
from torch import Tensor, nn
|
| 9 |
+
from torch.utils.checkpoint import checkpoint
|
| 10 |
+
|
| 11 |
+
from musubi_tuner.modules.attention import AttentionParams
|
| 12 |
+
from musubi_tuner.modules.custom_offloading_utils import ModelOffloader
|
| 13 |
+
from musubi_tuner.modules.attention import attention as unified_attention
|
| 14 |
+
|
| 15 |
+
from musubi_tuner.utils.model_utils import create_cpu_offloading_wrapper
|
| 16 |
+
|
| 17 |
+
# import logging
|
| 18 |
+
# logger = logging.getLogger(__name__)
|
| 19 |
+
# logging.basicConfig(level=logging.INFO)
|
| 20 |
+
|
| 21 |
+
# USE_REENTRANT = True
|
| 22 |
+
|
| 23 |
+
# Module-name prefixes whose layers are eligible for FP8 weight optimization
# (presumably consumed by the shared FP8 optimization utilities — the consumer
# is not visible in this file).
FP8_OPTIMIZATION_TARGET_KEYS = ["double_blocks", "single_blocks"]
# Substrings of module names that must be kept out of FP8 (norms, positional
# embedder, timestep embedder, modulation layers).
FP8_OPTIMIZATION_EXCLUDE_KEYS = ["norm", "pe_embedder", "time_in", "_modulation"]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@dataclass
class Flux2Params:
    """Configuration for the full-size FLUX.2 transformer (see `Flux2`)."""

    in_channels: int = 128  # packed latent channels
    context_in_dim: int = 15360  # text-encoder hidden dim fed to `txt_in`
    hidden_size: int = 6144  # transformer width
    num_heads: int = 48  # attention heads; hidden_size must be divisible by this
    depth: int = 8  # number of double-stream blocks
    depth_single_blocks: int = 48  # number of single-stream blocks
    axes_dim: list[int] = field(default_factory=lambda: [32, 32, 32, 32])  # per-axis dims for EmbedND; must sum to hidden_size // num_heads
    theta: int = 2000  # rotary-embedding base passed to EmbedND
    mlp_ratio: float = 3.0  # MLP hidden dim = hidden_size * mlp_ratio
    use_guidance_embed: bool = True  # whether the model has a guidance-scale embedder
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@dataclass
class Klein9BParams(Flux2Params):
    """Configuration for the 9B "Klein" FLUX.2 variant.

    Only fields that differ from :class:`Flux2Params` are re-declared here;
    ``in_channels``, ``axes_dim``, ``theta`` and ``mlp_ratio`` are inherited
    unchanged from the base configuration (the previous version re-declared
    them with identical values, which was redundant).
    """

    context_in_dim: int = 12288  # smaller text-encoder hidden dim than the full model
    hidden_size: int = 4096
    num_heads: int = 32
    depth: int = 8  # double-stream blocks
    depth_single_blocks: int = 24  # single-stream blocks
    use_guidance_embed: bool = False  # Klein variants have no guidance embedder
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@dataclass
class Klein4BParams(Flux2Params):
    """Configuration for the 4B "Klein" FLUX.2 variant.

    Only fields that differ from :class:`Flux2Params` are re-declared here;
    ``in_channels``, ``axes_dim``, ``theta`` and ``mlp_ratio`` are inherited
    unchanged from the base configuration (the previous version re-declared
    them with identical values, which was redundant).
    """

    context_in_dim: int = 7680  # smaller text-encoder hidden dim than the full model
    hidden_size: int = 3072
    num_heads: int = 24
    depth: int = 5  # double-stream blocks
    depth_single_blocks: int = 20  # single-stream blocks
    use_guidance_embed: bool = False  # Klein variants have no guidance embedder
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
# region autoencoder
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@dataclass
class AutoEncoderParams:
    """Configuration for the convolutional VAE (see `AutoEncoder`)."""

    resolution: int = 256  # training resolution (recorded; not a hard input constraint)
    in_channels: int = 3  # RGB input
    ch: int = 128  # base channel count, scaled by ch_mult per level
    out_ch: int = 3  # RGB output
    ch_mult: list[int] = field(default_factory=lambda: [1, 2, 4, 4])  # channel multiplier per resolution level
    num_res_blocks: int = 2  # ResnetBlocks per level
    z_channels: int = 32  # latent channels before 2x2 spatial packing
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def swish(x: Tensor) -> Tensor:
    """SiLU/swish activation: ``x * sigmoid(x)``."""
    return torch.sigmoid(x) * x
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class AttnBlock(nn.Module):
    """Single-head self-attention over the spatial positions of a 2D feature map.

    Used at the VAE bottleneck. The residual connection is applied in
    ``forward``; ``attention`` returns only the attention output.
    """

    def __init__(self, in_channels: int):
        super().__init__()
        self.in_channels = in_channels

        self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)

        # 1x1 convs act as per-position linear projections
        self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1)

    def attention(self, h_: Tensor) -> Tensor:
        normed = self.norm(h_)
        q = self.q(normed)
        k = self.k(normed)
        v = self.v(normed)

        b, c, h, w = q.shape

        def to_tokens(t: Tensor) -> Tensor:
            # (b, c, h, w) -> (b, 1 head, h*w tokens, c) for SDPA
            return t.reshape(b, c, h * w).permute(0, 2, 1).unsqueeze(1).contiguous()

        out = nn.functional.scaled_dot_product_attention(to_tokens(q), to_tokens(k), to_tokens(v))

        # (b, 1, h*w, c) -> (b, c, h, w)
        return out.squeeze(1).permute(0, 2, 1).reshape(b, c, h, w)

    def forward(self, x: Tensor) -> Tensor:
        return x + self.proj_out(self.attention(x))
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class ResnetBlock(nn.Module):
    """GroupNorm/swish/conv residual block; 1x1 shortcut when channels change.

    ``out_channels`` may be ``None``, in which case it defaults to
    ``in_channels``.
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if self.in_channels != self.out_channels:
            # project the residual path to the new channel count
            self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        branch = self.conv1(swish(self.norm1(x)))
        branch = self.conv2(swish(self.norm2(branch)))

        shortcut = self.nin_shortcut(x) if self.in_channels != self.out_channels else x
        return shortcut + branch
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class Downsample(nn.Module):
    """Halve spatial resolution with a stride-2 3x3 conv.

    Uses asymmetric (right/bottom only) zero padding, applied manually since
    torch convs only support symmetric padding.
    """

    def __init__(self, in_channels: int):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x: Tensor):
        # pad one pixel on the right and the bottom only
        padded = nn.functional.pad(x, (0, 1, 0, 1), mode="constant", value=0)
        return self.conv(padded)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
class Upsample(nn.Module):
    """Double spatial resolution (nearest-neighbor) followed by a 3x3 conv."""

    def __init__(self, in_channels: int):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Tensor):
        upsampled = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        return self.conv(upsampled)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
class Encoder(nn.Module):
    """VAE encoder: image -> ``2 * z_channels`` moments stacked on the channel dim.

    A ResNet downsampling tower with a single attention block at the
    bottleneck. Note that ``forward`` already applies ``quant_conv``, so the
    caller receives the projected moments directly (see ``AutoEncoder.encode``,
    which takes only the mean half).
    """

    def __init__(
        self,
        resolution: int,
        in_channels: int,
        ch: int,  # base channel count, scaled by ch_mult per level
        ch_mult: list[int],
        num_res_blocks: int,
        z_channels: int,
    ):
        super().__init__()
        self.quant_conv = torch.nn.Conv2d(2 * z_channels, 2 * z_channels, 1)
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # downsampling
        self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)

        curr_res = resolution
        # prepend 1 so level i reads its input width from the previous level's output
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        block_in = self.ch
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()  # kept empty here; preserved for checkpoint layout
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
                block_in = block_out
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                # downsample between levels, except after the last one
                down.downsample = Downsample(block_in)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle (bottleneck): res block, attention, res block
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)

        # end: norm + conv to 2*z_channels (mean and logvar)
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Tensor) -> Tensor:
        """Encode image ``x`` (B, in_channels, H, W) to projected moments."""
        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1])
                if len(self.down[i_level].attn) > 0:
                    # never taken with the __init__ above (attn is always empty)
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
        # end
        h = self.norm_out(h)
        h = swish(h)
        h = self.conv_out(h)
        h = self.quant_conv(h)
        return h
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
class Decoder(nn.Module):
    """VAE decoder: latent ``z`` (B, z_channels, h, w) -> image (B, out_ch, H, W).

    Mirror of `Encoder`: bottleneck res/attn/res, then a ResNet upsampling
    tower. ``forward`` applies ``post_quant_conv`` itself, so callers pass the
    raw (un-packed) latent.
    """

    def __init__(
        self,
        ch: int,  # base channel count, scaled by ch_mult per level
        out_ch: int,
        ch_mult: list[int],
        num_res_blocks: int,
        in_channels: int,
        resolution: int,
        z_channels: int,
    ):
        super().__init__()
        self.post_quant_conv = torch.nn.Conv2d(z_channels, z_channels, 1)
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # total spatial downscale factor of the matching encoder
        self.ffactor = 2 ** (self.num_resolutions - 1)

        # compute in_ch_mult, block_in and curr_res at lowest res
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)

        # z to block_in
        self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)

        # middle (bottleneck): res block, attention, res block
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)

        # upsampling (built from lowest to highest resolution)
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()  # kept empty here; preserved for checkpoint layout
            block_out = ch * ch_mult[i_level]
            # one more res block per level than the encoder
            for _ in range(self.num_res_blocks + 1):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
                block_in = block_out
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)

    def forward(self, z: Tensor) -> Tensor:
        """Decode latent ``z`` to an image."""
        z = self.post_quant_conv(z)

        # get dtype for proper tracing
        upscale_dtype = next(self.up.parameters()).dtype

        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)

        # cast to proper dtype
        h = h.to(upscale_dtype)
        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h)
                if len(self.up[i_level].attn) > 0:
                    # never taken with the __init__ above (attn is always empty)
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = swish(h)
        h = self.conv_out(h)
        return h
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
class AutoEncoder(nn.Module):
    """Convolutional VAE with 2x2 spatial packing and BatchNorm-based latent whitening.

    ``encode`` takes only the mean of the encoder's moments (no sampling
    visible here), packs each 2x2 spatial patch into channels, then whitens
    with a frozen (eval-mode) BatchNorm. ``decode`` inverts the whitening and
    packing before running the decoder.

    NOTE(review): the BatchNorm running statistics are presumably loaded from
    the pretrained checkpoint — nothing in this file updates them (``bn.eval()``
    is forced in both normalize paths); confirm against the loader.
    """

    def __init__(self, params: AutoEncoderParams):
        super().__init__()
        self.params = params
        self.encoder = Encoder(
            resolution=params.resolution,
            in_channels=params.in_channels,
            ch=params.ch,
            ch_mult=params.ch_mult,
            num_res_blocks=params.num_res_blocks,
            z_channels=params.z_channels,
        )
        self.decoder = Decoder(
            resolution=params.resolution,
            in_channels=params.in_channels,
            ch=params.ch,
            out_ch=params.out_ch,
            ch_mult=params.ch_mult,
            num_res_blocks=params.num_res_blocks,
            z_channels=params.z_channels,
        )

        self.bn_eps = 1e-4
        self.bn_momentum = 0.1
        self.ps = [2, 2]  # patch size for the 2x2 spatial->channel packing
        # affine=False: pure whitening using running stats only
        self.bn = torch.nn.BatchNorm2d(
            math.prod(self.ps) * params.z_channels,
            eps=self.bn_eps,
            momentum=self.bn_momentum,
            affine=False,
            track_running_stats=True,
        )

    def normalize(self, z):
        """Whiten packed latents with the frozen BatchNorm running stats."""
        self.bn.eval()  # never update running stats here
        return self.bn(z)

    def inv_normalize(self, z):
        """Invert `normalize` manually: z * std + mean from the running stats."""
        self.bn.eval()
        s = torch.sqrt(self.bn.running_var.view(1, -1, 1, 1) + self.bn_eps)
        m = self.bn.running_mean.view(1, -1, 1, 1)
        return z * s + m

    def encode(self, x: Tensor) -> Tensor:
        """Image -> whitened, 2x2-packed latent (channels = 4 * z_channels)."""
        moments = self.encoder(x)
        # deterministic: use only the mean half of (mean, logvar)
        mean = torch.chunk(moments, 2, dim=1)[0]

        # pack each 2x2 spatial patch into the channel dim
        z = rearrange(
            mean,
            "... c (i pi) (j pj) -> ... (c pi pj) i j",
            pi=self.ps[0],
            pj=self.ps[1],
        )
        z = self.normalize(z)
        return z

    def decode(self, z: Tensor) -> Tensor:
        """Whitened packed latent -> image (inverse of `encode` + decoder)."""
        z = self.inv_normalize(z)
        # unpack channels back to 2x2 spatial patches
        z = rearrange(
            z,
            "... (c pi pj) i j -> ... c (i pi) (j pj)",
            pi=self.ps[0],
            pj=self.ps[1],
        )
        dec = self.decoder(z)
        return dec

    @property
    def device(self) -> torch.device:
        return next(self.parameters()).device

    @property
    def dtype(self) -> torch.dtype:
        return next(self.parameters()).dtype
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
# endregion
|
| 409 |
+
|
| 410 |
+
# region model
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
class Flux2(nn.Module):
    """FLUX.2 rectified-flow transformer over packed latent tokens.

    Double-stream blocks process image and text tokens in parallel streams;
    the streams are then concatenated (text first) and run through
    single-stream blocks. Modulation tensors are computed once here from the
    timestep (and optional guidance) embedding and shared across all blocks.
    Supports gradient checkpointing (optionally with activation CPU
    offloading) and block swapping between CPU and GPU via `ModelOffloader`.
    """

    def __init__(self, params: Flux2Params, attn_mode: str = "flash", split_attn: bool = False) -> None:
        super().__init__()

        self.in_channels = params.in_channels
        self.out_channels = params.in_channels
        if params.hidden_size % params.num_heads != 0:
            raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
        pe_dim = params.hidden_size // params.num_heads
        if sum(params.axes_dim) != pe_dim:
            raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
        self.hidden_size = params.hidden_size
        self.num_heads = params.num_heads

        self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
        self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=False)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, disable_bias=True)
        self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size, bias=False)

        self.use_guidance_embed = params.use_guidance_embed
        if self.use_guidance_embed:
            self.guidance_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, disable_bias=True)

        self.attn_mode = attn_mode
        self.split_attn = split_attn

        self.double_blocks = nn.ModuleList(
            [DoubleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio) for _ in range(params.depth)]
        )
        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio)
                for _ in range(params.depth_single_blocks)
            ]
        )

        # shared modulation: one img/txt pair for all double blocks, one triple
        # for all single blocks (computed once per forward from `vec`)
        self.double_stream_modulation_img = Modulation(self.hidden_size, double=True, disable_bias=True)
        self.double_stream_modulation_txt = Modulation(self.hidden_size, double=True, disable_bias=True)
        self.single_stream_modulation = Modulation(self.hidden_size, double=False, disable_bias=True)

        self.final_layer = LastLayer(self.hidden_size, self.out_channels)

        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False
        self.blocks_to_swap = None  # None/0 means block swap disabled

        self.offloader_double = None
        self.offloader_single = None
        self.num_double_blocks = len(self.double_blocks)
        self.num_single_blocks = len(self.single_blocks)

    def get_model_type(self) -> str:
        """Model-type tag used by callers to distinguish architectures."""
        return "flux_2"

    @property
    def device(self):
        return next(self.parameters()).device

    @property
    def dtype(self):
        return next(self.parameters()).dtype

    def enable_gradient_checkpointing(self, activation_cpu_offloading: bool = False):
        """Enable checkpointing on the embedders and all stream blocks."""
        self.gradient_checkpointing = True
        self.activation_cpu_offloading = activation_cpu_offloading

        self.time_in.enable_gradient_checkpointing()
        # guidance_in may have been replaced with nn.Identity elsewhere
        # (e.g. by an optimization pass) — skip it in that case
        if self.use_guidance_embed and self.guidance_in.__class__ != nn.Identity:
            self.guidance_in.enable_gradient_checkpointing()

        for block in self.double_blocks + self.single_blocks:
            block.enable_gradient_checkpointing(activation_cpu_offloading)

        print(f"FLUX2: Gradient checkpointing enabled. Activation CPU offloading: {activation_cpu_offloading}")

    def disable_gradient_checkpointing(self):
        """Undo `enable_gradient_checkpointing`."""
        self.gradient_checkpointing = False

        self.time_in.disable_gradient_checkpointing()
        if self.use_guidance_embed and self.guidance_in.__class__ != nn.Identity:
            self.guidance_in.disable_gradient_checkpointing()

        for block in self.double_blocks + self.single_blocks:
            block.disable_gradient_checkpointing()

        print("FLUX2: Gradient checkpointing disabled.")

    def enable_block_swap(self, num_blocks: int, device: torch.device, supports_backward: bool, use_pinned_memory: bool = False):
        """Split a total swap budget of `num_blocks` between double and single blocks.

        A single block is weighted as half a double block when apportioning the
        budget (the `swap_ratio / 2.0` term). At least 2 blocks of each kind
        must stay resident; the loops below rebalance toward that constraint.

        NOTE(review): with num_blocks <= 0 the fallback at the bottom still
        requests one swapped block; blocks_to_swap stays falsy so forward()
        ignores the offloaders — confirm this is intended.
        """
        self.blocks_to_swap = num_blocks
        if num_blocks <= 0:
            double_blocks_to_swap = 0
            single_blocks_to_swap = 0
        elif self.num_double_blocks == 0:
            double_blocks_to_swap = 0
            single_blocks_to_swap = num_blocks
        elif self.num_single_blocks == 0:
            double_blocks_to_swap = num_blocks
            single_blocks_to_swap = 0
        else:
            swap_ratio = self.num_single_blocks / self.num_double_blocks
            double_blocks_to_swap = int(round(num_blocks / (1.0 + swap_ratio / 2.0)))
            single_blocks_to_swap = int(round(double_blocks_to_swap * swap_ratio))

        # adjust if we exceed available blocks
        if self.num_double_blocks * 2 < self.num_single_blocks:
            # trade one double for two singles until the double budget fits
            while double_blocks_to_swap >= 1 and double_blocks_to_swap > self.num_double_blocks - 2:
                double_blocks_to_swap -= 1
                single_blocks_to_swap += 2
        else:
            # trade two singles for one double until the single budget fits
            while single_blocks_to_swap >= 2 and single_blocks_to_swap > self.num_single_blocks - 2:
                single_blocks_to_swap -= 2
                double_blocks_to_swap += 1

        if double_blocks_to_swap == 0 and single_blocks_to_swap == 0:
            # swap at least one block from the larger group
            if self.num_single_blocks >= self.num_double_blocks:
                single_blocks_to_swap = 1
            else:
                double_blocks_to_swap = 1

        assert double_blocks_to_swap <= self.num_double_blocks - 2 and single_blocks_to_swap <= self.num_single_blocks - 2, (
            f"Cannot swap more than {self.num_double_blocks - 2} double blocks and {self.num_single_blocks - 2} single blocks. "
            f"Requested {double_blocks_to_swap} double blocks and {single_blocks_to_swap} single blocks."
        )

        self.offloader_double = ModelOffloader(
            "double",
            self.double_blocks,
            self.num_double_blocks,
            double_blocks_to_swap,
            supports_backward,
            device,
            use_pinned_memory,
            # , debug=True
        )
        self.offloader_single = ModelOffloader(
            "single",
            self.single_blocks,
            self.num_single_blocks,
            single_blocks_to_swap,
            supports_backward,
            device,
            use_pinned_memory,
            # , debug=True
        )
        # NOTE(review): message says "FLUX" but this is the FLUX.2 model —
        # consider "FLUX2" for consistency with the checkpointing messages
        print(
            f"FLUX: Block swap enabled. Swapping {num_blocks} blocks, double blocks: {double_blocks_to_swap}, single blocks: {single_blocks_to_swap}."
        )

    def switch_block_swap_for_inference(self):
        """Restrict swapping to the forward pass (no backward bookkeeping)."""
        if self.blocks_to_swap:
            self.offloader_double.set_forward_only(True)
            self.offloader_single.set_forward_only(True)
            self.prepare_block_swap_before_forward()
            print("FLUX: Block swap set to forward only.")

    def switch_block_swap_for_training(self):
        """Re-enable swapping for both forward and backward passes."""
        if self.blocks_to_swap:
            self.offloader_double.set_forward_only(False)
            self.offloader_single.set_forward_only(False)
            self.prepare_block_swap_before_forward()
            print("FLUX: Block swap set to forward and backward.")

    def move_to_device_except_swap_blocks(self, device: torch.device):
        # assume model is on cpu. do not move blocks to device to reduce temporary memory usage
        if self.blocks_to_swap:
            # temporarily detach the block lists so .to() skips them
            save_double_blocks = self.double_blocks
            save_single_blocks = self.single_blocks
            self.double_blocks = nn.ModuleList()
            self.single_blocks = nn.ModuleList()

        self.to(device)

        if self.blocks_to_swap:
            self.double_blocks = save_double_blocks
            self.single_blocks = save_single_blocks

    def prepare_block_swap_before_forward(self):
        """Position swapped blocks on their starting devices; no-op if disabled."""
        if self.blocks_to_swap is None or self.blocks_to_swap == 0:
            return
        self.offloader_double.prepare_block_devices_before_forward(self.double_blocks)
        self.offloader_single.prepare_block_devices_before_forward(self.single_blocks)

    def forward(self, x: Tensor, x_ids: Tensor, timesteps: Tensor, ctx: Tensor, ctx_ids: Tensor, guidance: Tensor | None) -> Tensor:
        """Predict the flow for packed latent tokens ``x``.

        Args:
            x: packed latent tokens, last dim = in_channels
                (assumed (batch, img_tokens, in_channels) — TODO confirm)
            x_ids: positional ids for the image tokens (fed to EmbedND)
            timesteps: diffusion timesteps, embedded sinusoidally (dim 256)
            ctx: text-encoder hidden states, last dim = context_in_dim
            ctx_ids: positional ids for the text tokens
            guidance: guidance scale values; required (used) only when
                use_guidance_embed is True, may be None otherwise

        Returns:
            Tensor with the text tokens stripped, projected back to
            out_channels by the final layer.
        """
        num_txt_tokens = ctx.shape[1]

        timestep_emb = timestep_embedding(timesteps, 256)
        del timesteps
        vec = self.time_in(timestep_emb)
        del timestep_emb
        if self.use_guidance_embed:
            guidance_emb = timestep_embedding(guidance, 256)
            vec = vec + self.guidance_in(guidance_emb)
            del guidance_emb

        # modulation tensors are computed once and shared by every block
        double_block_mod_img = self.double_stream_modulation_img(vec)
        double_block_mod_txt = self.double_stream_modulation_txt(vec)
        single_block_mod, _ = self.single_stream_modulation(vec)

        img = self.img_in(x)
        del x
        txt = self.txt_in(ctx)
        del ctx
        pe_x = self.pe_embedder(x_ids)
        del x_ids
        pe_ctx = self.pe_embedder(ctx_ids)
        del ctx_ids

        attn_params = AttentionParams.create_attention_params(self.attn_mode, self.split_attn)  # No attention mask

        for block_idx, block in enumerate(self.double_blocks):
            if self.blocks_to_swap:
                # make sure this block's weights have arrived on the compute device
                self.offloader_double.wait_for_block(block_idx)

            img, txt = block(img, txt, pe_x, pe_ctx, double_block_mod_img, double_block_mod_txt, attn_params)

            if self.blocks_to_swap:
                self.offloader_double.submit_move_blocks_forward(self.double_blocks, block_idx)

        del double_block_mod_img, double_block_mod_txt

        # single-stream phase: concatenate text tokens in front of image tokens
        img = torch.cat((txt, img), dim=1)
        del txt
        pe = torch.cat((pe_ctx, pe_x), dim=2)
        del pe_ctx, pe_x

        for block_idx, block in enumerate(self.single_blocks):
            if self.blocks_to_swap:
                self.offloader_single.wait_for_block(block_idx)

            img = block(img, pe, single_block_mod, attn_params)

            if self.blocks_to_swap:
                self.offloader_single.submit_move_blocks_forward(self.single_blocks, block_idx)

        del single_block_mod, pe

        img = img.to(vec.device)  # move to gpu if gradient checkpointing cpu offloading is used

        # drop the text tokens, keep only the image tokens
        img = img[:, num_txt_tokens:, ...]

        img = self.final_layer(img, vec)
        return img
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
class SelfAttention(nn.Module):
    """Parameter container for stream-block attention: fused QKV projection,
    per-head QK normalization, and output projection.

    This module defines no ``forward``; the owning block drives the actual
    attention computation using these sub-modules.
    """

    def __init__(self, dim: int, num_heads: int = 8):
        super().__init__()
        self.num_heads = num_heads
        self.qkv = nn.Linear(dim, dim * 3, bias=False)

        self.norm = QKNorm(dim // num_heads)  # normalizes Q and K per head
        self.proj = nn.Linear(dim, dim, bias=False)
|
| 666 |
+
|
| 667 |
+
|
| 668 |
+
class SiLUActivation(nn.Module):
    """Gated SiLU (SwiGLU-style): split the last dim in half, ``silu(a) * b``."""

    def __init__(self):
        super().__init__()
        self.gate_fn = nn.SiLU()

    def forward(self, x: Tensor) -> Tensor:
        gate, value = torch.chunk(x, 2, dim=-1)
        return self.gate_fn(gate) * value
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
class Modulation(nn.Module):
    """Project a conditioning vector into adaLN (shift, scale, gate) triples.

    With ``double=True`` the forward returns two triples (tuple of 3, tuple
    of 3); with ``double=False`` it returns one triple and ``None``.
    """

    def __init__(self, dim: int, double: bool, disable_bias: bool = False):
        super().__init__()
        self.is_double = double
        self.multiplier = 6 if double else 3
        self.lin = nn.Linear(dim, self.multiplier * dim, bias=not disable_bias)

    def forward(self, vec: torch.Tensor):
        original_dtype = vec.dtype
        # run SiLU + projection in fp32 for numerical stability
        projected = self.lin(nn.functional.silu(vec.to(torch.float32)))
        if projected.ndim == 2:
            projected = projected[:, None, :]  # add a sequence axis for broadcasting
        chunks = projected.to(original_dtype).chunk(self.multiplier, dim=-1)

        second = chunks[3:] if self.is_double else None
        return chunks[:3], second
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
class LastLayer(nn.Module):
    """Final adaLN-modulated projection from hidden states to output channels."""

    def __init__(self, hidden_size: int, out_channels: int):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, out_channels, bias=False)
        self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=False))

    def forward(self, x: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
        original_dtype = x.dtype
        # modulation and normalization run in fp32 for numerical stability
        shift, scale = self.adaLN_modulation(vec.to(torch.float32)).chunk(2, dim=-1)
        if shift.ndim == 2:
            # add a sequence axis so the modulation broadcasts over tokens
            shift = shift[:, None, :]
            scale = scale[:, None, :]
        modulated = (1 + scale) * self.norm_final(x.to(torch.float32)) + shift
        return self.linear(modulated).to(original_dtype)
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
class SingleStreamBlock(nn.Module):
    """Parallel attention + gated-MLP transformer block over a single token stream.

    A single fused ``linear1`` produces QKV and the MLP input together; the
    attention output and the gated-MLP output are concatenated and projected
    back by ``linear2``. Modulation (shift, scale, gate) is supplied by the
    caller (shared across blocks in `Flux2`), not computed here.
    """

    def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float = 4.0):
        super().__init__()

        self.hidden_dim = hidden_size
        self.num_heads = num_heads
        head_dim = hidden_size // num_heads
        # softmax scale; not used in the visible code path (attention helper
        # presumably applies its own default scaling — TODO confirm)
        self.scale = head_dim**-0.5
        self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
        self.mlp_mult_factor = 2  # gated MLP needs 2x width before the split

        # fused projection: [q | k | v | mlp_in]
        self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim * self.mlp_mult_factor, bias=False)
        # fused output projection over [attn | mlp]
        self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size, bias=False)

        self.norm = QKNorm(head_dim)
        self.hidden_size = hidden_size
        self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.mlp_act = SiLUActivation()

        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False

    def enable_gradient_checkpointing(self, activation_cpu_offloading: bool = False):
        self.gradient_checkpointing = True
        self.activation_cpu_offloading = activation_cpu_offloading

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False

    def _forward(self, x: Tensor, pe: Tensor, mod: tuple[Tensor, Tensor, Tensor], attn_params: AttentionParams) -> Tensor:
        # mod is the (shift, scale, gate) triple from the shared Modulation
        mod_shift, mod_scale, mod_gate = mod
        del mod
        x_mod = (1 + mod_scale) * self.pre_norm(x) + mod_shift
        del mod_scale, mod_shift

        qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim * self.mlp_mult_factor], dim=-1)

        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        del qkv
        q, k = self.norm(q, k, v)

        qkv_list = [q, k, v]
        del q, k, v
        attn = attention(qkv_list, pe, attn_params)
        del qkv_list, pe

        # compute activation in mlp stream, cat again and run second linear layer
        output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
        # gated residual connection
        return x + mod_gate * output

    def forward(self, x: Tensor, pe: Tensor, mod: tuple[Tensor, Tensor, Tensor], attn_params: AttentionParams) -> Tensor:
        if self.training and self.gradient_checkpointing:
            forward_fn = self._forward
            if self.activation_cpu_offloading:
                # wrap so checkpointed activations are kept on CPU
                forward_fn = create_cpu_offloading_wrapper(forward_fn, self.linear1.weight.device)
            return checkpoint(forward_fn, x, pe, mod, attn_params, use_reentrant=False)
        else:
            return self._forward(x, pe, mod, attn_params)
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
class DoubleStreamBlock(nn.Module):
    """Dual-stream (image/text) transformer block with joint attention.

    Image and text tokens are modulated, projected to QKV separately, then
    attended jointly (text tokens first in the sequence) before per-stream
    gated residual updates. `del` statements free intermediates eagerly to
    lower peak GPU memory; statement order is significant.
    """

    def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float):
        super().__init__()
        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        self.num_heads = num_heads
        assert hidden_size % num_heads == 0, f"{hidden_size=} must be divisible by {num_heads=}"

        self.hidden_size = hidden_size
        self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        # First MLP linear emits 2x mlp_hidden_dim; SiLUActivation presumably
        # gates/halves back to mlp_hidden_dim (SwiGLU-style) — the second
        # linear consumes mlp_hidden_dim. TODO confirm against SiLUActivation.
        self.mlp_mult_factor = 2

        self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads)
        self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim * self.mlp_mult_factor, bias=False),
            SiLUActivation(),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=False),
        )

        self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads)
        self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim * self.mlp_mult_factor, bias=False),
            SiLUActivation(),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=False),
        )
        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False

    def enable_gradient_checkpointing(self, activation_cpu_offloading: bool = False):
        """Turn on gradient checkpointing (optionally with CPU activation offload)."""
        self.gradient_checkpointing = True
        self.activation_cpu_offloading = activation_cpu_offloading

    def disable_gradient_checkpointing(self):
        """Turn off gradient checkpointing and activation offload."""
        self.gradient_checkpointing = False
        self.activation_cpu_offloading = False

    def _forward(
        self,
        img: Tensor,
        txt: Tensor,
        pe: Tensor,
        pe_ctx: Tensor,
        mod_img: tuple[Tensor, Tensor],
        mod_txt: tuple[Tensor, Tensor],
        attn_params: AttentionParams,
    ) -> tuple[Tensor, Tensor]:
        """Core computation; returns updated (img, txt) token streams."""
        img_mod1, img_mod2 = mod_img
        txt_mod1, txt_mod2 = mod_txt
        del mod_img, mod_txt

        # Each modulation triple is (shift, scale, gate); mod*1 drives the
        # attention branch, mod*2 the MLP branch.
        img_mod1_shift, img_mod1_scale, img_mod1_gate = img_mod1
        img_mod2_shift, img_mod2_scale, img_mod2_gate = img_mod2
        txt_mod1_shift, txt_mod1_scale, txt_mod1_gate = txt_mod1
        txt_mod2_shift, txt_mod2_scale, txt_mod2_gate = txt_mod2
        del img_mod1, img_mod2, txt_mod1, txt_mod2

        # prepare image for attention
        img_modulated = self.img_norm1(img)
        img_modulated = (1 + img_mod1_scale) * img_modulated + img_mod1_shift
        del img_mod1_scale, img_mod1_shift

        img_qkv = self.img_attn.qkv(img_modulated)
        del img_modulated
        img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        del img_qkv
        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # prepare txt for attention
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1_scale) * txt_modulated + txt_mod1_shift
        del txt_mod1_scale, txt_mod1_shift
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        del txt_modulated
        txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        del txt_qkv
        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # Joint attention over [text; image] tokens (text first).
        txt_len = txt_q.shape[2]
        q = torch.cat((txt_q, img_q), dim=2)
        del txt_q, img_q
        k = torch.cat((txt_k, img_k), dim=2)
        del txt_k, img_k
        v = torch.cat((txt_v, img_v), dim=2)
        del txt_v, img_v

        # Positional embeddings concatenated in the same order: context then image.
        pe = torch.cat((pe_ctx, pe), dim=2)
        del pe_ctx
        qkv_list = [q, k, v]
        del q, k, v
        attn = attention(qkv_list, pe, attn_params)
        del qkv_list, pe
        # Split the joint attention output back into the two streams.
        txt_attn, img_attn = attn[:, :txt_len], attn[:, txt_len:]
        del attn

        # calculate the img blocks
        img = img + img_mod1_gate * self.img_attn.proj(img_attn)
        del img_mod1_gate, img_attn
        img = img + img_mod2_gate * self.img_mlp((1 + img_mod2_scale) * (self.img_norm2(img)) + img_mod2_shift)
        del img_mod2_gate, img_mod2_scale, img_mod2_shift

        # calculate the txt blocks
        txt = txt + txt_mod1_gate * self.txt_attn.proj(txt_attn)
        del txt_mod1_gate, txt_attn
        txt = txt + txt_mod2_gate * self.txt_mlp((1 + txt_mod2_scale) * (self.txt_norm2(txt)) + txt_mod2_shift)
        del txt_mod2_gate, txt_mod2_scale, txt_mod2_shift
        return img, txt

    def forward(
        self,
        img: Tensor,
        txt: Tensor,
        pe: Tensor,
        pe_ctx: Tensor,
        mod_img: tuple[Tensor, Tensor],
        mod_txt: tuple[Tensor, Tensor],
        attn_params: AttentionParams,
    ) -> tuple[Tensor, Tensor]:
        """Run the block, applying gradient checkpointing when enabled during training."""
        if self.training and self.gradient_checkpointing:
            forward_fn = self._forward
            if self.activation_cpu_offloading:
                forward_fn = create_cpu_offloading_wrapper(forward_fn, self.img_mlp[0].weight.device)
            return checkpoint(forward_fn, img, txt, pe, pe_ctx, mod_img, mod_txt, attn_params, use_reentrant=False)
        else:
            return self._forward(img, txt, pe, pe_ctx, mod_img, mod_txt, attn_params)
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
class MLPEmbedder(nn.Module):
    """Two-layer SiLU MLP embedding in_dim -> hidden_dim, with optional checkpointing."""

    def __init__(self, in_dim: int, hidden_dim: int, disable_bias: bool = False):
        super().__init__()
        use_bias = not disable_bias
        self.in_layer = nn.Linear(in_dim, hidden_dim, bias=use_bias)
        self.silu = nn.SiLU()
        self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=use_bias)
        self.gradient_checkpointing = False

    def enable_gradient_checkpointing(self):
        """Turn on gradient checkpointing for training."""
        self.gradient_checkpointing = True

    def disable_gradient_checkpointing(self):
        """Turn off gradient checkpointing."""
        self.gradient_checkpointing = False

    def _forward(self, x: Tensor) -> Tensor:
        hidden = self.silu(self.in_layer(x))
        return self.out_layer(hidden)

    def forward(self, *args, **kwargs):
        if not (self.training and self.gradient_checkpointing):
            return self._forward(*args, **kwargs)
        return checkpoint(self._forward, *args, use_reentrant=False, **kwargs)
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
class EmbedND(nn.Module):
    """Multi-axis rotary embedding: per-axis RoPE tables concatenated along the frequency dim."""

    def __init__(self, dim: int, theta: int, axes_dim: list[int]):
        super().__init__()
        self.dim = dim
        self.theta = theta
        self.axes_dim = axes_dim

    def forward(self, ids: Tensor) -> Tensor:
        # One RoPE table per positional axis, concatenated on the frequency axis.
        per_axis = [rope(ids[..., axis], self.axes_dim[axis], self.theta) for axis in range(len(self.axes_dim))]
        return torch.cat(per_axis, dim=-3).unsqueeze(1)
|
| 940 |
+
|
| 941 |
+
|
| 942 |
+
def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
    """
    Create sinusoidal timestep embeddings.
    :param t: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an (N, D) Tensor of positional embeddings.
    """
    scaled = t * time_factor
    half = dim // 2
    exponent = -math.log(max_period) * torch.arange(start=0, end=half, device=t.device, dtype=torch.float32) / half
    freqs = torch.exp(exponent)

    angles = scaled[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
    if dim % 2:
        # Odd output dims get a zero pad column.
        pad = torch.zeros_like(embedding[:, :1])
        embedding = torch.cat([embedding, pad], dim=-1)
    if torch.is_floating_point(scaled):
        embedding = embedding.to(scaled)
    return embedding
|
| 962 |
+
|
| 963 |
+
|
| 964 |
+
class RMSNorm(torch.nn.Module):
    """Root-mean-square normalization with a learned per-channel scale.

    Statistics are computed in float32 and the result is cast back to the
    input dtype before scaling.
    """

    def __init__(self, dim: int):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x: Tensor):
        original_dtype = x.dtype
        x32 = x.float()
        inv_rms = torch.rsqrt(x32.pow(2).mean(dim=-1, keepdim=True) + 1e-6)
        return (x32 * inv_rms).to(dtype=original_dtype) * self.scale
|
| 974 |
+
|
| 975 |
+
|
| 976 |
+
class QKNorm(torch.nn.Module):
    """Independent RMS normalization for queries and keys, cast back to v's dtype."""

    def __init__(self, dim: int):
        super().__init__()
        self.query_norm = RMSNorm(dim)
        self.key_norm = RMSNorm(dim)

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]:
        normed_q = self.query_norm(q).to(v)
        normed_k = self.key_norm(k).to(v)
        return normed_q, normed_k
|
| 986 |
+
|
| 987 |
+
|
| 988 |
+
def attention(qkv_list: list[Tensor], pe: Tensor, attn_params: AttentionParams) -> Tensor:
    """Apply RoPE to q/k, then run unified attention.

    Consumes ``qkv_list`` (and deletes its elements) to free memory eagerly.
    """
    q, k, v = qkv_list
    del qkv_list
    q, k = apply_rope(q, k, pe)

    # B, H, L, D -> B, L, H, D layout expected by the attention kernel.
    transposed = [t.transpose(1, 2) for t in (q, k, v)]
    del q, k, v
    return unified_attention(transposed, attn_params=attn_params)
|
| 1000 |
+
|
| 1001 |
+
|
| 1002 |
+
def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
    """Build a table of 2x2 RoPE rotation matrices for positions along one axis.

    Returns a float32 tensor with trailing shape (..., n, dim/2, 2, 2).
    """
    assert dim % 2 == 0
    exponents = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
    inv_freq = 1.0 / (theta**exponents)
    angles = torch.einsum("...n,d->...nd", pos, inv_freq)
    cos, sin = torch.cos(angles), torch.sin(angles)
    # Row-major entries of the rotation matrix [[cos, -sin], [sin, cos]].
    table = torch.stack([cos, -sin, sin, cos], dim=-1)
    table = rearrange(table, "b n d (i j) -> b n d i j", i=2, j=2)
    return table.float()
|
| 1010 |
+
|
| 1011 |
+
|
| 1012 |
+
def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
    """Rotate query/key tensors by the 2x2 RoPE matrices in ``freqs_cis``.

    Shapes and dtypes of the inputs are preserved; the rotation is computed
    in float32.
    """

    def rotate(x: Tensor) -> Tensor:
        pairs = x.float().reshape(*x.shape[:-1], -1, 1, 2)
        rotated = freqs_cis[..., 0] * pairs[..., 0] + freqs_cis[..., 1] * pairs[..., 1]
        return rotated.reshape(*x.shape).type_as(x)

    return rotate(xq), rotate(xk)
|
| 1018 |
+
|
| 1019 |
+
|
| 1020 |
+
# endregion
|
src/musubi_tuner/flux_2/flux2_utils.py
ADDED
|
@@ -0,0 +1,816 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import json
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
import math
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from accelerate import init_empty_weights
|
| 10 |
+
from einops import rearrange
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from typing import Optional, Tuple, Union
|
| 13 |
+
from torch import Tensor
|
| 14 |
+
from torch import nn
|
| 15 |
+
from transformers import (
|
| 16 |
+
Mistral3ForConditionalGeneration,
|
| 17 |
+
Mistral3Config,
|
| 18 |
+
AutoProcessor,
|
| 19 |
+
Qwen2Tokenizer,
|
| 20 |
+
Qwen3ForCausalLM,
|
| 21 |
+
)
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
|
| 24 |
+
from musubi_tuner.dataset.image_video_dataset import (
|
| 25 |
+
ARCHITECTURE_FLUX_2_DEV,
|
| 26 |
+
ARCHITECTURE_FLUX_2_DEV_FULL,
|
| 27 |
+
ARCHITECTURE_FLUX_2_KLEIN_4B,
|
| 28 |
+
ARCHITECTURE_FLUX_2_KLEIN_4B_FULL,
|
| 29 |
+
ARCHITECTURE_FLUX_2_KLEIN_9B,
|
| 30 |
+
ARCHITECTURE_FLUX_2_KLEIN_9B_FULL,
|
| 31 |
+
BucketSelector,
|
| 32 |
+
)
|
| 33 |
+
from musubi_tuner.modules.fp8_optimization_utils import apply_fp8_monkey_patch
|
| 34 |
+
from musubi_tuner.utils import image_utils
|
| 35 |
+
from musubi_tuner.utils.lora_utils import load_safetensors_with_lora_and_fp8
|
| 36 |
+
from musubi_tuner.zimage.zimage_utils import load_qwen3
|
| 37 |
+
|
| 38 |
+
from .flux2_models import Flux2, Flux2Params, Klein4BParams, Klein9BParams
|
| 39 |
+
|
| 40 |
+
from musubi_tuner.flux_2 import flux2_models
|
| 41 |
+
from musubi_tuner.utils.safetensors_utils import load_split_weights
|
| 42 |
+
|
| 43 |
+
import logging
|
| 44 |
+
|
| 45 |
+
logger = logging.getLogger(__name__)
|
| 46 |
+
logging.basicConfig(level=logging.INFO)
|
| 47 |
+
|
| 48 |
+
# HF repo providing the tokenizer for the Mistral-based text encoder.
M3_TOKENIZER_ID = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
# Hidden-state layer indices tapped as text-encoder outputs, per backbone.
OUTPUT_LAYERS_MISTRAL = [10, 20, 30]
OUTPUT_LAYERS_QWEN3 = [9, 18, 27]
# Maximum prompt token length.
MAX_LENGTH = 512
# Pixel-area cap (768x768) — presumably applied to images fed to the VLM for
# prompt upsampling; see cap_pixels. TODO confirm against callers.
UPSAMPLING_MAX_IMAGE_SIZE = 768**2
SYSTEM_MESSAGE = """You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object
attribution and actions without speculation."""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@dataclass(frozen=True)
class Flux2ModelInfo:
    """Static metadata describing one FLUX.2 model variant."""

    # Transformer hyperparameters for this variant.
    params: Flux2Params
    # Default sampling settings (e.g. {"guidance": ..., "num_steps": ...}).
    defaults: dict[str, float | int]
    # Keys of `defaults` that must not be overridden (used by distilled models).
    fixed_params: set[str]
    # True when the checkpoint was trained with guidance distillation.
    guidance_distilled: bool
    # Architecture identifiers used by the dataset/caching code.
    architecture: str
    architecture_full: str
    # Qwen3 text-encoder size ("4B"/"8B"); None selects the Mistral encoder.
    qwen_variant: Optional[str] = None  # None for Mistral
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# Registry of supported FLUX.2 variants. "klein-*" entries use a Qwen3 text
# encoder (qwen_variant set); "dev" uses the Mistral encoder. Entries with
# fixed_params={"guidance", "num_steps"} are step-distilled and pin those
# sampling settings.
FLUX2_MODEL_INFO = {
    # Step-distilled Klein 4B: fixed 4-step, guidance 1.0 sampling.
    "klein-4b": Flux2ModelInfo(
        params=Klein4BParams(),
        qwen_variant="4B",
        defaults={"guidance": 1.0, "num_steps": 4},
        fixed_params={"guidance", "num_steps"},
        guidance_distilled=True,
        architecture=ARCHITECTURE_FLUX_2_KLEIN_4B,
        architecture_full=ARCHITECTURE_FLUX_2_KLEIN_4B_FULL,
    ),
    # Non-distilled Klein 4B base: standard CFG sampling defaults.
    "klein-base-4b": Flux2ModelInfo(
        params=Klein4BParams(),
        qwen_variant="4B",
        defaults={"guidance": 4.0, "num_steps": 50},
        fixed_params=set(),
        guidance_distilled=False,
        architecture=ARCHITECTURE_FLUX_2_KLEIN_4B,
        architecture_full=ARCHITECTURE_FLUX_2_KLEIN_4B_FULL,
    ),
    # Step-distilled Klein 9B.
    "klein-9b": Flux2ModelInfo(
        params=Klein9BParams(),
        qwen_variant="8B",
        defaults={"guidance": 1.0, "num_steps": 4},
        fixed_params={"guidance", "num_steps"},
        guidance_distilled=True,
        architecture=ARCHITECTURE_FLUX_2_KLEIN_9B,
        architecture_full=ARCHITECTURE_FLUX_2_KLEIN_9B_FULL,
    ),
    # Non-distilled Klein 9B base.
    "klein-base-9b": Flux2ModelInfo(
        params=Klein9BParams(),
        qwen_variant="8B",
        defaults={"guidance": 4.0, "num_steps": 50},
        fixed_params=set(),
        guidance_distilled=False,
        architecture=ARCHITECTURE_FLUX_2_KLEIN_9B,
        architecture_full=ARCHITECTURE_FLUX_2_KLEIN_9B_FULL,
    ),
    # FLUX.2 dev: guidance-distilled, Mistral text encoder (qwen_variant None).
    "dev": Flux2ModelInfo(
        params=Flux2Params(),
        defaults={"guidance": 4.0, "num_steps": 50},
        fixed_params=set(),
        guidance_distilled=True,
        architecture=ARCHITECTURE_FLUX_2_DEV,
        architecture_full=ARCHITECTURE_FLUX_2_DEV_FULL,
    ),
}
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def add_model_version_args(parser: argparse.ArgumentParser):
    """Register the --model_version option with all known FLUX.2 variants."""
    parser.add_argument(
        "--model_version",
        type=str,
        default="dev",
        choices=list(FLUX2_MODEL_INFO.keys()),
        help="model version",
    )
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def is_fp8(dt):
|
| 122 |
+
return dt in [torch.float8_e4m3fn, torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz]
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def compress_time(t_ids: Tensor) -> Tensor:
    """Remap 1-D time ids onto a dense 0..K-1 range, preserving their order."""
    assert t_ids.ndim == 1
    unique_vals = torch.unique(t_ids, sorted=True)
    # Lookup table from original id -> dense rank.
    lookup = torch.zeros(int(torch.max(t_ids)) + 1, device=t_ids.device, dtype=t_ids.dtype)
    lookup[unique_vals] = torch.arange(len(unique_vals), device=t_ids.device, dtype=t_ids.dtype)
    return lookup[t_ids]
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def scatter_ids(x: Tensor, x_ids: Tensor) -> list[Tensor]:
    """Scatter flat token sequences back into dense (1, C, T, H, W) grids
    using their (t, h, w) position ids.

    Args:
        x: iterable of (L, C) token tensors.
        x_ids: matching iterable of (L, >=3) position tensors whose first
            three columns are (t, h, w) ids.

    Returns:
        One (1, C, T, H, W) tensor per input, where T/H/W are inferred from
        the maximum (compressed) ids. Positions not covered by any token
        remain zero.
    """
    x_list = []
    for data, pos in zip(x, x_ids):
        _, ch = data.shape  # noqa: F841
        t_ids = pos[:, 0].to(torch.int64)
        h_ids = pos[:, 1].to(torch.int64)
        w_ids = pos[:, 2].to(torch.int64)

        # Time ids may be sparse (reference offsets); densify them first.
        t_ids_cmpr = compress_time(t_ids)

        t = int(torch.max(t_ids_cmpr)) + 1
        h = int(torch.max(h_ids)) + 1
        w = int(torch.max(w_ids)) + 1

        flat_ids = t_ids_cmpr * w * h + h_ids * w + w_ids

        out = torch.zeros((t * h * w, ch), device=data.device, dtype=data.dtype)
        out.scatter_(0, flat_ids.unsqueeze(1).expand(-1, ch), data)

        # (t*h*w, c) -> (1, c, t, h, w); equivalent to the previous
        # rearrange(out, "(t h w) c -> 1 c t h w").
        x_list.append(out.view(t, h, w, ch).permute(3, 0, 1, 2).unsqueeze(0))
    # NOTE: a previous version also collected torch.unique(t_ids) per item
    # into a list that was never returned; that dead work has been removed.
    return x_list
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def pack_control_latent(control_latent: list[Tensor] | None) -> tuple[Optional[Tensor], Optional[Tensor]]:
    """Flatten reference latents into one token sequence with time-offset position ids.

    Each reference gets a distinct time offset (10, 20, 30, ...) so the model can
    tell them apart; returns (tokens, ids) with a leading batch dimension, or
    (None, None) when there are no references.
    """
    if control_latent is None:
        return None, None

    scale = 10
    ndim = control_latent[0].ndim  # 3 or 4

    # Time offset per reference: scale * (index + 1), flattened to 1-D.
    t_off = [(scale * (idx + 1)).view(-1) for idx in torch.arange(0, len(control_latent))]

    # Tokenize each latent with its positional ids.
    ref_tokens, ref_ids = listed_prc_img(control_latent, t_coord=t_off)  # list[(HW, C)], list[(HW, 4)]

    # Join all references along the sequence axis.
    cat_dimension = 0 if ndim == 3 else 1
    ref_tokens = torch.cat(ref_tokens, dim=cat_dimension)  # (total_ref_tokens, C) or (B, total_ref_tokens, C)
    ref_ids = torch.cat(ref_ids, dim=cat_dimension)  # (total_ref_tokens, 4) or (B, total_ref_tokens, 4)

    # Unbatched inputs gain a batch dimension of 1.
    if ndim == 3:
        ref_tokens = ref_tokens.unsqueeze(0)  # (1, total_ref_tokens, C)
        ref_ids = ref_ids.unsqueeze(0)  # (1, total_ref_tokens, 4)
    return ref_tokens, ref_ids
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def prc_txt(x: Tensor, t_coord: Tensor | None = None) -> tuple[Tensor, Tensor]:
|
| 190 |
+
_l = x.shape[-2]
|
| 191 |
+
|
| 192 |
+
coords = {
|
| 193 |
+
"t": torch.arange(1) if t_coord is None else t_coord,
|
| 194 |
+
"h": torch.arange(1), # dummy dimension
|
| 195 |
+
"w": torch.arange(1), # dummy dimension
|
| 196 |
+
"l": torch.arange(_l),
|
| 197 |
+
}
|
| 198 |
+
x_ids = torch.cartesian_prod(coords["t"], coords["h"], coords["w"], coords["l"])
|
| 199 |
+
if x.ndim == 3:
|
| 200 |
+
x_ids = x_ids.unsqueeze(0).expand(x.shape[0], -1, -1) # (B, L, 4)
|
| 201 |
+
return x, x_ids.to(x.device)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
# This function doesn't work properly, because t_coord is per-sample, but we need per-subsequence within sample
|
| 205 |
+
# kept for reference
|
| 206 |
+
# def batched_wrapper(fn):
|
| 207 |
+
# def batched_prc(x: Tensor, t_coord: Tensor | None = None) -> tuple[Tensor, Tensor]:
|
| 208 |
+
# results = []
|
| 209 |
+
# for i in range(len(x)):
|
| 210 |
+
# results.append(fn(x[i], t_coord[i] if t_coord is not None else None))
|
| 211 |
+
# x, x_ids = zip(*results)
|
| 212 |
+
# return torch.stack(x), torch.stack(x_ids)
|
| 213 |
+
# return batched_prc
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def listed_wrapper(fn):
|
| 217 |
+
def listed_prc(x: list[Tensor], t_coord: list[Tensor] | None = None) -> tuple[list[Tensor], list[Tensor]]:
|
| 218 |
+
results = []
|
| 219 |
+
for i in range(len(x)):
|
| 220 |
+
results.append(fn(x[i], t_coord[i] if t_coord is not None else None))
|
| 221 |
+
x, x_ids = zip(*results)
|
| 222 |
+
return list(x), list(x_ids)
|
| 223 |
+
|
| 224 |
+
return listed_prc
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def prc_img(x: Tensor, t_coord: Tensor | None = None) -> tuple[Tensor, Tensor]:
    """Flatten an image latent to tokens and build (t, h, w, l) position ids.

    x: (C, H, W) or (B, C, H, W); l is a dummy single-value axis. Returns
    tokens of shape (HW, C) (or (B, HW, C)) plus matching ids.
    """
    height, width = x.shape[-2], x.shape[-1]
    axes = {
        "t": torch.arange(1) if t_coord is None else t_coord,
        "h": torch.arange(height),
        "w": torch.arange(width),
        "l": torch.arange(1),
    }
    x_ids = torch.cartesian_prod(axes["t"], axes["h"], axes["w"], axes["l"])
    if x.ndim == 3:
        x = rearrange(x, "c h w -> (h w) c")
    else:
        x = rearrange(x, "b c h w -> b (h w) c")
    if x.ndim == 3:  # batched input after flattening
        x_ids = x_ids.unsqueeze(0).expand(x.shape[0], -1, -1)  # (B, HW, 4)
    return x, x_ids.to(x.device)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
# List-wise variant of prc_img: processes a list of latents with per-item t offsets.
listed_prc_img = listed_wrapper(prc_img)
# batched_prc_img = batched_wrapper(prc_img)
# batched_prc_txt = batched_wrapper(prc_txt)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
# This function is used from Mistral3Embedder
|
| 250 |
+
# This function is used from Mistral3Embedder
def cap_pixels(img: Image.Image | list[Image.Image], k):
    """Downscale an image (or each image in a list) so its pixel count is at most k.

    Aspect ratio is preserved; images already within the budget are returned as-is.
    """
    if isinstance(img, list):
        return [cap_pixels(member, k) for member in img]

    width, height = img.size
    total_pixels = width * height
    if total_pixels <= k:
        return img

    # Uniform scale factor that brings the area under the budget.
    factor = math.sqrt(k / total_pixels)
    return img.resize((int(width * factor), int(height * factor)), Image.Resampling.LANCZOS)
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def preprocess_control_image(
    control_image_path: str, limit_size: Optional[Tuple[int, int]] = None
) -> tuple[torch.Tensor, np.ndarray, Optional[np.ndarray]]:
    """
    Preprocess the control image for the model. See `preprocess_image` for details.

    Args:
        control_image_path (str): Path to the control image.
        limit_size (Optional[Tuple[int, int]]): Limit the size for resizing with (width, height).
            If None or larger than the control image size, only resizing to the nearest bucket size and cropping is performed.

    Returns:
        Tuple[torch.Tensor, np.ndarray, Optional[np.ndarray]]: A tuple containing:
            - control_image_tensor (torch.Tensor): The preprocessed control image tensor for the model. NCHW format.
            - control_image_np (np.ndarray): The preprocessed control image as a NumPy array for conditioning. HWC format.
            - None: Placeholder for compatibility (no additional data returned).
    """
    control_image = Image.open(control_image_path)

    # Compare total pixel areas: only downscale when limit_size is strictly smaller.
    if limit_size is None or limit_size[0] * limit_size[1] >= control_image.size[0] * control_image.size[1]:  # No resizing needed
        # All FLUX 2 architectures require dimensions to be multiples of 16
        resize_size = BucketSelector.calculate_bucket_resolution(
            control_image.size, control_image.size, architecture=ARCHITECTURE_FLUX_2_DEV
        )
    else:
        resize_size = limit_size

    control_image_tensor, control_image_np, _ = image_utils.preprocess_image(control_image, *resize_size, handle_alpha=False)
    return control_image_tensor, control_image_np, None
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def generalized_time_snr_shift(t: Tensor, mu: float, sigma: float) -> Tensor:
    """Shift timesteps via the generalized SNR transform exp(mu) / (exp(mu) + (1/t - 1)^sigma)."""
    e_mu = math.exp(mu)
    return e_mu / (e_mu + (1 / t - 1) ** sigma)
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def get_schedule(num_steps: int, image_seq_len: int, flow_shift: Optional[float] = None) -> list[float]:
    """Build the (num_steps + 1)-entry timestep schedule from 1 down to 0.

    When ``flow_shift`` is provided, the classic flow-shift transform is
    applied; otherwise the empirically fitted mu-shift (based on sequence
    length and step count) is used.
    """
    timesteps = torch.linspace(1, 0, num_steps + 1)
    if flow_shift is not None:
        timesteps = (timesteps * flow_shift) / (1 + (flow_shift - 1) * timesteps)
    else:
        # mu is only needed for the empirical shift; compute it lazily so the
        # flow_shift path does no wasted work.
        mu = compute_empirical_mu(image_seq_len, num_steps)
        timesteps = generalized_time_snr_shift(timesteps, mu, 1.0)
    return timesteps.tolist()
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def compute_empirical_mu(image_seq_len: int, num_steps: int) -> float:
    """Empirically fitted schedule-shift mu as a function of sequence length and step count.

    Two linear fits (at 10 and 200 steps) are interpolated in num_steps; long
    sequences (> 4300 tokens) use the 200-step fit directly.
    """
    a1, b1 = 8.73809524e-05, 1.89833333  # fit at 10 steps
    a2, b2 = 0.00016927, 0.45666666  # fit at 200 steps

    mu_200 = a2 * image_seq_len + b2
    if image_seq_len > 4300:
        return float(mu_200)

    mu_10 = a1 * image_seq_len + b1

    # Linear interpolation between the 10-step and 200-step fits.
    slope = (mu_200 - mu_10) / 190.0
    intercept = mu_200 - 200.0 * slope
    return float(slope * num_steps + intercept)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def denoise(
    model: Flux2,
    # model input
    img: Tensor,
    img_ids: Tensor,
    txt: Tensor,
    txt_ids: Tensor,
    # sampling parameters
    timesteps: list[float],
    guidance: float,
    # extra img tokens (sequence-wise)
    img_cond_seq: Tensor | None = None,
    img_cond_seq_ids: Tensor | None = None,
):
    """Euler sampler with (distilled) guidance embedding.

    Optionally appends sequence-wise conditioning tokens to the model input
    at every step; the corresponding predictions are discarded before the
    Euler update. Returns the final latent tokens.
    """
    guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
    for t_curr, t_prev in zip(tqdm(timesteps[:-1]), timesteps[1:]):
        t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
        img_input = img
        img_input_ids = img_ids
        if img_cond_seq is not None:
            assert img_cond_seq_ids is not None, "You need to provide either both or neither of the sequence conditioning"
            img_input = torch.cat((img_input, img_cond_seq), dim=1)
            img_input_ids = torch.cat((img_input_ids, img_cond_seq_ids), dim=1)

        with torch.no_grad(), torch.autocast(device_type=img.device.type, dtype=img.dtype):
            pred = model(x=img_input, x_ids=img_input_ids, timesteps=t_vec, ctx=txt, ctx_ids=txt_ids, guidance=guidance_vec)

        # Drop predictions for the appended conditioning tokens. Previously this
        # was gated on `img_input_ids is not None`, which is always true; gate on
        # the conditioning itself, matching denoise_cfg (the slice is a no-op
        # when no conditioning was appended).
        if img_cond_seq is not None:
            pred = pred[:, : img.shape[1]]

        img = img + (t_prev - t_curr) * pred

    return img
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def vanilla_guidance(x: torch.Tensor, cfg_val: float) -> torch.Tensor:
    """Classifier-free guidance mix: uncond + cfg_val * (cond - uncond).

    Expects the batch to be [uncond; cond] halves, as produced by chunk(2).
    """
    uncond, cond = x.chunk(2)
    return uncond + cfg_val * (cond - uncond)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def denoise_cfg(
    model: Flux2,
    img: Tensor,
    img_ids: Tensor,
    txt: Tensor,
    txt_ids: Tensor,
    uncond_txt: Tensor,
    uncond_txt_ids: Tensor,
    timesteps: list[float],
    guidance: float,
    img_cond_seq: Tensor | None = None,
    img_cond_seq_ids: Tensor | None = None,
):
    """Euler sampling loop with true classifier-free guidance.

    Runs the model twice per step (conditional and unconditional text context)
    and combines the two predictions with weight `guidance` before taking the
    Euler step. Returns the denoised latent token tensor.
    """
    for t_curr, t_prev in zip(tqdm(timesteps[:-1]), timesteps[1:]):
        t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)

        # Optionally append sequence-wise conditioning tokens to the input.
        if img_cond_seq is not None:
            model_in = torch.cat((img, img_cond_seq), dim=1)
            model_in_ids = torch.cat((img_ids, img_cond_seq_ids), dim=1)
        else:
            model_in = img
            model_in_ids = img_ids

        with torch.no_grad(), torch.autocast(device_type=img.device.type, dtype=img.dtype):
            pred_cond = model(x=model_in, x_ids=model_in_ids, timesteps=t_vec, ctx=txt, ctx_ids=txt_ids, guidance=None)
            pred_uncond = model(
                x=model_in, x_ids=model_in_ids, timesteps=t_vec, ctx=uncond_txt, ctx_ids=uncond_txt_ids, guidance=None
            )

        # Keep only the tokens that belong to the latent being denoised.
        if img_cond_seq is not None:
            keep = img.shape[1]
            pred_cond = pred_cond[:, :keep]
            pred_uncond = pred_uncond[:, :keep]

        guided = pred_uncond + guidance * (pred_cond - pred_uncond)
        img = img + (t_prev - t_curr) * guided

    return img
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def concatenate_images(
    images: list[Image.Image],
) -> Image.Image:
    """
    Concatenate a list of PIL images horizontally with center alignment and white background.
    """
    # Single image: nothing to stitch, return an independent copy.
    if len(images) == 1:
        return images[0].copy()

    # Normalize color modes so pasting onto an RGB canvas is safe.
    rgb_images = [im if im.mode == "RGB" else im.convert("RGB") for im in images]

    # Canvas: wide enough for all images side by side, tall as the tallest one.
    canvas_w = sum(im.width for im in rgb_images)
    canvas_h = max(im.height for im in rgb_images)

    background_color = (255, 255, 255)
    canvas = Image.new("RGB", (canvas_w, canvas_h), background_color)

    # Lay images out left-to-right, each vertically centered.
    x = 0
    for im in rgb_images:
        canvas.paste(im, (x, (canvas_h - im.height) // 2))
        x += im.width

    return canvas
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def load_flow_model(
    device: Union[str, torch.device],
    model_version_info: Flux2ModelInfo,
    dit_path: str,
    attn_mode: str,
    split_attn: bool,
    loading_device: Union[str, torch.device],
    dit_weight_dtype: Optional[torch.dtype] = None,
    fp8_scaled: bool = False,
    lora_weights_list: Optional[dict[str, torch.Tensor]] = None,
    lora_multipliers: Optional[list[float]] = None,
    disable_numpy_memmap: bool = False,
) -> flux2_models.Flux2:
    """Build a Flux2 DiT on meta tensors and load its weights.

    Weight loading optionally merges LoRA weights and applies fp8 scaling
    on the fly (via load_safetensors_with_lora_and_fp8), then assigns the
    resulting state dict into the meta-initialized model.

    Args:
        device: compute device used for fp8/LoRA calculations during loading.
        model_version_info: carries the Flux2 hyperparameters (``params``).
        dit_path: checkpoint path passed through as ``model_files``.
        attn_mode, split_attn: attention configuration forwarded to Flux2.
        loading_device: device the loaded weights should end up on.
        dit_weight_dtype: weight dtype; must be None iff ``fp8_scaled``.
        fp8_scaled: enable fp8 optimization of the targeted keys.
        lora_weights_list / lora_multipliers: optional LoRA merge inputs.
        disable_numpy_memmap: forwarded to the weight loader.

    Returns:
        The fully loaded flux2_models.Flux2 model.
    """
    # dit_weight_dtype is None for fp8_scaled
    assert (not fp8_scaled and dit_weight_dtype is not None) or (fp8_scaled and dit_weight_dtype is None)

    device = torch.device(device)
    loading_device = torch.device(loading_device)

    # build model
    # init_empty_weights: parameters are allocated on the meta device, so no
    # real memory is used until the state dict is assigned below.
    with init_empty_weights():
        params = model_version_info.params
        model = flux2_models.Flux2(params, attn_mode, split_attn)
        if dit_weight_dtype is not None:
            model.to(dit_weight_dtype)

    # load model weights with dynamic fp8 optimization and LoRA merging if needed
    logger.info(f"Loading DiT model from {dit_path}, device={loading_device}")
    sd = load_safetensors_with_lora_and_fp8(
        model_files=dit_path,
        lora_weights_list=lora_weights_list,
        lora_multipliers=lora_multipliers,
        fp8_optimization=fp8_scaled,
        calc_device=device,
        # only move during loading when the compute and target devices match
        move_to_device=(loading_device == device),
        dit_weight_dtype=dit_weight_dtype,
        target_keys=flux2_models.FP8_OPTIMIZATION_TARGET_KEYS,
        exclude_keys=flux2_models.FP8_OPTIMIZATION_EXCLUDE_KEYS,
        disable_numpy_memmap=disable_numpy_memmap,
    )

    # the monkey patch must be applied before load_state_dict so the module
    # forward matches the fp8-scaled weights in sd
    if fp8_scaled:
        apply_fp8_monkey_patch(model, sd, use_scaled_mm=False)

    if loading_device.type != "cpu":
        # make sure all the model weights are on the loading_device
        logger.info(f"Moving weights to {loading_device}")
        for key in sd.keys():
            sd[key] = sd[key].to(loading_device)

    # assign=True replaces the meta-device parameters with the loaded tensors
    info = model.load_state_dict(sd, strict=True, assign=True)
    logger.info(f"Loaded Flux 2: {info}")

    return model
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def load_ae(
    ckpt_path: str, dtype: torch.dtype, device: Union[str, torch.device], disable_mmap: bool = False
) -> flux2_models.AutoEncoder:
    """Build the Flux2 AutoEncoder and load its weights from `ckpt_path`."""
    logger.info("Building AutoEncoder")
    with init_empty_weights():
        # dev and schnell have the same AE params
        ae = flux2_models.AutoEncoder(flux2_models.AutoEncoderParams()).to(dtype)

    logger.info(f"Loading state dict from {ckpt_path}")
    state_dict = load_split_weights(ckpt_path, device=str(device), disable_mmap=disable_mmap, dtype=dtype)
    load_result = ae.load_state_dict(state_dict, strict=True, assign=True)
    logger.info(f"Loaded AE: {load_result}")
    return ae
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
class Mistral3Embedder(nn.Module):
    """Text embedder wrapping Mistral 3 for Flux2 text conditioning.

    Builds the model from a fixed, inlined config, loads weights from a
    checkpoint (or a provided state dict), and exposes ``forward(txt)``
    returning selected hidden-state layers stacked and rearranged to
    ``(b, l, c * d)``.
    """

    def __init__(
        self,
        ckpt_path: str,
        dtype: Optional[torch.dtype],
        device: Union[str, torch.device],
        disable_mmap: bool = False,
        state_dict: Optional[dict] = None,
    ):
        # Fix: the previous signature declared
        # `-> tuple[AutoProcessor, Mistral3ForConditionalGeneration]` on
        # __init__, which is incorrect — constructors return None.
        super().__init__()

        M3_CONFIG_JSON = """
{
    "architectures": [
        "Mistral3ForConditionalGeneration"
    ],
    "dtype": "bfloat16",
    "image_token_index": 10,
    "model_type": "mistral3",
    "multimodal_projector_bias": false,
    "projector_hidden_act": "gelu",
    "spatial_merge_size": 2,
    "text_config": {
        "attention_dropout": 0.0,
        "dtype": "bfloat16",
        "head_dim": 128,
        "hidden_act": "silu",
        "hidden_size": 5120,
        "initializer_range": 0.02,
        "intermediate_size": 32768,
        "max_position_embeddings": 131072,
        "model_type": "mistral",
        "num_attention_heads": 32,
        "num_hidden_layers": 40,
        "num_key_value_heads": 8,
        "rms_norm_eps": 1e-05,
        "rope_theta": 1000000000.0,
        "sliding_window": null,
        "use_cache": true,
        "vocab_size": 131072
    },
    "transformers_version": "4.57.1",
    "vision_config": {
        "attention_dropout": 0.0,
        "dtype": "bfloat16",
        "head_dim": 64,
        "hidden_act": "silu",
        "hidden_size": 1024,
        "image_size": 1540,
        "initializer_range": 0.02,
        "intermediate_size": 4096,
        "model_type": "pixtral",
        "num_attention_heads": 16,
        "num_channels": 3,
        "num_hidden_layers": 24,
        "patch_size": 14,
        "rope_theta": 10000.0
    },
    "vision_feature_layer": -1
}
"""
        config = json.loads(M3_CONFIG_JSON)
        config = Mistral3Config(**config)
        # meta-device init; real tensors are assigned from the state dict below
        with init_empty_weights():
            self.mistral3 = Mistral3ForConditionalGeneration._from_config(config)

        if state_dict is not None:
            sd = state_dict
        else:
            logger.info(f"Loading state dict from {ckpt_path}")
            sd = load_split_weights(ckpt_path, device=str(device), disable_mmap=disable_mmap, dtype=dtype)

        # if the key has annoying prefix, remove it (remap checkpoint keys to
        # the transformers module layout)
        for key in list(sd.keys()):
            new_key = key.replace("language_model.lm_", "lm_")
            new_key = new_key.replace("language_model.model.", "model.language_model.")
            new_key = new_key.replace("multi_modal_projector.", "model.multi_modal_projector.")
            new_key = new_key.replace("vision_tower.", "model.vision_tower.")
            sd[new_key] = sd.pop(key)

        info = self.mistral3.load_state_dict(sd, strict=True, assign=True)
        logger.info(f"Loaded Mistral 3: {info}")
        self.mistral3.to(device)

        if dtype is not None:
            if is_fp8(dtype):
                logger.info(f"prepare Mistral 3 for fp8: set to {dtype}")
                raise NotImplementedError(f"Mistral 3 {dtype}")  # TODO
            else:
                logger.info(f"Setting Mistral 3 to dtype: {dtype}")
                self.mistral3.to(dtype)

        # Load tokenizer
        self.tokenizer = AutoProcessor.from_pretrained(M3_TOKENIZER_ID, use_fast=False)

    @property
    def dtype(self):
        return self.mistral3.dtype

    @property
    def device(self):
        return self.mistral3.device

    def to(self, *args, **kwargs):
        return self.mistral3.to(*args, **kwargs)

    def forward(self, txt: list[str]):
        """Encode a batch of prompts and return stacked hidden states.

        Returns a tensor rearranged to (b, l, c * d) where c indexes the
        layers in OUTPUT_LAYERS_MISTRAL.
        """
        if not isinstance(txt, list):
            txt = [txt]

        # Format input messages
        messages_batch = self.format_input(txt=txt)

        # Process all messages at once
        # with image processing a too short max length can throw an error in here.
        inputs = self.tokenizer.apply_chat_template(
            messages_batch,
            add_generation_prompt=False,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
            max_length=MAX_LENGTH,
        )

        # Move to device
        input_ids = inputs["input_ids"].to(self.mistral3.device)
        attention_mask = inputs["attention_mask"].to(self.mistral3.device)

        # Forward pass through the model
        output = self.mistral3(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            use_cache=False,
        )

        out = torch.stack([output.hidden_states[k] for k in OUTPUT_LAYERS_MISTRAL], dim=1)
        return rearrange(out, "b c l d -> b l (c d)")

    @staticmethod
    def _validate_and_process_images(img: list[list[Image.Image]] | list[Image.Image]) -> list[list[Image.Image]]:
        """Normalize image input to a list of single-image lists, capped in size."""
        # Simple validation: ensure it's a list of PIL images or list of lists of PIL images
        if not img:
            return []

        # Check if it's a list of lists or a list of images
        if isinstance(img[0], Image.Image):
            # It's a list of images, convert to list of lists
            img = [[im] for im in img]

        # potentially concatenate multiple images to reduce the size
        img = [[concatenate_images(img_i)] if len(img_i) > 1 else img_i for img_i in img]

        # cap the pixels (fix: the inner comprehension variable previously
        # shadowed the outer `img_i`)
        img = [[cap_pixels(im, UPSAMPLING_MAX_IMAGE_SIZE) for im in img_i] for img_i in img]
        return img

    def format_input(
        self,
        txt: list[str],
        system_message: str = SYSTEM_MESSAGE,
        img: list[Image.Image] | list[list[Image.Image]] | None = None,
    ) -> list[list[dict]]:
        """
        Format a batch of text prompts into the conversation format expected by apply_chat_template.
        Optionally, add images to the input.

        Args:
            txt: List of text prompts
            system_message: System message to use (default: CREATIVE_SYSTEM_MESSAGE)
            img: List of images to add to the input.

        Returns:
            List of conversations, where each conversation is a list of message dicts
        """
        # Remove [IMG] tokens from prompts to avoid Pixtral validation issues
        # when truncation is enabled. The processor counts [IMG] tokens and fails
        # if the count changes after truncation.
        cleaned_txt = [prompt.replace("[IMG]", "") for prompt in txt]

        if img is None or len(img) == 0:
            return [
                [
                    {
                        "role": "system",
                        "content": [{"type": "text", "text": system_message}],
                    },
                    {"role": "user", "content": [{"type": "text", "text": prompt}]},
                ]
                for prompt in cleaned_txt
            ]
        else:
            assert len(img) == len(txt), "Number of images must match number of prompts"
            img = self._validate_and_process_images(img)

            messages = [
                [
                    {
                        "role": "system",
                        "content": [{"type": "text", "text": system_message}],
                    },
                ]
                for _ in cleaned_txt
            ]

            for i, (el, images) in enumerate(zip(messages, img)):
                # optionally add the images per batch element.
                if images is not None:
                    el.append(
                        {
                            "role": "user",
                            "content": [{"type": "image", "image": image_obj} for image_obj in images],
                        }
                    )
                # add the text.
                el.append(
                    {
                        "role": "user",
                        "content": [{"type": "text", "text": cleaned_txt[i]}],
                    }
                )

            return messages
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
class Qwen3Embedder(nn.Module):
    """Text embedder wrapping a Qwen3 causal LM for Flux2 text conditioning."""

    def __init__(
        self,
        tokenizer: Qwen2Tokenizer,
        model: Qwen3ForCausalLM,
    ):
        super().__init__()

        self.model = model
        self.tokenizer = tokenizer
        self.max_length = MAX_LENGTH

    @property
    def dtype(self):
        return self.model.dtype

    @property
    def device(self):
        return self.model.device

    def to(self, *args, **kwargs):
        # FIXME: changing dtype is not supported yet
        return self.model.to(*args, **kwargs)

    def forward(self, txt: list[str]):
        """Tokenize prompts via the chat template and return stacked hidden states.

        Returns a tensor rearranged to (b, l, c * d), where c indexes the
        layers listed in OUTPUT_LAYERS_QWEN3.
        """
        ids_per_prompt = []
        masks_per_prompt = []

        for prompt in txt:
            chat = [{"role": "user", "content": prompt}]
            rendered = self.tokenizer.apply_chat_template(
                chat,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=False,
            )

            encoded = self.tokenizer(
                rendered,
                return_tensors="pt",
                padding="max_length",
                truncation=True,
                max_length=self.max_length,
            )

            ids_per_prompt.append(encoded["input_ids"])
            masks_per_prompt.append(encoded["attention_mask"])

        target_device = self.model.device
        input_ids = torch.cat(ids_per_prompt, dim=0).to(target_device)
        attention_mask = torch.cat(masks_per_prompt, dim=0).to(target_device)

        output = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            use_cache=False,
        )

        stacked = torch.stack([output.hidden_states[layer] for layer in OUTPUT_LAYERS_QWEN3], dim=1)
        return rearrange(stacked, "b c l d -> b l (c d)")
|
| 799 |
+
|
| 800 |
+
|
| 801 |
+
def load_text_embedder(
    model_version_info: Flux2ModelInfo,
    ckpt_path: str,
    dtype: Optional[torch.dtype],
    device: Union[str, torch.device],
    disable_mmap: bool = False,
    state_dict: Optional[dict] = None,
) -> Union[Mistral3Embedder, Qwen3Embedder]:
    """Instantiate the text embedder matching the model version.

    Model versions without a Qwen variant use Mistral 3; otherwise a Qwen3
    (4B or 8B) embedder is built from the checkpoint.
    """
    variant = model_version_info.qwen_variant
    if variant is None:
        return Mistral3Embedder(ckpt_path, dtype, device, disable_mmap, state_dict)

    use_8b = variant == "8B"
    tok_id = "Qwen/Qwen3-8B" if use_8b else "Qwen/Qwen3-4B"
    tokenizer, qwen3 = load_qwen3(ckpt_path, dtype, device, disable_mmap, state_dict, is_8b=use_8b, tokenizer_id=tok_id)
    return Qwen3Embedder(tokenizer, qwen3)
|
src/musubi_tuner/flux_kontext_cache_latents.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from musubi_tuner.dataset import config_utils
|
| 8 |
+
from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer
|
| 9 |
+
from musubi_tuner.dataset.image_video_dataset import (
|
| 10 |
+
ItemInfo,
|
| 11 |
+
ARCHITECTURE_FLUX_KONTEXT,
|
| 12 |
+
save_latent_cache_flux_kontext,
|
| 13 |
+
)
|
| 14 |
+
from musubi_tuner.flux import flux_utils
|
| 15 |
+
from musubi_tuner.flux import flux_models
|
| 16 |
+
import musubi_tuner.cache_latents as cache_latents
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
logging.basicConfig(level=logging.INFO)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def preprocess_contents_flux_kontext(batch: List[ItemInfo]) -> tuple[torch.Tensor, List[List[np.ndarray]]]:
    """Stack a batch of target/control images into model-ready tensors.

    Each item supplies `content` (target image, H, W, C) and
    `control_content` whose first entry is the single FLUX.1 Kontext control
    image (ndarray or PIL image). Both outputs are (B, C, H, W) in [-1, 1].
    """
    target_list = []
    control_list = []
    for item in batch:
        target_list.append(torch.from_numpy(item.content))  # target image

        first_control = item.control_content[0]
        if isinstance(first_control, np.ndarray):
            control_image = first_control[..., :3]  # ensure RGB, remove alpha if present
        else:
            control_image = first_control.convert("RGB")  # PIL image: convert to RGB if RGBA
        control_list.append(torch.from_numpy(np.array(control_image)))

    contents = torch.stack(target_list, dim=0)  # B, H, W, C
    contents = contents.permute(0, 3, 1, 2)  # -> B, C, H, W
    contents = contents / 127.5 - 1.0  # normalize to [-1, 1]

    # controls all share one bucketed resolution, so stacking is safe
    controls = torch.stack(control_list, dim=0)  # B, H, W, C
    controls = controls.permute(0, 3, 1, 2)  # -> B, C, H, W
    controls = controls / 127.5 - 1.0  # normalize to [-1, 1]

    return contents, controls
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def encode_and_save_batch(ae: flux_models.AutoEncoder, batch: List[ItemInfo]):
    """Encode a batch of target and control images to latents and write one
    latent cache file per item.

    Each item supplies `content` (target image, H, W, C) and
    `control_content` (a list that must contain exactly one control image
    for FLUX.1 Kontext).
    """
    # assert all items in the batch have the one control content
    if not all(len(item.control_content) == 1 for item in batch):
        raise ValueError("FLUX.1 Kontext requires exactly one control content per item.")

    contents, controls = preprocess_contents_flux_kontext(batch)

    with torch.no_grad():
        latents = ae.encode(contents.to(ae.device, dtype=ae.dtype))  # B, C, H, W
        control_latents = ae.encode(controls.to(ae.device, dtype=ae.dtype))  # B, C, H, W

    # save cache for each item in the batch
    for b, item in enumerate(batch):
        target_latent = latents[b]  # C, H, W. Target latents for this image (ground truth)
        control_latent = control_latents[b]  # C, H, W

        # fix: use the module logger instead of a bare print, consistent with
        # the rest of this file
        logger.info(
            f"Saving cache for item {item.item_key} at {item.latent_cache_path}. control latents shape: {control_latent.shape}, target latents shape: {target_latent.shape}"
        )

        # save cache (file path is inside item.latent_cache_path pattern), remove batch dim
        save_latent_cache_flux_kontext(
            item_info=item,
            latent=target_latent,  # Ground truth for this image
            control_latent=control_latent,  # Control latent for this image
        )
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# def flux_kontext_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
|
| 85 |
+
# return parser
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def main():
    """CLI entry point: cache FLUX.1 Kontext AE latents for a dataset.

    Parses the shared cache_latents arguments, builds the dataset group from
    the TOML config, loads the AE, and runs the shared encode loop.
    """
    parser = cache_latents.setup_parser_common()
    parser = cache_latents.hv_setup_parser(parser)  # VAE
    # parser = flux_kontext_setup_parser(parser)

    args = parser.parse_args()

    if args.disable_cudnn_backend:
        logger.info("Disabling cuDNN PyTorch backend.")
        torch.backends.cudnn.enabled = False

    # the AE is always loaded in float32 below, so a user-specified dtype is rejected
    if args.vae_dtype is not None:
        raise ValueError("VAE dtype is not supported in FLUX.1 Kontext.")

    # use the explicit device if given, otherwise prefer CUDA when available
    device = args.device if hasattr(args, "device") and args.device else ("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device(device)

    # Load dataset config
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    logger.info(f"Load dataset config from {args.dataset_config}")
    user_config = config_utils.load_user_config(args.dataset_config)
    blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_FLUX_KONTEXT)
    train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group)

    datasets = train_dataset_group.datasets

    # debug mode: visualize datasets instead of caching, then exit
    if args.debug_mode is not None:
        cache_latents.show_datasets(
            datasets, args.debug_mode, args.console_width, args.console_back, args.console_num_images, fps=16
        )
        return

    assert args.vae is not None, "ae checkpoint is required"

    logger.info(f"Loading AE model from {args.vae}")
    ae = flux_utils.load_ae(args.vae, dtype=torch.float32, device=device, disable_mmap=True)
    ae.to(device)

    # encoding closure
    def encode(batch: List[ItemInfo]):
        encode_and_save_batch(ae, batch)

    # reuse core loop from cache_latents with no change
    cache_latents.encode_datasets(datasets, encode, args)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# Script entry point: run the latent caching pipeline when invoked directly.
if __name__ == "__main__":
    main()
|
src/musubi_tuner/flux_kontext_cache_text_encoder_outputs.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from transformers import CLIPTextModel, T5EncoderModel, CLIPTokenizer, T5Tokenizer
|
| 5 |
+
|
| 6 |
+
from musubi_tuner.dataset import config_utils
|
| 7 |
+
from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer
|
| 8 |
+
|
| 9 |
+
from musubi_tuner.dataset.image_video_dataset import (
|
| 10 |
+
ARCHITECTURE_FLUX_KONTEXT,
|
| 11 |
+
ItemInfo,
|
| 12 |
+
save_text_encoder_output_cache_flux_kontext,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
from musubi_tuner.flux import flux_models
|
| 16 |
+
from musubi_tuner.flux import flux_utils
|
| 17 |
+
import musubi_tuner.cache_text_encoder_outputs as cache_text_encoder_outputs
|
| 18 |
+
import logging
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
logging.basicConfig(level=logging.INFO)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def encode_and_save_batch(
    tokenizer1: T5Tokenizer,
    text_encoder1: T5EncoderModel,
    tokenizer2: CLIPTokenizer,
    text_encoder2: CLIPTextModel,
    batch: list[ItemInfo],
    device: torch.device,
):
    """Encode captions with T5-XXL and CLIP-L and save one text-encoder
    output cache per item.

    T5 contributes the per-token hidden states, CLIP-L the pooled vector.
    """
    prompts = [item.caption for item in batch]
    # print(prompts)

    # encode prompt
    t5_tokens = tokenizer1(
        prompts,
        max_length=flux_models.T5XXL_MAX_LENGTH,
        padding="max_length",
        return_length=False,
        return_overflowing_tokens=False,
        truncation=True,
        return_tensors="pt",
    )["input_ids"]
    l_tokens = tokenizer2(prompts, max_length=77, padding="max_length", truncation=True, return_tensors="pt")["input_ids"]

    with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad():
        t5_vec = text_encoder1(input_ids=t5_tokens.to(text_encoder1.device), attention_mask=None, output_hidden_states=False)[
            "last_hidden_state"
        ]
        # fix idiom: `assert ... == False` replaced with `assert not ...`
        assert not torch.isnan(t5_vec).any(), "T5 vector contains NaN values"
        t5_vec = t5_vec.cpu()

    with torch.autocast(device_type=device.type, dtype=text_encoder2.dtype), torch.no_grad():
        clip_l_pooler = text_encoder2(l_tokens.to(text_encoder2.device))["pooler_output"]
        clip_l_pooler = clip_l_pooler.cpu()

    # save prompt cache
    # fix: the loop variable previously shadowed the batch tensor `t5_vec`
    # it was iterating over; renamed for clarity
    for item, t5_item_vec, clip_ctx in zip(batch, t5_vec, clip_l_pooler):
        save_text_encoder_output_cache_flux_kontext(item, t5_item_vec, clip_ctx)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def main():
    """CLI entry point: cache T5/CLIP text-encoder outputs for FLUX.1 Kontext.

    Builds the dataset group from the TOML config, loads both text encoders,
    runs the shared batching loop, then prunes stale cache files.
    """
    parser = cache_text_encoder_outputs.setup_parser_common()
    parser = flux_kontext_setup_parser(parser)

    args = parser.parse_args()

    device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device)

    # Load dataset config
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    logger.info(f"Load dataset config from {args.dataset_config}")
    user_config = config_utils.load_user_config(args.dataset_config)
    blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_FLUX_KONTEXT)
    train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group)

    datasets = train_dataset_group.datasets

    # prepare cache files and paths: all_cache_files_for_dataset = existing cache files, all_cache_paths_for_dataset = all cache paths in the dataset
    all_cache_files_for_dataset, all_cache_paths_for_dataset = cache_text_encoder_outputs.prepare_cache_files_and_paths(datasets)

    # Load T5 and CLIP text encoders
    # fix: the fp8 dtype attribute is torch.float8_e4m3fn; the previous
    # torch.float8e4m3fn does not exist and raised AttributeError with --fp8_t5
    t5_dtype = torch.float8_e4m3fn if args.fp8_t5 else torch.bfloat16
    tokenizer1, text_encoder1 = flux_utils.load_t5xxl(args.text_encoder1, dtype=t5_dtype, device=device, disable_mmap=True)
    tokenizer2, text_encoder2 = flux_utils.load_clip_l(args.text_encoder2, dtype=torch.bfloat16, device=device, disable_mmap=True)

    # Encode with T5 and CLIP text encoders
    logger.info("Encoding with T5 and CLIP text encoders")

    def encode_for_text_encoder(batch: list[ItemInfo]):
        nonlocal tokenizer1, text_encoder1, tokenizer2, text_encoder2
        encode_and_save_batch(tokenizer1, text_encoder1, tokenizer2, text_encoder2, batch, device)

    cache_text_encoder_outputs.process_text_encoder_batches(
        args.num_workers,
        args.skip_existing,
        args.batch_size,
        datasets,
        all_cache_files_for_dataset,
        all_cache_paths_for_dataset,
        encode_for_text_encoder,
    )
    del text_encoder1
    del text_encoder2

    # remove cache files not in dataset
    cache_text_encoder_outputs.post_process_cache_files(
        datasets, all_cache_files_for_dataset, all_cache_paths_for_dataset, args.keep_cache
    )
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def flux_kontext_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register the FLUX.1 Kontext text-encoder CLI options on `parser`."""
    parser.add_argument(
        "--text_encoder1", type=str, default=None, required=True, help="text encoder (T5XXL) checkpoint path"
    )
    parser.add_argument(
        "--text_encoder2", type=str, default=None, required=True, help="text encoder 2 (CLIP-L) checkpoint path"
    )
    parser.add_argument("--fp8_t5", action="store_true", help="use fp8 for Text Encoder model")
    return parser
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Script entry point: run the text-encoder output caching pipeline when invoked directly.
if __name__ == "__main__":
    main()
|
src/musubi_tuner/flux_kontext_generate_image.py
ADDED
|
@@ -0,0 +1,1183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
from importlib.util import find_spec
|
| 3 |
+
import random
|
| 4 |
+
import os
|
| 5 |
+
import time
|
| 6 |
+
import copy
|
| 7 |
+
from typing import Tuple, Optional, List, Any, Dict
|
| 8 |
+
|
| 9 |
+
from einops import rearrange
|
| 10 |
+
import torch
|
| 11 |
+
from safetensors.torch import load_file, save_file
|
| 12 |
+
from safetensors import safe_open
|
| 13 |
+
from tqdm import tqdm
|
| 14 |
+
|
| 15 |
+
from musubi_tuner.flux import flux_utils
|
| 16 |
+
from musubi_tuner.flux.flux_utils import load_flow_model
|
| 17 |
+
from musubi_tuner.flux import flux_models
|
| 18 |
+
from musubi_tuner.utils import model_utils
|
| 19 |
+
|
| 20 |
+
lycoris_available = find_spec("lycoris") is not None
|
| 21 |
+
|
| 22 |
+
from musubi_tuner.networks import lora_flux
|
| 23 |
+
from musubi_tuner.utils.device_utils import clean_memory_on_device
|
| 24 |
+
from musubi_tuner.hv_generate_video import get_time_flag, save_images_grid, setup_parser_compile, synchronize_device
|
| 25 |
+
from musubi_tuner.wan_generate_video import merge_lora_weights
|
| 26 |
+
|
| 27 |
+
import logging
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger(__name__)
|
| 30 |
+
logging.basicConfig(level=logging.INFO)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class GenerationSettings:
    """Bundle of device/dtype settings shared across the generation helpers."""

    def __init__(self, device: torch.device, dit_weight_dtype: Optional[torch.dtype] = None):
        # inference device
        self.device = device
        # not used currently because model may be optimized
        self.dit_weight_dtype = dit_weight_dtype
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def parse_args() -> argparse.Namespace:
    """Build the CLI parser for FLUX.1 Kontext inference, parse argv, and validate.

    Raises:
        ValueError: when mutually exclusive prompt modes are combined, when no
            prompt source is given, or when --lycoris is requested without the
            lycoris package installed.
    """
    parser = argparse.ArgumentParser(description="FLUX.1 Kontext inference script")

    # model paths
    parser.add_argument("--dit", type=str, default=None, help="DiT directory or path")
    parser.add_argument("--vae", type=str, default=None, help="AE directory or path")
    parser.add_argument("--text_encoder1", type=str, required=True, help="Text Encoder 1 (T5) directory or path")
    parser.add_argument("--text_encoder2", type=str, required=True, help="Text Encoder 2 (CLIP-L) directory or path")

    # LoRA
    parser.add_argument("--lora_weight", type=str, nargs="*", required=False, default=None, help="LoRA weight path")
    parser.add_argument("--lora_multiplier", type=float, nargs="*", default=1.0, help="LoRA multiplier")
    parser.add_argument("--include_patterns", type=str, nargs="*", default=None, help="LoRA module include patterns")
    parser.add_argument("--exclude_patterns", type=str, nargs="*", default=None, help="LoRA module exclude patterns")
    parser.add_argument(
        "--save_merged_model",
        type=str,
        default=None,
        help="Save merged model to path. If specified, no inference will be performed.",
    )

    # inference
    parser.add_argument("--prompt", type=str, default=None, help="prompt for generation")
    parser.add_argument("--image_size", type=int, nargs=2, default=[256, 256], help="image size, height and width")
    parser.add_argument("--control_image_path", type=str, default=None, help="path to control (reference) image for Kontext.")
    parser.add_argument("--no_resize_control", action="store_true", help="do not resize control image")
    parser.add_argument("--infer_steps", type=int, default=25, help="number of inference steps, default is 25")
    parser.add_argument("--save_path", type=str, required=True, help="path to save generated video")
    parser.add_argument("--seed", type=int, default=None, help="Seed for evaluation.")
    parser.add_argument(
        "--embedded_cfg_scale", type=float, default=2.5, help="Embeded CFG scale (distilled CFG Scale), default is 2.5"
    )

    # Flow Matching
    parser.add_argument(
        "--flow_shift",
        type=float,
        default=None,
        help="Shift factor for flow matching schedulers. Default is None (FLUX.1 default).",
    )

    # precision / device / memory
    parser.add_argument("--fp8", action="store_true", help="use fp8 for DiT model")
    parser.add_argument("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT, only for fp8")
    parser.add_argument("--fp8_t5", action="store_true", help="use fp8 for Text Encoder 1 (T5)")
    parser.add_argument(
        "--device", type=str, default=None, help="device to use for inference. If None, use CUDA if available, otherwise use CPU"
    )
    parser.add_argument(
        "--attn_mode",
        type=str,
        default="torch",
        choices=["flash", "torch", "sageattn", "xformers", "sdpa"],
        help="attention mode",
    )
    parser.add_argument("--blocks_to_swap", type=int, default=0, help="number of blocks to swap in the model")
    parser.add_argument(
        "--use_pinned_memory_for_block_swap",
        action="store_true",
        help="use pinned memory for block swapping, which may speed up data transfer between CPU and GPU but uses more shared GPU memory on Windows",
    )

    # output
    parser.add_argument(
        "--output_type",
        type=str,
        default="images",
        choices=["images", "latent", "latent_images"],
        help="output type",
    )
    parser.add_argument("--no_metadata", action="store_true", help="do not save metadata")
    parser.add_argument("--latent_path", type=str, nargs="*", default=None, help="path to latent for decode. no inference")
    parser.add_argument(
        "--lycoris", action="store_true", help=f"use lycoris for inference{'' if lycoris_available else ' (not available)'}"
    )
    setup_parser_compile(parser)

    # batch and interactive modes
    parser.add_argument("--from_file", type=str, default=None, help="Read prompts from a file")
    parser.add_argument("--interactive", action="store_true", help="Interactive mode: read prompts from console")

    args = parser.parse_args()

    # Exactly one prompt source is allowed; latent decode mode needs none.
    if args.from_file and args.interactive:
        raise ValueError("Cannot use both --from_file and --interactive at the same time")

    if args.latent_path is None or len(args.latent_path) == 0:
        if args.prompt is None and not args.from_file and not args.interactive:
            raise ValueError("Either --prompt, --from_file or --interactive must be specified")

    if args.lycoris and not lycoris_available:
        raise ValueError("install lycoris: https://github.com/KohakuBlueleaf/LyCORIS")

    return args
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def parse_prompt_line(line: str) -> Dict[str, Any]:
    """Parse a prompt line into a dictionary of argument overrides.

    Options are appended to the prompt as ``--<opt> <value>`` pairs
    (e.g. ``"a cat --w 512 --d 42"``). Unrecognized options are ignored.

    Args:
        line: Prompt line with options

    Returns:
        Dict[str, Any]: Dictionary of argument overrides
    """
    # TODO common function with hv_train_network.line_to_prompt_dict
    head, *option_chunks = line.split(" --")
    overrides: Dict[str, Any] = {"prompt": head.strip()}

    # short option name -> (override key, value converter)
    dispatch = {
        "w": ("image_size_width", int),
        "h": ("image_size_height", int),
        "d": ("seed", int),
        "s": ("infer_steps", int),
        "fs": ("flow_shift", float),
        "i": ("image_path", str),
        "ci": ("control_image_path", str),  # control_image_path
    }

    for chunk in option_chunks:
        if not chunk.strip():
            continue
        option, _, raw_value = chunk.partition(" ")
        entry = dispatch.get(option.strip())
        if entry is not None:
            key, convert = entry
            overrides[key] = convert(raw_value.strip())

    return overrides
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def apply_overrides(args: argparse.Namespace, overrides: Dict[str, Any]) -> argparse.Namespace:
    """Return a deep copy of *args* with *overrides* applied.

    The pseudo-keys ``image_size_width`` / ``image_size_height`` are routed into
    the ``image_size`` list ([height, width]); everything else is set verbatim.

    Args:
        args: Original arguments (left unmodified)
        overrides: Dictionary of overrides

    Returns:
        argparse.Namespace: New arguments with overrides applied
    """
    new_args = copy.deepcopy(args)

    # image_size is stored as [height, width]
    size_index = {"image_size_width": 1, "image_size_height": 0}

    for key, value in overrides.items():
        if key in size_index:
            new_args.image_size[size_index[key]] = value
        else:
            setattr(new_args, key, value)

    return new_args
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def check_inputs(args: argparse.Namespace) -> Tuple[int, int]:
    """Validate the requested image size.

    Note: docstring fixed — this script generates images, not video; the
    previous text ("video size and length") was copied from a video script.

    Args:
        args: command line arguments

    Returns:
        Tuple[int, int]: (height, width)

    Raises:
        ValueError: if height or width is not divisible by 16 (required by the
            16x latent downscale of the FLUX pipeline).
    """
    height = args.image_size[0]
    width = args.image_size[1]

    if height % 16 != 0 or width % 16 != 0:
        raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

    return height, width
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# region DiT model
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def load_dit_model(args: argparse.Namespace, device: torch.device) -> flux_models.Flux:
    """Load the FLUX DiT model from the checkpoint given by ``args.dit``.

    Weights are loaded to CPU whenever they will be transformed afterwards
    (block swap, fp8 scaling, or LoRA merge); otherwise directly to *device*.

    Args:
        args: command line arguments
        device: inference device

    Returns:
        flux_models.Flux: DiT model (not yet fp8-optimized; LoRA merge happens later)
    """
    can_load_directly_to_gpu = args.blocks_to_swap == 0 and not args.fp8_scaled and args.lora_weight is None
    loading_device = device if can_load_directly_to_gpu else "cpu"

    # do not fp8 optimize here because LoRA weights may still be merged
    return load_flow_model(
        ckpt_path=args.dit,
        dtype=None,
        device=loading_device,
        disable_mmap=True,
        attn_mode=args.attn_mode,
        split_attn=False,
    )
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def optimize_model(model: flux_models.Flux, args: argparse.Namespace, device: torch.device) -> None:
    """Optimize the model in place (FP8 conversion, device placement, block swap, compile).

    Args:
        model: DiT model to optimize (modified in place)
        args: command line arguments
        device: inference device
    """
    if args.fp8_scaled:
        # load state dict as-is and optimize to fp8
        state_dict = model.state_dict()

        # if no blocks to swap, we can move the weights to GPU after optimization on GPU (omit redundant CPU->GPU copy)
        move_to_device = args.blocks_to_swap == 0  # if blocks_to_swap > 0, we will keep the model on CPU
        state_dict = model.fp8_optimization(state_dict, device, move_to_device, use_scaled_mm=False)  # args.fp8_fast)

        info = model.load_state_dict(state_dict, strict=True, assign=True)
        logger.info(f"Loaded FP8 optimized weights: {info}")

        if args.blocks_to_swap == 0:
            model.to(device)  # make sure all parameters are on the right device (e.g. RoPE etc.)
    else:
        # simple cast to dit_dtype
        target_dtype = None  # load as-is (dit_weight_dtype == dtype of the weights in state_dict)
        target_device = None

        if args.fp8:
            # bug fix: the PyTorch dtype attribute is float8_e4m3fn (with underscore);
            # torch.float8e4m3fn does not exist and raised AttributeError at runtime.
            target_dtype = torch.float8_e4m3fn

        if args.blocks_to_swap == 0:
            logger.info(f"Move model to device: {device}")
            target_device = device

        # NOTE(review): cast+move happens only when BOTH are set, so with --fp8 and
        # blocks_to_swap > 0 the fp8 cast is silently skipped — confirm this is intended.
        if target_device is not None and target_dtype is not None:
            model.to(target_device, target_dtype)  # move and cast at the same time. this reduces redundant copy operations

    if args.blocks_to_swap > 0:
        logger.info(f"Enable swap {args.blocks_to_swap} blocks to CPU from device: {device}")
        model.enable_block_swap(
            args.blocks_to_swap, device, supports_backward=False, use_pinned_memory=args.use_pinned_memory_for_block_swap
        )
        model.move_to_device_except_swap_blocks(device)
        model.prepare_block_swap_before_forward()
    else:
        # make sure the model is on the right device
        model.to(device)

    if args.compile:
        model = model_utils.compile_transformer(
            args, model, [model.double_blocks, model.single_blocks], disable_linear=args.blocks_to_swap > 0
        )

    model.eval().requires_grad_(False)
    clean_memory_on_device(device)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
# endregion
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def decode_latent(ae: flux_models.AutoEncoder, latent: torch.Tensor, device: torch.device) -> torch.Tensor:
    """Decode a single latent to pixels with the AE, returning a CPU tensor.

    The AE is temporarily moved to *device* and back to CPU afterwards.

    Args:
        ae: autoencoder used for decoding
        latent: latent tensor, with or without a leading batch dimension
        device: device to run the decode on

    Returns:
        torch.Tensor: decoded pixels on CPU, batch dimension removed
    """
    logger.info("Decoding image...")

    batched = latent if latent.ndim != 3 else latent.unsqueeze(0)

    ae.to(device)
    with torch.no_grad():
        pixels = ae.decode(batched.to(device)).to("cpu")
    ae.to("cpu")

    logger.info(f"Decoded. Pixel shape {pixels.shape}")
    return pixels[0]
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def prepare_image_inputs(args: argparse.Namespace, device: torch.device, ae: flux_models.AutoEncoder) -> Dict[str, Any]:
    """Validate the target size and AE-encode the optional Kontext control image.

    Returns:
        dict with "height", "width", and "control_latent" (bfloat16 on CPU, or
        None when --control_image_path is not given).
    """
    height, width = check_inputs(args)

    # No control image: nothing to encode.
    if args.control_image_path is None:
        return {"height": height, "width": width, "control_latent": None}

    control_image_tensor, _, _ = flux_utils.preprocess_control_image(args.control_image_path, not args.no_resize_control)

    # AE encoding: move the AE to the compute device only for the duration of the encode
    logger.info("Encoding control image to latent space with AE")
    ae_home_device = ae.device
    ae.to(device)

    with torch.no_grad():
        control_latent = ae.encode(control_image_tensor.to(device, dtype=ae.dtype))
    control_latent = control_latent.to(torch.bfloat16).to("cpu")

    ae.to(ae_home_device)  # move AE back to its original device
    clean_memory_on_device(device)

    return {"height": height, "width": width, "control_latent": control_latent}
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def prepare_text_inputs(
    args: argparse.Namespace,
    device: torch.device,
    shared_models: Optional[Dict] = None,
) -> Dict[str, Any]:
    """Encode the prompt with the T5 and CLIP-L text encoders.

    Encoders are taken from *shared_models* when provided (batched/interactive
    modes) or loaded from the paths in *args* otherwise. Results are cached per
    prompt in ``conds_cache`` (shared across calls when the caller provides one).

    Args:
        args: command line arguments (prompt, encoder paths, fp8_t5, ...)
        device: device to run the encoders on
        shared_models: optional dict with pre-loaded "tokenizer1"/"text_encoder1",
            "tokenizer2"/"text_encoder2", optionally "model" (DiT) and "conds_cache"

    Returns:
        dict with "t5_vec", "clip_l_pooler" (both CPU tensors) and "prompt"
    """
    # conds_cache holds cached encodings keyed by prompt text
    conds_cache = {}
    if shared_models is not None:
        tokenizer1, text_encoder1 = shared_models.get("tokenizer1"), shared_models.get("text_encoder1")
        tokenizer2, text_encoder2 = shared_models.get("tokenizer2"), shared_models.get("text_encoder2")
        if "conds_cache" in shared_models:  # Use shared cache if available
            conds_cache = shared_models["conds_cache"]
        # text_encoder1 and text_encoder2 are on device (batched inference) or CPU (interactive inference)
    else:  # Load if not in shared_models
        # T5XXL is float16 by default, but it causes NaN values in some cases, so we use bfloat16 (or fp8 if specified).
        # bug fix: the PyTorch dtype attribute is float8_e4m3fn; torch.float8e4m3fn does not exist.
        t5_dtype = torch.float8_e4m3fn if args.fp8_t5 else torch.bfloat16
        tokenizer1, text_encoder1 = flux_utils.load_t5xxl(args.text_encoder1, dtype=t5_dtype, device=device, disable_mmap=True)
        tokenizer2, text_encoder2 = flux_utils.load_clip_l(
            args.text_encoder2, dtype=torch.bfloat16, device=device, disable_mmap=True
        )

    # Store original devices to move back later if they were shared. This does nothing if shared_models is None
    text_encoder1_original_device = text_encoder1.device if text_encoder1 else None
    text_encoder2_original_device = text_encoder2.device if text_encoder2 else None

    logger.info("Encoding prompt with Text Encoders")

    # Ensure text_encoder1 and text_encoder2 are not None before proceeding
    if not text_encoder1 or not text_encoder2 or not tokenizer1 or not tokenizer2:
        raise ValueError("Text encoders or tokenizers are not loaded properly.")

    # Move models to device lazily, only on a cache miss (avoids needless
    # transfers in interactive mode when every prompt is already cached).
    model_is_moved = False

    def move_models_to_device_if_needed():
        nonlocal model_is_moved
        nonlocal shared_models

        if model_is_moved:
            return
        model_is_moved = True

        logger.info(f"Moving DiT and Text Encoders to appropriate device: {device} or CPU")
        if shared_models and "model" in shared_models:  # DiT model is shared; evict it to CPU first
            if args.blocks_to_swap > 0:
                logger.info("Waiting for 5 seconds to finish block swap")
                time.sleep(5)
            model = shared_models["model"]
            model.to("cpu")
        clean_memory_on_device(device)  # clean memory on device before moving models

        text_encoder1.to(device)
        text_encoder2.to(device)

    prompt = args.prompt
    if prompt in conds_cache:
        t5_vec, clip_l_pooler = conds_cache[prompt]
    else:
        move_models_to_device_if_needed()

        t5_tokens = tokenizer1(
            prompt,
            max_length=flux_models.T5XXL_MAX_LENGTH,
            padding="max_length",
            return_length=False,
            return_overflowing_tokens=False,
            truncation=True,
            return_tensors="pt",
        )["input_ids"]
        l_tokens = tokenizer2(prompt, max_length=77, padding="max_length", truncation=True, return_tensors="pt")["input_ids"]

        with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad():
            t5_vec = text_encoder1(input_ids=t5_tokens.to(text_encoder1.device), attention_mask=None, output_hidden_states=False)[
                "last_hidden_state"
            ]
            assert not torch.isnan(t5_vec).any(), "T5 vector contains NaN values"
            t5_vec = t5_vec.cpu()

        with torch.autocast(device_type=device.type, dtype=text_encoder2.dtype), torch.no_grad():
            clip_l_pooler = text_encoder2(l_tokens.to(text_encoder2.device))["pooler_output"]
            clip_l_pooler = clip_l_pooler.cpu()

        conds_cache[prompt] = (t5_vec, clip_l_pooler)

    if not (shared_models and "text_encoder1" in shared_models):  # if loaded locally
        del tokenizer1, text_encoder1, tokenizer2, text_encoder2
    else:  # if shared, move back to original device (likely CPU)
        if text_encoder1:
            text_encoder1.to(text_encoder1_original_device)
        if text_encoder2:
            text_encoder2.to(text_encoder2_original_device)

    clean_memory_on_device(device)

    arg_c = {"t5_vec": t5_vec, "clip_l_pooler": clip_l_pooler, "prompt": prompt}

    return arg_c
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
def prepare_i2v_inputs(
    args: argparse.Namespace,
    device: torch.device,
    ae: flux_models.AutoEncoder,
    shared_models: Optional[Dict] = None,
) -> Tuple[int, int, Dict[str, Any], Optional[torch.Tensor]]:
    """Prepare all Kontext inputs: control-image AE encoding plus text encoding.

    Args:
        args: command line arguments
        device: device to use
        ae: AE model instance
        shared_models: dictionary containing pre-loaded models (mainly for DiT)

    Returns:
        Tuple[int, int, Dict[str, Any], Optional[torch.Tensor]]:
            (height, width, text context dict, control latent or None)
    """
    image_data = prepare_image_inputs(args, device, ae)
    text_context = prepare_text_inputs(args, device, shared_models)

    return image_data["height"], image_data["width"], text_context, image_data["control_latent"]
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def generate(
    args: argparse.Namespace,
    gen_settings: GenerationSettings,
    shared_models: Optional[Dict] = None,
    precomputed_image_data: Optional[Dict] = None,
    precomputed_text_data: Optional[Dict] = None,
) -> tuple[Optional[flux_models.AutoEncoder], torch.Tensor]:  # AE can be Optional
    """Main generation routine: denoise a packed latent with the FLUX DiT.

    Args:
        args: command line arguments
        gen_settings: device / dtype settings
        shared_models: dictionary containing pre-loaded models (mainly for DiT)
        precomputed_image_data: Optional dictionary with precomputed image data
        precomputed_text_data: Optional dictionary with precomputed text data

    Returns:
        tuple: (flux_models.AutoEncoder model (vae) or None, torch.Tensor generated latent)
    """
    device, dit_weight_dtype = gen_settings.device, gen_settings.dit_weight_dtype
    ae_for_return = None  # VAE instance handed back to the caller for decoding

    # fix the seed (and record it on args so it ends up in saved metadata)
    seed = args.seed if args.seed is not None else random.randint(0, 2**32 - 1)
    args.seed = seed

    if precomputed_image_data is not None and precomputed_text_data is not None:
        # batch path: everything was encoded up-front; no VAE is loaded here and
        # decoding is handled by the caller (e.g. process_batch_prompts)
        logger.info("Using precomputed image and text data.")
        height = precomputed_image_data["height"]
        width = precomputed_image_data["width"]
        control_latent = precomputed_image_data["control_latent"]
        context = precomputed_text_data
    else:
        # single/interactive path: load the VAE locally and encode inputs now
        logger.info("No precomputed data. Preparing image and text inputs.")
        if shared_models and "ae" in shared_models:  # Should not happen with new load_shared_models
            ae_for_return = shared_models["ae"]
        else:
            # the dtype of VAE weights is float32, but we can load it as bfloat16 for better performance in future
            ae_for_return = flux_utils.load_ae(args.vae, dtype=torch.float32, device=device, disable_mmap=True)

        height, width, context, control_latent = prepare_i2v_inputs(args, device, ae_for_return, shared_models)

    if shared_models is None or "model" not in shared_models:
        # load the DiT model locally
        model = load_dit_model(args, device)

        # merge LoRA weights before optimization so fp8 conversion sees the merged weights
        if args.lora_weight is not None and len(args.lora_weight) > 0:
            merge_lora_weights(
                lora_flux,
                model,
                args.lora_weight,
                args.lora_multiplier,
                args.include_patterns,
                args.exclude_patterns,
                device,
                args.lycoris,
                args.save_merged_model,
            )

        # if we only want to save the merged model, we can skip the rest
        if args.save_merged_model:
            return None, None

        # optimize model: fp8 conversion, block swap etc.
        optimize_model(model, args, device)

        if shared_models is not None:
            shared_models["model"] = model
    else:
        # reuse the shared DiT model, restoring its device placement
        model: flux_models.Flux = shared_models["model"]
        model.move_to_device_except_swap_blocks(device)  # Handles block swap correctly
        model.prepare_block_swap_before_forward()

    # CPU generator so results are reproducible regardless of the compute device
    rng = torch.Generator(device="cpu")
    rng.manual_seed(seed)

    logger.info(f"Image size: {height}x{width} (HxW), infer_steps: {args.infer_steps}")

    # image generation ######
    logger.info(f"Prompt: {context['prompt']}")

    txt_emb = context["t5_vec"].to(device, dtype=torch.bfloat16)
    pooled_emb = context["clip_l_pooler"].to(device, dtype=torch.bfloat16)

    txt_ids = torch.zeros(txt_emb.shape[0], txt_emb.shape[1], 3, device=txt_emb.device)

    # initial noise in packed layout:
    # original b,16,2*h//16,2*w//16  ->  packed b,h//16*w//16,16*2*2
    packed_h, packed_w = height // 16, width // 16
    latents = torch.randn(
        1,
        packed_h * packed_w,
        16 * 2 * 2,
        dtype=torch.float32,
        generator=rng,
        device="cpu",
    ).to(device, dtype=torch.bfloat16)

    img_ids = flux_utils.prepare_img_ids(1, packed_h, packed_w).to(device)

    # control ids mirror the base image ids with the first dimension set to 1 instead of 0
    if control_latent is not None:
        ctrl_h = control_latent.shape[2] // 2
        ctrl_w = control_latent.shape[3] // 2
        control_latent = rearrange(control_latent, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
        control_latent_ids = flux_utils.prepare_img_ids(1, ctrl_h, ctrl_w, is_ctrl=True).to(device)
        control_latent = control_latent.to(device, dtype=torch.bfloat16)
    else:
        control_latent_ids = None

    # denoising schedule (flow-matching sigmas)
    timesteps = flux_utils.get_schedule(
        num_steps=args.infer_steps, image_seq_len=packed_h * packed_w, shift_value=args.flow_shift
    )

    guidance_t = torch.full((latents.shape[0],), args.embedded_cfg_scale, device=latents.device, dtype=latents.dtype)

    for t_curr, t_prev in zip(tqdm(timesteps[:-1]), timesteps[1:]):
        t_vec = torch.full((latents.shape[0],), t_curr, dtype=latents.dtype, device=latents.device)

        # optionally append control tokens for this step; prediction for them is discarded
        model_in = latents
        model_in_ids = img_ids
        if control_latent is not None:
            model_in = torch.cat((model_in, control_latent), dim=1)
            model_in_ids = torch.cat((model_in_ids, control_latent_ids), dim=1)

        with torch.no_grad():
            pred = model(
                img=model_in,
                img_ids=model_in_ids,
                txt=txt_emb,
                txt_ids=txt_ids,
                y=pooled_emb,
                timesteps=t_vec,
                guidance=guidance_t,
            )
            pred = pred[:, : latents.shape[1]]  # drop predictions over control tokens

        # Euler step along the flow
        latents = latents + (t_prev - t_curr) * pred

    # unpack back to b,c,h,w
    latents = latents.float()
    latents = rearrange(latents, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=packed_h, w=packed_w, ph=2, pw=2)

    return ae_for_return, latents
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
def save_latent(latent: torch.Tensor, args: argparse.Namespace, height: int, width: int) -> str:
    """Save a generated latent tensor to a safetensors file.

    Args:
        latent: latent tensor to store
        args: command line arguments (save_path, seed, prompt, ... are read)
        height: pixel height of the generated image
        width: pixel width of the generated image

    Returns:
        str: path to the saved latent file
    """
    out_dir = args.save_path
    os.makedirs(out_dir, exist_ok=True)

    latent_path = f"{out_dir}/{get_time_flag()}_{args.seed}_latent.safetensors"

    metadata = None
    if not args.no_metadata:
        # record generation parameters as string metadata so the latent can be decoded later
        metadata = {
            "seeds": f"{args.seed}",
            "prompt": f"{args.prompt}",
            "height": f"{height}",
            "width": f"{width}",
            "infer_steps": f"{args.infer_steps}",
            "embedded_cfg_scale": f"{args.embedded_cfg_scale}",
        }

    save_file({"latent": latent.contiguous()}, latent_path, metadata=metadata)
    logger.info(f"Latent saved to: {latent_path}")

    return latent_path
|
| 718 |
+
|
| 719 |
+
|
| 720 |
+
def save_images(sample: torch.Tensor, args: argparse.Namespace, original_base_name: Optional[str] = None) -> str:
    """Save a generated image tensor to the output directory.

    Args:
        sample: image tensor (C, H, W)
        args: command line arguments (save_path and seed are read)
        original_base_name: original base name (if latents are loaded from files)

    Returns:
        str: path prefix of the saved image(s)
    """
    out_dir = args.save_path
    os.makedirs(out_dir, exist_ok=True)

    suffix = f"_{original_base_name}" if original_base_name is not None else ""
    image_name = f"{get_time_flag()}_{args.seed}{suffix}"

    # add batch and time dims: C,H,W -> B,C,T,H,W with B=1, T=1 for the grid saver
    grid_input = sample.unsqueeze(0).unsqueeze(2)
    save_images_grid(grid_input, out_dir, image_name, rescale=True, create_subdir=False)
    logger.info(f"Sample images saved to: {out_dir}/{image_name}")

    return f"{out_dir}/{image_name}"
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
def save_output(
    args: argparse.Namespace,
    ae: flux_models.AutoEncoder,  # Expect a VAE instance for decoding
    latent: torch.Tensor,
    device: torch.device,
    original_base_names: Optional[List[str]] = None,
) -> None:
    """Save the generated latent and/or the decoded image output.

    Args:
        args: command line arguments (output_type, save_path, ... are read)
        ae: autoencoder used to decode the latent (may be None when only saving latents)
        latent: latent tensor; last two dimensions are latent H and W
        device: device to use for decoding
        original_base_names: original base names (if latents are loaded from files)
    """
    # latent spatial dims are 1/8 of the pixel dims
    height, width = latent.shape[-2], latent.shape[-1]
    height *= 8
    width *= 8

    if args.output_type == "latent" or args.output_type == "latent_images":
        save_latent(latent, args, height, width)
        if args.output_type == "latent":
            return  # latent only: nothing to decode

    if ae is None:
        logger.error("AE is None, cannot decode latents for saving video/images.")
        return

    video = decode_latent(ae, latent, device)

    if args.output_type == "images" or args.output_type == "latent_images":
        # Pass the raw base name (or None); save_images adds the "_" prefix itself.
        # Previously a pre-prefixed string ("" or f"_{name}") was passed, which
        # save_images prefixed again, producing "__name" / trailing "_" filenames.
        base_name = original_base_names[0] if original_base_names else None
        save_images(video, args, base_name)
|
| 781 |
+
|
| 782 |
+
|
| 783 |
+
def preprocess_prompts_for_batch(prompt_lines: List[str], base_args: argparse.Namespace) -> List[Dict]:
    """Parse prompt-file lines into prompt-data dictionaries for batch mode.

    Blank lines and lines beginning with '#' are ignored.

    Args:
        prompt_lines: List of prompt lines
        base_args: Base command line arguments

    Returns:
        List[Dict]: List of prompt data dictionaries
    """
    parsed = []
    for raw in prompt_lines:
        stripped = raw.strip()
        # skip empty lines and comments
        if stripped and not stripped.startswith("#"):
            data = parse_prompt_line(stripped)
            logger.info(f"Parsed prompt data: {data}")
            parsed.append(data)
    return parsed
|
| 806 |
+
|
| 807 |
+
|
| 808 |
+
def load_shared_models(args: argparse.Namespace) -> Dict:
    """Load shared models for batch processing or interactive mode.

    Models are loaded to CPU to save memory. VAE is NOT loaded here.
    DiT model is also NOT loaded here, handled by process_batch_prompts or generate.

    Args:
        args: Base command line arguments

    Returns:
        Dict: Dictionary of shared models (tokenizers and text encoders)
    """
    shared_models = {}

    # Load text encoders to CPU.
    # FIX: the correct dtype attribute is torch.float8_e4m3fn; the previous
    # spelling "torch.float8e4m3fn" raised AttributeError when --fp8_t5 was set.
    t5_dtype = torch.float8_e4m3fn if args.fp8_t5 else torch.bfloat16
    tokenizer1, text_encoder1 = flux_utils.load_t5xxl(args.text_encoder1, dtype=t5_dtype, device="cpu", disable_mmap=True)
    tokenizer2, text_encoder2 = flux_utils.load_clip_l(args.text_encoder2, dtype=torch.bfloat16, device="cpu", disable_mmap=True)

    shared_models["tokenizer1"] = tokenizer1
    shared_models["text_encoder1"] = text_encoder1
    shared_models["tokenizer2"] = tokenizer2
    shared_models["text_encoder2"] = text_encoder2

    return shared_models
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
def process_batch_prompts(prompts_data: List[Dict], args: argparse.Namespace) -> None:
    """Process multiple prompts with model reuse and batched precomputation.

    Runs in four phases so each heavy model is resident only once:
      1. AE-encode all input images.
      2. Text-encode all prompts.
      3. Generate latents with a single DiT instance.
      4. Decode all latents with the shared AE.

    Args:
        prompts_data: List of prompt data dictionaries
        args: Base command line arguments
    """
    if not prompts_data:
        logger.warning("No valid prompts found")
        return

    gen_settings = get_generation_settings(args)
    device = gen_settings.device

    # 1. Precompute Image Data (AE)
    logger.info("Loading AE and Image Encoders for batch image preprocessing...")
    ae_for_batch = flux_utils.load_ae(args.vae, dtype=torch.float32, device=device, disable_mmap=True)

    all_precomputed_image_data = []
    all_prompt_args_list = [apply_overrides(args, pd) for pd in prompts_data]  # Create all arg instances first

    logger.info("Preprocessing images and AE encoding for all prompts...")

    # AE to device for this phase, because we do not want to offload it to CPU
    ae_for_batch.to(device)

    for i, prompt_args_item in enumerate(all_prompt_args_list):
        logger.info(f"Image preprocessing for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
        image_data = prepare_image_inputs(prompt_args_item, device, ae_for_batch)
        all_precomputed_image_data.append(image_data)

    ae_for_batch.to("cpu")  # Move AE back to CPU until decode phase
    clean_memory_on_device(device)

    # 2. Precompute Text Data (Text Encoders)
    logger.info("Loading Text Encoders for batch text preprocessing...")

    # FIX: the fp8 dtype attribute is torch.float8_e4m3fn; the previous
    # "torch.float8e4m3fn" spelling raised AttributeError when --fp8_t5 was set.
    t5_dtype = torch.float8_e4m3fn if args.fp8_t5 else torch.bfloat16
    tokenizer1_batch, text_encoder1_batch = flux_utils.load_t5xxl(
        args.text_encoder1, dtype=t5_dtype, device=device, disable_mmap=True
    )
    tokenizer2_batch, text_encoder2_batch = flux_utils.load_clip_l(
        args.text_encoder2, dtype=torch.bfloat16, device=device, disable_mmap=True
    )

    # Text Encoders to device for this phase
    text_encoder2_batch.to(device)

    all_precomputed_text_data = []
    conds_cache_batch = {}

    logger.info("Preprocessing text and LLM/TextEncoder encoding for all prompts...")
    temp_shared_models_txt = {
        "tokenizer1": tokenizer1_batch,
        "text_encoder1": text_encoder1_batch,  # on GPU
        "tokenizer2": tokenizer2_batch,
        "text_encoder2": text_encoder2_batch,  # on GPU
        "conds_cache": conds_cache_batch,
    }

    for i, prompt_args_item in enumerate(all_prompt_args_list):
        logger.info(f"Text preprocessing for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
        text_data = prepare_text_inputs(prompt_args_item, device, temp_shared_models_txt)
        all_precomputed_text_data.append(text_data)

    # Text encoders are no longer needed; free them before loading the DiT
    del tokenizer1_batch, text_encoder1_batch, tokenizer2_batch, text_encoder2_batch, temp_shared_models_txt, conds_cache_batch
    clean_memory_on_device(device)

    # 3. Load DiT Model once
    logger.info("Loading DiT model for batch generation...")
    # Use args from the first prompt for DiT loading (LoRA etc. should be consistent for a batch)
    first_prompt_args = all_prompt_args_list[0]
    dit_model = load_dit_model(first_prompt_args, device)

    if first_prompt_args.lora_weight is not None and len(first_prompt_args.lora_weight) > 0:
        logger.info("Merging LoRA weights into DiT model...")
        merge_lora_weights(
            lora_flux,
            dit_model,
            first_prompt_args.lora_weight,
            first_prompt_args.lora_multiplier,
            first_prompt_args.include_patterns,
            first_prompt_args.exclude_patterns,
            device,
            first_prompt_args.lycoris,
            first_prompt_args.save_merged_model,
        )
        if first_prompt_args.save_merged_model:
            logger.info("Merged DiT model saved. Skipping generation.")
            del dit_model
            clean_memory_on_device(device)
            return

    logger.info("Optimizing DiT model...")
    optimize_model(dit_model, first_prompt_args, device)  # Handles device placement, fp8 etc.

    shared_models_for_generate = {"model": dit_model}  # Pass DiT via shared_models

    all_latents = []

    logger.info("Generating latents for all prompts...")
    with torch.no_grad():
        for i, prompt_args_item in enumerate(all_prompt_args_list):
            current_image_data = all_precomputed_image_data[i]
            current_text_data = all_precomputed_text_data[i]

            logger.info(f"Generating latent for prompt {i + 1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}")
            try:
                # generate is called with precomputed data, so it won't load VAE/Text encoders.
                # It uses the DiT model from shared_models_for_generate; returned VAE is None.
                _, latent = generate(
                    prompt_args_item, gen_settings, shared_models_for_generate, current_image_data, current_text_data
                )

                if latent is None:  # e.g. save_merged_model — should be caught earlier
                    # FIX: append a placeholder so all_latents stays index-aligned
                    # with all_prompt_args_list for the decode phase below.
                    all_latents.append(None)
                    continue

                # Save latent if needed (using precomputed image data for H/W)
                if prompt_args_item.output_type in ["latent", "latent_images"]:
                    height = current_image_data["height"]
                    width = current_image_data["width"]
                    save_latent(latent, prompt_args_item, height, width)

                all_latents.append(latent)
            except Exception as e:
                logger.error(f"Error generating latent for prompt: {prompt_args_item.prompt}. Error: {e}", exc_info=True)
                all_latents.append(None)  # Add placeholder for failed generations
                continue

    # Free DiT model
    logger.info("Releasing DiT model from memory...")
    if args.blocks_to_swap > 0:
        logger.info("Waiting for 5 seconds to finish block swap")
        time.sleep(5)

    del shared_models_for_generate["model"]
    del dit_model
    clean_memory_on_device(device)
    synchronize_device(device)  # Ensure memory is freed before loading VAE for decoding

    # 4. Decode latents and save outputs (using ae_for_batch)
    if args.output_type != "latent":
        logger.info("Decoding latents to videos/images using batched VAE...")
        ae_for_batch.to(device)  # Move VAE to device for decoding

        for i, latent in enumerate(all_latents):
            if latent is None:  # Skip failed generations
                logger.warning(f"Skipping decoding for prompt {i + 1} due to previous error.")
                continue

            current_args = all_prompt_args_list[i]
            logger.info(f"Decoding output {i + 1}/{len(all_latents)} for prompt: {current_args.prompt}")

            # latent was already saved above for "latent_images"; only save images now
            if current_args.output_type == "latent_images":
                current_args.output_type = "images"

            # latent from generate is (1, C, H, W); strip the batch dim for save_output
            save_output(current_args, ae_for_batch, latent[0], device)

        ae_for_batch.to("cpu")  # Move VAE back to CPU

    del ae_for_batch
    clean_memory_on_device(device)
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
def process_interactive(args: argparse.Namespace) -> None:
    """Run an interactive loop, generating one image per entered prompt.

    Args:
        args: Base command line arguments
    """
    gen_settings = get_generation_settings(args)
    device = gen_settings.device
    shared_models = load_shared_models(args)
    shared_models["conds_cache"] = {}  # text-encoder output cache, keyed by prompt

    print("Interactive mode. Enter prompts (Ctrl+D or Ctrl+Z (Windows) to exit):")

    # prefer prompt_toolkit for line editing; fall back to builtin input()
    try:
        import prompt_toolkit
    except ImportError:
        logger.warning("prompt_toolkit not found. Using basic input instead.")
        prompt_toolkit = None

    if prompt_toolkit:
        session = prompt_toolkit.PromptSession()

        def input_line(prompt: str) -> str:
            return session.prompt(prompt)

    else:

        def input_line(prompt: str) -> str:
            return input(prompt)

    try:
        while True:
            try:
                line = input_line("> ")
                stripped = line.strip()
                if not stripped:
                    continue
                # prompt_toolkit delivers Ctrl+D / Ctrl+Z as literal control characters
                if len(stripped) == 1 and stripped in ["\x04", "\x1a"]:
                    raise EOFError

                # parse the line and apply per-prompt overrides
                overrides = parse_prompt_line(line)
                prompt_args = apply_overrides(args, overrides)

                # no precomputed data here: generate() loads the VAE internally
                # and returns it so we can decode the latent below
                returned_vae, latent = generate(prompt_args, gen_settings, shared_models)

                save_output(prompt_args, returned_vae, latent[0], device)

            except KeyboardInterrupt:
                print("\nInterrupted. Continue (Ctrl+D or Ctrl+Z (Windows) to exit)")
                continue

    except EOFError:
        print("\nExiting interactive mode")
|
| 1073 |
+
|
| 1074 |
+
|
| 1075 |
+
def get_generation_settings(args: argparse.Namespace) -> GenerationSettings:
    """Build GenerationSettings (device and DiT weight dtype) from parsed args.

    Args:
        args: command line arguments (device, fp8_scaled, fp8 are read)

    Returns:
        GenerationSettings: resolved device and DiT weight dtype
    """
    device = torch.device(args.device)

    dit_weight_dtype = None  # default
    if args.fp8_scaled:
        dit_weight_dtype = None  # various precision weights, so don't cast to specific dtype
    elif args.fp8:
        dit_weight_dtype = torch.float8_e4m3fn

    # FIX: removed duplicated word in log message ("weight weight")
    logger.info(f"Using device: {device}, DiT weight precision: {dit_weight_dtype}")

    return GenerationSettings(device=device, dit_weight_dtype=dit_weight_dtype)
|
| 1088 |
+
|
| 1089 |
+
|
| 1090 |
+
def main():
    """Entry point: dispatch to latent-decode, batch, interactive, or single-prompt mode."""
    args = parse_args()

    # Check if latents are provided (decode-only mode)
    latents_mode = args.latent_path is not None and len(args.latent_path) > 0

    # Set device
    device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device)
    logger.info(f"Using device: {device}")
    args.device = device

    if latents_mode:
        # Latent decode mode: load latents (and their metadata) and decode them
        original_base_names = []
        latents_list = []
        seeds = []

        for latent_path in args.latent_path:
            original_base_names.append(os.path.splitext(os.path.basename(latent_path))[0])
            seed = 0

            if os.path.splitext(latent_path)[1] != ".safetensors":
                latents = torch.load(latent_path, map_location="cpu")
            else:
                latents = load_file(latent_path)["latent"]
                with safe_open(latent_path, framework="pt") as f:
                    metadata = f.metadata()
                if metadata is None:
                    metadata = {}
                logger.info(f"Loaded metadata: {metadata}")

                # restore seed and image size from the saved generation metadata
                if "seeds" in metadata:
                    seed = int(metadata["seeds"])
                if "height" in metadata and "width" in metadata:
                    height = int(metadata["height"])
                    width = int(metadata["width"])
                    args.image_size = [height, width]

            seeds.append(seed)
            logger.info(f"Loaded latent from {latent_path}. Shape: {latents.shape}")

            if latents.ndim == 5:  # [BCTHW]
                latents = latents.squeeze(0)  # [CTHW]

            latents_list.append(latents)

        # FIX: load the AE once, instead of re-loading it from disk for every latent
        ae = flux_utils.load_ae(args.vae, dtype=torch.float32, device=device, disable_mmap=True)
        for i, latent in enumerate(latents_list):
            args.seed = seeds[i]
            save_output(args, ae, latent, device, original_base_names)

    elif args.from_file:
        # Batch mode: read prompts from file and process them together
        with open(args.from_file, "r", encoding="utf-8") as f:
            prompt_lines = f.readlines()

        prompts_data = preprocess_prompts_for_batch(prompt_lines, args)
        process_batch_prompts(prompts_data, args)

    elif args.interactive:
        # Interactive mode
        process_interactive(args)

    else:
        # Single prompt mode: generate() loads all necessary models
        # (VAE, text encoders, DiT) itself and returns the VAE for decoding.
        gen_settings = get_generation_settings(args)
        returned_vae, latent = generate(args, gen_settings)

        save_output(args, returned_vae, latent[0], device)

    logger.info("Done!")
|
| 1180 |
+
|
| 1181 |
+
|
| 1182 |
+
# script entry point
if __name__ == "__main__":
    main()
|
src/musubi_tuner/flux_kontext_train_network.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
from einops import rearrange
|
| 6 |
+
import torch
|
| 7 |
+
from tqdm import tqdm
|
| 8 |
+
from accelerate import Accelerator
|
| 9 |
+
|
| 10 |
+
from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_FLUX_KONTEXT, ARCHITECTURE_FLUX_KONTEXT_FULL
|
| 11 |
+
from musubi_tuner.flux import flux_models, flux_utils
|
| 12 |
+
from musubi_tuner.hv_train_network import (
|
| 13 |
+
NetworkTrainer,
|
| 14 |
+
load_prompts,
|
| 15 |
+
clean_memory_on_device,
|
| 16 |
+
setup_parser_common,
|
| 17 |
+
read_config_from_file,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
import logging
|
| 21 |
+
|
| 22 |
+
from musubi_tuner.utils import model_utils
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
logging.basicConfig(level=logging.INFO)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class FluxKontextNetworkTrainer(NetworkTrainer):
    """Network (LoRA) trainer for FLUX.1 Kontext.

    Implements the architecture-specific hooks of ``NetworkTrainer``:
    text-encoder output caching for sample prompts, sample-image inference,
    VAE/DiT loading, and the packed-latent DiT forward pass with a
    flow-matching training target.
    """

    def __init__(self):
        super().__init__()

    # region model specific

    @property
    def architecture(self) -> str:
        return ARCHITECTURE_FLUX_KONTEXT

    @property
    def architecture_full_name(self) -> str:
        return ARCHITECTURE_FLUX_KONTEXT_FULL

    def handle_model_specific_args(self, args):
        # DiT compute dtype follows mixed precision: fp16 only when explicitly requested.
        self.dit_dtype = torch.float16 if args.mixed_precision == "fp16" else torch.bfloat16
        self._i2v_training = False
        self._control_training = False  # this means video training, not control image training
        self.default_guidance_scale = 2.5  # embedded guidance scale for inference

    def process_sample_prompts(
        self,
        args: argparse.Namespace,
        accelerator: Accelerator,
        sample_prompts: str,
    ):
        """Pre-encode all sample prompts with T5-XXL and CLIP-L, then free both encoders.

        Returns a list of per-prompt parameter dicts with cached embeddings
        attached under ``"t5_vec"`` and ``"clip_l_pooler"``.
        """
        device = accelerator.device

        logger.info(f"cache Text Encoder outputs for sample prompt: {sample_prompts}")
        prompts = load_prompts(sample_prompts)

        # Load T5 and CLIP text encoders.
        # FIX: the attribute is `torch.float8_e4m3fn`; the previous
        # `torch.float8e4m3fn` does not exist and raised AttributeError
        # whenever --fp8_t5 was set.
        t5_dtype = torch.float8_e4m3fn if args.fp8_t5 else torch.bfloat16
        tokenizer1, text_encoder1 = flux_utils.load_t5xxl(args.text_encoder1, dtype=t5_dtype, device=device, disable_mmap=True)
        tokenizer2, text_encoder2 = flux_utils.load_clip_l(
            args.text_encoder2, dtype=torch.bfloat16, device=device, disable_mmap=True
        )

        # Encode with T5 and CLIP text encoders
        logger.info("Encoding with T5 and CLIP text encoders")

        sample_prompts_te_outputs = {}  # (prompt) -> (t5, clip)
        with torch.amp.autocast(device_type=device.type, dtype=t5_dtype), torch.no_grad():
            for prompt_dict in prompts:
                prompt = prompt_dict.get("prompt", "")
                # Skip missing prompts and de-duplicate identical ones.
                if prompt is None or prompt in sample_prompts_te_outputs:
                    continue

                # encode prompt
                logger.info(f"cache Text Encoder outputs for prompt: {prompt}")

                t5_tokens = tokenizer1(
                    prompt,
                    max_length=flux_models.T5XXL_MAX_LENGTH,
                    padding="max_length",
                    return_length=False,
                    return_overflowing_tokens=False,
                    truncation=True,
                    return_tensors="pt",
                )["input_ids"]
                l_tokens = tokenizer2(prompt, max_length=77, padding="max_length", truncation=True, return_tensors="pt")[
                    "input_ids"
                ]

                with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad():
                    t5_vec = text_encoder1(
                        input_ids=t5_tokens.to(text_encoder1.device), attention_mask=None, output_hidden_states=False
                    )["last_hidden_state"]
                # Guard against silent NaN propagation into sampling.
                assert torch.isnan(t5_vec).any() == False, "T5 vector contains NaN values"
                t5_vec = t5_vec.cpu()

                with torch.autocast(device_type=device.type, dtype=text_encoder2.dtype), torch.no_grad():
                    clip_l_pooler = text_encoder2(l_tokens.to(text_encoder2.device))["pooler_output"]
                clip_l_pooler = clip_l_pooler.cpu()

                # save prompt cache
                sample_prompts_te_outputs[prompt] = (t5_vec, clip_l_pooler)

        del tokenizer1, text_encoder1, tokenizer2, text_encoder2
        clean_memory_on_device(device)

        # prepare sample parameters: attach cached embeddings to each prompt dict
        sample_parameters = []
        for prompt_dict in prompts:
            prompt_dict_copy = prompt_dict.copy()

            prompt = prompt_dict.get("prompt", "")
            prompt_dict_copy["t5_vec"] = sample_prompts_te_outputs[prompt][0]
            prompt_dict_copy["clip_l_pooler"] = sample_prompts_te_outputs[prompt][1]

            sample_parameters.append(prompt_dict_copy)

        clean_memory_on_device(accelerator.device)

        return sample_parameters

    def do_inference(
        self,
        accelerator,
        args,
        sample_parameter,
        vae,
        dit_dtype,
        transformer,
        discrete_flow_shift,
        sample_steps,
        width,
        height,
        frame_count,
        generator,
        do_classifier_free_guidance,
        guidance_scale,
        cfg_scale,
        image_path=None,
        control_video_path=None,
    ):
        """architecture dependent inference

        Runs Euler flow-matching sampling with cached text-encoder outputs and
        (optionally) a VAE-encoded control image concatenated along the
        sequence dimension. Returns decoded pixels as B C 1 H W in [0, 1].
        """
        model: flux_models.Flux = transformer
        device = accelerator.device

        # Get cached embeddings produced by process_sample_prompts
        t5_vec = sample_parameter["t5_vec"].to(device=device, dtype=torch.bfloat16)
        clip_l_pooler = sample_parameter["clip_l_pooler"].to(device=device, dtype=torch.bfloat16)

        txt_ids = torch.zeros(t5_vec.shape[0], t5_vec.shape[1], 3, device=t5_vec.device)

        # Initialize latents in packed form: each token is a 2x2 patch of 16 channels
        packed_latent_height, packed_latent_width = height // 16, width // 16
        noise_dtype = torch.float32
        noise = torch.randn(
            1,
            packed_latent_height * packed_latent_width,
            16 * 2 * 2,
            generator=generator,
            dtype=noise_dtype,
            device=device,
        ).to(device, dtype=torch.bfloat16)

        img_ids = flux_utils.prepare_img_ids(1, packed_latent_height, packed_latent_width).to(device)

        vae.to(device)
        vae.eval()

        # prepare control latent (Kontext conditioning image), if any
        control_latent = None
        control_latent_ids = None
        if "control_image_path" in sample_parameter:
            control_image_path = sample_parameter["control_image_path"][0]  # only use the first control image
            control_image_tensor, _, _ = flux_utils.preprocess_control_image(control_image_path, resize_to_prefered=False)

            with torch.no_grad():
                control_latent = vae.encode(control_image_tensor.to(device, dtype=vae.dtype))

            # pack control_latent into the same token layout as the noise
            ctrl_packed_height = control_latent.shape[2] // 2
            ctrl_packed_width = control_latent.shape[3] // 2
            control_latent = rearrange(control_latent, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
            control_latent_ids = flux_utils.prepare_img_ids(1, ctrl_packed_height, ctrl_packed_width, is_ctrl=True).to(device)

            control_latent = control_latent.to(torch.bfloat16)

        # Offload VAE until decoding to free VRAM during denoising
        vae.to("cpu")
        clean_memory_on_device(device)

        # denoise
        discrete_flow_shift = discrete_flow_shift if discrete_flow_shift != 0 else None  # None means no shift
        timesteps = flux_utils.get_schedule(
            num_steps=sample_steps, image_seq_len=packed_latent_height * packed_latent_width, shift_value=discrete_flow_shift
        )

        x = noise
        del noise
        guidance_vec = torch.full((x.shape[0],), guidance_scale, device=x.device, dtype=x.dtype)

        for t_curr, t_prev in zip(tqdm(timesteps[:-1]), timesteps[1:]):
            t_vec = torch.full((x.shape[0],), t_curr, dtype=x.dtype, device=x.device)

            img_input = x
            img_input_ids = img_ids
            if control_latent is not None:
                # if control_latent is provided, concatenate it to the input
                img_input = torch.cat((img_input, control_latent), dim=1)
                img_input_ids = torch.cat((img_input_ids, control_latent_ids), dim=1)

            with torch.no_grad():
                pred = model(
                    img=img_input,
                    img_ids=img_input_ids,
                    txt=t5_vec,
                    txt_ids=txt_ids,
                    y=clip_l_pooler,
                    timesteps=t_vec,
                    guidance=guidance_vec,
                )
                # Drop the prediction for control tokens; keep only the image part
                pred = pred[:, : x.shape[1]]

            # Euler step along the flow from t_curr to t_prev
            x = x + (t_prev - t_curr) * pred

        # unpack tokens back to a spatial latent (B, 16, H/8, W/8)
        x = x.float()
        x = rearrange(x, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=packed_latent_height, w=packed_latent_width, ph=2, pw=2)
        latent = x.to(vae.dtype)
        del x

        # Move VAE to the appropriate device for sampling
        vae.to(device)
        vae.eval()

        # Decode latents to video
        logger.info(f"Decoding video from latents: {latent.shape}")
        with torch.no_grad():
            pixels = vae.decode(latent)  # decode to pixels
        del latent

        logger.info("Decoding complete")
        pixels = pixels.to(torch.float32).cpu()
        pixels = (pixels / 2 + 0.5).clamp(0, 1)  # -1 to 1 -> 0 to 1

        vae.to("cpu")
        clean_memory_on_device(device)

        pixels = pixels.unsqueeze(2)  # add a dummy dimension for video frames, B C H W -> B C 1 H W
        return pixels

    def load_vae(self, args: argparse.Namespace, vae_dtype: torch.dtype, vae_path: str):
        """Load the FLUX autoencoder.

        NOTE(review): the `vae_dtype`/`vae_path` parameters are intentionally
        ignored in favor of `args.vae` and fp32, matching the base-class hook
        signature — confirm this is the intended behavior.
        """
        vae_path = args.vae

        logger.info(f"Loading AE model from {vae_path}")
        ae = flux_utils.load_ae(vae_path, dtype=torch.float32, device="cpu", disable_mmap=True)
        return ae

    def load_transformer(
        self,
        accelerator: Accelerator,
        args: argparse.Namespace,
        dit_path: str,
        attn_mode: str,
        split_attn: bool,
        loading_device: str,
        dit_weight_dtype: Optional[torch.dtype],
    ):
        """Load the FLUX flow-matching transformer (DiT).

        NOTE(review): `dit_path` and `dit_weight_dtype` are ignored; the
        checkpoint comes from `args.dit` and dtype is left to the loader.
        """
        model = flux_utils.load_flow_model(
            ckpt_path=args.dit,
            dtype=None,
            device=loading_device,
            disable_mmap=True,
            attn_mode=attn_mode,
            split_attn=split_attn,
            loading_device=loading_device,
            fp8_scaled=args.fp8_scaled,
        )
        return model

    def compile_transformer(self, args, transformer):
        """Compile the double/single transformer blocks; linear layers are left
        uncompiled when block swapping is active."""
        transformer: flux_models.Flux = transformer
        return model_utils.compile_transformer(
            args, transformer, [transformer.double_blocks, transformer.single_blocks], disable_linear=self.blocks_to_swap > 0
        )

    def scale_shift_latents(self, latents):
        # FLUX latents are already scaled by the VAE helper; no extra scaling.
        return latents

    def call_dit(
        self,
        args: argparse.Namespace,
        accelerator: Accelerator,
        transformer,
        latents: torch.Tensor,
        batch: dict[str, torch.Tensor],
        noise: torch.Tensor,
        noisy_model_input: torch.Tensor,
        timesteps: torch.Tensor,
        network_dtype: torch.dtype,
    ):
        """Run one training forward pass.

        Packs target and control latents into token sequences, concatenates
        them, calls the DiT, unpacks the image part of the prediction, and
        returns ``(model_pred, target)`` where the target is the flow-matching
        velocity ``noise - latents``.
        """
        model: flux_models.Flux = transformer

        bsize = latents.shape[0]
        latents = batch["latents"]  # B, C, H, W
        control_latents = batch["latents_control"]  # B, C, H, W

        # pack latents: 2x2 spatial patches -> tokens
        packed_latent_height = latents.shape[2] // 2
        packed_latent_width = latents.shape[3] // 2
        noisy_model_input = rearrange(noisy_model_input, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)

        img_ids = flux_utils.prepare_img_ids(bsize, packed_latent_height, packed_latent_width)

        # pack control latents
        packed_control_latent_height = control_latents.shape[2] // 2
        packed_control_latent_width = control_latents.shape[3] // 2
        control_latents = rearrange(control_latents, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
        control_latent_lengths = [control_latents.shape[1]] * bsize

        control_ids = flux_utils.prepare_img_ids(bsize, packed_control_latent_height, packed_control_latent_width, is_ctrl=True)

        # context (cached text-encoder outputs)
        t5_vec = batch["t5_vec"]  # B, T, D
        clip_l_pooler = batch["clip_l_pooler"]  # B, T, D
        txt_ids = torch.zeros(t5_vec.shape[0], t5_vec.shape[1], 3, device=accelerator.device)

        # ensure the hidden state will require grad (needed for checkpointing)
        if args.gradient_checkpointing:
            noisy_model_input.requires_grad_(True)
            control_latents.requires_grad_(True)
            t5_vec.requires_grad_(True)
            clip_l_pooler.requires_grad_(True)

        # call DiT
        noisy_model_input = noisy_model_input.to(device=accelerator.device, dtype=network_dtype)
        img_ids = img_ids.to(device=accelerator.device)
        control_latents = control_latents.to(device=accelerator.device, dtype=network_dtype)
        control_ids = control_ids.to(device=accelerator.device)
        t5_vec = t5_vec.to(device=accelerator.device, dtype=network_dtype)
        clip_l_pooler = clip_l_pooler.to(device=accelerator.device, dtype=network_dtype)

        # use 1.0 as guidance scale for FLUX.1 Kontext training
        guidance_vec = torch.full((bsize,), 1.0, device=accelerator.device, dtype=network_dtype)

        img_input = torch.cat((noisy_model_input, control_latents), dim=1)
        img_input_ids = torch.cat((img_ids, control_ids), dim=1)

        # DiT expects timesteps in [0, 1]
        timesteps = timesteps / 1000.0
        model_pred = model(
            img=img_input,
            img_ids=img_input_ids,
            txt=t5_vec,
            txt_ids=txt_ids,
            y=clip_l_pooler,
            timesteps=timesteps,
            guidance=guidance_vec,
            control_lengths=control_latent_lengths,
        )
        model_pred = model_pred[:, : noisy_model_input.shape[1]]  # remove control latents

        # unpack latents back to B, C, H, W
        model_pred = rearrange(
            model_pred, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=packed_latent_height, w=packed_latent_width, ph=2, pw=2
        )

        # flow matching loss target: velocity from data to noise
        target = noise - latents

        return model_pred, target

    # endregion model specific
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def flux_kontext_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register FLUX.1 Kontext specific command-line options on *parser*.

    Adds the scaled-fp8 DiT switch, both text-encoder checkpoint paths, and the
    fp8 toggle for the T5 encoder. Returns the same parser for chaining.
    """
    add_opt = parser.add_argument

    add_opt("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT / DiTにスケーリングされたfp8を使う")
    add_opt("--text_encoder1", type=str, default=None, help="text encoder (T5) checkpoint path")
    add_opt("--fp8_t5", action="store_true", help="use fp8 for Text Encoder model")
    add_opt(
        "--text_encoder2",
        type=str,
        default=None,
        help="text encoder (CLIP) checkpoint path, optional. If training I2V model, this is required",
    )

    return parser
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
def main():
    """CLI entry point: build the argument parser, load optional TOML config,
    apply FLUX Kontext defaults, and start training."""
    parser = setup_parser_common()
    parser = flux_kontext_setup_parser(parser)

    args = parser.parse_args()
    # Command-line values may be overridden/filled from a config file.
    args = read_config_from_file(args, parser)

    args.dit_dtype = None  # set from mixed_precision
    if args.vae_dtype is None:
        args.vae_dtype = "bfloat16"  # make bfloat16 as default for VAE

    trainer = FluxKontextNetworkTrainer()
    trainer.train(args)


if __name__ == "__main__":
    main()
|
src/musubi_tuner/fpack_cache_latents.py
ADDED
|
@@ -0,0 +1,501 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import logging
|
| 3 |
+
import math
|
| 4 |
+
import os
|
| 5 |
+
from typing import List
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
from tqdm import tqdm
|
| 11 |
+
from transformers import SiglipImageProcessor, SiglipVisionModel
|
| 12 |
+
|
| 13 |
+
from musubi_tuner.dataset import config_utils
|
| 14 |
+
from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer
|
| 15 |
+
from musubi_tuner.dataset.image_video_dataset import BaseDataset, ItemInfo, save_latent_cache_framepack, ARCHITECTURE_FRAMEPACK
|
| 16 |
+
from musubi_tuner.frame_pack import hunyuan
|
| 17 |
+
from musubi_tuner.frame_pack.framepack_utils import load_image_encoders, load_vae
|
| 18 |
+
from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D
|
| 19 |
+
from musubi_tuner.frame_pack.clip_vision import hf_clip_vision_encode
|
| 20 |
+
import musubi_tuner.cache_latents as cache_latents
|
| 21 |
+
from musubi_tuner.cache_latents import preprocess_contents
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
logging.basicConfig(level=logging.INFO)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def encode_and_save_batch(
    vae: AutoencoderKLCausal3D,
    feature_extractor: SiglipImageProcessor,
    image_encoder: SiglipVisionModel,
    batch: List[ItemInfo],
    vanilla_sampling: bool = False,
    one_frame: bool = False,
    one_frame_no_2x: bool = False,
    one_frame_no_4x: bool = False,
):
    """Encode a batch of original RGB videos and save FramePack section caches.

    For each item, the video is VAE-encoded once, then sliced into
    ``latent_window_size``-sized sections. Depending on ``vanilla_sampling``,
    section caches emulate either the inverted-order FramePack inference
    (padding trick) or the forward FramePack-F1 layout. One cache file is
    written per (item, section) via ``save_latent_cache_framepack``.
    All items in *batch* are assumed to share frame count, size, and window
    size.
    """
    if one_frame:
        # One-frame mode has a different layout; delegate entirely.
        encode_and_save_batch_one_frame(
            vae, feature_extractor, image_encoder, batch, vanilla_sampling, one_frame_no_2x, one_frame_no_4x
        )
        return

    latent_window_size = batch[0].fp_latent_window_size  # all items should have the same window size

    # Stack batch into tensor (B,C,F,H,W) in RGB order
    contents = torch.stack([torch.from_numpy(item.content) for item in batch])
    if len(contents.shape) == 4:
        contents = contents.unsqueeze(1)  # B, H, W, C -> B, F, H, W, C

    contents = contents.permute(0, 4, 1, 2, 3).contiguous()  # B, C, F, H, W
    contents = contents.to(vae.device, dtype=vae.dtype)
    contents = contents / 127.5 - 1.0  # normalize uint8 [0,255] to [-1, 1]

    height, width = contents.shape[3], contents.shape[4]
    if height < 8 or width < 8:
        item = batch[0]  # other items should have the same size
        raise ValueError(f"Image or video size too small: {item.item_key} and {len(batch) - 1} more, size: {item.original_size}")

    # calculate latent frame count from original frame count (4n+1)
    latent_f = (batch[0].frame_count - 1) // 4 + 1

    # calculate the total number of sections (excluding the first frame, divided by window size)
    total_latent_sections = math.floor((latent_f - 1) / latent_window_size)
    if total_latent_sections < 1:
        min_frames_needed = latent_window_size * 4 + 1
        raise ValueError(
            f"Not enough frames for FramePack: {batch[0].frame_count} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size + 1} latent frames)"
        )

    # actual latent frame count (aligned to section boundaries)
    latent_f_aligned = total_latent_sections * latent_window_size + 1 if not one_frame else 1

    # actual video frame count; trim trailing frames that don't fill a section
    frame_count_aligned = (latent_f_aligned - 1) * 4 + 1
    if frame_count_aligned != batch[0].frame_count:
        logger.info(
            f"Frame count mismatch: required={frame_count_aligned} != actual={batch[0].frame_count}, trimming to {frame_count_aligned}"
        )
        contents = contents[:, :, :frame_count_aligned, :, :]

    latent_f = latent_f_aligned  # Update to the aligned value

    # VAE encode (list of tensor -> stack)
    latents = hunyuan.vae_encode(contents, vae)  # include scaling factor
    latents = latents.to("cpu")  # (B, C, latent_f, H/8, W/8)

    # Vision encoding per‑item (once): first frame of each item
    images = np.stack([item.content[0] for item in batch], axis=0)  # B, H, W, C

    # encode image with image encoder
    image_embeddings = []
    with torch.no_grad():
        for image in images:
            if image.shape[-1] == 4:
                # drop alpha channel; SigLIP expects RGB
                image = image[..., :3]
            image_encoder_output = hf_clip_vision_encode(image, feature_extractor, image_encoder)
            image_embeddings.append(image_encoder_output.last_hidden_state)
    image_embeddings = torch.cat(image_embeddings, dim=0)  # B, LEN, 1152
    image_embeddings = image_embeddings.to("cpu")  # Save memory

    if not vanilla_sampling:
        # padding is reversed for inference (future to past)
        latent_paddings = list(reversed(range(total_latent_sections)))
        # Note: The padding trick for inference. See the paper for details.
        if total_latent_sections > 4:
            latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]

        for b, item in enumerate(batch):
            original_latent_cache_path = item.latent_cache_path
            video_lat = latents[b : b + 1]  # keep batch dim, 1, C, F, H, W

            # emulate inference step (history latents)
            # Note: In inference, history_latents stores *generated* future latents.
            # Here, for caching, we just need its shape and type for clean_* tensors.
            # The actual content doesn't matter much as clean_* will be overwritten.
            history_latents = torch.zeros(
                (1, video_lat.shape[1], 1 + 2 + 16, video_lat.shape[3], video_lat.shape[4]), dtype=video_lat.dtype
            )  # C=16 for HY

            latent_f_index = latent_f - latent_window_size  # Start from the last section
            section_index = total_latent_sections - 1

            for latent_padding in latent_paddings:
                is_last_section = section_index == 0  # the last section in inference order == the first section in time
                latent_padding_size = latent_padding * latent_window_size
                if is_last_section:
                    assert latent_f_index == 1, "Last section should be starting from frame 1"

                # indices generation (same as inference)
                indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0)
                (
                    clean_latent_indices_pre,  # Index for start_latent
                    blank_indices,  # Indices for padding (future context in inference)
                    latent_indices,  # Indices for the target latents to predict
                    clean_latent_indices_post,  # Index for the most recent history frame
                    clean_latent_2x_indices,  # Indices for the next 2 history frames
                    clean_latent_4x_indices,  # Indices for the next 16 history frames
                ) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1)

                # Indices for clean_latents (start + recent history)
                clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)

                # clean latents preparation (emulating inference)
                clean_latents_pre = video_lat[:, :, 0:1, :, :]  # Always the first frame (start_latent)
                clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, : 1 + 2 + 16, :, :].split(
                    [1, 2, 16], dim=2
                )
                clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2)  # Combine start frame + placeholder

                # Target latents for this section (ground truth)
                target_latents = video_lat[:, :, latent_f_index : latent_f_index + latent_window_size, :, :]

                # save cache (file path is inside item.latent_cache_path pattern), remove batch dim
                item.latent_cache_path = append_section_idx_to_latent_cache_path(original_latent_cache_path, section_index)
                save_latent_cache_framepack(
                    item_info=item,
                    latent=target_latents.squeeze(0),  # Ground truth for this section
                    latent_indices=latent_indices.squeeze(0),  # Indices for the ground truth section
                    clean_latents=clean_latents.squeeze(0),  # Start frame + history placeholder
                    clean_latent_indices=clean_latent_indices.squeeze(0),  # Indices for start frame + history placeholder
                    clean_latents_2x=clean_latents_2x.squeeze(0),  # History placeholder
                    clean_latent_2x_indices=clean_latent_2x_indices.squeeze(0),  # Indices for history placeholder
                    clean_latents_4x=clean_latents_4x.squeeze(0),  # History placeholder
                    clean_latent_4x_indices=clean_latent_4x_indices.squeeze(0),  # Indices for history placeholder
                    image_embeddings=image_embeddings[b],
                )

                if is_last_section:  # If this was the first section generated in inference (time=0)
                    # History gets the start frame + the generated first section
                    generated_latents_for_history = video_lat[:, :, : latent_window_size + 1, :, :]
                else:
                    # History gets the generated current section
                    generated_latents_for_history = target_latents  # Use true latents as stand-in for generated

                # Prepend to history: newest frames live at the front of dim 2.
                history_latents = torch.cat([generated_latents_for_history, history_latents], dim=2)

                section_index -= 1
                latent_f_index -= latent_window_size

    else:
        # Vanilla Sampling Logic: forward (time-ordered) section layout
        for b, item in enumerate(batch):
            original_latent_cache_path = item.latent_cache_path
            video_lat = latents[b : b + 1]  # Keep batch dim: 1, C, F_aligned, H, W
            img_emb = image_embeddings[b]  # LEN, 1152

            for section_index in range(total_latent_sections):
                target_start_f = section_index * latent_window_size + 1
                target_end_f = target_start_f + latent_window_size
                target_latents = video_lat[:, :, target_start_f:target_end_f, :, :]
                start_latent = video_lat[:, :, 0:1, :, :]

                # Clean latents preparation (Vanilla): 1 + 2 + 16 past context frames
                clean_latents_total_count = 1 + 2 + 16
                history_latents = torch.zeros(
                    size=(1, 16, clean_latents_total_count, video_lat.shape[-2], video_lat.shape[-1]),
                    device=video_lat.device,
                    dtype=video_lat.dtype,
                )

                # Copy as much real history as exists before the target section;
                # early sections keep leading zeros where no history is available.
                history_start_f = 0
                video_start_f = target_start_f - clean_latents_total_count
                copy_count = clean_latents_total_count
                if video_start_f < 0:
                    history_start_f = -video_start_f
                    copy_count = clean_latents_total_count - history_start_f
                    video_start_f = 0
                if copy_count > 0:
                    history_latents[:, :, history_start_f:] = video_lat[:, :, video_start_f : video_start_f + copy_count, :, :]

                # indices generation (Vanilla): copy from FramePack-F1
                indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
                (
                    clean_latent_indices_start,
                    clean_latent_4x_indices,
                    clean_latent_2x_indices,
                    clean_latent_1x_indices,
                    latent_indices,
                ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
                clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)

                clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2)
                clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2)

                # Save cache
                item.latent_cache_path = append_section_idx_to_latent_cache_path(original_latent_cache_path, section_index)
                save_latent_cache_framepack(
                    item_info=item,
                    latent=target_latents.squeeze(0),
                    latent_indices=latent_indices.squeeze(0),  # Indices for target section i
                    clean_latents=clean_latents.squeeze(0),  # Past clean frames
                    clean_latent_indices=clean_latent_indices.squeeze(0),  # Indices for clean_latents_pre/post
                    clean_latents_2x=clean_latents_2x.squeeze(0),  # Past clean frames (2x)
                    clean_latent_2x_indices=clean_latent_2x_indices.squeeze(0),  # Indices for clean_latents_2x
                    clean_latents_4x=clean_latents_4x.squeeze(0),  # Past clean frames (4x)
                    clean_latent_4x_indices=clean_latent_4x_indices.squeeze(0),  # Indices for clean_latents_4x
                    image_embeddings=img_emb,
                    # Note: We don't explicitly save past_offset_indices,
                    # but its size influences the absolute values in other indices.
                )
| 243 |
+
|
| 244 |
+
def encode_and_save_batch_one_frame(
    vae: AutoencoderKLCausal3D,
    feature_extractor: SiglipImageProcessor,
    image_encoder: SiglipVisionModel,
    batch: List[ItemInfo],
    vanilla_sampling: bool = False,
    one_frame_no_2x: bool = False,
    one_frame_no_4x: bool = False,
):
    """Encode a batch of items for one-frame FramePack training and save caches.

    For each item the last latent frame is the training target; the preceding
    frames (plus an optional zero "post" frame) become the clean latents. The
    index layout mirrors inference: [start(1), window, post(1), 2x(2), 4x(16)].

    NOTE(review): ``vanilla_sampling`` is accepted for signature compatibility
    with the multi-section path but is not used in this function body.
    """
    # item.content: target image (H, W, C)
    # item.control_content: list of images (H, W, C)
    _, _, contents, content_masks = preprocess_contents(batch)
    contents = contents.to(vae.device, dtype=vae.dtype)  # B, C, F, H, W

    # VAE encode: we need to encode one frame at a time because VAE encoder has stride=4 for the time dimension except for the first frame.
    latents = [hunyuan.vae_encode(contents[:, :, idx : idx + 1], vae).to("cpu") for idx in range(contents.shape[2])]
    latents = torch.cat(latents, dim=2)  # B, C, F, H/8, W/8

    # apply alphas to latents: zero/attenuate masked regions per frame
    for b, item in enumerate(batch):
        for i, content_mask in enumerate(content_masks[b]):
            if content_mask is not None:
                # apply mask to the latents
                # print(f"Applying content mask for item {item.item_key}, frame {i}")
                latents[b : b + 1, :, i : i + 1] *= content_mask

    # Vision encoding per‑item (once): use control content because it is the start image
    images = [item.control_content[0] for item in batch]  # list of [H, W, C]

    # encode image with image encoder (alpha channel dropped if present)
    image_embeddings = []
    with torch.no_grad():
        for image in images:
            if image.shape[-1] == 4:
                image = image[..., :3]
            image_encoder_output = hf_clip_vision_encode(image, feature_extractor, image_encoder)
            image_embeddings.append(image_encoder_output.last_hidden_state)
    image_embeddings = torch.cat(image_embeddings, dim=0)  # B, LEN, 1152
    image_embeddings = image_embeddings.to("cpu")  # Save memory

    # save cache for each item in the batch
    for b, item in enumerate(batch):
        # indices generation (same as inference): each item may have different clean_latent_indices, so we generate them per item
        clean_latent_indices = item.fp_1f_clean_indices  # list of indices for clean latents
        if clean_latent_indices is None or len(clean_latent_indices) == 0:
            logger.warning(
                f"Item {item.item_key} has no clean_latent_indices defined, using default indices for one frame training."
            )
            clean_latent_indices = [0]

        # unless "no post" is set, append the index of the zero post frame
        # (located just after the latent window: 1 start + window_size)
        if not item.fp_1f_no_post:
            clean_latent_indices = clean_latent_indices + [1 + item.fp_latent_window_size]
        clean_latent_indices = torch.Tensor(clean_latent_indices).long()  # N

        latent_index = torch.Tensor([item.fp_1f_target_index]).long()  # 1

        # zero values is not needed to cache even if one_frame_no_2x or 4x is False
        clean_latents_2x = None
        clean_latents_4x = None

        if one_frame_no_2x:
            clean_latent_2x_indices = None
        else:
            # 2x indices sit right after [start, window, post]
            index = 1 + item.fp_latent_window_size + 1
            clean_latent_2x_indices = torch.arange(index, index + 2)  # 2

        if one_frame_no_4x:
            clean_latent_4x_indices = None
        else:
            # 4x indices follow the 2x pair
            index = 1 + item.fp_latent_window_size + 1 + 2
            clean_latent_4x_indices = torch.arange(index, index + 16)  # 16

        # clean latents preparation (emulating inference): all frames except the target
        clean_latents = latents[b, :, :-1]  # C, F, H, W
        if not item.fp_1f_no_post:
            # If zero post is enabled, we need to add a zero frame at the end
            clean_latents = F.pad(clean_latents, (0, 0, 0, 0, 0, 1), value=0.0)  # C, F+1, H, W

        # Target latents for this section (ground truth): the last frame only
        target_latents = latents[b, :, -1:]  # C, 1, H, W

        print(f"Saving cache for item {item.item_key} at {item.latent_cache_path}. no_post: {item.fp_1f_no_post}")
        print(f" Clean latent indices: {clean_latent_indices}, latent index: {latent_index}")
        print(f" Clean latents: {clean_latents.shape}, target latents: {target_latents.shape}")
        print(f" Clean latents 2x indices: {clean_latent_2x_indices}, clean latents 4x indices: {clean_latent_4x_indices}")
        print(
            f" Clean latents 2x: {clean_latents_2x.shape if clean_latents_2x is not None else 'None'}, "
            f"Clean latents 4x: {clean_latents_4x.shape if clean_latents_4x is not None else 'None'}"
        )
        print(f" Image embeddings: {image_embeddings[b].shape}")

        # save cache (file path is inside item.latent_cache_path pattern), remove batch dim
        save_latent_cache_framepack(
            item_info=item,
            latent=target_latents,  # Ground truth for this section
            latent_indices=latent_index,  # Indices for the ground truth section
            clean_latents=clean_latents,  # Start frame + history placeholder
            clean_latent_indices=clean_latent_indices,  # Indices for clean_latents_pre/post
            clean_latents_2x=clean_latents_2x,  # History placeholder
            clean_latent_2x_indices=clean_latent_2x_indices,  # Indices for clean_latents_2x
            clean_latents_4x=clean_latents_4x,  # History placeholder
            clean_latent_4x_indices=clean_latent_4x_indices,  # Indices for clean_latents_4x
            image_embeddings=image_embeddings[b],
        )
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def framepack_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """Register the FramePack-specific caching options on *parser* and return it."""
    parser.add_argument("--image_encoder", type=str, required=True, help="Image encoder (CLIP) checkpoint path or directory")

    # all remaining options are boolean flags; register them table-driven
    flag_help = {
        "--f1": "Generate cache for F1 model (vanilla (autoregressive) sampling) instead of Inverted anti-drifting (plain FramePack)",
        "--one_frame": "Generate cache for one frame training (single frame, single section). latent_window_size is used as the index of the target frame.",
        "--one_frame_no_2x": "Do not use clean_latents_2x and clean_latent_2x_indices for one frame training.",
        "--one_frame_no_4x": "Do not use clean_latents_4x and clean_latent_4x_indices for one frame training.",
    }
    for flag, help_text in flag_help.items():
        parser.add_argument(flag, action="store_true", help=help_text)

    return parser
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def main():
    """CLI entry point: build datasets, load models, and cache FramePack latents."""
    parser = cache_latents.setup_parser_common()
    parser = cache_latents.hv_setup_parser(parser)  # VAE options
    parser = framepack_setup_parser(parser)
    args = parser.parse_args()

    if args.disable_cudnn_backend:
        logger.info("Disabling cuDNN PyTorch backend.")
        torch.backends.cudnn.enabled = False

    if args.vae_dtype is not None:
        raise ValueError("VAE dtype is not supported in FramePack")
    # if args.batch_size != 1:
    #     args.batch_size = 1
    #     logger.info("Batch size is set to 1 for FramePack.")

    # prefer an explicitly requested device; otherwise pick CUDA when available
    requested = args.device if hasattr(args, "device") and args.device else None
    device = torch.device(requested if requested else ("cuda" if torch.cuda.is_available() else "cpu"))

    # Build the dataset group from the user-provided config
    blueprint_generator = BlueprintGenerator(ConfigSanitizer())
    logger.info(f"Load dataset config from {args.dataset_config}")
    user_config = config_utils.load_user_config(args.dataset_config)
    blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_FRAMEPACK)
    train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group)
    datasets = train_dataset_group.datasets

    if args.debug_mode is not None:
        # debug: visualize the datasets and exit without encoding
        cache_latents.show_datasets(
            datasets, args.debug_mode, args.console_width, args.console_back, args.console_num_images, fps=16
        )
        return

    assert args.vae is not None, "vae checkpoint is required"

    logger.info(f"Loading VAE model from {args.vae}")
    vae = load_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, args.vae_tiling, device=device)
    vae.to(device)

    logger.info(f"Loading image encoder from {args.image_encoder}")
    feature_extractor, image_encoder = load_image_encoders(args)
    image_encoder.eval()
    image_encoder.to(device)

    logger.info(f"Cache generation mode: {'Vanilla Sampling' if args.f1 else 'Inference Emulation'}")

    def encode(batch: List[ItemInfo]):
        # closure over the loaded models and CLI flags
        encode_and_save_batch(
            vae, feature_extractor, image_encoder, batch, args.f1, args.one_frame, args.one_frame_no_2x, args.one_frame_no_4x
        )

    # reuse the core caching loop
    encode_datasets_framepack(datasets, encode, args)
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def append_section_idx_to_latent_cache_path(latent_cache_path: str, section_idx: int) -> str:
    """Return the cache path with *section_idx* appended to its frame-pos token.

    The path follows "{basename}_{frame_pos-count}_{WxH}_{arch}.safetensors";
    the index is attached to the third token from the end, zero-padded to 4.
    """
    parts = latent_cache_path.split("_")
    parts[-3] += f"-{section_idx:04d}"  # "frame_pos-count" -> "frame_pos-count-NNNN"
    return "_".join(parts)
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def encode_datasets_framepack(datasets: list[BaseDataset], encode: callable, args: argparse.Namespace):
    """Encode latent caches for every dataset, one section file per video section.

    Video items expand "{basename}_{frame_pos-count}_{WxH}_{arch}.safetensors"
    to one file per section via append_section_idx_to_latent_cache_path. With
    --skip_existing, items whose section caches all exist are skipped; stale
    cache files are removed afterwards unless --keep_cache is set.

    Fix: the inner batching loop reused ``i``, shadowing the dataset index from
    the enclosing ``enumerate`` — renamed to ``start`` for clarity/safety.
    """
    num_workers = args.num_workers if args.num_workers is not None else max(1, os.cpu_count() - 1)
    for dataset_idx, dataset in enumerate(datasets):
        logger.info(f"Encoding dataset [{dataset_idx}]")
        all_latent_cache_paths = []
        for _, batch in tqdm(dataset.retrieve_latent_cache_batches(num_workers)):
            batch: list[ItemInfo] = batch  # type: ignore

            # make sure content has 3 channels (drop alpha if present)
            for item in batch:
                if isinstance(item.content, np.ndarray):
                    if item.content.shape[-1] == 4:
                        item.content = item.content[..., :3]
                else:
                    item.content = [img[..., :3] if img.shape[-1] == 4 else img for img in item.content]

            # collect expected cache paths and keep only items with missing caches
            filtered_batch = []
            for item in batch:
                if item.frame_count is None:
                    # image dataset: a single cache file per item
                    all_latent_cache_paths.append(item.latent_cache_path)
                    all_existing = os.path.exists(item.latent_cache_path)
                else:
                    # video dataset: one cache file per section
                    latent_f = (item.frame_count - 1) // 4 + 1
                    num_sections = max(1, math.floor((latent_f - 1) / item.fp_latent_window_size))  # min 1 section
                    all_existing = True
                    for sec in range(num_sections):
                        p = append_section_idx_to_latent_cache_path(item.latent_cache_path, sec)
                        all_latent_cache_paths.append(p)
                        all_existing = all_existing and os.path.exists(p)

                if not all_existing:  # if any section cache is missing
                    filtered_batch.append(item)

            if args.skip_existing:
                if len(filtered_batch) == 0:  # all sections exist
                    logger.info(f"All sections exist for {batch[0].item_key}, skipping")
                    continue
                batch = filtered_batch  # update batch to only missing sections

            bs = args.batch_size if args.batch_size is not None else len(batch)
            for start in range(0, len(batch), bs):  # was `i`: shadowed the dataset index
                encode(batch[start : start + bs])

        # normalize paths for the membership test below
        all_latent_cache_paths = {os.path.normpath(p) for p in all_latent_cache_paths}

        # remove old cache files not referenced by the dataset
        for cache_file in dataset.get_all_latent_cache_files():
            if os.path.normpath(cache_file) not in all_latent_cache_paths:
                if args.keep_cache:
                    logger.info(f"Keep cache file not in the dataset: {cache_file}")
                else:
                    os.remove(cache_file)
                    logger.info(f"Removed old cache file: {cache_file}")
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
# Script entry point: parse CLI args and run FramePack latent caching.
if __name__ == "__main__":
    main()
|
src/musubi_tuner/frame_pack/__init__.py
ADDED
|
File without changes
|
src/musubi_tuner/frame_pack/bucket_tools.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Aspect-ratio buckets keyed by base resolution; each entry is (height, width).
bucket_options = {
    640: [
        (416, 960),
        (448, 864),
        (480, 832),
        (512, 768),
        (544, 704),
        (576, 672),
        (608, 640),
        (640, 608),
        (672, 576),
        (704, 544),
        (768, 512),
        (832, 480),
        (864, 448),
        (960, 416),
    ],
}


def find_nearest_bucket(h, w, resolution=640):
    """Return the bucket (height, width) whose aspect ratio best matches h/w.

    |h*bw - w*bh| is zero exactly when h/w == bh/bw, so minimizing it picks the
    closest aspect ratio. Ties resolve to the later-listed bucket, matching the
    original `<=` comparison.
    """
    _, best_bucket = min(
        ((abs(h * bucket_w - w * bucket_h), -idx), (bucket_h, bucket_w))
        for idx, (bucket_h, bucket_w) in enumerate(bucket_options[resolution])
    )
    return best_bucket
|
src/musubi_tuner/frame_pack/clip_vision.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def hf_clip_vision_encode(image, feature_extractor, image_encoder):
    """Run the vision encoder on a single uint8 RGB image of shape (H, W, 3).

    Returns the raw encoder output object (last_hidden_state etc.).
    """
    # strict input contract: numpy, HWC, 3 channels, uint8
    assert isinstance(image, np.ndarray)
    assert image.ndim == 3 and image.shape[2] == 3
    assert image.dtype == np.uint8

    inputs = feature_extractor.preprocess(images=image, return_tensors="pt")
    inputs = inputs.to(device=image_encoder.device, dtype=image_encoder.dtype)
    return image_encoder(**inputs)
|
src/musubi_tuner/frame_pack/framepack_utils.py
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
from typing import Optional, Union
|
| 4 |
+
|
| 5 |
+
from accelerate import init_empty_weights
|
| 6 |
+
import torch
|
| 7 |
+
from safetensors.torch import load_file
|
| 8 |
+
from transformers import (
|
| 9 |
+
LlamaTokenizerFast,
|
| 10 |
+
LlamaConfig,
|
| 11 |
+
LlamaModel,
|
| 12 |
+
CLIPTokenizer,
|
| 13 |
+
CLIPTextModel,
|
| 14 |
+
CLIPConfig,
|
| 15 |
+
SiglipImageProcessor,
|
| 16 |
+
SiglipVisionModel,
|
| 17 |
+
SiglipVisionConfig,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
from musubi_tuner.utils.safetensors_utils import load_split_weights
|
| 21 |
+
from musubi_tuner.hunyuan_model.vae import load_vae as hunyuan_load_vae
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
logging.basicConfig(level=logging.INFO)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def load_vae(
    vae_path: str,
    vae_chunk_size: Optional[int],
    vae_spatial_tile_sample_min_size: Optional[int],
    vae_tiling: bool,
    device: Union[str, torch.device],
):
    """Load the HunyuanVideo causal-3D VAE and configure chunking/tiling.

    Accepts either a single safetensors file or a diffusers-style directory
    (the weights are then read from its 'vae' subfolder).
    """
    if os.path.isdir(vae_path):
        # diffusers-style directory layout
        vae_path = os.path.join(vae_path, "vae", "diffusion_pytorch_model.safetensors")

    vae_dtype = torch.float16  # if vae_dtype is None else str_to_dtype(vae_dtype)
    vae, _, s_ratio, t_ratio = hunyuan_load_vae(vae_dtype=vae_dtype, device=device, vae_path=vae_path)
    vae.eval()
    # vae_kwargs = {"s_ratio": s_ratio, "t_ratio": t_ratio}

    # propagate chunk_size to every CausalConv3d if requested
    if vae_chunk_size is not None:
        vae.set_chunk_size_for_causal_conv_3d(vae_chunk_size)
        logger.info(f"Set chunk_size to {vae_chunk_size} for CausalConv3d")

    if vae_spatial_tile_sample_min_size is not None:
        # explicit tile size also enables spatial tiling
        vae.enable_spatial_tiling(True)
        vae.tile_sample_min_size = vae_spatial_tile_sample_min_size
        vae.tile_latent_min_size = vae_spatial_tile_sample_min_size // 8
        logger.info(f"Enabled spatial tiling with min size {vae_spatial_tile_sample_min_size}")
    elif vae_tiling:
        vae.enable_spatial_tiling(True)

    return vae
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# region Text Encoders
|
| 64 |
+
|
| 65 |
+
# Text Encoder configs are copied from HunyuanVideo repo
|
| 66 |
+
|
| 67 |
+
# LLaMA text-encoder config, copied verbatim from the HunyuanVideo repo.
# Used by load_text_encoder1 when loading from a single weights file.
LLAMA_CONFIG = {
    "architectures": ["LlamaModel"],
    "attention_bias": False,
    "attention_dropout": 0.0,
    "bos_token_id": 128000,
    "eos_token_id": 128001,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 4096,
    "initializer_range": 0.02,
    "intermediate_size": 14336,
    "max_position_embeddings": 8192,
    "mlp_bias": False,
    "model_type": "llama",
    "num_attention_heads": 32,
    "num_hidden_layers": 32,
    "num_key_value_heads": 8,
    "pretraining_tp": 1,
    "rms_norm_eps": 1e-05,
    "rope_scaling": None,
    "rope_theta": 500000.0,
    "tie_word_embeddings": False,
    "torch_dtype": "float16",
    "transformers_version": "4.46.3",
    "use_cache": True,
    "vocab_size": 128320,
}

# CLIP-L text-encoder config, copied verbatim from the HunyuanVideo repo.
# Used by load_text_encoder2 when loading from a single weights file.
CLIP_CONFIG = {
    # "_name_or_path": "/raid/aryan/llava-llama-3-8b-v1_1-extracted/text_encoder_2",
    "architectures": ["CLIPTextModel"],
    "attention_dropout": 0.0,
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "hidden_act": "quick_gelu",
    "hidden_size": 768,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "layer_norm_eps": 1e-05,
    "max_position_embeddings": 77,
    "model_type": "clip_text_model",
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "pad_token_id": 1,
    "projection_dim": 768,
    "torch_dtype": "float16",
    "transformers_version": "4.48.0.dev0",
    "vocab_size": 49408,
}
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def load_text_encoder1(
    args, fp8_llm: Optional[bool] = False, device: Optional[Union[str, torch.device]] = None
) -> tuple[LlamaTokenizerFast, LlamaModel]:
    """Load the LLaMA tokenizer and text encoder 1, optionally cast to fp8.

    Supports a diffusers-style directory (configs read from disk) or a single /
    split safetensors file (model built from the bundled LLAMA_CONFIG). When
    ``fp8_llm`` is set, weights are cast to float8_e4m3fn and RMSNorm/Embedding
    are patched so normalization still runs in higher precision.
    """
    # single file, split file and directory (contains 'text_encoder') support
    logger.info("Loading text encoder 1 tokenizer")
    tokenizer1 = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder="tokenizer")

    logger.info(f"Loading text encoder 1 from {args.text_encoder1}")
    if os.path.isdir(args.text_encoder1):
        # load from directory, configs are in the directory
        text_encoder1 = LlamaModel.from_pretrained(args.text_encoder1, subfolder="text_encoder", torch_dtype=torch.float16)
    else:
        # load from file, we create the model with the appropriate config
        config = LlamaConfig(**LLAMA_CONFIG)
        with init_empty_weights():
            text_encoder1 = LlamaModel._from_config(config, torch_dtype=torch.float16)

        state_dict = load_split_weights(args.text_encoder1)

        # support weights from ComfyUI: strip the "model." prefix and drop
        # tokenizer / lm_head entries that LlamaModel does not expect
        if "model.embed_tokens.weight" in state_dict:
            for key in list(state_dict.keys()):
                if key.startswith("model."):
                    new_key = key.replace("model.", "")
                    state_dict[new_key] = state_dict[key]
                    del state_dict[key]
            if "tokenizer" in state_dict:
                state_dict.pop("tokenizer")
            if "lm_head.weight" in state_dict:
                state_dict.pop("lm_head.weight")

        # # support weights from ComfyUI
        # if "tokenizer" in state_dict:
        #     state_dict.pop("tokenizer")

        text_encoder1.load_state_dict(state_dict, strict=True, assign=True)

    if fp8_llm:
        org_dtype = text_encoder1.dtype
        logger.info(f"Moving and casting text encoder to {device} and torch.float8_e4m3fn")
        text_encoder1.to(device=device, dtype=torch.float8_e4m3fn)

        # prepare LLM for fp8
        def prepare_fp8(llama_model: LlamaModel, target_dtype):
            # Replacement RMSNorm forward: compute the variance in float32,
            # then cast the (fp8) weight back to the input dtype for the scale.
            def forward_hook(module):
                def forward(hidden_states):
                    input_dtype = hidden_states.dtype
                    hidden_states = hidden_states.to(torch.float32)
                    variance = hidden_states.pow(2).mean(-1, keepdim=True)
                    hidden_states = hidden_states * torch.rsqrt(variance + module.variance_epsilon)
                    return module.weight.to(input_dtype) * hidden_states.to(input_dtype)

                return forward

            for module in llama_model.modules():
                # embeddings stay in the original dtype (fp8 lookup is not supported)
                if module.__class__.__name__ in ["Embedding"]:
                    # print("set", module.__class__.__name__, "to", target_dtype)
                    module.to(target_dtype)
                if module.__class__.__name__ in ["LlamaRMSNorm"]:
                    # print("set", module.__class__.__name__, "hooks")
                    module.forward = forward_hook(module)

        prepare_fp8(text_encoder1, org_dtype)
    else:
        text_encoder1.to(device)

    text_encoder1.eval()
    return tokenizer1, text_encoder1
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def load_text_encoder2(args) -> tuple[CLIPTokenizer, CLIPTextModel]:
    """Load the CLIP-L tokenizer and text encoder 2 (single file or directory)."""
    logger.info("Loading text encoder 2 tokenizer")
    tokenizer2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder="tokenizer_2")

    logger.info(f"Loading text encoder 2 from {args.text_encoder2}")
    if os.path.isdir(args.text_encoder2):
        # diffusers-style directory: configs live alongside the weights
        text_encoder2 = CLIPTextModel.from_pretrained(args.text_encoder2, subfolder="text_encoder_2", torch_dtype=torch.float16)
    else:
        # single safetensors file: build from the bundled config, then load weights
        with init_empty_weights():
            text_encoder2 = CLIPTextModel._from_config(CLIPConfig(**CLIP_CONFIG), torch_dtype=torch.float16)
        text_encoder2.load_state_dict(load_file(args.text_encoder2), strict=True, assign=True)

    text_encoder2.eval()
    return tokenizer2, text_encoder2
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
# endregion
|
| 214 |
+
|
| 215 |
+
# region image encoder
|
| 216 |
+
|
| 217 |
+
# Siglip configs are copied from FramePack repo
|
| 218 |
+
# SigLIP image-processor config, copied verbatim from the FramePack repo.
# Used by load_image_encoders to build the feature extractor without a download.
FEATURE_EXTRACTOR_CONFIG = {
    "do_convert_rgb": None,
    "do_normalize": True,
    "do_rescale": True,
    "do_resize": True,
    "image_mean": [0.5, 0.5, 0.5],
    "image_processor_type": "SiglipImageProcessor",
    "image_std": [0.5, 0.5, 0.5],
    "processor_class": "SiglipProcessor",
    "resample": 3,
    "rescale_factor": 0.00392156862745098,
    "size": {"height": 384, "width": 384},
}
# SigLIP vision-model config (FLUX.1-Redux image encoder), copied verbatim
# from the FramePack repo. Used when loading weights from a single file.
IMAGE_ENCODER_CONFIG = {
    "_name_or_path": "/home/lvmin/.cache/huggingface/hub/models--black-forest-labs--FLUX.1-Redux-dev/snapshots/1282f955f706b5240161278f2ef261d2a29ad649/image_encoder",
    "architectures": ["SiglipVisionModel"],
    "attention_dropout": 0.0,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "image_size": 384,
    "intermediate_size": 4304,
    "layer_norm_eps": 1e-06,
    "model_type": "siglip_vision_model",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 27,
    "patch_size": 14,
    "torch_dtype": "bfloat16",
    "transformers_version": "4.46.2",
}
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
# TODO do not take args, take only necessary parameters
def load_image_encoders(args):
    """Build the SigLIP feature extractor and load the vision encoder weights."""
    logger.info("Loading image encoder feature extractor")
    feature_extractor = SiglipImageProcessor(**FEATURE_EXTRACTOR_CONFIG)

    # single file, split file and directory (contains 'image_encoder') support
    logger.info(f"Loading image encoder from {args.image_encoder}")
    if os.path.isdir(args.image_encoder):
        # directory layout: configs are read from the directory itself
        image_encoder = SiglipVisionModel.from_pretrained(args.image_encoder, subfolder="image_encoder", torch_dtype=torch.float16)
    else:
        # single file: instantiate from the bundled config, then load the weights
        with init_empty_weights():
            image_encoder = SiglipVisionModel._from_config(SiglipVisionConfig(**IMAGE_ENCODER_CONFIG), torch_dtype=torch.float16)
        image_encoder.load_state_dict(load_file(args.image_encoder), strict=True, assign=True)

    image_encoder.eval()
    return feature_extractor, image_encoder
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
# endregion
|
src/musubi_tuner/frame_pack/hunyuan.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# original code: https://github.com/lllyasviel/FramePack
|
| 2 |
+
# original license: Apache-2.0
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
# from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video import DEFAULT_PROMPT_TEMPLATE
|
| 7 |
+
# from diffusers_helper.utils import crop_or_pad_yield_mask
|
| 8 |
+
from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D
|
| 9 |
+
from musubi_tuner.hunyuan_model.text_encoder import PROMPT_TEMPLATE
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@torch.no_grad()
def encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2, max_length=256, custom_system_prompt=None):
    """Encode *prompt* with LLaMA (text_encoder) and CLIP-L (text_encoder_2).

    Returns ``(llama_vec, clip_l_pooler)``: the LLaMA hidden states from the
    3rd-to-last layer with the system-prompt prefix cropped off, and CLIP-L's
    pooled output. ``crop_start`` is the token length of the system prompt so
    only the user-prompt tokens remain in the result.
    """
    assert isinstance(prompt, str)

    prompt = [prompt]

    # LLAMA

    # We can verify crop_start by checking the token count of the prompt:
    # custom_system_prompt = (
    #     "Describe the video by detailing the following aspects: "
    #     "1. The main content and theme of the video."
    #     "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
    #     "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
    #     "4. background environment, light, style and atmosphere."
    #     "5. camera angles, movements, and transitions used in the video:"
    # )
    if custom_system_prompt is None:
        prompt_llama = [PROMPT_TEMPLATE["dit-llm-encode-video"]["template"].format(p) for p in prompt]
        crop_start = PROMPT_TEMPLATE["dit-llm-encode-video"]["crop_start"]
    else:
        # count tokens for custom_system_prompt so the crop offset matches it
        full_prompt = f"<|start_header_id|>system<|end_header_id|>\n\n{custom_system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n"
        print(f"Custom system prompt: {full_prompt}")
        system_prompt_tokens = tokenizer(full_prompt, return_tensors="pt", truncation=True).input_ids[0].shape[0]
        print(f"Custom system prompt token count: {system_prompt_tokens}")
        prompt_llama = [full_prompt + p + "<|eot_id|>" for p in prompt]
        crop_start = system_prompt_tokens

    llama_inputs = tokenizer(
        prompt_llama,
        padding="max_length",
        max_length=max_length + crop_start,
        truncation=True,
        return_tensors="pt",
        return_length=False,
        return_overflowing_tokens=False,
        return_attention_mask=True,
    )

    llama_input_ids = llama_inputs.input_ids.to(text_encoder.device)
    llama_attention_mask = llama_inputs.attention_mask.to(text_encoder.device)
    llama_attention_length = int(llama_attention_mask.sum())

    llama_outputs = text_encoder(
        input_ids=llama_input_ids,
        attention_mask=llama_attention_mask,
        output_hidden_states=True,
    )

    # keep only non-padded user-prompt tokens from the 3rd-to-last hidden layer
    llama_vec = llama_outputs.hidden_states[-3][:, crop_start:llama_attention_length]
    # llama_vec_remaining = llama_outputs.hidden_states[-3][:, llama_attention_length:]
    llama_attention_mask = llama_attention_mask[:, crop_start:llama_attention_length]

    # sanity check: after cropping, every remaining position must be a real token
    assert torch.all(llama_attention_mask.bool())

    # CLIP

    clip_l_input_ids = tokenizer_2(
        prompt,
        padding="max_length",
        max_length=77,
        truncation=True,
        return_overflowing_tokens=False,
        return_length=False,
        return_tensors="pt",
    ).input_ids
    clip_l_pooler = text_encoder_2(clip_l_input_ids.to(text_encoder_2.device), output_hidden_states=False).pooler_output

    return llama_vec, clip_l_pooler
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@torch.no_grad()
|
| 85 |
+
def vae_decode_fake(latents):
|
| 86 |
+
latent_rgb_factors = [
|
| 87 |
+
[-0.0395, -0.0331, 0.0445],
|
| 88 |
+
[0.0696, 0.0795, 0.0518],
|
| 89 |
+
[0.0135, -0.0945, -0.0282],
|
| 90 |
+
[0.0108, -0.0250, -0.0765],
|
| 91 |
+
[-0.0209, 0.0032, 0.0224],
|
| 92 |
+
[-0.0804, -0.0254, -0.0639],
|
| 93 |
+
[-0.0991, 0.0271, -0.0669],
|
| 94 |
+
[-0.0646, -0.0422, -0.0400],
|
| 95 |
+
[-0.0696, -0.0595, -0.0894],
|
| 96 |
+
[-0.0799, -0.0208, -0.0375],
|
| 97 |
+
[0.1166, 0.1627, 0.0962],
|
| 98 |
+
[0.1165, 0.0432, 0.0407],
|
| 99 |
+
[-0.2315, -0.1920, -0.1355],
|
| 100 |
+
[-0.0270, 0.0401, -0.0821],
|
| 101 |
+
[-0.0616, -0.0997, -0.0727],
|
| 102 |
+
[0.0249, -0.0469, -0.1703],
|
| 103 |
+
] # From comfyui
|
| 104 |
+
|
| 105 |
+
latent_rgb_factors_bias = [0.0259, -0.0192, -0.0761]
|
| 106 |
+
|
| 107 |
+
weight = torch.tensor(latent_rgb_factors, device=latents.device, dtype=latents.dtype).transpose(0, 1)[:, :, None, None, None]
|
| 108 |
+
bias = torch.tensor(latent_rgb_factors_bias, device=latents.device, dtype=latents.dtype)
|
| 109 |
+
|
| 110 |
+
images = torch.nn.functional.conv3d(latents, weight, bias=bias, stride=1, padding=0, dilation=1, groups=1)
|
| 111 |
+
images = images.clamp(0.0, 1.0)
|
| 112 |
+
|
| 113 |
+
return images
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@torch.no_grad()
|
| 117 |
+
def vae_decode(latents, vae, image_mode=False) -> torch.Tensor:
|
| 118 |
+
latents = latents / vae.config.scaling_factor
|
| 119 |
+
|
| 120 |
+
if not image_mode:
|
| 121 |
+
image = vae.decode(latents.to(device=vae.device, dtype=vae.dtype)).sample
|
| 122 |
+
else:
|
| 123 |
+
latents = latents.to(device=vae.device, dtype=vae.dtype).unbind(2)
|
| 124 |
+
image = [vae.decode(l.unsqueeze(2)).sample for l in latents]
|
| 125 |
+
image = torch.cat(image, dim=2)
|
| 126 |
+
|
| 127 |
+
return image
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@torch.no_grad()
|
| 131 |
+
def vae_encode(image, vae: AutoencoderKLCausal3D) -> torch.Tensor:
|
| 132 |
+
latents = vae.encode(image.to(device=vae.device, dtype=vae.dtype)).latent_dist.sample()
|
| 133 |
+
latents = latents * vae.config.scaling_factor
|
| 134 |
+
return latents
|
src/musubi_tuner/frame_pack/hunyuan_video_packed.py
ADDED
|
@@ -0,0 +1,2141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# original code: https://github.com/lllyasviel/FramePack
|
| 2 |
+
# original license: Apache-2.0
|
| 3 |
+
|
| 4 |
+
import glob
|
| 5 |
+
import math
|
| 6 |
+
import numbers
|
| 7 |
+
import os
|
| 8 |
+
from types import SimpleNamespace
|
| 9 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import einops
|
| 13 |
+
import torch.nn as nn
|
| 14 |
+
import torch.nn.functional as F
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
from musubi_tuner.modules.custom_offloading_utils import ModelOffloader
|
| 18 |
+
from musubi_tuner.utils.lora_utils import load_safetensors_with_lora_and_fp8
|
| 19 |
+
from musubi_tuner.utils.model_utils import create_cpu_offloading_wrapper
|
| 20 |
+
from musubi_tuner.utils.safetensors_utils import load_split_weights
|
| 21 |
+
from musubi_tuner.modules.fp8_optimization_utils import apply_fp8_monkey_patch, optimize_state_dict_with_fp8
|
| 22 |
+
from accelerate import init_empty_weights
|
| 23 |
+
|
| 24 |
+
try:
|
| 25 |
+
# raise NotImplementedError
|
| 26 |
+
from xformers.ops import memory_efficient_attention as xformers_attn_func
|
| 27 |
+
|
| 28 |
+
print("Xformers is installed!")
|
| 29 |
+
except:
|
| 30 |
+
print("Xformers is not installed!")
|
| 31 |
+
xformers_attn_func = None
|
| 32 |
+
|
| 33 |
+
try:
|
| 34 |
+
# raise NotImplementedError
|
| 35 |
+
from flash_attn import flash_attn_varlen_func, flash_attn_func
|
| 36 |
+
|
| 37 |
+
print("Flash Attn is installed!")
|
| 38 |
+
except:
|
| 39 |
+
print("Flash Attn is not installed!")
|
| 40 |
+
flash_attn_varlen_func = None
|
| 41 |
+
flash_attn_func = None
|
| 42 |
+
|
| 43 |
+
try:
|
| 44 |
+
# raise NotImplementedError
|
| 45 |
+
from sageattention import sageattn_varlen, sageattn
|
| 46 |
+
|
| 47 |
+
print("Sage Attn is installed!")
|
| 48 |
+
except:
|
| 49 |
+
print("Sage Attn is not installed!")
|
| 50 |
+
sageattn_varlen = None
|
| 51 |
+
sageattn = None
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
import logging
|
| 55 |
+
|
| 56 |
+
logger = logging.getLogger(__name__)
|
| 57 |
+
logging.basicConfig(level=logging.INFO)
|
| 58 |
+
|
| 59 |
+
# region diffusers
|
| 60 |
+
|
| 61 |
+
# copied from diffusers with some modifications to minimize dependencies
|
| 62 |
+
# original code: https://github.com/huggingface/diffusers/
|
| 63 |
+
# original license: Apache-2.0
|
| 64 |
+
|
| 65 |
+
ACT2CLS = {
|
| 66 |
+
"swish": nn.SiLU,
|
| 67 |
+
"silu": nn.SiLU,
|
| 68 |
+
"mish": nn.Mish,
|
| 69 |
+
"gelu": nn.GELU,
|
| 70 |
+
"relu": nn.ReLU,
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def get_activation(act_fn: str) -> nn.Module:
|
| 75 |
+
"""Helper function to get activation function from string.
|
| 76 |
+
|
| 77 |
+
Args:
|
| 78 |
+
act_fn (str): Name of activation function.
|
| 79 |
+
|
| 80 |
+
Returns:
|
| 81 |
+
nn.Module: Activation function.
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
act_fn = act_fn.lower()
|
| 85 |
+
if act_fn in ACT2CLS:
|
| 86 |
+
return ACT2CLS[act_fn]()
|
| 87 |
+
else:
|
| 88 |
+
raise ValueError(f"activation function {act_fn} not found in ACT2FN mapping {list(ACT2CLS.keys())}")
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def get_timestep_embedding(
|
| 92 |
+
timesteps: torch.Tensor,
|
| 93 |
+
embedding_dim: int,
|
| 94 |
+
flip_sin_to_cos: bool = False,
|
| 95 |
+
downscale_freq_shift: float = 1,
|
| 96 |
+
scale: float = 1,
|
| 97 |
+
max_period: int = 10000,
|
| 98 |
+
):
|
| 99 |
+
"""
|
| 100 |
+
This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
|
| 101 |
+
|
| 102 |
+
Args
|
| 103 |
+
timesteps (torch.Tensor):
|
| 104 |
+
a 1-D Tensor of N indices, one per batch element. These may be fractional.
|
| 105 |
+
embedding_dim (int):
|
| 106 |
+
the dimension of the output.
|
| 107 |
+
flip_sin_to_cos (bool):
|
| 108 |
+
Whether the embedding order should be `cos, sin` (if True) or `sin, cos` (if False)
|
| 109 |
+
downscale_freq_shift (float):
|
| 110 |
+
Controls the delta between frequencies between dimensions
|
| 111 |
+
scale (float):
|
| 112 |
+
Scaling factor applied to the embeddings.
|
| 113 |
+
max_period (int):
|
| 114 |
+
Controls the maximum frequency of the embeddings
|
| 115 |
+
Returns
|
| 116 |
+
torch.Tensor: an [N x dim] Tensor of positional embeddings.
|
| 117 |
+
"""
|
| 118 |
+
assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
|
| 119 |
+
|
| 120 |
+
half_dim = embedding_dim // 2
|
| 121 |
+
exponent = -math.log(max_period) * torch.arange(start=0, end=half_dim, dtype=torch.float32, device=timesteps.device)
|
| 122 |
+
exponent = exponent / (half_dim - downscale_freq_shift)
|
| 123 |
+
|
| 124 |
+
emb = torch.exp(exponent)
|
| 125 |
+
emb = timesteps[:, None].float() * emb[None, :]
|
| 126 |
+
|
| 127 |
+
# scale embeddings
|
| 128 |
+
emb = scale * emb
|
| 129 |
+
|
| 130 |
+
# concat sine and cosine embeddings
|
| 131 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
|
| 132 |
+
|
| 133 |
+
# flip sine and cosine embeddings
|
| 134 |
+
if flip_sin_to_cos:
|
| 135 |
+
emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
|
| 136 |
+
|
| 137 |
+
# zero pad
|
| 138 |
+
if embedding_dim % 2 == 1:
|
| 139 |
+
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
|
| 140 |
+
return emb
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class TimestepEmbedding(nn.Module):
|
| 144 |
+
def __init__(
|
| 145 |
+
self,
|
| 146 |
+
in_channels: int,
|
| 147 |
+
time_embed_dim: int,
|
| 148 |
+
act_fn: str = "silu",
|
| 149 |
+
out_dim: int = None,
|
| 150 |
+
post_act_fn: Optional[str] = None,
|
| 151 |
+
cond_proj_dim=None,
|
| 152 |
+
sample_proj_bias=True,
|
| 153 |
+
):
|
| 154 |
+
super().__init__()
|
| 155 |
+
|
| 156 |
+
self.linear_1 = nn.Linear(in_channels, time_embed_dim, sample_proj_bias)
|
| 157 |
+
|
| 158 |
+
if cond_proj_dim is not None:
|
| 159 |
+
self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False)
|
| 160 |
+
else:
|
| 161 |
+
self.cond_proj = None
|
| 162 |
+
|
| 163 |
+
self.act = get_activation(act_fn)
|
| 164 |
+
|
| 165 |
+
if out_dim is not None:
|
| 166 |
+
time_embed_dim_out = out_dim
|
| 167 |
+
else:
|
| 168 |
+
time_embed_dim_out = time_embed_dim
|
| 169 |
+
self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out, sample_proj_bias)
|
| 170 |
+
|
| 171 |
+
if post_act_fn is None:
|
| 172 |
+
self.post_act = None
|
| 173 |
+
else:
|
| 174 |
+
self.post_act = get_activation(post_act_fn)
|
| 175 |
+
|
| 176 |
+
def forward(self, sample, condition=None):
|
| 177 |
+
if condition is not None:
|
| 178 |
+
sample = sample + self.cond_proj(condition)
|
| 179 |
+
sample = self.linear_1(sample)
|
| 180 |
+
|
| 181 |
+
if self.act is not None:
|
| 182 |
+
sample = self.act(sample)
|
| 183 |
+
|
| 184 |
+
sample = self.linear_2(sample)
|
| 185 |
+
|
| 186 |
+
if self.post_act is not None:
|
| 187 |
+
sample = self.post_act(sample)
|
| 188 |
+
return sample
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class Timesteps(nn.Module):
|
| 192 |
+
def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float, scale: int = 1):
|
| 193 |
+
super().__init__()
|
| 194 |
+
self.num_channels = num_channels
|
| 195 |
+
self.flip_sin_to_cos = flip_sin_to_cos
|
| 196 |
+
self.downscale_freq_shift = downscale_freq_shift
|
| 197 |
+
self.scale = scale
|
| 198 |
+
|
| 199 |
+
def forward(self, timesteps):
|
| 200 |
+
t_emb = get_timestep_embedding(
|
| 201 |
+
timesteps,
|
| 202 |
+
self.num_channels,
|
| 203 |
+
flip_sin_to_cos=self.flip_sin_to_cos,
|
| 204 |
+
downscale_freq_shift=self.downscale_freq_shift,
|
| 205 |
+
scale=self.scale,
|
| 206 |
+
)
|
| 207 |
+
return t_emb
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class FP32SiLU(nn.Module):
|
| 211 |
+
r"""
|
| 212 |
+
SiLU activation function with input upcasted to torch.float32.
|
| 213 |
+
"""
|
| 214 |
+
|
| 215 |
+
def __init__(self):
|
| 216 |
+
super().__init__()
|
| 217 |
+
|
| 218 |
+
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
|
| 219 |
+
return F.silu(inputs.float(), inplace=False).to(inputs.dtype)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
class GELU(nn.Module):
|
| 223 |
+
r"""
|
| 224 |
+
GELU activation function with tanh approximation support with `approximate="tanh"`.
|
| 225 |
+
|
| 226 |
+
Parameters:
|
| 227 |
+
dim_in (`int`): The number of channels in the input.
|
| 228 |
+
dim_out (`int`): The number of channels in the output.
|
| 229 |
+
approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
|
| 230 |
+
bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
|
| 231 |
+
"""
|
| 232 |
+
|
| 233 |
+
def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True):
|
| 234 |
+
super().__init__()
|
| 235 |
+
self.proj = nn.Linear(dim_in, dim_out, bias=bias)
|
| 236 |
+
self.approximate = approximate
|
| 237 |
+
|
| 238 |
+
def gelu(self, gate: torch.Tensor) -> torch.Tensor:
|
| 239 |
+
# if gate.device.type == "mps" and is_torch_version("<", "2.0.0"):
|
| 240 |
+
# # fp16 gelu not supported on mps before torch 2.0
|
| 241 |
+
# return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
|
| 242 |
+
return F.gelu(gate, approximate=self.approximate)
|
| 243 |
+
|
| 244 |
+
def forward(self, hidden_states):
|
| 245 |
+
hidden_states = self.proj(hidden_states)
|
| 246 |
+
hidden_states = self.gelu(hidden_states)
|
| 247 |
+
return hidden_states
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
class PixArtAlphaTextProjection(nn.Module):
|
| 251 |
+
"""
|
| 252 |
+
Projects caption embeddings. Also handles dropout for classifier-free guidance.
|
| 253 |
+
|
| 254 |
+
Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
|
| 255 |
+
"""
|
| 256 |
+
|
| 257 |
+
def __init__(self, in_features, hidden_size, out_features=None, act_fn="gelu_tanh"):
|
| 258 |
+
super().__init__()
|
| 259 |
+
if out_features is None:
|
| 260 |
+
out_features = hidden_size
|
| 261 |
+
self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True)
|
| 262 |
+
if act_fn == "gelu_tanh":
|
| 263 |
+
self.act_1 = nn.GELU(approximate="tanh")
|
| 264 |
+
elif act_fn == "silu":
|
| 265 |
+
self.act_1 = nn.SiLU()
|
| 266 |
+
elif act_fn == "silu_fp32":
|
| 267 |
+
self.act_1 = FP32SiLU()
|
| 268 |
+
else:
|
| 269 |
+
raise ValueError(f"Unknown activation function: {act_fn}")
|
| 270 |
+
self.linear_2 = nn.Linear(in_features=hidden_size, out_features=out_features, bias=True)
|
| 271 |
+
|
| 272 |
+
def forward(self, caption):
|
| 273 |
+
hidden_states = self.linear_1(caption)
|
| 274 |
+
hidden_states = self.act_1(hidden_states)
|
| 275 |
+
hidden_states = self.linear_2(hidden_states)
|
| 276 |
+
return hidden_states
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class LayerNormFramePack(nn.LayerNorm):
|
| 280 |
+
# casting to dtype of input tensor is added
|
| 281 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 282 |
+
return torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps).to(x)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
class FP32LayerNormFramePack(nn.LayerNorm):
|
| 286 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 287 |
+
origin_dtype = x.dtype
|
| 288 |
+
return torch.nn.functional.layer_norm(
|
| 289 |
+
x.float(),
|
| 290 |
+
self.normalized_shape,
|
| 291 |
+
self.weight.float() if self.weight is not None else None,
|
| 292 |
+
self.bias.float() if self.bias is not None else None,
|
| 293 |
+
self.eps,
|
| 294 |
+
).to(origin_dtype)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
class RMSNormFramePack(nn.Module):
|
| 298 |
+
r"""
|
| 299 |
+
RMS Norm as introduced in https://arxiv.org/abs/1910.07467 by Zhang et al.
|
| 300 |
+
|
| 301 |
+
Args:
|
| 302 |
+
dim (`int`): Number of dimensions to use for `weights`. Only effective when `elementwise_affine` is True.
|
| 303 |
+
eps (`float`): Small value to use when calculating the reciprocal of the square-root.
|
| 304 |
+
elementwise_affine (`bool`, defaults to `True`):
|
| 305 |
+
Boolean flag to denote if affine transformation should be applied.
|
| 306 |
+
bias (`bool`, defaults to False): If also training the `bias` param.
|
| 307 |
+
"""
|
| 308 |
+
|
| 309 |
+
def __init__(self, dim, eps: float, elementwise_affine: bool = True, bias: bool = False):
|
| 310 |
+
super().__init__()
|
| 311 |
+
|
| 312 |
+
self.eps = eps
|
| 313 |
+
self.elementwise_affine = elementwise_affine
|
| 314 |
+
|
| 315 |
+
if isinstance(dim, numbers.Integral):
|
| 316 |
+
dim = (dim,)
|
| 317 |
+
|
| 318 |
+
self.dim = torch.Size(dim)
|
| 319 |
+
|
| 320 |
+
self.weight = None
|
| 321 |
+
self.bias = None
|
| 322 |
+
|
| 323 |
+
if elementwise_affine:
|
| 324 |
+
self.weight = nn.Parameter(torch.ones(dim))
|
| 325 |
+
if bias:
|
| 326 |
+
self.bias = nn.Parameter(torch.zeros(dim))
|
| 327 |
+
|
| 328 |
+
def forward(self, hidden_states):
|
| 329 |
+
input_dtype = hidden_states.dtype
|
| 330 |
+
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
|
| 331 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
|
| 332 |
+
|
| 333 |
+
if self.weight is None:
|
| 334 |
+
return hidden_states.to(input_dtype)
|
| 335 |
+
|
| 336 |
+
return hidden_states.to(input_dtype) * self.weight.to(input_dtype)
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
class AdaLayerNormContinuousFramePack(nn.Module):
|
| 340 |
+
r"""
|
| 341 |
+
Adaptive normalization layer with a norm layer (layer_norm or rms_norm).
|
| 342 |
+
|
| 343 |
+
Args:
|
| 344 |
+
embedding_dim (`int`): Embedding dimension to use during projection.
|
| 345 |
+
conditioning_embedding_dim (`int`): Dimension of the input condition.
|
| 346 |
+
elementwise_affine (`bool`, defaults to `True`):
|
| 347 |
+
Boolean flag to denote if affine transformation should be applied.
|
| 348 |
+
eps (`float`, defaults to 1e-5): Epsilon factor.
|
| 349 |
+
bias (`bias`, defaults to `True`): Boolean flag to denote if bias should be use.
|
| 350 |
+
norm_type (`str`, defaults to `"layer_norm"`):
|
| 351 |
+
Normalization layer to use. Values supported: "layer_norm", "rms_norm".
|
| 352 |
+
"""
|
| 353 |
+
|
| 354 |
+
def __init__(
|
| 355 |
+
self,
|
| 356 |
+
embedding_dim: int,
|
| 357 |
+
conditioning_embedding_dim: int,
|
| 358 |
+
# NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters
|
| 359 |
+
# because the output is immediately scaled and shifted by the projected conditioning embeddings.
|
| 360 |
+
# Note that AdaLayerNorm does not let the norm layer have scale and shift parameters.
|
| 361 |
+
# However, this is how it was implemented in the original code, and it's rather likely you should
|
| 362 |
+
# set `elementwise_affine` to False.
|
| 363 |
+
elementwise_affine=True,
|
| 364 |
+
eps=1e-5,
|
| 365 |
+
bias=True,
|
| 366 |
+
norm_type="layer_norm",
|
| 367 |
+
):
|
| 368 |
+
super().__init__()
|
| 369 |
+
self.silu = nn.SiLU()
|
| 370 |
+
self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
|
| 371 |
+
if norm_type == "layer_norm":
|
| 372 |
+
self.norm = LayerNormFramePack(embedding_dim, eps, elementwise_affine, bias)
|
| 373 |
+
elif norm_type == "rms_norm":
|
| 374 |
+
self.norm = RMSNormFramePack(embedding_dim, eps, elementwise_affine)
|
| 375 |
+
else:
|
| 376 |
+
raise ValueError(f"unknown norm_type {norm_type}")
|
| 377 |
+
|
| 378 |
+
def forward(self, x, conditioning_embedding):
|
| 379 |
+
emb = self.linear(self.silu(conditioning_embedding))
|
| 380 |
+
scale, shift = emb.chunk(2, dim=1)
|
| 381 |
+
x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
|
| 382 |
+
return x
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
class LinearActivation(nn.Module):
|
| 386 |
+
def __init__(self, dim_in: int, dim_out: int, bias: bool = True, activation: str = "silu"):
|
| 387 |
+
super().__init__()
|
| 388 |
+
|
| 389 |
+
self.proj = nn.Linear(dim_in, dim_out, bias=bias)
|
| 390 |
+
self.activation = get_activation(activation)
|
| 391 |
+
|
| 392 |
+
def forward(self, hidden_states):
|
| 393 |
+
hidden_states = self.proj(hidden_states)
|
| 394 |
+
return self.activation(hidden_states)
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
            Only "gelu-approximate" and "linear-silu" are supported in this minimal copy.
        final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
        inner_dim (`int`, *optional*): Explicit hidden width; overrides `dim * mult` when given.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
        inner_dim=None,
        bias: bool = True,
    ):
        super().__init__()
        inner_dim = int(dim * mult) if inner_dim is None else inner_dim
        out_features = dim if dim_out is None else dim_out

        # Only the two activations actually used by this model family are supported.
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
        elif activation_fn == "linear-silu":
            act_fn = LinearActivation(dim, inner_dim, bias=bias, activation="silu")
        else:
            raise ValueError(f"Unknown activation function: {activation_fn}")

        # Layer order (act -> dropout -> linear) is fixed so state_dict keys
        # (net.0, net.1, net.2, ...) match pretrained checkpoints.
        layers = [act_fn, nn.Dropout(dropout), nn.Linear(inner_dim, out_features, bias=bias)]
        if final_dropout:
            # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout.
            layers.append(nn.Dropout(dropout))
        self.net = nn.ModuleList(layers)

    def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        # The deprecated diffusers `scale` argument is rejected outright here.
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            raise ValueError("scale is not supported in this version. Please remove it.")
        for layer in self.net:
            hidden_states = layer(hidden_states)
        return hidden_states
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
# @maybe_allow_in_graph
|
| 464 |
+
class Attention(nn.Module):
    r"""
    Minimal copy of Attention class from diffusers.

    Supports self- and cross-attention, optional RMS q/k normalization, and
    optional "added" projections for a separate context (text) stream. The
    actual attention computation is delegated to ``self.processor``.
    """

    def __init__(
        self,
        query_dim: int,
        cross_attention_dim: Optional[int] = None,
        heads: int = 8,
        dim_head: int = 64,
        bias: bool = False,
        qk_norm: Optional[str] = None,
        added_kv_proj_dim: Optional[int] = None,
        eps: float = 1e-5,
        processor: Optional[any] = None,
        out_dim: int = None,
        context_pre_only=None,
        pre_only=False,
    ):
        super().__init__()
        # When out_dim is given it overrides the heads*dim_head inner width.
        self.inner_dim = out_dim if out_dim is not None else dim_head * heads
        self.inner_kv_dim = self.inner_dim  # if kv_heads is None else dim_head * kv_heads
        self.query_dim = query_dim
        self.use_bias = bias
        # cross_attention_dim falls back to query_dim (self-attention).
        self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
        self.out_dim = out_dim if out_dim is not None else query_dim
        self.out_context_dim = query_dim
        self.context_pre_only = context_pre_only
        self.pre_only = pre_only

        self.scale = dim_head**-0.5
        # heads is derived from out_dim when out_dim is explicitly set.
        self.heads = out_dim // dim_head if out_dim is not None else heads

        self.added_kv_proj_dim = added_kv_proj_dim

        # Only RMS norm (or no norm) for q/k is supported in this minimal copy.
        if qk_norm is None:
            self.norm_q = None
            self.norm_k = None
        elif qk_norm == "rms_norm":
            self.norm_q = RMSNormFramePack(dim_head, eps=eps)
            self.norm_k = RMSNormFramePack(dim_head, eps=eps)
        else:
            raise ValueError(
                f"unknown qk_norm: {qk_norm}. Should be one of None, 'layer_norm', 'fp32_layer_norm', 'layer_norm_across_heads', 'rms_norm', 'rms_norm_across_heads', 'l2'."
            )

        self.to_q = nn.Linear(query_dim, self.inner_dim, bias=bias)
        self.to_k = nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias)
        self.to_v = nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias)

        self.added_proj_bias = True  # added_proj_bias
        # Extra projections for the separate context stream (double-stream blocks).
        if self.added_kv_proj_dim is not None:
            self.add_k_proj = nn.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=True)
            self.add_v_proj = nn.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=True)
            if self.context_pre_only is not None:
                self.add_q_proj = nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True)
        else:
            self.add_q_proj = None
            self.add_k_proj = None
            self.add_v_proj = None

        if not self.pre_only:
            self.to_out = nn.ModuleList([])
            self.to_out.append(nn.Linear(self.inner_dim, self.out_dim, bias=True))
            # self.to_out.append(nn.Dropout(dropout))
            self.to_out.append(nn.Identity())  # dropout=0.0
        else:
            self.to_out = None

        # Context output projection only exists when the context stream continues
        # past this block (context_pre_only explicitly False).
        if self.context_pre_only is not None and not self.context_pre_only:
            self.to_add_out = nn.Linear(self.inner_dim, self.out_context_dim, bias=True)
        else:
            self.to_add_out = None

        if qk_norm is not None and added_kv_proj_dim is not None:
            if qk_norm == "rms_norm":
                self.norm_added_q = RMSNormFramePack(dim_head, eps=eps)
                self.norm_added_k = RMSNormFramePack(dim_head, eps=eps)
            else:
                raise ValueError(f"unknown qk_norm: {qk_norm}. Should be one of `None,'layer_norm','fp32_layer_norm','rms_norm'`")
        else:
            self.norm_added_q = None
            self.norm_added_k = None

        # set attention processor
        # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
        # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
        if processor is None:
            processor = AttnProcessor2_0()
        self.set_processor(processor)

    def set_processor(self, processor: any) -> None:
        """Install the callable that performs the actual attention computation."""
        self.processor = processor

    def get_processor(self) -> any:
        """Return the currently installed attention processor."""
        return self.processor

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        **cross_attention_kwargs,
    ) -> torch.Tensor:
        # Delegate entirely to the installed processor.
        return self.processor(
            self,
            hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )

    def prepare_attention_mask(
        self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3
    ) -> torch.Tensor:
        r"""
        Prepare the attention mask for the attention computation.

        Args:
            attention_mask (`torch.Tensor`):
                The attention mask to prepare.
            target_length (`int`):
                The target length of the attention mask. This is the length of the attention mask after padding.
            batch_size (`int`):
                The batch size, which is used to repeat the attention mask.
            out_dim (`int`, *optional*, defaults to `3`):
                The output dimension of the attention mask. Can be either `3` or `4`.

        Returns:
            `torch.Tensor`: The prepared attention mask.
        """
        head_size = self.heads
        if attention_mask is None:
            return attention_mask

        current_length: int = attention_mask.shape[-1]
        if current_length != target_length:
            if attention_mask.device.type == "mps":
                # HACK: MPS: Does not support padding by greater than dimension of input tensor.
                # Instead, we can manually construct the padding tensor.
                padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length)
                padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device)
                attention_mask = torch.cat([attention_mask, padding], dim=2)
            else:
                # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
                # we want to instead pad by (0, remaining_length), where remaining_length is:
                # remaining_length: int = target_length - current_length
                # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
                attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)

        # Repeat the mask per head so its batch dim matches (batch * heads) (3D)
        # or add an explicit head dim (4D).
        if out_dim == 3:
            if attention_mask.shape[0] < batch_size * head_size:
                attention_mask = attention_mask.repeat_interleave(head_size, dim=0, output_size=attention_mask.shape[0] * head_size)
        elif out_dim == 4:
            attention_mask = attention_mask.unsqueeze(1)
            attention_mask = attention_mask.repeat_interleave(head_size, dim=1, output_size=attention_mask.shape[1] * head_size)

        return attention_mask
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
class AttnProcessor2_0:
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        temb: Optional[torch.Tensor] = None,
        *args,
        **kwargs,
    ) -> torch.Tensor:
        input_ndim = hidden_states.ndim

        # 4D (b, c, h, w) input is flattened to a (b, h*w, c) token sequence
        # and restored to 4D at the end.
        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        query = attn.to_q(hidden_states)
        query_dtype = query.dtype  # store dtype before potentially deleting query

        # Self-attention when no separate encoder states are given.
        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        # (b, seq, inner) -> (b, heads, seq, head_dim)
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False)
        del query, key, value, attention_mask  # free memory

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query_dtype)  # use stored dtype

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        return hidden_states
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
# endregion diffusers
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def pad_for_3d_conv(x, kernel_size):
    """Right-pad the (t, h, w) dims of a (b, c, t, h, w) tensor so each is divisible by the kernel.

    Padding uses 'replicate' mode so the padded frames/rows/columns repeat the
    boundary values instead of introducing zeros.
    """
    _, _, t, h, w = x.shape
    pt, ph, pw = kernel_size
    # -size % k is the amount needed to reach the next multiple of k (0 if already aligned).
    pad_t, pad_h, pad_w = -t % pt, -h % ph, -w % pw
    # F.pad pairs run from the last dim backwards: (w_left, w_right, h_left, h_right, t_left, t_right).
    return torch.nn.functional.pad(x, (0, pad_w, 0, pad_h, 0, pad_t), mode="replicate")
|
| 707 |
+
|
| 708 |
+
|
| 709 |
+
def center_down_sample_3d(x, kernel_size):
    """Downsample a (b, c, t, h, w) tensor by averaging non-overlapping 3D windows.

    NOTE: an earlier variant picked the center element of each window instead
    of averaging; avg-pooling replaced it.
    """
    return torch.nn.functional.avg_pool3d(x, kernel_size, stride=kernel_size)
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
def get_cu_seqlens(text_mask, img_len):
    """Build cumulative sequence boundaries for varlen attention over padded (img + text) slots.

    Each sample occupies a fixed slot of ``max_len = text_seq + img_len`` tokens.
    For sample ``i`` the output marks two boundaries: the end of its valid span
    (``i * max_len + img_len + text_len[i]``) and the end of its padded slot
    (``(i + 1) * max_len``), so padding tokens form their own (ignored) segments.

    Args:
        text_mask: (batch, text_seq) 0/1 mask of valid text tokens.
        img_len: number of image tokens preceding the text in every sample.

    Returns:
        cu_seqlens: int32 tensor of shape (2 * batch + 1,), starting at 0.
        seq_len: per-sample valid length, ``text_len + img_len``.
    """
    batch_size = text_mask.shape[0]
    text_len = text_mask.sum(dim=1)
    max_len = text_mask.shape[1] + img_len

    device = text_mask.device
    cu_seqlens = torch.zeros([2 * batch_size + 1], dtype=torch.int32, device=device)  # ensure device match

    # Vectorized replacement for the original per-sample Python loop:
    # odd entries are valid-span ends, even entries are padded-slot ends.
    idx = torch.arange(batch_size, device=device)
    cu_seqlens[1::2] = (idx * max_len + text_len + img_len).to(torch.int32)
    cu_seqlens[2::2] = ((idx + 1) * max_len).to(torch.int32)

    seq_len = text_len + img_len
    return cu_seqlens, seq_len
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
def apply_rotary_emb_transposed(x, freqs_cis):
    """Apply rotary position embedding to ``x``.

    ``freqs_cis`` packs cos and sin halves along its last dimension; a broadcast
    axis is inserted for the heads dimension of ``x``.
    """
    # Split packed (cos, sin) halves and insert the head-broadcast axis.
    cos, sin = freqs_cis.unsqueeze(-2).chunk(2, dim=-1)
    del freqs_cis
    # Treat consecutive channel pairs as (even, odd) components and build the
    # 90-degree-rotated copy (-odd, even).
    even, odd = x.unflatten(-1, (-1, 2)).unbind(-1)
    rotated = torch.stack([-odd, even], dim=-1).flatten(3)
    del even, odd
    # Rotate in float32 for accuracy, then cast back to the input dtype.
    out = x.float() * cos + rotated.float() * sin
    return out.to(x.dtype)
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
def attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, seq_len, attn_mode=None, split_attn=False):
    """Dispatch attention to sageattn / flash-attn / xformers / torch SDPA.

    Three execution paths:
      1. No varlen metadata (all cu_seqlens/max_seqlen are None): a plain
         fixed-length attention over the full sequences.
      2. ``split_attn``: per-sample loop that attends over each sample's valid
         prefix ``seq_len[i]`` and zero-pads the result back to ``max_seqlen_q``.
      3. Otherwise: true varlen kernels (sageattn/flash only), with batch and
         sequence dims flattened as the varlen APIs require.

    When ``attn_mode`` is None, backends are tried in preference order based on
    which module-level functions were successfully imported.
    """
    # q,k,v: [batch_size, seqlen, heads, head_dim]
    if cu_seqlens_q is None and cu_seqlens_kv is None and max_seqlen_q is None and max_seqlen_kv is None:
        if attn_mode == "sageattn" or attn_mode is None and sageattn is not None:
            x = sageattn(q, k, v, tensor_layout="NHD")
            return x

        if attn_mode == "flash" or attn_mode is None and flash_attn_func is not None:
            x = flash_attn_func(q, k, v)
            return x

        if attn_mode == "xformers" or attn_mode is None and xformers_attn_func is not None:
            x = xformers_attn_func(q, k, v)
            return x

        # Fallback: torch SDPA expects (b, heads, seq, dim), hence the transposes.
        x = torch.nn.functional.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)).transpose(
            1, 2
        )
        return x
    if split_attn:
        # Process one sample at a time over its valid prefix, then pad the
        # output back to max_seqlen_q with zeros so outputs stack cleanly.
        if attn_mode == "sageattn" or attn_mode is None and sageattn is not None:
            x = []
            for i in range(q.size(0)):
                seq_len_i = seq_len[i]
                x_i = sageattn(q[i : i + 1, :seq_len_i], k[i : i + 1, :seq_len_i], v[i : i + 1, :seq_len_i], tensor_layout="NHD")
                if seq_len_i < max_seqlen_q:
                    x_i = torch.nn.functional.pad(x_i, (0, 0, 0, 0, 0, max_seqlen_q - seq_len_i), mode="constant", value=0.0)
                x.append(x_i)
            x = torch.cat(x, dim=0)
            return x

        if attn_mode == "flash" or attn_mode is None and flash_attn_func is not None:
            x = []
            for i in range(q.size(0)):
                seq_len_i = seq_len[i]
                x_i = flash_attn_func(q[i : i + 1, :seq_len_i], k[i : i + 1, :seq_len_i], v[i : i + 1, :seq_len_i])
                if seq_len_i < max_seqlen_q:
                    x_i = torch.nn.functional.pad(x_i, (0, 0, 0, 0, 0, max_seqlen_q - seq_len_i), mode="constant", value=0.0)
                x.append(x_i)
            x = torch.cat(x, dim=0)
            return x

        if attn_mode == "xformers" or attn_mode is None and xformers_attn_func is not None:
            x = []
            for i in range(q.size(0)):
                seq_len_i = seq_len[i]
                x_i = xformers_attn_func(q[i : i + 1, :seq_len_i], k[i : i + 1, :seq_len_i], v[i : i + 1, :seq_len_i])
                if seq_len_i < max_seqlen_q:
                    x_i = torch.nn.functional.pad(x_i, (0, 0, 0, 0, 0, max_seqlen_q - seq_len_i), mode="constant", value=0.0)
                x.append(x_i)
            x = torch.cat(x, dim=0)
            return x

        assert (
            attn_mode is None or attn_mode == "torch"
        ), f"Unsupported attention mode: {attn_mode}. Supported modes: 'sageattn', 'flash', 'xformers', 'torch'."
        # torch SDPA path: layout is (b, heads, seq, dim), so slicing and padding
        # happen on dim 2 here (unlike the NHD backends above).
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        x = []
        for i in range(q.size(0)):
            seq_len_i = seq_len[i]
            x_i = torch.nn.functional.scaled_dot_product_attention(
                q[i : i + 1, :, :seq_len_i], k[i : i + 1, :, :seq_len_i], v[i : i + 1, :, :seq_len_i]
            )
            if seq_len_i < max_seqlen_q:
                x_i = torch.nn.functional.pad(x_i, (0, 0, 0, max_seqlen_q - seq_len_i, 0, 0), mode="constant", value=0.0)
            x.append(x_i)
        x = torch.cat(x, dim=0)
        x = x.transpose(1, 2)
        return x

    # Varlen path: flatten (batch, seq) into one long token dimension as the
    # varlen kernels expect, then restore the batch dim afterwards.
    batch_size = q.shape[0]
    q = q.view(q.shape[0] * q.shape[1], *q.shape[2:])
    k = k.view(k.shape[0] * k.shape[1], *k.shape[2:])
    v = v.view(v.shape[0] * v.shape[1], *v.shape[2:])
    if attn_mode == "sageattn" or attn_mode is None and sageattn_varlen is not None:
        x = sageattn_varlen(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
        del q, k, v  # free memory
    elif attn_mode == "flash" or attn_mode is None and flash_attn_varlen_func is not None:
        x = flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
        del q, k, v  # free memory
    else:
        raise NotImplementedError("No Attn Installed or batch_size > 1 is not supported in this configuration. Try `--split_attn`.")
    x = x.view(batch_size, max_seqlen_q, *x.shape[1:])
    return x
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
class HunyuanAttnProcessorFlashAttnDouble:
    """Attention processor for double-stream blocks.

    Image and text tokens are projected with separate weight sets, concatenated
    for one joint varlen attention pass, then split apart and sent through their
    own output projections.
    """

    def __call__(
        self,
        attn: Attention,
        hidden_states,
        encoder_hidden_states,
        attention_mask,
        image_rotary_emb,
        attn_mode: Optional[str] = None,
        split_attn: Optional[bool] = False,
    ):
        # Here attention_mask is a precomputed varlen descriptor tuple, not a boolean mask.
        cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, seq_len = attention_mask

        # Project image latents
        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)
        del hidden_states  # free memory

        # (batch, seq, inner_dim) -> (batch, seq, heads, head_dim)
        query = query.unflatten(2, (attn.heads, -1))
        key = key.unflatten(2, (attn.heads, -1))
        value = value.unflatten(2, (attn.heads, -1))

        query = attn.norm_q(query)
        key = attn.norm_k(key)

        # RoPE applies to the image tokens only; text tokens are appended without it.
        query = apply_rotary_emb_transposed(query, image_rotary_emb)
        key = apply_rotary_emb_transposed(key, image_rotary_emb)
        del image_rotary_emb  # free memory

        # Project context (text/encoder) embeddings
        encoder_query = attn.add_q_proj(encoder_hidden_states)
        encoder_key = attn.add_k_proj(encoder_hidden_states)
        encoder_value = attn.add_v_proj(encoder_hidden_states)
        txt_length = encoder_hidden_states.shape[1]  # store length before deleting
        del encoder_hidden_states  # free memory

        encoder_query = encoder_query.unflatten(2, (attn.heads, -1))
        encoder_key = encoder_key.unflatten(2, (attn.heads, -1))
        encoder_value = encoder_value.unflatten(2, (attn.heads, -1))

        encoder_query = attn.norm_added_q(encoder_query)
        encoder_key = attn.norm_added_k(encoder_key)

        # Concatenate image and context q, k, v along the sequence dimension
        query = torch.cat([query, encoder_query], dim=1)
        key = torch.cat([key, encoder_key], dim=1)
        value = torch.cat([value, encoder_value], dim=1)
        del encoder_query, encoder_key, encoder_value  # free memory

        hidden_states_attn = attn_varlen_func(
            query,
            key,
            value,
            cu_seqlens_q,
            cu_seqlens_kv,
            max_seqlen_q,
            max_seqlen_kv,
            seq_len,
            attn_mode=attn_mode,
            split_attn=split_attn,
        )
        del query, key, value  # free memory
        # Merge heads back: (batch, seq, heads, head_dim) -> (batch, seq, heads*head_dim)
        hidden_states_attn = hidden_states_attn.flatten(-2)

        # Split the joint sequence back into image (front) and text (tail) parts.
        hidden_states, encoder_hidden_states = hidden_states_attn[:, :-txt_length], hidden_states_attn[:, -txt_length:]
        del hidden_states_attn  # free memory

        # Apply output projections
        hidden_states = attn.to_out[0](hidden_states)
        hidden_states = attn.to_out[1](hidden_states)  # Dropout/Identity
        encoder_hidden_states = attn.to_add_out(encoder_hidden_states)

        return hidden_states, encoder_hidden_states
|
| 907 |
+
|
| 908 |
+
|
| 909 |
+
class HunyuanAttnProcessorFlashAttnSingle:
    """Attention processor for single-stream blocks.

    Image and text tokens are concatenated first and share a single set of
    q/k/v projections; RoPE is applied only to the image portion.
    """

    def __call__(
        self,
        attn: Attention,
        hidden_states,
        encoder_hidden_states,
        attention_mask,
        image_rotary_emb,
        attn_mode: Optional[str] = None,
        split_attn: Optional[bool] = False,
    ):
        # Here attention_mask is a precomputed varlen descriptor tuple, not a boolean mask.
        cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, seq_len = attention_mask
        txt_length = encoder_hidden_states.shape[1]  # Store text length

        # Concatenate image and context inputs
        hidden_states_cat = torch.cat([hidden_states, encoder_hidden_states], dim=1)
        del hidden_states, encoder_hidden_states  # free memory

        # Project concatenated inputs
        query = attn.to_q(hidden_states_cat)
        key = attn.to_k(hidden_states_cat)
        value = attn.to_v(hidden_states_cat)
        del hidden_states_cat  # free memory

        # (batch, seq, inner_dim) -> (batch, seq, heads, head_dim)
        query = query.unflatten(2, (attn.heads, -1))
        key = key.unflatten(2, (attn.heads, -1))
        value = value.unflatten(2, (attn.heads, -1))

        query = attn.norm_q(query)
        key = attn.norm_k(key)

        # Apply RoPE to the image prefix only; the text tail passes through unchanged.
        query = torch.cat([apply_rotary_emb_transposed(query[:, :-txt_length], image_rotary_emb), query[:, -txt_length:]], dim=1)
        key = torch.cat([apply_rotary_emb_transposed(key[:, :-txt_length], image_rotary_emb), key[:, -txt_length:]], dim=1)
        del image_rotary_emb  # free memory

        hidden_states = attn_varlen_func(
            query,
            key,
            value,
            cu_seqlens_q,
            cu_seqlens_kv,
            max_seqlen_q,
            max_seqlen_kv,
            seq_len,
            attn_mode=attn_mode,
            split_attn=split_attn,
        )
        del query, key, value  # free memory
        # Merge heads back: (batch, seq, heads, head_dim) -> (batch, seq, heads*head_dim)
        hidden_states = hidden_states.flatten(-2)

        # Split the joint sequence back into image (front) and text (tail) parts.
        hidden_states, encoder_hidden_states = hidden_states[:, :-txt_length], hidden_states[:, -txt_length:]

        return hidden_states, encoder_hidden_states
|
| 962 |
+
|
| 963 |
+
|
| 964 |
+
class CombinedTimestepGuidanceTextProjEmbeddings(nn.Module):
    """Combines timestep, guidance-scale, and pooled text embeddings into a
    single conditioning vector by summation."""

    def __init__(self, embedding_dim, pooled_projection_dim):
        super().__init__()

        # Attribute names are kept stable so state_dict keys do not change.
        self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
        self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
        self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu")

    def forward(self, timestep, guidance, pooled_projection):
        target_dtype = pooled_projection.dtype
        # Sinusoidal features -> MLP embedding, for both timestep and guidance scale,
        # cast to the text projection's dtype before the embedder.
        timesteps_emb = self.timestep_embedder(self.time_proj(timestep).to(dtype=target_dtype))
        guidance_emb = self.guidance_embedder(self.time_proj(guidance).to(dtype=target_dtype))
        # Sum all three conditioning signals into one vector.
        return timesteps_emb + guidance_emb + self.text_embedder(pooled_projection)
|
| 986 |
+
|
| 987 |
+
|
| 988 |
+
class CombinedTimestepTextProjEmbeddings(nn.Module):
    """Combines timestep and pooled text embeddings into a single conditioning
    vector by summation."""

    def __init__(self, embedding_dim, pooled_projection_dim):
        super().__init__()

        # Attribute names are kept stable so state_dict keys do not change.
        self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
        self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu")

    def forward(self, timestep, pooled_projection):
        # Sinusoidal timestep features -> MLP embedding, cast to the text dtype first.
        timesteps_emb = self.timestep_embedder(self.time_proj(timestep).to(dtype=pooled_projection.dtype))
        return timesteps_emb + self.text_embedder(pooled_projection)
|
| 1005 |
+
|
| 1006 |
+
|
| 1007 |
+
class HunyuanVideoAdaNorm(nn.Module):
    """Adaptive modulation: maps a conditioning embedding to per-sample gates
    for the attention and MLP residual branches.

    Fix: the return annotation previously claimed a 5-tuple, but the method
    returns exactly two tensors; the annotation now matches the code.
    """

    def __init__(self, in_features: int, out_features: Optional[int] = None) -> None:
        super().__init__()

        out_features = out_features or 2 * in_features
        self.linear = nn.Linear(in_features, out_features)
        self.nonlinearity = nn.SiLU()

    def forward(self, temb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (gate_msa, gate_mlp), each shaped (batch, 1, out_features // 2)."""
        temb = self.linear(self.nonlinearity(temb))
        gate_msa, gate_mlp = temb.chunk(2, dim=-1)
        # Insert a sequence axis so the gates broadcast over tokens.
        gate_msa, gate_mlp = gate_msa.unsqueeze(1), gate_mlp.unsqueeze(1)
        return gate_msa, gate_mlp
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
class HunyuanVideoIndividualTokenRefinerBlock(nn.Module):
    """One token-refiner transformer block: pre-norm self-attention and MLP,
    each added back through a conditioning-derived gate."""

    def __init__(
        self,
        num_attention_heads: int,
        attention_head_dim: int,
        mlp_width_ratio: float = 4.0,
        mlp_drop_rate: float = 0.0,
        attention_bias: bool = True,
    ) -> None:
        super().__init__()

        hidden_size = num_attention_heads * attention_head_dim

        self.norm1 = LayerNormFramePack(hidden_size, elementwise_affine=True, eps=1e-6)
        self.attn = Attention(
            query_dim=hidden_size,
            cross_attention_dim=None,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            bias=attention_bias,
        )

        self.norm2 = LayerNormFramePack(hidden_size, elementwise_affine=True, eps=1e-6)
        self.ff = FeedForward(hidden_size, mult=mlp_width_ratio, activation_fn="linear-silu", dropout=mlp_drop_rate)

        # Produces (gate_msa, gate_mlp) from the conditioning embedding.
        self.norm_out = HunyuanVideoAdaNorm(hidden_size, 2 * hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        norm_hidden_states = self.norm1(hidden_states)

        # Self-attention
        attn_output = self.attn(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=None,
            attention_mask=attention_mask,
        )
        del norm_hidden_states  # free memory

        gate_msa, gate_mlp = self.norm_out(temb)
        # Gated residual: hidden_states + attn_output * gate_msa, fused via addcmul.
        hidden_states = torch.addcmul(hidden_states, attn_output, gate_msa)
        del attn_output, gate_msa  # free memory

        ff_output = self.ff(self.norm2(hidden_states))
        # Gated residual for the MLP branch.
        hidden_states = torch.addcmul(hidden_states, ff_output, gate_mlp)
        del ff_output, gate_mlp  # free memory

        return hidden_states
|
| 1074 |
+
|
| 1075 |
+
|
| 1076 |
+
class HunyuanVideoIndividualTokenRefiner(nn.Module):
    """Stack of token-refiner blocks sharing one bidirectional self-attention mask."""

    def __init__(
        self,
        num_attention_heads: int,
        attention_head_dim: int,
        num_layers: int,
        mlp_width_ratio: float = 4.0,
        mlp_drop_rate: float = 0.0,
        attention_bias: bool = True,
    ) -> None:
        super().__init__()

        self.refiner_blocks = nn.ModuleList(
            [
                HunyuanVideoIndividualTokenRefinerBlock(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    mlp_width_ratio=mlp_width_ratio,
                    mlp_drop_rate=mlp_drop_rate,
                    attention_bias=attention_bias,
                )
                for _ in range(num_layers)
            ]
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        temb: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        self_attn_mask = None
        if attention_mask is not None:
            batch_size = attention_mask.shape[0]
            seq_len = attention_mask.shape[1]
            attention_mask = attention_mask.to(hidden_states.device).bool()
            # Build a (batch, 1, seq, seq) mask where (i, j) is True only when
            # both token i and token j are valid.
            self_attn_mask_1 = attention_mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1)
            self_attn_mask_2 = self_attn_mask_1.transpose(2, 3)
            self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool()
            # Keep key position 0 attendable for every query so fully-masked
            # rows don't yield NaNs in the attention softmax.
            self_attn_mask[:, :, :, 0] = True

        for block in self.refiner_blocks:
            hidden_states = block(hidden_states, temb, self_attn_mask)

        return hidden_states
|
| 1121 |
+
|
| 1122 |
+
|
| 1123 |
+
class HunyuanVideoTokenRefiner(nn.Module):
    """Projects text embeddings and refines them under timestep + pooled-text conditioning."""

    def __init__(
        self,
        in_channels: int,
        num_attention_heads: int,
        attention_head_dim: int,
        num_layers: int,
        mlp_ratio: float = 4.0,
        mlp_drop_rate: float = 0.0,
        attention_bias: bool = True,
    ) -> None:
        super().__init__()

        hidden_size = num_attention_heads * attention_head_dim

        self.time_text_embed = CombinedTimestepTextProjEmbeddings(embedding_dim=hidden_size, pooled_projection_dim=in_channels)
        self.proj_in = nn.Linear(in_channels, hidden_size, bias=True)
        self.token_refiner = HunyuanVideoIndividualTokenRefiner(
            num_attention_heads=num_attention_heads,
            attention_head_dim=attention_head_dim,
            num_layers=num_layers,
            mlp_width_ratio=mlp_ratio,
            mlp_drop_rate=mlp_drop_rate,
            attention_bias=attention_bias,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        attention_mask: Optional[torch.LongTensor] = None,
    ) -> torch.Tensor:
        # Pool tokens for the conditioning embedding: plain mean, or a masked
        # mean that ignores padding positions when a mask is supplied.
        if attention_mask is None:
            pooled = hidden_states.mean(dim=1)
        else:
            dtype_in = hidden_states.dtype
            weights = attention_mask.float().unsqueeze(-1)
            pooled = (hidden_states * weights).sum(dim=1) / weights.sum(dim=1)
            pooled = pooled.to(dtype_in)

        temb = self.time_text_embed(timestep, pooled)
        del pooled  # free memory

        refined = self.proj_in(hidden_states)
        refined = self.token_refiner(refined, temb, attention_mask)
        del temb, attention_mask  # free memory

        return refined
|
| 1171 |
+
|
| 1172 |
+
|
| 1173 |
+
class HunyuanVideoRotaryPosEmbed(nn.Module):
    """3D rotary position embedding over (time, height, width) axes.

    Produces, per sample, the cosine tables for every axis followed by the
    sine tables, stacked along the channel dimension.
    """

    def __init__(self, rope_dim, theta):
        super().__init__()
        # Channel budget per axis: temporal, vertical, horizontal.
        self.DT, self.DY, self.DX = rope_dim
        self.theta = theta
        # Multiplier applied to H/W positions; overridden externally for RoPE scaling.
        self.h_w_scaling_factor = 1.0

    @torch.no_grad()
    def get_frequency(self, dim, pos):
        """Return (cos, sin) tables of shape (dim, T, H, W) for a position grid `pos`."""
        T, H, W = pos.shape
        inv_freq = 1.0 / (self.theta ** (torch.arange(0, dim, 2, dtype=torch.float32, device=pos.device)[: (dim // 2)] / dim))
        # Outer product with the flattened grid, restore (T, H, W), then duplicate
        # each frequency row so cos/sin pairs align with interleaved rotary channels.
        angles = torch.outer(inv_freq, pos.reshape(-1)).unflatten(-1, (T, H, W)).repeat_interleave(2, dim=0)
        return angles.cos(), angles.sin()

    @torch.no_grad()
    def forward_inner(self, frame_indices, height, width, device):
        axis_t = frame_indices.to(device=device, dtype=torch.float32)
        axis_y = torch.arange(0, height, device=device, dtype=torch.float32) * self.h_w_scaling_factor
        axis_x = torch.arange(0, width, device=device, dtype=torch.float32) * self.h_w_scaling_factor
        GT, GY, GX = torch.meshgrid(axis_t, axis_y, axis_x, indexing="ij")

        FCT, FST = self.get_frequency(self.DT, GT)
        del GT  # free memory
        FCY, FSY = self.get_frequency(self.DY, GY)
        del GY  # free memory
        FCX, FSX = self.get_frequency(self.DX, GX)
        del GX  # free memory

        # All cosine tables first, then all sine tables, along channel dim 0.
        result = torch.cat([FCT, FCY, FCX, FST, FSY, FSX], dim=0)
        del FCT, FCY, FCX, FST, FSY, FSX  # free memory

        # Already on the requested device.
        return result  # (2 * sum(rope_dim) / 2 * 2, T, H, W) == (total_dim, T, H, W)

    @torch.no_grad()
    def forward(self, frame_indices, height, width, device):
        # frame_indices: (B, T). Compute per-sample tables and stack into a batch.
        per_sample = [self.forward_inner(f, height, width, device) for f in frame_indices.unbind(0)]
        return torch.stack(per_sample, dim=0)
|
| 1215 |
+
|
| 1216 |
+
|
| 1217 |
+
class AdaLayerNormZero(nn.Module):
    """Adaptive LayerNorm emitting shift/scale/gate triples for attention and MLP branches."""

    def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=bias)
        if norm_type != "layer_norm":
            raise ValueError(f"unknown norm_type {norm_type}")
        self.norm = LayerNormFramePack(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(
        self, x: torch.Tensor, emb: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        # Project the conditioning embedding into six per-branch modulation tensors.
        modulation = self.linear(self.silu(emb.unsqueeze(-2)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = modulation.chunk(6, dim=-1)
        x = self.norm(x) * (1 + scale_msa) + shift_msa
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
|
| 1235 |
+
|
| 1236 |
+
|
| 1237 |
+
class AdaLayerNormZeroSingle(nn.Module):
    """Adaptive LayerNorm for single-stream blocks: one shift/scale pair plus a gate."""

    def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 3 * embedding_dim, bias=bias)
        if norm_type != "layer_norm":
            raise ValueError(f"unknown norm_type {norm_type}")
        self.norm = LayerNormFramePack(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(
        self,
        x: torch.Tensor,
        emb: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Project the conditioning embedding into shift/scale/gate tensors.
        modulation = self.linear(self.silu(emb.unsqueeze(-2)))
        shift_msa, scale_msa, gate_msa = modulation.chunk(3, dim=-1)
        x = self.norm(x) * (1 + scale_msa) + shift_msa
        return x, gate_msa
|
| 1258 |
+
|
| 1259 |
+
|
| 1260 |
+
class AdaLayerNormContinuous(nn.Module):
    """LayerNorm modulated by a conditioning embedding through a learned scale and shift."""

    def __init__(
        self,
        embedding_dim: int,
        conditioning_embedding_dim: int,
        elementwise_affine=True,
        eps=1e-5,
        bias=True,
        norm_type="layer_norm",
    ):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
        if norm_type != "layer_norm":
            raise ValueError(f"unknown norm_type {norm_type}")
        self.norm = LayerNormFramePack(embedding_dim, eps, elementwise_affine, bias)

    def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
        modulation = self.linear(self.silu(emb.unsqueeze(-2)))
        # NOTE: chunk order is (scale, shift) — must match the trained projection layout.
        scale, shift = modulation.chunk(2, dim=-1)
        del modulation  # free memory
        return self.norm(x) * (1 + scale) + shift
|
| 1285 |
+
|
| 1286 |
+
|
| 1287 |
+
class HunyuanVideoSingleTransformerBlock(nn.Module):
    # Single-stream DiT block: image and text tokens are concatenated and processed
    # jointly through one attention path plus a parallel MLP, gated by AdaLN-Zero.
    def __init__(
        self,
        num_attention_heads: int,
        attention_head_dim: int,
        mlp_ratio: float = 4.0,
        qk_norm: str = "rms_norm",
        attn_mode: Optional[str] = None,
        split_attn: Optional[bool] = False,
    ) -> None:
        super().__init__()

        hidden_size = num_attention_heads * attention_head_dim
        mlp_dim = int(hidden_size * mlp_ratio)
        self.attn_mode = attn_mode
        self.split_attn = split_attn

        # Attention layer (pre_only=True means no output projection in Attention module itself)
        self.attn = Attention(
            query_dim=hidden_size,
            cross_attention_dim=None,
            dim_head=attention_head_dim,
            heads=num_attention_heads,
            out_dim=hidden_size,
            bias=True,
            processor=HunyuanAttnProcessorFlashAttnSingle(),
            qk_norm=qk_norm,
            eps=1e-6,
            pre_only=True,  # Crucial: Attn processor will return raw attention output
        )

        self.norm = AdaLayerNormZeroSingle(hidden_size, norm_type="layer_norm")
        self.proj_mlp = nn.Linear(hidden_size, mlp_dim)
        self.act_mlp = nn.GELU(approximate="tanh")
        # Fuses the concatenated attention + MLP branches back to hidden_size.
        self.proj_out = nn.Linear(hidden_size + mlp_dim, hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        temb: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Text tokens are appended after the image tokens for joint processing.
        text_seq_length = encoder_hidden_states.shape[1]
        hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1)
        del encoder_hidden_states  # free memory

        residual = hidden_states

        # 1. Input normalization
        norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
        mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))

        # Split back into image / text parts for the attention processor
        # (RoPE is applied only to the image tokens).
        norm_hidden_states, norm_encoder_hidden_states = (
            norm_hidden_states[:, :-text_seq_length, :],
            norm_hidden_states[:, -text_seq_length:, :],
        )

        # 2. Attention
        attn_output, context_attn_output = self.attn(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_encoder_hidden_states,
            attention_mask=attention_mask,
            image_rotary_emb=image_rotary_emb,
            attn_mode=self.attn_mode,
            split_attn=self.split_attn,
        )
        # Re-join image and text attention outputs along the sequence dim.
        attn_output = torch.cat([attn_output, context_attn_output], dim=1)
        del norm_hidden_states, norm_encoder_hidden_states, context_attn_output  # free memory
        del image_rotary_emb

        # 3. Modulation and residual connection
        # Concatenate attention and MLP branches along channels, project, gate, add.
        hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
        del attn_output, mlp_hidden_states  # free memory
        hidden_states = gate * self.proj_out(hidden_states)
        hidden_states = hidden_states + residual

        # Return the two streams separately again.
        hidden_states, encoder_hidden_states = (
            hidden_states[:, :-text_seq_length, :],
            hidden_states[:, -text_seq_length:, :],
        )
        return hidden_states, encoder_hidden_states
|
| 1370 |
+
|
| 1371 |
+
|
| 1372 |
+
class HunyuanVideoTransformerBlock(nn.Module):
    # Dual-stream (MMDiT-style) block: image and text tokens keep separate weights
    # and norms but interact via joint attention; AdaLN-Zero gates every residual.
    def __init__(
        self,
        num_attention_heads: int,
        attention_head_dim: int,
        mlp_ratio: float,
        qk_norm: str = "rms_norm",
        attn_mode: Optional[str] = None,
        split_attn: Optional[bool] = False,
    ) -> None:
        super().__init__()

        hidden_size = num_attention_heads * attention_head_dim
        self.attn_mode = attn_mode
        self.split_attn = split_attn

        # Per-stream AdaLN-Zero modulation (image / text context).
        self.norm1 = AdaLayerNormZero(hidden_size, norm_type="layer_norm")
        self.norm1_context = AdaLayerNormZero(hidden_size, norm_type="layer_norm")

        self.attn = Attention(
            query_dim=hidden_size,
            cross_attention_dim=None,
            added_kv_proj_dim=hidden_size,
            dim_head=attention_head_dim,
            heads=num_attention_heads,
            out_dim=hidden_size,
            context_pre_only=False,
            bias=True,
            processor=HunyuanAttnProcessorFlashAttnDouble(),
            qk_norm=qk_norm,
            eps=1e-6,
        )

        self.norm2 = LayerNormFramePack(hidden_size, elementwise_affine=False, eps=1e-6)
        self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate")

        self.norm2_context = LayerNormFramePack(hidden_size, elementwise_affine=False, eps=1e-6)
        self.ff_context = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate")

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        temb: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # 1. Input normalization
        norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
        norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
            encoder_hidden_states, emb=temb
        )

        # 2. Joint attention
        attn_output, context_attn_output = self.attn(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_encoder_hidden_states,
            attention_mask=attention_mask,
            image_rotary_emb=freqs_cis,
            attn_mode=self.attn_mode,
            split_attn=self.split_attn,
        )
        del norm_hidden_states, norm_encoder_hidden_states, freqs_cis  # free memory

        # 3. Modulation and residual connection
        # addcmul(x, a, g) == x + a * g: gated residual add without a temporary.
        hidden_states = torch.addcmul(hidden_states, attn_output, gate_msa)
        del attn_output, gate_msa  # free memory
        encoder_hidden_states = torch.addcmul(encoder_hidden_states, context_attn_output, c_gate_msa)
        del context_attn_output, c_gate_msa  # free memory

        norm_hidden_states = self.norm2(hidden_states)
        norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)

        norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
        del shift_mlp, scale_mlp  # free memory
        # Equivalent form of shift + norm * (1 + scale), expressed via addcmul.
        norm_encoder_hidden_states = torch.addcmul(c_shift_mlp, norm_encoder_hidden_states, (1 + c_scale_mlp))
        del c_shift_mlp, c_scale_mlp  # free memory

        # 4. Feed-forward
        ff_output = self.ff(norm_hidden_states)
        del norm_hidden_states  # free memory
        context_ff_output = self.ff_context(norm_encoder_hidden_states)
        del norm_encoder_hidden_states  # free memory

        hidden_states = torch.addcmul(hidden_states, gate_mlp, ff_output)
        del ff_output, gate_mlp  # free memory
        encoder_hidden_states = torch.addcmul(encoder_hidden_states, c_gate_mlp, context_ff_output)
        del context_ff_output, c_gate_mlp  # free memory

        return hidden_states, encoder_hidden_states
|
| 1462 |
+
|
| 1463 |
+
|
| 1464 |
+
class ClipVisionProjection(nn.Module):
    """Two-layer SiLU MLP projecting CLIP vision features into the transformer width."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.up = nn.Linear(in_channels, out_channels * 3)
        self.down = nn.Linear(out_channels * 3, out_channels)

    def forward(self, x):
        hidden = nn.functional.silu(self.up(x))
        return self.down(hidden)
|
| 1473 |
+
|
| 1474 |
+
|
| 1475 |
+
class HunyuanVideoPatchEmbed(nn.Module):
    """Patchifies video latents via one 3D convolution (kernel == stride == patch size)."""

    def __init__(self, patch_size, in_chans, embed_dim):
        super().__init__()
        self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
|
| 1479 |
+
|
| 1480 |
+
|
| 1481 |
+
class HunyuanVideoPatchEmbedForCleanLatents(nn.Module):
    """Patch embedders for clean (context) latents at 1x, 2x and 4x downsampling."""

    def __init__(self, inner_dim):
        super().__init__()
        self.proj = nn.Conv3d(16, inner_dim, kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.proj_2x = nn.Conv3d(16, inner_dim, kernel_size=(2, 4, 4), stride=(2, 4, 4))
        self.proj_4x = nn.Conv3d(16, inner_dim, kernel_size=(4, 8, 8), stride=(4, 8, 8))

    @torch.no_grad()
    def initialize_weight_from_another_conv3d(self, another_layer):
        """Seed all three projections from a 1x conv by tiling its kernel (mean-preserving)."""
        weight = another_layer.weight.detach().clone()
        bias = another_layer.bias.detach().clone()

        state = {
            "proj.weight": weight.clone(),
            "proj.bias": bias.clone(),
            # Tile the kernel over the larger receptive field and rescale so the
            # output equals an average over the tiled input positions.
            "proj_2x.weight": einops.repeat(weight, "b c t h w -> b c (t tk) (h hk) (w wk)", tk=2, hk=2, wk=2) / 8.0,
            "proj_2x.bias": bias.clone(),
            "proj_4x.weight": einops.repeat(weight, "b c t h w -> b c (t tk) (h hk) (w wk)", tk=4, hk=4, wk=4) / 64.0,
            "proj_4x.bias": bias.clone(),
        }
        self.load_state_dict({k: v.clone() for k, v in state.items()})
|
| 1506 |
+
|
| 1507 |
+
|
| 1508 |
+
# Module-name substrings whose layers are eligible for FP8 weight optimization.
FP8_OPTIMIZATION_TARGET_KEYS = ["transformer_blocks", "single_transformer_blocks"]
FP8_OPTIMIZATION_EXCLUDE_KEYS = ["norm"]  # Exclude norm layers (e.g., LayerNorm, RMSNorm) from FP8
|
| 1510 |
+
|
| 1511 |
+
|
| 1512 |
+
class HunyuanVideoTransformer3DModelPacked(nn.Module): # (PreTrainedModelMixin, GenerationMixin,
|
| 1513 |
+
# ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
|
| 1514 |
+
# @register_to_config
|
| 1515 |
+
    def __init__(
        self,
        in_channels: int = 16,
        out_channels: int = 16,
        num_attention_heads: int = 24,
        attention_head_dim: int = 128,
        num_layers: int = 20,
        num_single_layers: int = 40,
        num_refiner_layers: int = 2,
        mlp_ratio: float = 4.0,
        patch_size: int = 2,
        patch_size_t: int = 1,
        qk_norm: str = "rms_norm",
        guidance_embeds: bool = True,
        text_embed_dim: int = 4096,
        pooled_projection_dim: int = 768,
        rope_theta: float = 256.0,
        rope_axes_dim: Tuple[int] = (16, 56, 56),
        has_image_proj=False,
        image_proj_dim=1152,
        has_clean_x_embedder=False,
        attn_mode: Optional[str] = None,
        split_attn: Optional[bool] = False,
    ) -> None:
        """FramePack-style packed HunyuanVideo DiT.

        Defaults mirror the published checkpoint configuration; image projection
        and clean-latent embedders are always installed (see notes below).
        """
        super().__init__()

        inner_dim = num_attention_heads * attention_head_dim
        out_channels = out_channels or in_channels
        # Spatial / temporal patch sizes, kept as plain attributes (no diffusers config object).
        self.config_patch_size = patch_size
        self.config_patch_size_t = patch_size_t

        # 1. Latent and condition embedders
        self.x_embedder = HunyuanVideoPatchEmbed((patch_size_t, patch_size, patch_size), in_channels, inner_dim)
        self.context_embedder = HunyuanVideoTokenRefiner(
            text_embed_dim, num_attention_heads, attention_head_dim, num_layers=num_refiner_layers
        )
        self.time_text_embed = CombinedTimestepGuidanceTextProjEmbeddings(inner_dim, pooled_projection_dim)

        self.clean_x_embedder = None
        self.image_projection = None

        # 2. RoPE
        self.rope = HunyuanVideoRotaryPosEmbed(rope_axes_dim, rope_theta)

        # 3. Dual stream transformer blocks
        self.attn_mode = attn_mode
        self.split_attn = split_attn
        self.transformer_blocks = nn.ModuleList(
            [
                HunyuanVideoTransformerBlock(
                    num_attention_heads,
                    attention_head_dim,
                    mlp_ratio=mlp_ratio,
                    qk_norm=qk_norm,
                    attn_mode=attn_mode,
                    split_attn=split_attn,
                )
                for _ in range(num_layers)
            ]
        )

        # 4. Single stream transformer blocks
        self.single_transformer_blocks = nn.ModuleList(
            [
                HunyuanVideoSingleTransformerBlock(
                    num_attention_heads,
                    attention_head_dim,
                    mlp_ratio=mlp_ratio,
                    qk_norm=qk_norm,
                    attn_mode=attn_mode,
                    split_attn=split_attn,
                )
                for _ in range(num_single_layers)
            ]
        )

        # 5. Output projection
        self.norm_out = AdaLayerNormContinuous(inner_dim, inner_dim, elementwise_affine=False, eps=1e-6)
        self.proj_out = nn.Linear(inner_dim, patch_size_t * patch_size * patch_size * out_channels)

        self.inner_dim = inner_dim
        self.use_gradient_checkpointing = False
        self.activation_cpu_offloading = False
        self.enable_teacache = False

        # NOTE(review): image projection / clean-x embedder are installed
        # unconditionally here; has_image_proj / has_clean_x_embedder are ignored.
        # if has_image_proj:
        #     self.install_image_projection(image_proj_dim)
        self.image_projection = ClipVisionProjection(in_channels=image_proj_dim, out_channels=self.inner_dim)
        # self.config["has_image_proj"] = True
        # self.config["image_proj_dim"] = in_channels

        # if has_clean_x_embedder:
        #     self.install_clean_x_embedder()
        self.clean_x_embedder = HunyuanVideoPatchEmbedForCleanLatents(self.inner_dim)
        # self.config["has_clean_x_embedder"] = True

        self.high_quality_fp32_output_for_inference = True  # False # change default to True

        # Block swapping attributes (initialized to None)
        self.blocks_to_swap = None
        self.offloader_double = None
        self.offloader_single = None

        # RoPE scaling
        self.rope_scaling_timestep_threshold: Optional[int] = None  # scale RoPE above this timestep
        self.rope_scaling_factor: float = 1.0  # RoPE scaling factor
|
| 1621 |
+
|
| 1622 |
+
    @property
    def device(self):
        # Device of the first parameter; assumes all parameters share one device.
        return next(self.parameters()).device
|
| 1625 |
+
|
| 1626 |
+
    @property
    def dtype(self):
        # Dtype of the first parameter; assumes a homogeneous parameter dtype.
        return next(self.parameters()).dtype
|
| 1629 |
+
|
| 1630 |
+
    def enable_gradient_checkpointing(self, activation_cpu_offloading=False):
        # Turn on torch checkpointing for blocks; optionally offload activations to CPU.
        self.use_gradient_checkpointing = True
        self.activation_cpu_offloading = activation_cpu_offloading
        print(
            f"Gradient checkpointing enabled for HunyuanVideoTransformer3DModelPacked. Activation CPU offloading: {activation_cpu_offloading}"
        )  # Logging
|
| 1636 |
+
|
| 1637 |
+
    def disable_gradient_checkpointing(self):
        # Revert to plain forward execution (no checkpointing, no CPU offloading).
        self.use_gradient_checkpointing = False
        self.activation_cpu_offloading = False
        print("Gradient checkpointing disabled for HunyuanVideoTransformer3DModelPacked.")  # Logging
|
| 1641 |
+
|
| 1642 |
+
def initialize_teacache(self, enable_teacache=True, num_steps=25, rel_l1_thresh=0.15):
|
| 1643 |
+
self.enable_teacache = enable_teacache
|
| 1644 |
+
self.cnt = 0
|
| 1645 |
+
self.num_steps = num_steps
|
| 1646 |
+
self.rel_l1_thresh = rel_l1_thresh # 0.1 for 1.6x speedup, 0.15 for 2.1x speedup
|
| 1647 |
+
self.accumulated_rel_l1_distance = 0
|
| 1648 |
+
self.previous_modulated_input = None
|
| 1649 |
+
self.previous_residual = None
|
| 1650 |
+
self.teacache_rescale_func = np.poly1d([7.33226126e02, -4.01131952e02, 6.75869174e01, -3.14987800e00, 9.61237896e-02])
|
| 1651 |
+
if enable_teacache:
|
| 1652 |
+
print(f"TeaCache enabled: num_steps={num_steps}, rel_l1_thresh={rel_l1_thresh}")
|
| 1653 |
+
else:
|
| 1654 |
+
print("TeaCache disabled.")
|
| 1655 |
+
|
| 1656 |
+
    def gradient_checkpointing_method(self, block, *args):
        # Run `block(*args)` under torch checkpointing when enabled; otherwise call directly.
        if self.use_gradient_checkpointing:
            if self.activation_cpu_offloading:
                # Wrap so activations saved for backward live on CPU; anchored to the
                # output projection's device as the compute device.
                block = create_cpu_offloading_wrapper(block, self.proj_out.weight.device)
            result = torch.utils.checkpoint.checkpoint(block, *args, use_reentrant=False)
        else:
            result = block(*args)
        return result
|
| 1664 |
+
|
| 1665 |
+
    def enable_block_swap(self, num_blocks: int, device: torch.device, supports_backward: bool, use_pinned_memory: bool = False):
        # Enable CPU<->GPU block swapping for `num_blocks` logical blocks.
        # A dual-stream block is roughly twice the size of a single-stream block,
        # so the budget is split as: half to double blocks, and the remainder
        # doubled (plus one) for single blocks.
        self.blocks_to_swap = num_blocks
        self.num_double_blocks = len(self.transformer_blocks)
        self.num_single_blocks = len(self.single_transformer_blocks)
        double_blocks_to_swap = num_blocks // 2
        single_blocks_to_swap = (num_blocks - double_blocks_to_swap) * 2 + 1

        # At least one block of each kind must stay resident on the device.
        assert double_blocks_to_swap <= self.num_double_blocks - 1 and single_blocks_to_swap <= self.num_single_blocks - 1, (
            f"Cannot swap more than {self.num_double_blocks - 1} double blocks and {self.num_single_blocks - 1} single blocks. "
            f"Requested {double_blocks_to_swap} double blocks and {single_blocks_to_swap} single blocks."
        )

        self.offloader_double = ModelOffloader(
            "double",
            self.transformer_blocks,
            self.num_double_blocks,
            double_blocks_to_swap,
            supports_backward,
            device,
            use_pinned_memory,
            # debug=True # Optional debugging
        )
        self.offloader_single = ModelOffloader(
            "single",
            self.single_transformer_blocks,
            self.num_single_blocks,
            single_blocks_to_swap,
            supports_backward,
            device,  # , debug=True
            use_pinned_memory,
        )
        print(
            f"HunyuanVideoTransformer3DModelPacked: Block swap enabled. Swapping {num_blocks} blocks, "
            + f"double blocks: {double_blocks_to_swap}, single blocks: {single_blocks_to_swap}, supports_backward: {supports_backward}."
        )
|
| 1700 |
+
|
| 1701 |
+
def switch_block_swap_for_inference(self):
|
| 1702 |
+
if self.blocks_to_swap and self.blocks_to_swap > 0:
|
| 1703 |
+
self.offloader_double.set_forward_only(True)
|
| 1704 |
+
self.offloader_single.set_forward_only(True)
|
| 1705 |
+
self.prepare_block_swap_before_forward()
|
| 1706 |
+
print(f"HunyuanVideoTransformer3DModelPacked: Block swap set to forward only.")
|
| 1707 |
+
|
| 1708 |
+
def switch_block_swap_for_training(self):
|
| 1709 |
+
if self.blocks_to_swap and self.blocks_to_swap > 0:
|
| 1710 |
+
self.offloader_double.set_forward_only(False)
|
| 1711 |
+
self.offloader_single.set_forward_only(False)
|
| 1712 |
+
self.prepare_block_swap_before_forward()
|
| 1713 |
+
print(f"HunyuanVideoTransformer3DModelPacked: Block swap set to forward and backward.")
|
| 1714 |
+
|
| 1715 |
+
def move_to_device_except_swap_blocks(self, device: torch.device):
|
| 1716 |
+
# assume model is on cpu. do not move blocks to device to reduce temporary memory usage
|
| 1717 |
+
if self.blocks_to_swap:
|
| 1718 |
+
saved_double_blocks = self.transformer_blocks
|
| 1719 |
+
saved_single_blocks = self.single_transformer_blocks
|
| 1720 |
+
self.transformer_blocks = None
|
| 1721 |
+
self.single_transformer_blocks = None
|
| 1722 |
+
|
| 1723 |
+
self.to(device)
|
| 1724 |
+
|
| 1725 |
+
if self.blocks_to_swap:
|
| 1726 |
+
self.transformer_blocks = saved_double_blocks
|
| 1727 |
+
self.single_transformer_blocks = saved_single_blocks
|
| 1728 |
+
|
| 1729 |
+
def prepare_block_swap_before_forward(self):
|
| 1730 |
+
if self.blocks_to_swap is None or self.blocks_to_swap == 0:
|
| 1731 |
+
return
|
| 1732 |
+
self.offloader_double.prepare_block_devices_before_forward(self.transformer_blocks)
|
| 1733 |
+
self.offloader_single.prepare_block_devices_before_forward(self.single_transformer_blocks)
|
| 1734 |
+
|
| 1735 |
+
def enable_rope_scaling(self, timestep_threshold: Optional[int], rope_scaling_factor: float = 1.0):
|
| 1736 |
+
if timestep_threshold is not None and rope_scaling_factor > 0:
|
| 1737 |
+
self.rope_scaling_timestep_threshold = timestep_threshold
|
| 1738 |
+
self.rope_scaling_factor = rope_scaling_factor
|
| 1739 |
+
logger.info(f"RoPE scaling enabled: threshold={timestep_threshold}, scaling_factor={rope_scaling_factor}.")
|
| 1740 |
+
else:
|
| 1741 |
+
self.rope_scaling_timestep_threshold = None
|
| 1742 |
+
self.rope_scaling_factor = 1.0
|
| 1743 |
+
self.rope.h_w_scaling_factor = 1.0 # reset to default
|
| 1744 |
+
logger.info("RoPE scaling disabled.")
|
| 1745 |
+
|
| 1746 |
+
    def process_input_hidden_states(
        self,
        latents,
        latent_indices=None,
        clean_latents=None,
        clean_latent_indices=None,
        clean_latents_2x=None,
        clean_latent_2x_indices=None,
        clean_latents_4x=None,
        clean_latent_4x_indices=None,
    ):
        # Patchify the noisy latents, build matching RoPE frequency tables, then
        # prepend optional "clean" context latents at 1x / 2x / 4x downsampling.
        # Returns (hidden_states, rope_freqs) as aligned (B, S, ...) sequences.
        hidden_states = self.gradient_checkpointing_method(self.x_embedder.proj, latents)
        B, C, T, H, W = hidden_states.shape

        if latent_indices is None:
            # Default temporal positions 0..T-1 for every batch element.
            latent_indices = torch.arange(0, T).unsqueeze(0).expand(B, -1)

        # (B, C, T, H, W) -> (B, T*H*W, C) token sequence.
        hidden_states = hidden_states.flatten(2).transpose(1, 2)

        rope_freqs = self.rope(frame_indices=latent_indices, height=H, width=W, device=hidden_states.device)
        rope_freqs = rope_freqs.flatten(2).transpose(1, 2)

        if clean_latents is not None and clean_latent_indices is not None:
            # 1x context latents: same patch size as the noisy latents.
            clean_latents = clean_latents.to(hidden_states)
            clean_latents = self.gradient_checkpointing_method(self.clean_x_embedder.proj, clean_latents)
            clean_latents = clean_latents.flatten(2).transpose(1, 2)

            clean_latent_rope_freqs = self.rope(frame_indices=clean_latent_indices, height=H, width=W, device=clean_latents.device)
            clean_latent_rope_freqs = clean_latent_rope_freqs.flatten(2).transpose(1, 2)

            # Context tokens are prepended ahead of the noisy tokens.
            hidden_states = torch.cat([clean_latents, hidden_states], dim=1)
            rope_freqs = torch.cat([clean_latent_rope_freqs, rope_freqs], dim=1)

        if clean_latents_2x is not None and clean_latent_2x_indices is not None:
            # 2x-downsampled context latents (coarser patches, fewer tokens).
            clean_latents_2x = clean_latents_2x.to(hidden_states)
            clean_latents_2x = pad_for_3d_conv(clean_latents_2x, (2, 4, 4))
            clean_latents_2x = self.gradient_checkpointing_method(self.clean_x_embedder.proj_2x, clean_latents_2x)
            clean_latents_2x = clean_latents_2x.flatten(2).transpose(1, 2)

            clean_latent_2x_rope_freqs = self.rope(
                frame_indices=clean_latent_2x_indices, height=H, width=W, device=clean_latents_2x.device
            )
            # Downsample the RoPE grid to match the coarser token layout.
            clean_latent_2x_rope_freqs = pad_for_3d_conv(clean_latent_2x_rope_freqs, (2, 2, 2))
            clean_latent_2x_rope_freqs = center_down_sample_3d(clean_latent_2x_rope_freqs, (2, 2, 2))
            clean_latent_2x_rope_freqs = clean_latent_2x_rope_freqs.flatten(2).transpose(1, 2)

            hidden_states = torch.cat([clean_latents_2x, hidden_states], dim=1)
            rope_freqs = torch.cat([clean_latent_2x_rope_freqs, rope_freqs], dim=1)

        if clean_latents_4x is not None and clean_latent_4x_indices is not None:
            # 4x-downsampled context latents (coarsest history tokens).
            clean_latents_4x = clean_latents_4x.to(hidden_states)
            clean_latents_4x = pad_for_3d_conv(clean_latents_4x, (4, 8, 8))
            clean_latents_4x = self.gradient_checkpointing_method(self.clean_x_embedder.proj_4x, clean_latents_4x)
            clean_latents_4x = clean_latents_4x.flatten(2).transpose(1, 2)

            clean_latent_4x_rope_freqs = self.rope(
                frame_indices=clean_latent_4x_indices, height=H, width=W, device=clean_latents_4x.device
            )
            clean_latent_4x_rope_freqs = pad_for_3d_conv(clean_latent_4x_rope_freqs, (4, 4, 4))
            clean_latent_4x_rope_freqs = center_down_sample_3d(clean_latent_4x_rope_freqs, (4, 4, 4))
            clean_latent_4x_rope_freqs = clean_latent_4x_rope_freqs.flatten(2).transpose(1, 2)

            hidden_states = torch.cat([clean_latents_4x, hidden_states], dim=1)
            rope_freqs = torch.cat([clean_latent_4x_rope_freqs, rope_freqs], dim=1)

        return hidden_states, rope_freqs
|
| 1812 |
+
|
| 1813 |
+
def forward(
    self,
    hidden_states,
    timestep,
    encoder_hidden_states,
    encoder_attention_mask,
    pooled_projections,
    guidance,
    latent_indices=None,
    clean_latents=None,
    clean_latent_indices=None,
    clean_latents_2x=None,
    clean_latent_2x_indices=None,
    clean_latents_4x=None,
    clean_latent_4x_indices=None,
    image_embeddings=None,
    attention_kwargs=None,
    return_dict=True,
):
    """Run one denoising step of the packed HunyuanVideo transformer.

    Args:
        hidden_states: Noisy latent video tensor of shape (B, C, T, H, W).
        timestep: Diffusion timestep(s); also drives the optional RoPE scaling.
        encoder_hidden_states: Text-encoder token embeddings.
        encoder_attention_mask: Mask over the text tokens.
        pooled_projections: Pooled text embedding fed to the time/text embedder.
        guidance: Distilled-guidance conditioning input.
        latent_indices: Frame indices for RoPE of the noisy latents.
        clean_latents / clean_latents_2x / clean_latents_4x: Optional clean
            context latents prepended (with matching RoPE) to the token sequence.
        clean_latent_indices / clean_latent_2x_indices / clean_latent_4x_indices:
            Frame indices for the corresponding clean-context latents.
        image_embeddings: Required when ``self.image_projection`` is set.
        attention_kwargs: Defaulted to ``{}``; not otherwise used here.
        return_dict: If True, return a namespace with a ``sample`` attribute,
            otherwise a one-element tuple.
    """
    if attention_kwargs is None:
        attention_kwargs = {}

    # RoPE scaling: must be done before processing hidden states
    if self.rope_scaling_timestep_threshold is not None:
        if timestep >= self.rope_scaling_timestep_threshold:
            self.rope.h_w_scaling_factor = self.rope_scaling_factor
        else:
            self.rope.h_w_scaling_factor = 1.0

    batch_size, num_channels, num_frames, height, width = hidden_states.shape
    p, p_t = self.config_patch_size, self.config_patch_size_t
    post_patch_num_frames = num_frames // p_t
    post_patch_height = height // p
    post_patch_width = width // p
    # Token count of the noisy latents only; used later to crop off the
    # prepended clean-context tokens before the final projection.
    original_context_length = post_patch_num_frames * post_patch_height * post_patch_width

    input_device = hidden_states.device

    # Patchify the noisy latents and prepend the 1x/2x/4x clean-context
    # latents, producing the packed token sequence plus matching RoPE freqs.
    hidden_states, rope_freqs = self.process_input_hidden_states(
        hidden_states,
        latent_indices,
        clean_latents,
        clean_latent_indices,
        clean_latents_2x,
        clean_latent_2x_indices,
        clean_latents_4x,
        clean_latent_4x_indices,
    )
    del (
        latent_indices,
        clean_latents,
        clean_latent_indices,
        clean_latents_2x,
        clean_latent_2x_indices,
        clean_latents_4x,
        clean_latent_4x_indices,
    )  # free memory

    temb = self.gradient_checkpointing_method(self.time_text_embed, timestep, guidance, pooled_projections)
    encoder_hidden_states = self.gradient_checkpointing_method(
        self.context_embedder, encoder_hidden_states, timestep, encoder_attention_mask
    )

    if self.image_projection is not None:
        assert image_embeddings is not None, "You must use image embeddings!"
        extra_encoder_hidden_states = self.gradient_checkpointing_method(self.image_projection, image_embeddings)
        extra_attention_mask = torch.ones(
            (batch_size, extra_encoder_hidden_states.shape[1]),
            dtype=encoder_attention_mask.dtype,
            device=encoder_attention_mask.device,
        )

        # must cat before (not after) encoder_hidden_states, due to attn masking
        encoder_hidden_states = torch.cat([extra_encoder_hidden_states, encoder_hidden_states], dim=1)
        encoder_attention_mask = torch.cat([extra_attention_mask, encoder_attention_mask], dim=1)
        del extra_encoder_hidden_states, extra_attention_mask  # free memory

    with torch.no_grad():
        if batch_size == 1 and not self.split_attn:
            # When batch size is 1, we do not need any masks or var-len funcs since cropping is mathematically same to what we want
            # If they are not same, then their impls are wrong. Ours are always the correct one.
            text_len = encoder_attention_mask.sum().item()
            encoder_hidden_states = encoder_hidden_states[:, :text_len]
            attention_mask = None, None, None, None, None
        else:
            # If batch size = 1 and split_attn is True, it will be same as above
            img_seq_len = hidden_states.shape[1]
            txt_seq_len = encoder_hidden_states.shape[1]

            cu_seqlens_q, seq_len = get_cu_seqlens(encoder_attention_mask, img_seq_len)
            cu_seqlens_kv = cu_seqlens_q
            max_seqlen_q = img_seq_len + txt_seq_len
            max_seqlen_kv = max_seqlen_q

            # Packed as a 5-tuple consumed by the attention blocks.
            attention_mask = cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, seq_len
            del cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, seq_len  # free memory
    del encoder_attention_mask  # free memory

    if self.enable_teacache:
        # TeaCache: measure how much the modulated input changed since the
        # previous step and skip the transformer blocks while the accumulated
        # relative L1 distance stays below the threshold.
        modulated_inp = self.transformer_blocks[0].norm1(hidden_states, emb=temb)[0]

        if self.cnt == 0 or self.cnt == self.num_steps - 1:
            # Always compute on the first and last steps.
            should_calc = True
            self.accumulated_rel_l1_distance = 0
        else:
            curr_rel_l1 = (
                ((modulated_inp - self.previous_modulated_input).abs().mean() / self.previous_modulated_input.abs().mean())
                .cpu()
                .item()
            )
            self.accumulated_rel_l1_distance += self.teacache_rescale_func(curr_rel_l1)
            should_calc = self.accumulated_rel_l1_distance >= self.rel_l1_thresh

            if should_calc:
                self.accumulated_rel_l1_distance = 0

        self.previous_modulated_input = modulated_inp
        self.cnt += 1

        if self.cnt == self.num_steps:
            self.cnt = 0

        if not should_calc:
            # Skip the blocks: reuse the residual cached from the last
            # fully-computed step.
            hidden_states = hidden_states + self.previous_residual
        else:
            ori_hidden_states = hidden_states.clone()

            # NOTE(review): this path does not drive the block-swap offloader,
            # unlike the non-TeaCache path below — presumably TeaCache and
            # block swapping are not used together; confirm.
            for block_id, block in enumerate(self.transformer_blocks):
                hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
                    block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs
                )

            for block_id, block in enumerate(self.single_transformer_blocks):
                hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
                    block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs
                )

            self.previous_residual = hidden_states - ori_hidden_states
            del ori_hidden_states  # free memory
    else:
        for block_id, block in enumerate(self.transformer_blocks):
            if self.blocks_to_swap:
                self.offloader_double.wait_for_block(block_id)

            hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
                block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs
            )

            if self.blocks_to_swap:
                self.offloader_double.submit_move_blocks_forward(self.transformer_blocks, block_id)

        for block_id, block in enumerate(self.single_transformer_blocks):
            if self.blocks_to_swap:
                self.offloader_single.wait_for_block(block_id)

            hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
                block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs
            )

            if self.blocks_to_swap:
                self.offloader_single.submit_move_blocks_forward(self.single_transformer_blocks, block_id)

    del attention_mask, rope_freqs  # free memory
    del encoder_hidden_states  # free memory

    hidden_states = self.gradient_checkpointing_method(self.norm_out, hidden_states, temb)

    # Drop the clean-context tokens; keep only the noisy-latent tokens.
    hidden_states = hidden_states[:, -original_context_length:, :]

    if self.high_quality_fp32_output_for_inference:
        hidden_states = hidden_states.to(dtype=torch.float32)
        if self.proj_out.weight.dtype != torch.float32:
            self.proj_out.to(dtype=torch.float32)

    hidden_states = self.gradient_checkpointing_method(self.proj_out, hidden_states)

    if hidden_states.device != input_device:
        hidden_states = hidden_states.to(input_device)

    # Un-patchify back to (B, C, T, H, W).
    hidden_states = einops.rearrange(
        hidden_states,
        "b (t h w) (c pt ph pw) -> b c (t pt) (h ph) (w pw)",
        t=post_patch_num_frames,
        h=post_patch_height,
        w=post_patch_width,
        pt=p_t,
        ph=p,
        pw=p,
    )

    if return_dict:
        # return Transformer2DModelOutput(sample=hidden_states)
        return SimpleNamespace(sample=hidden_states)

    return (hidden_states,)
|
| 2009 |
+
|
| 2010 |
+
def fp8_optimization(
    self, state_dict: dict[str, torch.Tensor], device: torch.device, move_to_device: bool, use_scaled_mm: bool = False
) -> dict[str, torch.Tensor]:  # Return type hint added
    """
    Optimize the model state_dict with fp8.

    Args:
        state_dict (dict[str, torch.Tensor]):
            The state_dict of the model.
        device (torch.device):
            The device to calculate the weight.
        move_to_device (bool):
            Whether to move the weight to the device after optimization.
        use_scaled_mm (bool):
            Whether to use scaled matrix multiplication for FP8.

    Returns:
        dict[str, torch.Tensor]: The fp8-optimized state_dict (the optimization
        is performed in place; the same dict object is also returned).
    """

    # inplace optimization
    state_dict = optimize_state_dict_with_fp8(
        state_dict, device, FP8_OPTIMIZATION_TARGET_KEYS, FP8_OPTIMIZATION_EXCLUDE_KEYS, move_to_device=move_to_device
    )

    # apply monkey patching so the model's linear layers consume fp8 weights
    apply_fp8_monkey_patch(self, state_dict, use_scaled_mm=use_scaled_mm)

    return state_dict
|
| 2036 |
+
|
| 2037 |
+
|
| 2038 |
+
def load_packed_model(
    device: Union[str, torch.device],
    dit_path: str,
    attn_mode: str,
    loading_device: Union[str, torch.device],
    fp8_scaled: bool = False,
    split_attn: bool = False,
    for_inference: bool = False,
    lora_weights_list: Optional[Dict[str, torch.Tensor]] = None,
    lora_multipliers: Optional[List[float]] = None,
    disable_numpy_memmap: bool = False,
) -> HunyuanVideoTransformer3DModelPacked:
    """
    Load a packed DiT model from a given path.
    If fp8_scaled is True, the model will be optimized to fp8 dynamically.

    Args:
        device (Union[str, torch.device]): The device to calculate etc. (usually "cuda").
        dit_path (str): The path to the DiT model file or directory.
        attn_mode (str): The attention mode to use.
        loading_device (Union[str, torch.device]): The device to load the model weights to.
        fp8_scaled (bool): Whether to optimize the model weights to fp8.
        split_attn (bool): Whether to use split attention.
        for_inference (bool): Whether to create the model for inference.
        lora_weights_list (Optional[Dict[str, torch.Tensor]]): List of state_dicts for LoRA weights.
        lora_multipliers (Optional[List[float]]): List of multipliers for LoRA weights.
        disable_numpy_memmap (bool): Whether to disable numpy memory mapping when loading weights.

    Returns:
        HunyuanVideoTransformer3DModelPacked: The loaded DiT model (the
        inference subclass when for_inference is True).
    """

    # TODO support split_attn
    device = torch.device(device)
    loading_device = torch.device(loading_device)

    if os.path.isdir(dit_path):
        # we don't support from_pretrained for now, so loading safetensors directly
        safetensor_files = glob.glob(os.path.join(dit_path, "*.safetensors"))
        if len(safetensor_files) == 0:
            raise ValueError(f"Cannot find safetensors file in {dit_path}")
        # sort by name and take the first one
        safetensor_files.sort()
        dit_path = safetensor_files[0]

    # import here to avoid circular import issues
    from musubi_tuner.frame_pack.hunyuan_video_packed_inference import HunyuanVideoTransformer3DModelPackedInference

    model_class = HunyuanVideoTransformer3DModelPackedInference if for_inference else HunyuanVideoTransformer3DModelPacked

    with init_empty_weights():
        # Fix: log the class actually instantiated (the original always logged
        # "HunyuanVideoTransformer3DModelPacked" even for the inference subclass).
        logger.info(f"Creating {model_class.__name__}")

        model = model_class(
            attention_head_dim=128,
            guidance_embeds=True,
            has_clean_x_embedder=True,
            has_image_proj=True,
            image_proj_dim=1152,
            in_channels=16,
            mlp_ratio=4.0,
            num_attention_heads=24,
            num_layers=20,
            num_refiner_layers=2,
            num_single_layers=40,
            out_channels=16,
            patch_size=2,
            patch_size_t=1,
            pooled_projection_dim=768,
            qk_norm="rms_norm",
            rope_axes_dim=(16, 56, 56),
            rope_theta=256.0,
            text_embed_dim=4096,
            attn_mode=attn_mode,
            split_attn=split_attn,
        )

    # load model weights with dynamic fp8 optimization and LoRA merging if needed
    logger.info(f"Loading DiT model from {dit_path}, device={loading_device}")

    sd = load_safetensors_with_lora_and_fp8(
        model_files=dit_path,
        lora_weights_list=lora_weights_list,
        lora_multipliers=lora_multipliers,
        fp8_optimization=fp8_scaled,
        calc_device=device,
        move_to_device=(loading_device == device),
        target_keys=FP8_OPTIMIZATION_TARGET_KEYS,
        exclude_keys=FP8_OPTIMIZATION_EXCLUDE_KEYS,
        disable_numpy_memmap=disable_numpy_memmap,
    )

    if fp8_scaled:
        apply_fp8_monkey_patch(model, sd, use_scaled_mm=False)

    if loading_device.type != "cpu":
        # make sure all the model weights are on the loading_device
        logger.info(f"Moving weights to {loading_device}")
        for key in sd.keys():
            sd[key] = sd[key].to(loading_device)

    info = model.load_state_dict(sd, strict=True, assign=True)
    logger.info(f"Loaded DiT model from {dit_path}, info={info}")

    return model
|
src/musubi_tuner/frame_pack/hunyuan_video_packed_inference.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Inference model for Hunyuan Video Packed
|
| 2 |
+
# We do not want to break the training accidentally, so we use a separate file for inference model.
|
| 3 |
+
|
| 4 |
+
# MagCache: modified from https://github.com/Zehong-Ma/MagCache/blob/main/MagCache4HunyuanVideo/magcache_sample_video.py
|
| 5 |
+
|
| 6 |
+
from types import SimpleNamespace
|
| 7 |
+
from typing import Optional
|
| 8 |
+
import einops
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
from torch.nn import functional as F
|
| 12 |
+
from musubi_tuner.frame_pack.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked, get_cu_seqlens
|
| 13 |
+
|
| 14 |
+
import logging
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
logging.basicConfig(level=logging.INFO)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class HunyuanVideoTransformer3DModelPackedInference(HunyuanVideoTransformer3DModelPacked):
    """Inference-only variant of the packed HunyuanVideo transformer.

    Adds MagCache, a step-skipping scheme that reuses the previous step's
    residual when the predicted accumulated error stays below a threshold.
    Kept separate from the training model so training behavior cannot be
    broken accidentally.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # MagCache is off until initialize_magcache() is called.
        self.enable_magcache = False

    def initialize_magcache(
        self,
        enable: bool = True,
        retention_ratio: float = 0.2,
        mag_ratios: Optional[list[float]] = None,
        magcache_thresh: float = 0.24,
        K: int = 6,
        calibration: bool = False,
    ):
        """Configure MagCache and reset its per-run state.

        Args:
            enable: Turn MagCache on or off.
            retention_ratio: Fraction of the initial steps that are always
                computed (never skipped).
            mag_ratios: Per-step residual-magnitude ratios; defaults to the
                values calibrated by the original MagCache authors.
            magcache_thresh: Maximum accumulated error allowed before a full
                forward pass is forced.
            K: Maximum number of consecutive skipped steps.
            calibration: If True, run full forward passes and record the
                statistics returned by get_calibration_data().
        """
        if mag_ratios is None:
            # Copy from original MagCache
            mag_ratios = np.array(
                [1.0]
                + [
                    1.06971,
                    1.29073,
                    1.11245,
                    1.09596,
                    1.05233,
                    1.01415,
                    1.05672,
                    1.00848,
                    1.03632,
                    1.02974,
                    1.00984,
                    1.03028,
                    1.00681,
                    1.06614,
                    1.05022,
                    1.02592,
                    1.01776,
                    1.02985,
                    1.00726,
                    1.03727,
                    1.01502,
                    1.00992,
                    1.03371,
                    0.9976,
                    1.02742,
                    1.0093,
                    1.01869,
                    1.00815,
                    1.01461,
                    1.01152,
                    1.03082,
                    1.0061,
                    1.02162,
                    1.01999,
                    0.99063,
                    1.01186,
                    1.0217,
                    0.99947,
                    1.01711,
                    0.9904,
                    1.00258,
                    1.00878,
                    0.97039,
                    0.97686,
                    0.94315,
                    0.97728,
                    0.91154,
                    0.86139,
                    0.76592,
                ]
            )
        self.enable_magcache = enable
        self.calibration = calibration
        self.retention_ratio = retention_ratio
        self.default_mag_ratios = mag_ratios
        self.magcache_thresh = magcache_thresh
        self.K = K
        self.reset_magcache()

    def reset_magcache(self, num_steps: int = 50):
        """Reset MagCache state for a new sampling run of *num_steps* steps.

        No-op when MagCache is disabled. Resamples the default mag_ratios to
        num_steps entries (nearest-neighbor) unless calibrating.
        """
        if not self.enable_magcache:
            return

        def nearest_interp(src_array, target_length):
            # Nearest-neighbor resampling of the calibrated per-step ratios.
            src_length = len(src_array)
            if target_length == 1:
                return np.array([src_array[-1]])

            scale = (src_length - 1) / (target_length - 1)
            mapped_indices = np.round(np.arange(target_length) * scale).astype(int)
            return np.array(src_array)[mapped_indices]

        if not self.calibration and num_steps != len(self.default_mag_ratios):
            logger.info(f"Interpolating mag_ratios from {len(self.default_mag_ratios)} to {num_steps} steps.")
            self.mag_ratios = nearest_interp(self.default_mag_ratios, num_steps)
        else:
            self.mag_ratios = self.default_mag_ratios

        # Step counter and skip-decision accumulators.
        self.cnt = 0
        self.num_steps = num_steps
        self.residual_cache = None  # residual of the last fully-computed step
        self.accumulated_ratio = 1.0
        self.accumulated_steps = 0
        self.accumulated_err = 0
        # Statistics gathered in calibration mode.
        self.norm_ratio = []
        self.norm_std = []
        self.cos_dis = []

    def get_calibration_data(self) -> tuple[list[float], list[float], list[float]]:
        """Return (norm_ratio, norm_std, cos_dis) recorded during calibration.

        Raises:
            ValueError: If MagCache is disabled or calibration mode was not set.
        """
        if not self.enable_magcache or not self.calibration:
            raise ValueError("MagCache is not enabled or calibration is not set.")
        return self.norm_ratio, self.norm_std, self.cos_dis

    def forward(self, *args, **kwargs):
        # Forward pass for inference: route through MagCache when enabled,
        # otherwise fall back to the base model's forward.
        if self.enable_magcache:
            return self.magcache_forward(*args, **kwargs, calibration=self.calibration)
        else:
            return super().forward(*args, **kwargs)

    def magcache_forward(
        self,
        hidden_states,
        timestep,
        encoder_hidden_states,
        encoder_attention_mask,
        pooled_projections,
        guidance,
        latent_indices=None,
        clean_latents=None,
        clean_latent_indices=None,
        clean_latents_2x=None,
        clean_latent_2x_indices=None,
        clean_latents_4x=None,
        clean_latent_4x_indices=None,
        image_embeddings=None,
        attention_kwargs=None,
        return_dict=True,
        calibration=False,
    ):
        """Forward pass that may skip the transformer blocks via MagCache.

        Mirrors the base forward, but after the retention window it estimates
        the error of reusing the cached residual; when the accumulated error
        and the consecutive-skip count stay within bounds, the blocks are
        skipped and the cached residual is added instead. In calibration mode
        every step is computed and residual statistics are recorded.
        """
        if attention_kwargs is None:
            attention_kwargs = {}

        # RoPE scaling: must be done before processing hidden states
        if self.rope_scaling_timestep_threshold is not None:
            if timestep >= self.rope_scaling_timestep_threshold:
                self.rope.h_w_scaling_factor = self.rope_scaling_factor
            else:
                self.rope.h_w_scaling_factor = 1.0

        batch_size, num_channels, num_frames, height, width = hidden_states.shape
        p, p_t = self.config_patch_size, self.config_patch_size_t
        post_patch_num_frames = num_frames // p_t
        post_patch_height = height // p
        post_patch_width = width // p
        # Token count of the noisy latents only; used to crop off the
        # clean-context tokens before the final projection.
        original_context_length = post_patch_num_frames * post_patch_height * post_patch_width

        hidden_states, rope_freqs = self.process_input_hidden_states(
            hidden_states,
            latent_indices,
            clean_latents,
            clean_latent_indices,
            clean_latents_2x,
            clean_latent_2x_indices,
            clean_latents_4x,
            clean_latent_4x_indices,
        )
        del (
            latent_indices,
            clean_latents,
            clean_latent_indices,
            clean_latents_2x,
            clean_latent_2x_indices,
            clean_latents_4x,
            clean_latent_4x_indices,
        )  # free memory

        temb = self.gradient_checkpointing_method(self.time_text_embed, timestep, guidance, pooled_projections)
        encoder_hidden_states = self.gradient_checkpointing_method(
            self.context_embedder, encoder_hidden_states, timestep, encoder_attention_mask
        )

        if self.image_projection is not None:
            assert image_embeddings is not None, "You must use image embeddings!"
            extra_encoder_hidden_states = self.gradient_checkpointing_method(self.image_projection, image_embeddings)
            extra_attention_mask = torch.ones(
                (batch_size, extra_encoder_hidden_states.shape[1]),
                dtype=encoder_attention_mask.dtype,
                device=encoder_attention_mask.device,
            )

            # must cat before (not after) encoder_hidden_states, due to attn masking
            encoder_hidden_states = torch.cat([extra_encoder_hidden_states, encoder_hidden_states], dim=1)
            encoder_attention_mask = torch.cat([extra_attention_mask, encoder_attention_mask], dim=1)
            del extra_encoder_hidden_states, extra_attention_mask  # free memory

        with torch.no_grad():
            # NOTE(review): unlike the training forward, this branch does not
            # check self.split_attn — presumably split_attn is not used at
            # inference; confirm.
            if batch_size == 1:
                # When batch size is 1, we do not need any masks or var-len funcs since cropping is mathematically same to what we want
                # If they are not same, then their impls are wrong. Ours are always the correct one.
                text_len = encoder_attention_mask.sum().item()
                encoder_hidden_states = encoder_hidden_states[:, :text_len]
                attention_mask = None, None, None, None, None
            else:
                img_seq_len = hidden_states.shape[1]
                txt_seq_len = encoder_hidden_states.shape[1]

                cu_seqlens_q, seq_len = get_cu_seqlens(encoder_attention_mask, img_seq_len)
                cu_seqlens_kv = cu_seqlens_q
                max_seqlen_q = img_seq_len + txt_seq_len
                max_seqlen_kv = max_seqlen_q

                attention_mask = cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, seq_len
                del cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, seq_len  # free memory
        del encoder_attention_mask  # free memory

        if self.enable_teacache:
            raise NotImplementedError("TEACache is not implemented for inference model.")

        skip_forward = False
        if (
            self.enable_magcache
            and not calibration
            and self.cnt >= max(int(self.retention_ratio * self.num_steps), 1)
            and self.cnt < self.num_steps - 1
        ):
            # Estimate the error of skipping this step from the calibrated
            # magnitude ratio, and skip while both the accumulated error and
            # the consecutive-skip count stay within bounds.
            cur_mag_ratio = self.mag_ratios[self.cnt]
            self.accumulated_ratio = self.accumulated_ratio * cur_mag_ratio
            cur_skip_err = np.abs(1 - self.accumulated_ratio)
            self.accumulated_err += cur_skip_err
            self.accumulated_steps += 1
            if self.accumulated_err <= self.magcache_thresh and self.accumulated_steps <= self.K:
                skip_forward = True
            else:
                # Budget exceeded: compute this step and restart the window.
                self.accumulated_ratio = 1.0
                self.accumulated_steps = 0
                self.accumulated_err = 0

        if skip_forward:
            # uncomment the following line to debug
            # print(
            #     f"Skipping forward pass at step {self.cnt}, accumulated ratio: {self.accumulated_ratio:.4f}, "
            #     f"accumulated error: {self.accumulated_err:.4f}, accumulated steps: {self.accumulated_steps}"
            # )
            hidden_states = hidden_states + self.residual_cache
        else:
            ori_hidden_states = hidden_states

            for block_id, block in enumerate(self.transformer_blocks):
                if self.blocks_to_swap:
                    self.offloader_double.wait_for_block(block_id)

                hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
                    block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs
                )

                if self.blocks_to_swap:
                    self.offloader_double.submit_move_blocks_forward(self.transformer_blocks, block_id)

            for block_id, block in enumerate(self.single_transformer_blocks):
                if self.blocks_to_swap:
                    self.offloader_single.wait_for_block(block_id)

                hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
                    block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs
                )

                if self.blocks_to_swap:
                    self.offloader_single.submit_move_blocks_forward(self.single_transformer_blocks, block_id)

            if self.enable_magcache:
                cur_residual = hidden_states - ori_hidden_states
                if calibration and self.cnt >= 1:
                    # Record how this step's residual compares to the previous
                    # one; these become the calibrated mag_ratios.
                    norm_ratio = ((cur_residual.norm(dim=-1) / self.residual_cache.norm(dim=-1)).mean()).item()
                    norm_std = (cur_residual.norm(dim=-1) / self.residual_cache.norm(dim=-1)).std().item()
                    cos_dis = (1 - F.cosine_similarity(cur_residual, self.residual_cache, dim=-1, eps=1e-8)).mean().item()
                    self.norm_ratio.append(round(norm_ratio, 5))
                    self.norm_std.append(round(norm_std, 5))
                    self.cos_dis.append(round(cos_dis, 5))
                    logger.info(f"time: {self.cnt}, norm_ratio: {norm_ratio}, norm_std: {norm_std}, cos_dis: {cos_dis}")
                self.residual_cache = cur_residual

            del ori_hidden_states  # free memory

        del attention_mask, rope_freqs  # free memory
        del encoder_hidden_states  # free memory

        hidden_states = self.gradient_checkpointing_method(self.norm_out, hidden_states, temb)

        # Drop the clean-context tokens; keep only the noisy-latent tokens.
        hidden_states = hidden_states[:, -original_context_length:, :]

        if self.high_quality_fp32_output_for_inference:
            hidden_states = hidden_states.to(dtype=torch.float32)
            if self.proj_out.weight.dtype != torch.float32:
                self.proj_out.to(dtype=torch.float32)

        hidden_states = self.gradient_checkpointing_method(self.proj_out, hidden_states)

        # Un-patchify back to (B, C, T, H, W).
        hidden_states = einops.rearrange(
            hidden_states,
            "b (t h w) (c pt ph pw) -> b c (t pt) (h ph) (w pw)",
            t=post_patch_num_frames,
            h=post_patch_height,
            w=post_patch_width,
            pt=p_t,
            ph=p,
            pw=p,
        )

        if self.enable_magcache:
            self.cnt += 1
            if self.cnt >= self.num_steps:
                # End of the sampling run: rewind the counter and accumulators.
                self.cnt = 0
                self.accumulated_ratio = 1.0
                self.accumulated_steps = 0
                self.accumulated_err = 0

        if return_dict:
            # return Transformer2DModelOutput(sample=hidden_states)
            return SimpleNamespace(sample=hidden_states)

        return (hidden_states,)
|
src/musubi_tuner/frame_pack/k_diffusion_hunyuan.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# original code: https://github.com/lllyasviel/FramePack
|
| 2 |
+
# original license: Apache-2.0
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import math
|
| 6 |
+
|
| 7 |
+
# from diffusers_helper.k_diffusion.uni_pc_fm import sample_unipc
|
| 8 |
+
# from diffusers_helper.k_diffusion.wrapper import fm_wrapper
|
| 9 |
+
# from diffusers_helper.utils import repeat_to_batch_size
|
| 10 |
+
from musubi_tuner.frame_pack.uni_pc_fm import sample_unipc
|
| 11 |
+
from musubi_tuner.frame_pack.wrapper import fm_wrapper
|
| 12 |
+
from musubi_tuner.frame_pack.utils import repeat_to_batch_size
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def flux_time_shift(t, mu=1.15, sigma=1.0):
    """Flux-style timestep shift: warp t in (0, 1] by mu/sigma (works on floats and tensors)."""
    e_mu = math.exp(mu)
    return e_mu / (e_mu + (1 / t - 1) ** sigma)


def calculate_flux_mu(context_length, x1=256, y1=0.5, x2=4096, y2=1.15, exp_max=7.0):
    """Linearly interpolate mu from the sequence length, capped at log(exp_max)."""
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - slope * x1
    return min(slope * context_length + intercept, math.log(exp_max))


def get_flux_sigmas_from_mu(n, mu):
    """Build n+1 descending sigmas from 1 to 0, shifted through flux_time_shift."""
    raw = torch.linspace(1, 0, steps=n + 1)
    return flux_time_shift(raw, mu=mu)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# @torch.inference_mode()
def sample_hunyuan(
    transformer,
    sampler="unipc",
    initial_latent=None,
    concat_latent=None,
    strength=1.0,
    width=512,
    height=512,
    frames=16,
    real_guidance_scale=1.0,
    distilled_guidance_scale=6.0,
    guidance_rescale=0.0,
    shift=None,
    num_inference_steps=25,
    batch_size=None,
    generator=None,
    prompt_embeds=None,
    prompt_embeds_mask=None,
    prompt_poolers=None,
    negative_prompt_embeds=None,
    negative_prompt_embeds_mask=None,
    negative_prompt_poolers=None,
    dtype=torch.bfloat16,
    device=None,
    negative_kwargs=None,
    callback=None,
    **kwargs,
):
    """Run flow-matching UniPC sampling over the HunyuanVideo-packed transformer.

    Returns the final denoised latent produced by ``sample_unipc``. CFG uses
    ``real_guidance_scale``/``guidance_rescale``; the distilled guidance scale is
    passed to the model as an embedding (scaled by 1000).
    """
    device = device or transformer.device

    if batch_size is None:
        batch_size = int(prompt_embeds.shape[0])

    # BUG FIX: the original read `generator.device` unconditionally, which raised
    # AttributeError for the documented default generator=None. Fall back to the
    # sampling device when no generator is supplied.
    noise_device = generator.device if generator is not None else device
    latents = torch.randn(
        (batch_size, 16, (frames + 3) // 4, height // 8, width // 8), generator=generator, device=noise_device
    ).to(device=device, dtype=torch.float32)

    B, C, T, H, W = latents.shape
    seq_length = T * H * W // 4  # 9*80*80//4 = 14400

    if shift is None:
        mu = calculate_flux_mu(seq_length, exp_max=7.0)  # 1.9459... if seq_len is large, mu is clipped.
    else:
        mu = math.log(shift)

    sigmas = get_flux_sigmas_from_mu(num_inference_steps, mu).to(device)

    k_model = fm_wrapper(transformer)

    if initial_latent is not None:
        # Image/video-conditioned start: blend the provided latent with noise
        # according to the first (strength-scaled) sigma.
        sigmas = sigmas * strength
        first_sigma = sigmas[0].to(device=device, dtype=torch.float32)
        initial_latent = initial_latent.to(device=device, dtype=torch.float32)
        latents = initial_latent.float() * (1.0 - first_sigma) + latents.float() * first_sigma

    if concat_latent is not None:
        concat_latent = concat_latent.to(latents)

    distilled_guidance = torch.tensor([distilled_guidance_scale * 1000.0] * batch_size).to(device=device, dtype=dtype)

    # Broadcast all conditioning tensors to the requested batch size (no-op for None).
    prompt_embeds = repeat_to_batch_size(prompt_embeds, batch_size)
    prompt_embeds_mask = repeat_to_batch_size(prompt_embeds_mask, batch_size)
    prompt_poolers = repeat_to_batch_size(prompt_poolers, batch_size)
    negative_prompt_embeds = repeat_to_batch_size(negative_prompt_embeds, batch_size)
    negative_prompt_embeds_mask = repeat_to_batch_size(negative_prompt_embeds_mask, batch_size)
    negative_prompt_poolers = repeat_to_batch_size(negative_prompt_poolers, batch_size)
    concat_latent = repeat_to_batch_size(concat_latent, batch_size)

    sampler_kwargs = dict(
        dtype=dtype,
        cfg_scale=real_guidance_scale,
        cfg_rescale=guidance_rescale,
        concat_latent=concat_latent,
        positive=dict(
            pooled_projections=prompt_poolers,
            encoder_hidden_states=prompt_embeds,
            encoder_attention_mask=prompt_embeds_mask,
            guidance=distilled_guidance,
            **kwargs,
        ),
        negative=dict(
            pooled_projections=negative_prompt_poolers,
            encoder_hidden_states=negative_prompt_embeds,
            encoder_attention_mask=negative_prompt_embeds_mask,
            guidance=distilled_guidance,
            **(kwargs if negative_kwargs is None else {**kwargs, **negative_kwargs}),
        ),
    )

    if sampler == "unipc":
        results = sample_unipc(k_model, latents, sigmas, extra_args=sampler_kwargs, disable=False, callback=callback)
    else:
        raise NotImplementedError(f"Sampler {sampler} is not supported.")

    return results
|
src/musubi_tuner/frame_pack/uni_pc_fm.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Better Flow Matching UniPC by Lvmin Zhang
|
| 2 |
+
# (c) 2025
|
| 3 |
+
# CC BY-SA 4.0
|
| 4 |
+
# Attribution-ShareAlike 4.0 International Licence
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from tqdm.auto import trange
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def expand_dims(v, dims):
    """Append ``dims - 1`` trailing singleton axes to tensor *v*."""
    extra_axes = (None,) * (dims - 1)
    return v[(Ellipsis,) + extra_axes]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class FlowMatchUniPC:
    """Flow-matching UniPC multistep solver (variants "bh1"/"bh2").

    `model` is called as model(x, t, **extra_args) and is expected to return a
    velocity/denoised-style prediction consumed by the UniPC update below.
    """

    def __init__(self, model, extra_args, variant="bh1"):
        self.model = model
        self.variant = variant
        self.extra_args = extra_args

    def model_fn(self, x, t):
        """Evaluate the wrapped model at state x and time t with the fixed extra args."""
        return self.model(x, t, **self.extra_args)

    def update_fn(self, x, model_prev_list, t_prev_list, t, order):
        """One predictor-corrector UniPC step from t_prev_list[-1] to t.

        Returns (x_t, model_t): the updated state and the model output at t.
        NOTE(review): numerically delicate — statement order and the in-place
        history handling are preserved from the original FramePack code.
        """
        assert order <= len(model_prev_list)
        dims = x.dim()

        t_prev_0 = t_prev_list[-1]
        # lambda = -log(t): log-SNR-like variable for the flow-matching schedule.
        lambda_prev_0 = -torch.log(t_prev_0)
        lambda_t = -torch.log(t)
        model_prev_0 = model_prev_list[-1]

        h = lambda_t - lambda_prev_0

        # Build normalized step ratios (rks) and scaled model differences (D1s)
        # from the stored history, most recent first.
        rks = []
        D1s = []
        for i in range(1, order):
            t_prev_i = t_prev_list[-(i + 1)]
            model_prev_i = model_prev_list[-(i + 1)]
            lambda_prev_i = -torch.log(t_prev_i)
            rk = ((lambda_prev_i - lambda_prev_0) / h)[0]
            rks.append(rk)
            D1s.append((model_prev_i - model_prev_0) / rk)

        rks.append(1.0)
        rks = torch.tensor(rks, device=x.device)

        R = []
        b = []

        hh = -h[0]
        h_phi_1 = torch.expm1(hh)  # phi_1(h) = e^h - 1
        h_phi_k = h_phi_1 / hh - 1

        factorial_i = 1

        # B(h) normalization differs between the two UniPC variants.
        if self.variant == "bh1":
            B_h = hh
        elif self.variant == "bh2":
            B_h = torch.expm1(hh)
        else:
            raise NotImplementedError("Bad variant!")

        # Assemble the Vandermonde-like system R * rho = b for the UniPC coefficients.
        for i in range(1, order + 1):
            R.append(torch.pow(rks, i - 1))
            b.append(h_phi_k * factorial_i / B_h)
            factorial_i *= i + 1
            h_phi_k = h_phi_k / hh - 1 / factorial_i

        R = torch.stack(R)
        b = torch.tensor(b, device=x.device)

        use_predictor = len(D1s) > 0

        if use_predictor:
            D1s = torch.stack(D1s, dim=1)
            if order == 2:
                # Closed-form coefficient for the 2nd-order predictor.
                rhos_p = torch.tensor([0.5], device=b.device)
            else:
                rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
        else:
            D1s = None
            rhos_p = None

        if order == 1:
            rhos_c = torch.tensor([0.5], device=b.device)
        else:
            rhos_c = torch.linalg.solve(R, b)

        # Base update (Euler-like term) shared by predictor and corrector.
        x_t_ = expand_dims(t / t_prev_0, dims) * x - expand_dims(h_phi_1, dims) * model_prev_0

        # Predictor: extrapolate using history differences.
        if use_predictor:
            pred_res = torch.tensordot(D1s, rhos_p, dims=([1], [0]))
        else:
            pred_res = 0

        x_t = x_t_ - expand_dims(B_h, dims) * pred_res
        model_t = self.model_fn(x_t, t)

        # Corrector: refine with the fresh model evaluation at t.
        if D1s is not None:
            corr_res = torch.tensordot(D1s, rhos_c[:-1], dims=([1], [0]))
        else:
            corr_res = 0

        D1_t = model_t - model_prev_0
        x_t = x_t_ - expand_dims(B_h, dims) * (corr_res + rhos_c[-1] * D1_t)

        return x_t, model_t

    def sample(self, x, sigmas, callback=None, disable_pbar=False):
        """Integrate x through the descending sigma schedule; return the last model output.

        The effective order warms up over the first steps (i < order) and the
        history is truncated to the last `order` entries each iteration.
        """
        order = min(3, len(sigmas) - 2)
        model_prev_list, t_prev_list = [], []
        for i in trange(len(sigmas) - 1, disable=disable_pbar):
            vec_t = sigmas[i].expand(x.shape[0])

            with torch.no_grad():
                if i == 0:
                    # Seed the history with the model output at the initial sigma.
                    model_prev_list = [self.model_fn(x, vec_t)]
                    t_prev_list = [vec_t]
                elif i < order:
                    # Warm-up: use as much history as is available so far.
                    init_order = i
                    x, model_x = self.update_fn(x, model_prev_list, t_prev_list, vec_t, init_order)
                    model_prev_list.append(model_x)
                    t_prev_list.append(vec_t)
                else:
                    x, model_x = self.update_fn(x, model_prev_list, t_prev_list, vec_t, order)
                    model_prev_list.append(model_x)
                    t_prev_list.append(vec_t)

                model_prev_list = model_prev_list[-order:]
                t_prev_list = t_prev_list[-order:]

            if callback is not None:
                callback({"x": x, "i": i, "denoised": model_prev_list[-1]})

        return model_prev_list[-1]
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def sample_unipc(model, noise, sigmas, extra_args=None, callback=None, disable=False, variant="bh1"):
    """Convenience wrapper: run the FlowMatchUniPC solver and return the final denoised output."""
    assert variant in ["bh1", "bh2"]
    solver = FlowMatchUniPC(model, extra_args=extra_args, variant=variant)
    return solver.sample(noise, sigmas=sigmas, callback=callback, disable_pbar=disable)
|
src/musubi_tuner/frame_pack/utils.py
ADDED
|
@@ -0,0 +1,616 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import cv2
|
| 3 |
+
import json
|
| 4 |
+
import random
|
| 5 |
+
import glob
|
| 6 |
+
import torch
|
| 7 |
+
import einops
|
| 8 |
+
import numpy as np
|
| 9 |
+
import datetime
|
| 10 |
+
import torchvision
|
| 11 |
+
|
| 12 |
+
from PIL import Image
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def min_resize(x, m):
    """Resize image *x* so its shorter side equals *m*, preserving aspect ratio."""
    h, w = x.shape[0], x.shape[1]
    if h < w:
        s0, s1 = m, int(float(m) / float(h) * float(w))
    else:
        s0, s1 = int(float(m) / float(w) * float(h)), m
    # Area interpolation when shrinking, Lanczos when enlarging.
    shrinking = max(s1, s0) < max(h, w)
    interp = cv2.INTER_AREA if shrinking else cv2.INTER_LANCZOS4
    return cv2.resize(x, (s1, s0), interpolation=interp)


def d_resize(x, y):
    """Resize *x* to the spatial size of the (H, W, C) array *y*."""
    H, W, C = y.shape
    shrinking = min(H, W) < min(x.shape[0], x.shape[1])
    interp = cv2.INTER_AREA if shrinking else cv2.INTER_LANCZOS4
    return cv2.resize(x, (W, H), interpolation=interp)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def resize_and_center_crop(image, target_width, target_height):
    """Scale an HxWxC array to cover the target size, then center-crop to it."""
    if target_height == image.shape[0] and target_width == image.shape[1]:
        return image

    pil_image = Image.fromarray(image)
    src_w, src_h = pil_image.size
    scale = max(target_width / src_w, target_height / src_h)
    new_w = int(round(src_w * scale))
    new_h = int(round(src_h * scale))
    resized = pil_image.resize((new_w, new_h), Image.LANCZOS)
    # Centered crop box (PIL accepts float coordinates here).
    box = (
        (new_w - target_width) / 2,
        (new_h - target_height) / 2,
        (new_w + target_width) / 2,
        (new_h + target_height) / 2,
    )
    return np.array(resized.crop(box))
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def resize_and_center_crop_pytorch(image, target_width, target_height):
    """Torch variant of resize_and_center_crop for a (B, C, H, W) tensor."""
    B, C, H, W = image.shape

    if H == target_height and W == target_width:
        return image

    scale = max(target_width / W, target_height / H)
    new_h = int(round(H * scale))
    new_w = int(round(W * scale))

    resized = torch.nn.functional.interpolate(image, size=(new_h, new_w), mode="bilinear", align_corners=False)

    y0 = (new_h - target_height) // 2
    x0 = (new_w - target_width) // 2
    return resized[:, :, y0 : y0 + target_height, x0 : x0 + target_width]
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def resize_without_crop(image, target_width, target_height):
    """Resize an HxWxC array to exactly the target size (aspect ratio may change)."""
    if target_height == image.shape[0] and target_width == image.shape[1]:
        return image

    resized = Image.fromarray(image).resize((target_width, target_height), Image.LANCZOS)
    return np.array(resized)


def just_crop(image, w, h):
    """Center-crop *image* to the aspect ratio w:h without any resizing."""
    if h == image.shape[0] and w == image.shape[1]:
        return image

    src_h, src_w = image.shape[:2]
    scale = min(src_h / h, src_w / w)
    crop_w = int(round(w * scale))
    crop_h = int(round(h * scale))
    x0 = (src_w - crop_w) // 2
    y0 = (src_h - crop_h) // 2
    return image[y0 : y0 + crop_h, x0 : x0 + crop_w]
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def write_to_json(data, file_path):
    """Atomically write *data* as JSON: dump to a temp file, then os.replace into place."""
    tmp_path = file_path + ".tmp"
    with open(tmp_path, "wt", encoding="utf-8") as fp:
        json.dump(data, fp, indent=4)
    os.replace(tmp_path, file_path)


def read_from_json(file_path):
    """Load and return the JSON document stored at *file_path*."""
    with open(file_path, "rt", encoding="utf-8") as fp:
        return json.load(fp)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def get_active_parameters(m):
    """Return the trainable (requires_grad) named parameters of module *m* as a dict."""
    return {name: p for name, p in m.named_parameters() if p.requires_grad}


def cast_training_params(m, dtype=torch.float32):
    """Cast every trainable parameter of *m* to *dtype* in place; return them by name."""
    casted = {}
    for name, p in m.named_parameters():
        if not p.requires_grad:
            continue
        p.data = p.to(dtype)
        casted[name] = p
    return casted
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def separate_lora_AB(parameters, B_patterns=None):
    """Split a name->param dict into (non-B params, LoRA-B params) by substring patterns."""
    if B_patterns is None:
        B_patterns = [".lora_B.", "__zero__"]

    parameters_normal = {}
    parameters_B = {}
    for name, value in parameters.items():
        bucket = parameters_B if any(pat in name for pat in B_patterns) else parameters_normal
        bucket[name] = value

    return parameters_normal, parameters_B
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def set_attr_recursive(obj, attr, value):
    """Set a dotted attribute path (e.g. "a.b.c") on *obj* to *value*."""
    *parents, leaf = attr.split(".")
    for name in parents:
        obj = getattr(obj, name)
    setattr(obj, leaf, value)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def print_tensor_list_size(tensors):
    """Print count, total byte size (MB) and total element count (billions) of *tensors*."""
    if isinstance(tensors, dict):
        tensors = tensors.values()

    total_bytes = 0
    total_elems = 0
    for t in tensors:
        n = t.nelement()
        total_bytes += n * t.element_size()
        total_elems += n

    print(f"Total number of tensors: {len(tensors)}")
    print(f"Total size of tensors: {total_bytes / (1024**2):.2f} MB")
    print(f"Total number of parameters: {total_elems / 1e9:.3f} billion")
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
@torch.no_grad()
def batch_mixture(a, b=None, probability_a=0.5, mask_a=None):
    """Per-sample select entries from *a* (where the mask is True) else from *b* (default zeros)."""
    batch_size = a.size(0)

    if b is None:
        b = torch.zeros_like(a)

    if mask_a is None:
        # Draw a Bernoulli mask: True picks a, False picks b.
        mask_a = torch.rand(batch_size) < probability_a

    mask = mask_a.to(a.device).reshape((batch_size,) + (1,) * (a.dim() - 1))
    return torch.where(mask, a, b)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
@torch.no_grad()
def zero_module(module):
    """Zero every parameter of *module* in place and return it."""
    for param in module.parameters():
        param.detach().zero_()
    return module


@torch.no_grad()
def supress_lower_channels(m, k, alpha=0.01):
    """Scale the first *k* input channels of m.weight by *alpha* (in place); return m."""
    weight = m.weight.data.clone()

    assert int(weight.shape[1]) >= k

    weight[:, :k] = weight[:, :k] * alpha
    m.weight.data = weight.contiguous().clone()
    return m
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def freeze_module(m):
    """Disable grads on *m* and wrap its forward in no_grad; return *m*."""
    if not hasattr(m, "_forward_inside_frozen_module"):
        # Stash the unwrapped forward the first time so it can be recovered later.
        m._forward_inside_frozen_module = m.forward
    m.requires_grad_(False)
    m.forward = torch.no_grad()(m.forward)
    return m
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def get_latest_safetensors(folder_path):
    """Return the absolute real path of the most recently modified .safetensors file in a folder.

    Raises ValueError when the folder holds no .safetensors files.
    """
    candidates = glob.glob(os.path.join(folder_path, "*.safetensors"))

    if not candidates:
        raise ValueError("No file to resume!")

    newest = max(candidates, key=os.path.getmtime)
    return os.path.abspath(os.path.realpath(newest))
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def generate_random_prompt_from_tags(tags_str, min_length=3, max_length=32):
    """Sample a random subset of ", "-separated tags and join them back into a prompt."""
    tags = tags_str.split(", ")
    count = min(random.randint(min_length, max_length), len(tags))
    chosen = random.sample(tags, k=count)
    return ", ".join(chosen)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def interpolate_numbers(a, b, n, round_to_int=False, gamma=1.0):
    """Return n values from a to b spaced as linspace(0, 1, n) ** gamma."""
    weights = np.linspace(0, 1, n) ** gamma
    values = a + (b - a) * weights
    if round_to_int:
        values = np.round(values).astype(int)
    return values.tolist()


def uniform_random_by_intervals(inclusive, exclusive, n, round_to_int=False):
    """Draw one uniform sample from each of n equal sub-intervals of [inclusive, exclusive)."""
    edges = np.linspace(0, 1, n + 1)
    samples = np.random.uniform(edges[:-1], edges[1:])
    values = inclusive + (exclusive - inclusive) * samples
    if round_to_int:
        values = np.round(values).astype(int)
    return values.tolist()
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def soft_append_bcthw(history, current, overlap=0):
    """Concatenate BCTHW clips along time (dim 2), cross-fading *overlap* frames at the seam."""
    if overlap <= 0:
        return torch.cat([history, current], dim=2)

    assert history.shape[2] >= overlap, f"History length ({history.shape[2]}) must be >= overlap ({overlap})"
    assert current.shape[2] >= overlap, f"Current length ({current.shape[2]}) must be >= overlap ({overlap})"

    # Linear fade: weight 1 -> 0 on the history tail, 0 -> 1 on the current head.
    fade = torch.linspace(1, 0, overlap, dtype=history.dtype, device=history.device).view(1, 1, -1, 1, 1)
    blend = fade * history[:, :, -overlap:] + (1 - fade) * current[:, :, :overlap]
    merged = torch.cat([history[:, :, :-overlap], blend, current[:, :, overlap:]], dim=2)

    return merged.to(history)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def save_bcthw_as_mp4(x, output_filename, fps=10):
    """Save a (B, C, T, H, W) tensor in [-1, 1] as a tiled lossless MP4.

    Batch items are tiled into a grid (column count = largest of 6..2 that
    divides B, else B), mapped to uint8, and written with libx264 at crf=0.
    Returns the rearranged uint8 tensor of shape (T, grid_H, grid_W, C).
    NOTE(review): the rearranged tensor is also saved next to the video as a
    .pt file — confirm this (potentially large) side effect is intended.
    """
    b, c, t, h, w = x.shape

    # Choose how many batch items sit side by side per grid row.
    per_row = b
    for p in [6, 5, 4, 3, 2]:
        if b % p == 0:
            per_row = p
            break

    os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
    # Map [-1, 1] -> [0, 255] uint8.
    x = torch.clamp(x.float(), -1.0, 1.0) * 127.5 + 127.5
    x = x.detach().cpu().to(torch.uint8)
    x = einops.rearrange(x, "(m n) c t h w -> t (m h) (n w) c", n=per_row)
    torchvision.io.write_video(output_filename, x, fps=fps, video_codec="libx264", options={"crf": "0"})

    # write tensor as .pt file
    torch.save(x, output_filename.replace(".mp4", ".pt"))

    return x
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def save_bcthw_as_png(x, output_filename):
    """Tile a [-1, 1] BCTHW tensor into one PNG (batch stacked vertically, time horizontally)."""
    os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
    img = torch.clamp(x.float(), -1.0, 1.0) * 127.5 + 127.5
    img = img.detach().cpu().to(torch.uint8)
    img = einops.rearrange(img, "b c t h w -> c (b h) (t w)")
    torchvision.io.write_png(img, output_filename)
    return output_filename


def save_bchw_as_png(x, output_filename):
    """Tile a [-1, 1] BCHW tensor into one PNG (batch concatenated horizontally)."""
    os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
    img = torch.clamp(x.float(), -1.0, 1.0) * 127.5 + 127.5
    img = img.detach().cpu().to(torch.uint8)
    img = einops.rearrange(img, "b c h w -> c h (b w)")
    torchvision.io.write_png(img, output_filename)
    return output_filename
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def add_tensors_with_padding(tensor1, tensor2):
    """Add two tensors of equal rank, zero-padding each up to the elementwise max shape.

    Fix: the padded buffers were previously created with a bare torch.zeros(...),
    so they always came out float32 on the CPU — silently changing the dtype and
    device of integer or GPU inputs. The buffers now use the inputs' promoted
    dtype and tensor1's device.
    """
    if tensor1.shape == tensor2.shape:
        return tensor1 + tensor2

    new_shape = tuple(max(s1, s2) for s1, s2 in zip(tensor1.shape, tensor2.shape))

    dtype = torch.promote_types(tensor1.dtype, tensor2.dtype)
    padded_tensor1 = torch.zeros(new_shape, dtype=dtype, device=tensor1.device)
    padded_tensor2 = torch.zeros(new_shape, dtype=dtype, device=tensor1.device)

    padded_tensor1[tuple(slice(0, s) for s in tensor1.shape)] = tensor1
    padded_tensor2[tuple(slice(0, s) for s in tensor2.shape)] = tensor2

    return padded_tensor1 + padded_tensor2
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def print_free_mem():
    """Empty the CUDA cache and print free/total memory of device 0 in MB."""
    torch.cuda.empty_cache()
    free_bytes, total_bytes = torch.cuda.mem_get_info(0)
    print(f"Free memory: {free_bytes / (1024**2):.2f} MB")
    print(f"Total memory: {total_bytes / (1024**2):.2f} MB")
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def print_gpu_parameters(device, state_dict, log_count=1):
    """Print a short summary (first three values of up to *log_count* entries) of a state dict."""
    summary = {"device": device, "keys_count": len(state_dict)}

    logged_params = {}
    for index, (key, tensor) in enumerate(state_dict.items()):
        if index >= log_count:
            break
        logged_params[key] = tensor.flatten()[:3].tolist()

    summary["params"] = logged_params
    print(str(summary))
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def visualize_txt_as_img(width, height, text, font_path="font/DejaVuSans.ttf", size=18):
    """Render *text* word-wrapped in black on a white RGB canvas; return it as an HxWx3 array.

    Requires the TrueType font at *font_path* to exist relative to the CWD —
    TODO(review): confirm the font ships with the project.
    """
    from PIL import Image, ImageDraw, ImageFont

    txt = Image.new("RGB", (width, height), color="white")
    draw = ImageDraw.Draw(txt)
    font = ImageFont.truetype(font_path, size=size)

    if text == "":
        return np.array(txt)

    # Split text into lines that fit within the image width
    lines = []
    words = text.split()
    current_line = words[0]

    for word in words[1:]:
        line_with_word = f"{current_line} {word}"
        # textbbox(...)[2] is the rendered width of the candidate line.
        if draw.textbbox((0, 0), line_with_word, font=font)[2] <= width:
            current_line = line_with_word
        else:
            lines.append(current_line)
            current_line = word

    lines.append(current_line)

    # Draw the text line by line
    y = 0
    # Line height is taken from a single reference glyph.
    line_height = draw.textbbox((0, 0), "A", font=font)[3]

    for line in lines:
        if y + line_height > height:
            break  # stop drawing if the next line will be outside the image
        draw.text((0, y), line, fill="black", font=font)
        y += line_height

    return np.array(txt)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def blue_mark(x):
    """Sharpen the blue channel of a [-1, 1] HxWxC image via an unsharp mask (9x9 blur)."""
    x = x.copy()
    channel = x[:, :, 2]
    blurred = cv2.blur(channel, (9, 9))
    x[:, :, 2] = ((channel - blurred) * 16.0 + blurred).clip(-1, 1)
    return x
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
def green_mark(x):
    """Force the red and blue channels of a [-1, 1] HxWxC image to -1, leaving green intact."""
    x = x.copy()
    x[:, :, 0] = -1
    x[:, :, 2] = -1
    return x


def frame_mark(x):
    """Paint dark bars on the top/bottom 64 rows and bright bars on the outer 8 columns."""
    x = x.copy()
    # Rows first, then columns: the column bars overwrite the row bars at the corners.
    x[:64] = -1
    x[-64:] = -1
    x[:, :8] = 1
    x[:, -8:] = 1
    return x
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
@torch.inference_mode()
def pytorch2numpy(imgs):
    """Convert an iterable of CHW [-1, 1] tensors to HWC uint8 numpy arrays."""
    results = []
    for img in imgs:
        arr = img.movedim(0, -1) * 127.5 + 127.5
        arr = arr.detach().float().cpu().numpy().clip(0, 255).astype(np.uint8)
        results.append(arr)
    return results


@torch.inference_mode()
def numpy2pytorch(imgs):
    """Stack HWC uint8 arrays into a BCHW float tensor scaled to [-1, 1]."""
    stacked = torch.from_numpy(np.stack(imgs, axis=0)).float()
    return (stacked / 127.5 - 1.0).movedim(-1, 1)
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
@torch.no_grad()
def duplicate_prefix_to_suffix(x, count, zero_out=False):
    """Append the first *count* rows of x (or zeros of that shape) to its end along dim 0."""
    suffix = torch.zeros_like(x[:count]) if zero_out else x[:count]
    return torch.cat([x, suffix], dim=0)


def weighted_mse(a, b, weight):
    """Mean of weight * (a - b)^2, computed in float32."""
    diff = a.float() - b.float()
    return torch.mean(weight.float() * diff ** 2)


def clamped_linear_interpolation(x, x_min, y_min, x_max, y_max, sigma=1.0):
    """Map x from [x_min, x_max] to [y_min, y_max], clamping, with optional gamma *sigma*."""
    frac = (x - x_min) / (x_max - x_min)
    frac = min(max(frac, 0.0), 1.0) ** sigma
    return y_min + frac * (y_max - y_min)
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
def expand_to_dims(x, target_dims):
|
| 448 |
+
return x.view(*x.shape, *([1] * max(0, target_dims - x.dim())))
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def repeat_to_batch_size(tensor: torch.Tensor, batch_size: int):
    """Tile *tensor* along dim 0 until its first dimension equals *batch_size*.

    Returns None for None input and the tensor itself when the size already
    matches. Raises ValueError when *batch_size* is not a multiple of the
    current first dimension.
    """
    if tensor is None:
        return None

    current = tensor.shape[0]
    if current == batch_size:
        return tensor

    if batch_size % current != 0:
        raise ValueError(f"Cannot evenly repeat first dim {current} to match batch_size {batch_size}.")

    return tensor.repeat(batch_size // current, *([1] * (tensor.dim() - 1)))
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def dim5(x):
    """Shorthand: pad *x* with trailing singleton axes up to 5 dimensions."""
    return expand_to_dims(x, 5)
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
def dim4(x):
    """Shorthand: pad *x* with trailing singleton axes up to 4 dimensions."""
    return expand_to_dims(x, 4)
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
def dim3(x):
    """Shorthand: pad *x* with trailing singleton axes up to 3 dimensions."""
    return expand_to_dims(x, 3)
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
def crop_or_pad_yield_mask(x, length):
    """Crop or zero-pad a (B, F, C) tensor along dim 1 to exactly *length*.

    Returns (tensor, mask) where mask is a (B, length) bool tensor that is
    True on positions holding real (non-padded) data.
    """
    B, F, C = x.shape

    if F >= length:
        # Long enough: crop and report every kept position as valid.
        return x[:, :length, :], torch.ones((B, length), dtype=torch.bool, device=x.device)

    padded = torch.zeros((B, length, C), dtype=x.dtype, device=x.device)
    padded[:, :F, :] = x
    mask = torch.zeros((B, length), dtype=torch.bool, device=x.device)
    mask[:, :F] = True
    return padded, mask
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def extend_dim(x, dim, minimal_length, zero_pad=False):
    """Pad *x* along *dim* so it is at least *minimal_length* long.

    Padding is zeros when zero_pad is True, otherwise the last slice along
    *dim* is repeated. Returns *x* unchanged when already long enough.
    """
    current = int(x.shape[dim])
    if current >= minimal_length:
        return x

    deficit = minimal_length - current
    if zero_pad:
        pad_shape = list(x.shape)
        pad_shape[dim] = deficit
        pad = torch.zeros(pad_shape, dtype=x.dtype, device=x.device)
    else:
        # Take the trailing slice along `dim` and repeat it to fill the gap.
        selector = [slice(None)] * x.dim()
        selector[dim] = slice(-1, None)
        pad = x[tuple(selector)].repeat_interleave(deficit, dim=dim)

    return torch.cat([x, pad], dim=dim)
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
def lazy_positional_encoding(t, repeats=None):
    """Sinusoidal 256-dim embedding of timestep(s) *t*.

    Accepts a scalar or a list; optionally expands the result along a middle
    axis *repeats* times. Import is deferred so diffusers is only required
    when this helper is actually used.
    """
    from diffusers.models.embeddings import get_timestep_embedding

    steps = t if isinstance(t, list) else [t]
    emb = get_timestep_embedding(
        timesteps=torch.tensor(steps), embedding_dim=256, flip_sin_to_cos=True, downscale_freq_shift=0.0, scale=1.0
    )

    if repeats is None:
        return emb

    return emb[:, None, :].expand(-1, repeats, -1)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
def state_dict_offset_merge(A, B, C=None):
    """Element-wise merge of state dicts: A + B, or A + B - C when C is given.

    B (and C) values are moved to A's dtype/device before the arithmetic.
    Keys are taken from A; B (and C) must contain at least those keys.
    """
    merged = {}
    for key, a_val in A.items():
        b_val = B[key].to(a_val)
        if C is None:
            merged[key] = a_val + b_val
        else:
            merged[key] = a_val + b_val - C[key].to(a_val)
    return merged
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
def state_dict_weighted_merge(state_dicts, weights):
    """Weighted average of several state dicts, key by key.

    Weights are normalized to sum to 1. Raises ValueError when the list
    lengths differ or the weights sum to zero; returns {} for empty input.
    Keys are taken from the first dict; later dicts must contain them too.
    """
    if len(state_dicts) != len(weights):
        raise ValueError("Number of state dictionaries must match number of weights")

    if not state_dicts:
        return {}

    total = sum(weights)
    if total == 0:
        raise ValueError("Sum of weights cannot be zero")

    normalized = [w / total for w in weights]

    merged = {}
    for key in state_dicts[0].keys():
        # Accumulate in the dtype/device of the first dict's value.
        acc = state_dicts[0][key] * normalized[0]
        for sd, w in zip(state_dicts[1:], normalized[1:]):
            acc += sd[key].to(acc) * w
        merged[key] = acc
    return merged
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
def group_files_by_folder(all_files):
    """Group file paths by the name of their immediate parent folder.

    Returns a list of lists, one per distinct folder name, in first-seen
    order (dicts preserve insertion order).
    """
    groups = {}
    for path in all_files:
        parent = os.path.basename(os.path.dirname(path))
        groups.setdefault(parent, []).append(path)
    return list(groups.values())
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
def generate_timestamp():
    """Return a mostly-unique id string: yymmdd_HHMMSS_mmm_<random 0-9999>."""
    now = datetime.datetime.now()
    stamp = now.strftime("%y%m%d_%H%M%S")
    millis = int(now.microsecond / 1000)
    return f"{stamp}_{millis:03d}_{random.randint(0, 9999)}"
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
def write_PIL_image_with_png_info(image, metadata, path):
    """Save *image* as a PNG at *path*, embedding *metadata* as tEXt chunks.

    Returns the image for convenient chaining. Import is deferred so PIL is
    only required when this helper is actually used.
    """
    from PIL.PngImagePlugin import PngInfo

    info = PngInfo()
    for key, value in metadata.items():
        info.add_text(key, value)

    image.save(path, "PNG", pnginfo=info)
    return image
|
| 604 |
+
|
| 605 |
+
|
| 606 |
+
def torch_safe_save(content, path):
    """Atomically save *content* with torch.save (write temp file, then rename).

    os.replace is atomic on the same filesystem, so a crash mid-save never
    leaves a truncated file at *path*. Returns *path*.
    """
    tmp_path = path + "_tmp"
    torch.save(content, tmp_path)
    os.replace(tmp_path, path)
    return path
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
def move_optimizer_to_device(optimizer, device):
    """Move every tensor in the optimizer's state (momentum buffers etc.) to *device*.

    Mutates the optimizer in place; non-tensor state entries are left alone.
    """
    for state in optimizer.state.values():
        for key, value in state.items():
            if isinstance(value, torch.Tensor):
                state[key] = value.to(device)
|
src/musubi_tuner/frame_pack/wrapper.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def append_dims(x, target_dims):
    """Append trailing singleton axes so *x* has exactly *target_dims* dims."""
    missing = target_dims - x.ndim
    return x[(...,) + (None,) * missing]
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0):
    """Blend the CFG output toward the text prediction's per-sample std.

    Counteracts the over-saturation classifier-free guidance can introduce
    by rescaling noise_cfg so its std matches noise_pred_text's, then mixing
    with the original by *guidance_rescale*. 0 disables rescaling entirely.
    """
    if guidance_rescale == 0:
        return noise_cfg

    # Per-sample std over all non-batch dimensions.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * rescaled + (1.0 - guidance_rescale) * noise_cfg
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def fm_wrapper(transformer, t_scale=1000.0):
    """Wrap a flow-matching transformer as a k-diffusion-style denoiser.

    The returned k_model(x, sigma, **extra_args) runs the transformer with
    classifier-free guidance and converts the velocity prediction into an
    x0 estimate. Required keys in extra_args: "dtype", "cfg_scale",
    "cfg_rescale", "concat_latent", "positive", "negative".
    """
    def k_model(x, sigma, **extra_args):
        dtype = extra_args["dtype"]
        cfg_scale = extra_args["cfg_scale"]
        cfg_rescale = extra_args["cfg_rescale"]
        concat_latent = extra_args["concat_latent"]

        # Remember the caller's dtype so the result can be converted back.
        original_dtype = x.dtype
        sigma = sigma.float()

        x = x.to(dtype)
        timestep = sigma * t_scale  # sigma is already float32

        # Optional conditioning latent is concatenated on the channel dim.
        if concat_latent is None:
            hidden_states = x
        else:
            hidden_states = torch.cat([x, concat_latent.to(x)], dim=1)

        pred_positive = transformer(hidden_states=hidden_states, timestep=timestep, return_dict=False, **extra_args["positive"])[
            0
        ].float()

        # cfg_scale == 1.0 makes the negative term cancel out, so skip the
        # second (expensive) transformer call and use zeros instead.
        if cfg_scale == 1.0:
            pred_negative = torch.zeros_like(pred_positive)
        else:
            pred_negative = transformer(
                hidden_states=hidden_states, timestep=timestep, return_dict=False, **extra_args["negative"]
            )[0].float()

        # Classifier-free guidance, then optional std rescaling.
        pred_cfg = pred_negative + cfg_scale * (pred_positive - pred_negative)
        pred = rescale_noise_cfg(pred_cfg, pred_positive, guidance_rescale=cfg_rescale)

        # Flow matching: x0 = x - sigma * velocity (sigma broadcast to x's rank).
        x0 = x.float() - pred.float() * append_dims(sigma, x.ndim)

        return x0.to(dtype=original_dtype)

    return k_model
|
src/musubi_tuner/gui/config_manager.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class ConfigManager:
    """Static registry of per-model training presets for the GUI.

    For each supported model architecture this holds the default training
    resolution, the expected model sub-paths inside a ComfyUI `models`
    directory (VAE / text encoders / DiT), baseline training
    hyperparameters, and VRAM-tier overrides.
    """

    def __init__(self):
        # Keyed by the model architecture name shown in the GUI dropdown.
        # "vram_settings" keys are VRAM sizes in GB as strings (">32" = larger).
        self.models = {
            "Z-Image-Turbo": {
                "resolution": (1024, 1024),
                "vae_subpath": ["vae", "ae.safetensors"],
                "te1_subpath": ["text_encoders", "qwen_3_4b.safetensors"],
                "te2_subpath": None,
                "dit_subpath": ["diffusion_models", "z_image_de_turbo_v1_bf16.safetensors"],
                "training_base": {
                    "learning_rate": 1e-3,
                    "default_num_steps": 500,
                    "save_every_n_epochs": 1,
                    "discrete_flow_shift": 2.0,
                    "mixed_precision": "bf16",
                    "gradient_checkpointing": True,
                    "fp8_scaled": True,
                    "fp8_llm": True,
                },
                "vram_settings": {
                    "12": {"batch_size": 1, "block_swap": 0, "fp8_scaled": True, "fp8_llm": True},
                    "16": {"batch_size": 2, "block_swap": 0, "fp8_scaled": True, "fp8_llm": False},
                    "24": {"batch_size": 1, "block_swap": 0, "fp8_scaled": False, "fp8_llm": False},
                    "32": {"batch_size": 2, "block_swap": 0, "fp8_scaled": False, "fp8_llm": False},
                    ">32": {"batch_size": 8, "block_swap": 0, "fp8_scaled": False, "fp8_llm": False},
                },
            },
            "Qwen-Image": {
                "resolution": (1328, 1328),
                "vae_subpath": ["vae", "qwen_image_vae.safetensors"],
                "te1_subpath": ["text_encoders", "qwen_2.5_vl_7b.safetensors"],
                "te2_subpath": None,
                "dit_subpath": ["diffusion_models", "qwen_image_bf16.safetensors"],
                "training_base": {
                    "learning_rate": 1e-3,
                    "default_num_steps": 1000,
                    "save_every_n_epochs": 1,
                    "discrete_flow_shift": 2.2,
                    "mixed_precision": "bf16",
                    "gradient_checkpointing": True,
                    "fp8_scaled": True,
                    "fp8_llm": True,
                },
                "vram_settings": {
                    "12": {"batch_size": 1, "block_swap": 46, "fp8_scaled": True, "fp8_llm": True},
                    "16": {"batch_size": 1, "block_swap": 34, "fp8_scaled": True, "fp8_llm": True},
                    "24": {"batch_size": 1, "block_swap": 10, "fp8_scaled": True, "fp8_llm": False},
                    "32": {"batch_size": 2, "block_swap": 0, "fp8_scaled": True, "fp8_llm": False},
                    ">32": {"batch_size": 2, "block_swap": 0, "fp8_scaled": False, "fp8_llm": False},
                },
            },
        }

    def get_resolution(self, model_name):
        """Return the (width, height) training resolution for *model_name*,
        falling back to (1024, 1024) for unknown models."""
        return self.models.get(model_name, {}).get("resolution", (1024, 1024))

    def get_batch_size(self, model_name, vram_size):
        """Return the recommended dataset batch size for the VRAM tier
        (defaults to 1 when the model or tier is unknown)."""
        # Default to "16" if vram_size is not provided or invalid, as a safe middle ground
        if not vram_size:
            vram_size = "16"

        vram_conf = self.models.get(model_name, {}).get("vram_settings", {}).get(vram_size, {})
        return vram_conf.get("batch_size", 1)

    def get_preprocessing_paths(self, model_name, comfy_models_dir):
        """Return (vae_path, te1_path, te2_path) joined under
        *comfy_models_dir*; missing entries become empty strings."""
        conf = self.models.get(model_name, {})
        vae = conf.get("vae_subpath")
        te1 = conf.get("te1_subpath")
        te2 = conf.get("te2_subpath")

        def join_path(subpath):
            # Empty string when the subpath is unset or no base dir was given.
            if subpath and comfy_models_dir:
                return os.path.join(comfy_models_dir, *subpath)
            return ""

        return join_path(vae), join_path(te1), join_path(te2)

    def get_training_defaults(self, model_name, vram_size, comfy_models_dir):
        """Return the model's base training parameters merged with the
        VRAM-tier overrides, plus a resolved "dit_path"."""
        conf = self.models.get(model_name, {})
        base = conf.get("training_base", {}).copy()

        if not vram_size:
            vram_size = "16"

        # Merge VRAM settings
        vram_conf = conf.get("vram_settings", {}).get(vram_size, {})
        # Only take relevant keys for training params (block_swap), ignore batch_size as it's for dataset
        if "block_swap" in vram_conf:
            base["block_swap"] = vram_conf["block_swap"]
        if "fp8_llm" in vram_conf:
            base["fp8_llm"] = vram_conf["fp8_llm"]
        if "fp8_scaled" in vram_conf:
            base["fp8_scaled"] = vram_conf["fp8_scaled"]

        # DiT path
        dit_sub = conf.get("dit_subpath")
        if dit_sub and comfy_models_dir:
            base["dit_path"] = os.path.join(comfy_models_dir, *dit_sub)
        else:
            base["dit_path"] = ""

        return base
|
src/musubi_tuner/gui/gui.ja.md
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[English](./gui.md) | [日本語](./gui.ja.md)
|
| 2 |
+
|
| 3 |
+
# Musubi Tuner GUI - ユーザーガイド
|
| 4 |
+
|
| 5 |
+
このガイドでは、Musubi Tuner GUIのセットアップと使用方法について説明します。Z-Image-TurboやQwen-Imageなどの画像生成モデルでLoRAを学習できます。
|
| 6 |
+
|
| 7 |
+
## 目次
|
| 8 |
+
|
| 9 |
+
1. [必要な環境](#必要な環境)
|
| 10 |
+
2. [必要なソフトウェアのインストール](#必要なソフトウェアのインストール)
|
| 11 |
+
3. [Musubi Tunerのインストール](#musubi-tunerのインストール)
|
| 12 |
+
4. [GUIの起動](#guiの起動)
|
| 13 |
+
5. [作業手順ガイド](#作業手順ガイド)
|
| 14 |
+
6. [各項目の説明](#各項目の説明)
|
| 15 |
+
7. [トラブルシューティング](#トラブルシューティング)
|
| 16 |
+
|
| 17 |
+
---
|
| 18 |
+
|
| 19 |
+
## 必要な環境
|
| 20 |
+
|
| 21 |
+
始める前に、以下を確認してください:
|
| 22 |
+
|
| 23 |
+
- NVIDIA GPU搭載のWindows PC(VRAM 12GB以上推奨、VRAM+メインRAMで64GB以上推奨)
|
| 24 |
+
- インターネット接続
|
| 25 |
+
- ComfyUIがインストール済みで、必要なモデル(VAE、Text Encoder、DiT)がダウンロード済みであること
|
| 26 |
+
|
| 27 |
+
---
|
| 28 |
+
|
| 29 |
+
## 必要なソフトウェアのインストール
|
| 30 |
+
|
| 31 |
+
### ステップ1:Pythonのインストール
|
| 32 |
+
|
| 33 |
+
Musubi TunerにはPython 3.10、3.11、または3.12が必要です。システムにインストールされていない場合は、以下の手順でインストールしてください。
|
| 34 |
+
|
| 35 |
+
1. Pythonの公式サイトにアクセス:https://www.python.org/downloads/
|
| 36 |
+
2. Python 3.10、3.11、または3.12の、Windows用 64ビットインストーラーをダウンロード(互換性の観点から **Python 3.12** を推奨)
|
| 37 |
+
3. インストーラーを実行
|
| 38 |
+
4. **重要**:「Install Now」をクリックする前に、**「Add Python to PATH」にチェック**を入れてください
|
| 39 |
+
5. インストールを完了
|
| 40 |
+
|
| 41 |
+
**インストールの確認**:コマンドプロンプトを開き(`Win + R`を押して`cmd`と入力し、Enterを押す)、以下を実行:
|
| 42 |
+
```
|
| 43 |
+
python --version
|
| 44 |
+
```
|
| 45 |
+
`Python 3.12.x`のように表示されれば成功です。
|
| 46 |
+
|
| 47 |
+
### ステップ2:Gitのインストール
|
| 48 |
+
|
| 49 |
+
GitはMusubi Tunerのソースコードをダウンロードするために必要です。システムにインストールされていない場合は、以下の手順でインストールしてください。
|
| 50 |
+
|
| 51 |
+
1. Gitのサイトにアクセス:https://git-scm.com/downloads/win
|
| 52 |
+
2. Windowsインストーラーをダウンロード
|
| 53 |
+
3. デフォルト設定のままインストーラーを実行(「Next」をクリックし続ける)
|
| 54 |
+
4. インストールを完了
|
| 55 |
+
|
| 56 |
+
**インストールの確認**:コマンドプロンプトで以下を実行:
|
| 57 |
+
```
|
| 58 |
+
git --version
|
| 59 |
+
```
|
| 60 |
+
`git version 2.x.x`のように表示されれば成功です。
|
| 61 |
+
|
| 62 |
+
### ステップ3:uvのインストール
|
| 63 |
+
|
| 64 |
+
uvは依存関係の管理を簡単にする最新のPythonパッケージマネージャーです。
|
| 65 |
+
|
| 66 |
+
1. 管理者のコマンドプロンプトを開く
|
| 67 |
+
2. 以下のコマンドを実行:
|
| 68 |
+
```
|
| 69 |
+
powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
|
| 70 |
+
```
|
| 71 |
+
3. 変更を反映させるため、**コマンドプロンプトを閉じて再度開く**(通常のコマンドプロンプト)
|
| 72 |
+
|
| 73 |
+
**インストールの確認**:新しいコマンドプロンプトで以下を実行:
|
| 74 |
+
```
|
| 75 |
+
uv --version
|
| 76 |
+
```
|
| 77 |
+
`uv 0.x.x`のように表示されれば成功です。
|
| 78 |
+
|
| 79 |
+
---
|
| 80 |
+
|
| 81 |
+
## Musubi Tunerのインストール
|
| 82 |
+
|
| 83 |
+
### ステップ1:ソースコードのダウンロード
|
| 84 |
+
|
| 85 |
+
1. コマンドプロンプトを開く
|
| 86 |
+
2. Musubi Tunerをインストールしたいフォルダに移動。例:
|
| 87 |
+
```
|
| 88 |
+
cd C:\Users\YourName\Documents
|
| 89 |
+
```
|
| 90 |
+
3. リポジトリをクローン:
|
| 91 |
+
```
|
| 92 |
+
git clone https://github.com/kohya-ss/musubi-tuner.git
|
| 93 |
+
```
|
| 94 |
+
4. フォルダに移動:
|
| 95 |
+
```
|
| 96 |
+
cd musubi-tuner
|
| 97 |
+
```
|
| 98 |
+
|
| 99 |
+
### ステップ2:初回セットアップ(自動)
|
| 100 |
+
|
| 101 |
+
GUIを初めて実行すると、uvが自動的にPyTorchを含むすべての必要な依存関係をダウンロード・インストールします。これには数分かかる場合があります。
|
| 102 |
+
|
| 103 |
+
---
|
| 104 |
+
|
| 105 |
+
## GUIの起動
|
| 106 |
+
|
| 107 |
+
コマンドプロンプトを開き、musubi-tunerフォルダに移動して、CUDAバージョンに応じて以下のいずれかのコマンドを実行します:
|
| 108 |
+
|
| 109 |
+
### CUDA 12.4の場合(安定したバージョン)
|
| 110 |
+
|
| 111 |
+
```
|
| 112 |
+
uv run --extra cu124 --extra gui python src/musubi_tuner/gui/gui.py
|
| 113 |
+
```
|
| 114 |
+
|
| 115 |
+
### CUDA 12.8の場合(新しいGPU向け)
|
| 116 |
+
|
| 117 |
+
```
|
| 118 |
+
uv run --extra cu128 --extra gui python src/musubi_tuner/gui/gui.py
|
| 119 |
+
```
|
| 120 |
+
|
| 121 |
+
**注意**:どのCUDAバージョンを使うか分からない場合は、まず`cu124`を試してください。
|
| 122 |
+
|
| 123 |
+
起動後、GUIは以下のようなURLを表示します:
|
| 124 |
+
```
|
| 125 |
+
Running on local URL: http://127.0.0.1:7860
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
このURLをWebブラウザで開いてGUIにアクセスします。
|
| 129 |
+
|
| 130 |
+
**ヒント**:より簡単にGUIを起動するために、バッチファイル(`.bat`)を作成できます:
|
| 131 |
+
|
| 132 |
+
1. `musubi-tuner`フォルダに`launch_gui.bat`というファイルを作成
|
| 133 |
+
2. 以下の内容を追加:
|
| 134 |
+
```batch
|
| 135 |
+
@echo off
|
| 136 |
+
cd /d "%~dp0"
|
| 137 |
+
uv run --extra cu124 --extra gui python src/musubi_tuner/gui/gui.py
|
| 138 |
+
pause
|
| 139 |
+
```
|
| 140 |
+
3. バッチファイルをダブルクリックしてGUIを起動
|
| 141 |
+
|
| 142 |
+
---
|
| 143 |
+
|
| 144 |
+
## 作業手順ガイド
|
| 145 |
+
|
| 146 |
+
GUIは上から下へ、作業を完了すべき順序で配置されています。
|
| 147 |
+
|
| 148 |
+
### ステップの概要
|
| 149 |
+
|
| 150 |
+
1. **プロジェクト設定** - プロジェクトフォルダの作成または読み込み
|
| 151 |
+
2. **モデル選択** - モデルアーキテクチャの選択とモデルパスの設定
|
| 152 |
+
3. **データセット設定** - 学習解像度とバッチサイズの設定
|
| 153 |
+
4. **前処理** - latentとtext encoderの事前キャッシュ
|
| 154 |
+
5. **学習** - 学習パラメータの設定と学習の開始
|
| 155 |
+
6. **後処理** - ComfyUI用にLoRAを変換(必要な場合)
|
| 156 |
+
|
| 157 |
+
---
|
| 158 |
+
|
| 159 |
+
### ステップ1:プロジェクト設定
|
| 160 |
+
|
| 161 |
+
1. **Project Directory(プロジェクトディレクトリ)**:プロジェクトフォルダのフルパスを入力(例:`C:\MyProjects\my_lora_project`)
|
| 162 |
+
2. **「Initialize / Load Project」** をクリック
|
| 163 |
+
|
| 164 |
+
これにより:
|
| 165 |
+
- プロジェクトフォルダが存在しない場合は作成されます
|
| 166 |
+
- 学習画像用の`training`サブフォルダが作成されます
|
| 167 |
+
- 以前使用したプロジェクトの場合、設定が読み込まれます
|
| 168 |
+
|
| 169 |
+
**初期化後**、`training`フォルダに学習データを配置してください:
|
| 170 |
+
- 画像ファイル(`.jpg`、`.png`など)
|
| 171 |
+
- キャプションファイル(画像と同じファイル名で、拡張子は`.txt`)
|
| 172 |
+
|
| 173 |
+
例:
|
| 174 |
+
```
|
| 175 |
+
my_lora_project/
|
| 176 |
+
training/
|
| 177 |
+
image001.jpg
|
| 178 |
+
image001.txt
|
| 179 |
+
image002.png
|
| 180 |
+
image002.txt
|
| 181 |
+
```
|
| 182 |
+
|
| 183 |
+
---
|
| 184 |
+
|
| 185 |
+
### ステップ2:モデル選択
|
| 186 |
+
|
| 187 |
+
1. **Model Architecture(モデルアーキテクチャ)**:学習したいモデルを選択
|
| 188 |
+
- `Z-Image-Turbo` - 高速な学習、BaseモデルがリリースされていないためLoRA学習がやや不安定
|
| 189 |
+
- `Qwen-Image` - より高品質、VRAMをより多く使用
|
| 190 |
+
|
| 191 |
+
2. **VRAM Size(VRAMサイズ)**:GPUのVRAMサイズを選択
|
| 192 |
+
- バッチサイズやブロックスワップなどの推奨設定に影響します
|
| 193 |
+
|
| 194 |
+
3. **ComfyUI Models Directory(ComfyUIモデルディレクトリ)**:ComfyUIの`models`フォルダのパスを入力
|
| 195 |
+
- 例:`C:\ComfyUI\models`
|
| 196 |
+
- このフォルダには`vae`、`text_encoders`、`diffusion_models`サブフォルダが含まれている必要があります
|
| 197 |
+
- 必要なモデルは[こちら](#使用するモデル一覧)を参考にしてください
|
| 198 |
+
|
| 199 |
+
4. **「Validate ComfyUI Models Directory」** をクリックしてフォルダ構造を確認
|
| 200 |
+
|
| 201 |
+
---
|
| 202 |
+
|
| 203 |
+
### ステップ3:データセット設定
|
| 204 |
+
|
| 205 |
+
1. **「Set Recommended Resolution & Batch Size」** をクリックして、選択したモデルとVRAMに応じた推奨値を自動入力
|
| 206 |
+
2. 必要に応じて調整:
|
| 207 |
+
- **Resolution(解像度)Width/Height**:学習画像の解像度
|
| 208 |
+
- **Batch Size(バッチサイズ)**:一度に処理する画像数(大きいほど高速だがVRAMを多く使用)
|
| 209 |
+
3. **「Generate Dataset Config」** をクリックして設定ファイルを作成
|
| 210 |
+
|
| 211 |
+
生成された設定はボタン下のプレビューエリアに表示されます。
|
| 212 |
+
|
| 213 |
+
---
|
| 214 |
+
|
| 215 |
+
### ステップ4:前処理
|
| 216 |
+
|
| 217 |
+
学習前に、latentとtext encoderの出力をキャッシュする必要があります。これにより、画像とキャプションがモデルが使用できる形式に変換されます。
|
| 218 |
+
|
| 219 |
+
1. **「Set Default Paths」** をクリックして、ComfyUIディレクトリに基づいてモデルパスを自動入力
|
| 220 |
+
2. パスが正しいことを確認:
|
| 221 |
+
- **VAE Path**:VAEモデルへのパス
|
| 222 |
+
- **Text Encoder 1 Path**:テキストエンコーダーモデルへのパス
|
| 223 |
+
- **Text Encoder 2 Path**:(一部のモデルのみ、空の場合もあります)
|
| 224 |
+
|
| 225 |
+
3. **「Cache Latents」** をクリックして完了を待つ
|
| 226 |
+
- 画像をlatent空間にエンコードします
|
| 227 |
+
- ログ出力で進捗を確認できます
|
| 228 |
+
|
| 229 |
+
4. **「Cache Text Encoder Outputs」** をクリックして完了を待つ
|
| 230 |
+
- キャプションをembeddingにエンコードします
|
| 231 |
+
- テキストエンコーダーの読み込みがあるため、初回は時間がかかる場合があります
|
| 232 |
+
|
| 233 |
+
---
|
| 234 |
+
|
| 235 |
+
### ステップ5:学習
|
| 236 |
+
|
| 237 |
+
1. **「Set Recommended Parameters」** をクリックして、モデルとVRAMに応じた学習設定を自動入力
|
| 238 |
+
|
| 239 |
+
2. **必須設定の確認**:
|
| 240 |
+
- **Base Model / DiT Path**:ベースとなるdiffusionモデル(DiT)へのパス(推奨ボタンをクリックすると自動入力)
|
| 241 |
+
- **Output Name**:LoRAファイルの名前(例:`my_character_lora`)
|
| 242 |
+
|
| 243 |
+
3. **基本パラメータ**(デフォルト値を使用可能):
|
| 244 |
+
- **LoRA Dim**:LoRAのランク/次元(4-32、大きいほど容量が増えるがファイルサイズも増加)
|
| 245 |
+
- **Learning Rate**:学習速度(デフォルト:1e-3 (0.001)、学習が不安定な場合は1e-4程度まで減少させることを推奨)
|
| 246 |
+
- **Epochs**:全画像での学習回数。デフォルトは画像数に基づいて調整されます。過学習になる場合は減少させてください。
|
| 247 |
+
- **Save Every N Epochs**:チェックポイントを保存する頻度
|
| 248 |
+
|
| 249 |
+
4. **詳細パラメータ**(「Advanced Parameters」アコーディオンを展開):
|
| 250 |
+
- **Discrete Flow Shift**:デノイジングのどのステップを重視するか(モデル固有のデフォルト値を使用することを推奨)
|
| 251 |
+
- **Block Swap**:モデルレイヤーをCPUにオフロード(VRAMが限られている場合に使用)
|
| 252 |
+
- **Mixed Precision**:精度モード(bf16推奨)
|
| 253 |
+
- **Gradient Checkpointing**:VRAM使用量を削減
|
| 254 |
+
- **FP8オプション**:さらなるメモリ最適化
|
| 255 |
+
|
| 256 |
+
5. **サンプル画像生成**(オプション):
|
| 257 |
+
- **「Generate sample images during training」** を有効にして進捗を確認
|
| 258 |
+
- 学習内容を表すサンプルプロンプトを入力
|
| 259 |
+
- サンプル画像のサイズと生成頻度を設定
|
| 260 |
+
|
| 261 |
+
6. **「Start Training」** をクリックして開始
|
| 262 |
+
- 学習進捗を表示する新しいコマンドウィンドウが開きます
|
| 263 |
+
- 学習の進捗は新しいウィンドウに表示されます
|
| 264 |
+
- GUIには学習が開始されたことを確認するメッセージが表示されます
|
| 265 |
+
|
| 266 |
+
---
|
| 267 |
+
|
| 268 |
+
### ステップ6:後処理(オプション)
|
| 269 |
+
|
| 270 |
+
Z-ImageのLoRAは、ComfyUIで使用するために変換が必要です。以下の手順に従ってください。
|
| 271 |
+
|
| 272 |
+
1. **「Set Default Paths」** をクリックして、出力名に基づいてパスを自動入力
|
| 273 |
+
2. パスを確認:
|
| 274 |
+
- **Input LoRA Path**:学習済みLoRAへのパス
|
| 275 |
+
- **Output ComfyUI LoRA Path**:変換後のLoRAの保存先
|
| 276 |
+
3. **「Convert to ComfyUI Format」** をクリック
|
| 277 |
+
|
| 278 |
+
---
|
| 279 |
+
|
| 280 |
+
## 使用するモデル一覧
|
| 281 |
+
|
| 282 |
+
### Z-Image-Turbo
|
| 283 |
+
|
| 284 |
+
text-encodersとvaeモデルファイルは、[こちら](https://huggingface.co/Comfy-Org/z_image_turbo) の `split_files` 以下の適切なディレクトリからダウンロードしてください。
|
| 285 |
+
|
| 286 |
+
| 種類 | モデルファイル |
|
| 287 |
+
|----------------------|--------------|
|
| 288 |
+
| diffusion-models | safetensors ostris氏の[De-Turbo](https://huggingface.co/ostris/Z-Image-De-Turbo)から z_image_de_turbo_v1_bf16.safetensors を使用|
|
| 289 |
+
| text-encoders | qwen_3_4b.safetensors |
|
| 290 |
+
| VAE | ae.safetensors |
|
| 291 |
+
|
| 292 |
+
### Qwen-Image
|
| 293 |
+
|
| 294 |
+
[こちら](https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI)の`split_files`以下の適切なディレクトリから必要なモデルファイルをダウンロードしてください。
|
| 295 |
+
|
| 296 |
+
| 種類 | モデルファイル |
|
| 297 |
+
|----------------------|-----------------------------|
|
| 298 |
+
| diffusion-models | qwen_image_bf16.safetensors |
|
| 299 |
+
| text-encoders | qwen_2.5_vl_7b.safetensors |
|
| 300 |
+
| VAE | qwen_image_vae.safetensors |
|
| 301 |
+
|
| 302 |
+
---
|
| 303 |
+
|
| 304 |
+
## 各項目の説明
|
| 305 |
+
|
| 306 |
+
### プロジェクト設定
|
| 307 |
+
|
| 308 |
+
| 項目 | 説明 |
|
| 309 |
+
|------|------|
|
| 310 |
+
| Project Directory | 学習プロジェクトのルートフォルダ。すべてのデータと出力がここに保存されます。 |
|
| 311 |
+
|
| 312 |
+
### モデル設定
|
| 313 |
+
|
| 314 |
+
| 項目 | 説明 |
|
| 315 |
+
|------|------|
|
| 316 |
+
| Model Architecture | LoRAを学習するベースモデル。Z-Image-Turboは高速、Qwen-Imageはより高品質。 |
|
| 317 |
+
| VRAM Size | GPUのビデオメモリ。推奨バッチサイズやメモリ最適化設定に影響します。 |
|
| 318 |
+
| ComfyUI Models Directory | 必要なモデルファイルが含まれるComfyUIの`models`フォルダへのパス。 |
|
| 319 |
+
|
| 320 |
+
### データセット設定
|
| 321 |
+
|
| 322 |
+
| 項目 | 説明 |
|
| 323 |
+
|------|------|
|
| 324 |
+
| Resolution (W/H) | 学習解像度。画像はこのサイズにリサイズ/クロップされます。 |
|
| 325 |
+
| Batch Size | 同時に処理する画像数。大きいほど高速ですが、VRAMをより多く使用します。 |
|
| 326 |
+
|
| 327 |
+
### 前処理
|
| 328 |
+
|
| 329 |
+
| 項目 | 説明 |
|
| 330 |
+
|------|------|
|
| 331 |
+
| VAE Path | 画像をlatent空間にエンコードするVAEモデルファイルへのパス。 |
|
| 332 |
+
| Text Encoder 1 Path | メインのテキストエンコーダーモデルへのパス。 |
|
| 333 |
+
| Text Encoder 2 Path | セカンダリのテキストエンコーダーへのパス(モデルによって必要な場合)。 |
|
| 334 |
+
|
| 335 |
+
### 学習パラメータ
|
| 336 |
+
|
| 337 |
+
| 項目 | 説明 |
|
| 338 |
+
|------|------|
|
| 339 |
+
| Base Model / DiT Path | ベースとなるdiffusionモデル(DiT)へのパス。 |
|
| 340 |
+
| Output Name | 保存されるLoRAファイルのベース名(拡張子なし)。 |
|
| 341 |
+
| LoRA Dim | LoRAのランク/次元。大きいほど詳細をキャプチャできますが、ファイルサイズも増加。一般的な値:4、8、16、32。 |
|
| 342 |
+
| Learning Rate | 学習速度。大きいほど速く学習しますが、オーバーシュートする可能性があります。デフォルト:1e-3(0.001)。 |
|
| 343 |
+
| Epochs | 全学習画像を何回学習するか。 |
|
| 344 |
+
| Save Every N Epochs | チェックポイントを保存する頻度。サンプル画像の生成頻度も制御します。 |
|
| 345 |
+
| Discrete Flow Shift | 学習のダイナミクスに影響するflow matchingパラメータ。モデル固有のデフォルト値が推奨されます。 |
|
| 346 |
+
| Block Swap | CPUにオフロードするtransformerブロック数。VRAMが限られている場合に使用。大きいほどVRAM使用量が減りますが、学習が遅くなります。 |
|
| 347 |
+
| Mixed Precision | 浮動小数点精度。最新のGPUではbf16を推奨。 |
|
| 348 |
+
| Gradient Checkpointing | 一部の値を再計算することでVRAM使用量を削減。やや遅くなりますが、メモリ使用量が減少。 |
|
| 349 |
+
| FP8 Scaled | ベースモデルにFP8精度を使用。品質への影響を最小限に抑えてメモリを削減。 |
|
| 350 |
+
| FP8 LLM | テキストエンコーダー(LLM)にFP8精度を使用。さらにメモリ使用量を削減。 |
|
| 351 |
+
| Additional Arguments | 上級ユーザー向けの追加コマンドライン引数。 |
|
| 352 |
+
|
| 353 |
+
### サンプル画像生成
|
| 354 |
+
|
| 355 |
+
| 項目 | 説明 |
|
| 356 |
+
|------|------|
|
| 357 |
+
| Generate sample images | 学習中にサンプル画像を生成するかどうか。 |
|
| 358 |
+
| Sample Prompt | サンプル画像を生成するために使用するテキストプロンプト。 |
|
| 359 |
+
| Negative Prompt | サンプル画像で避けたい内容。 |
|
| 360 |
+
| Sample Width/Height | サンプル画像の解像度。 |
|
| 361 |
+
| Sample Every N Epochs | サンプルを生成する頻度。 |
|
| 362 |
+
|
| 363 |
+
### 後処理
|
| 364 |
+
|
| 365 |
+
| 項目 | 説明 |
|
| 366 |
+
|------|------|
|
| 367 |
+
| Input LoRA Path | 学習済みLoRAファイル(Musubi Tuner形式)へのパス。 |
|
| 368 |
+
| Output ComfyUI LoRA Path | 変換後のLoRA(ComfyUI形式)の保存先。 |
|
| 369 |
+
|
| 370 |
+
---
|
| 371 |
+
|
| 372 |
+
## トラブルシューティング
|
| 373 |
+
|
| 374 |
+
### 「Python is not recognized」と表示される
|
| 375 |
+
- インストール時に「Add Python to PATH」にチェックを入れたか確認
|
| 376 |
+
- このオプションを有効にしてPythonを再インストールしてみる
|
| 377 |
+
- または、手動でPythonをシステムのPATHに追加
|
| 378 |
+
|
| 379 |
+
### 「uv is not recognized」と表示される
|
| 380 |
+
- uvをインストールした後、コマンドプロンプトを閉じて再度開く
|
| 381 |
+
- インストールコマンドを再度実行してみる
|
| 382 |
+
|
| 383 |
+
### CUDAエラーまたはメモリ不足
|
| 384 |
+
- GUIでより小さいVRAMサイズを選択して、より控えめな設定を取得
|
| 385 |
+
- Block Swapを有効にして一部の計算をCPUにオフロード
|
| 386 |
+
- バッチサイズを1に減らす
|
| 387 |
+
- FP8オプションを有効にしてさらにメモリを節約
|
| 388 |
+
|
| 389 |
+
### 学習スクリプトがすぐエラーで終了する
|
| 390 |
+
- エラーメッセージを確認
|
| 391 |
+
- すべてのパスが正しいか確認
|
| 392 |
+
- 前処理(Cache LatentsとCache Text Encoder)が正常に完了したか確認
|
| 393 |
+
|
| 394 |
+
### 学習が遅い
|
| 395 |
+
- Block Swapが有効な場合、学習は遅くなります(VRAMが限られている場合は想定内)
|
| 396 |
+
- VRAMが不足し、共有VRAMが使用されると、パフォーマンスが大幅に低下します。fp8オプションの使用、Block Swapを大きくする、バッチサイズを減らすなど、メモリ使用量を減らす方法を試してください。
|
| 397 |
+
- GPU(CPUではなく)を使用していることを確認
|
| 398 |
+
- GPUドライバーが最新であることを確認
|
| 399 |
+
|
| 400 |
+
### 「Model not found」エラー
|
| 401 |
+
- ComfyUIモデルディレクトリが正しいか確認
|
| 402 |
+
- 必要なモデルがダウンロードされているか確認
|
| 403 |
+
- モデルのファイル名がGUIが期待するものと一致しているか確認(正確なファイル名はconfig_manager.pyを参照)
|
| 404 |
+
|
| 405 |
+
### GUIが起動しない
|
| 406 |
+
- 正しいディレクトリ(musubi-tunerフォルダ)にいることを確認
|
| 407 |
+
- 正しいuvコマンドを使用していることを確認(CUDAバージョンに注意)
|
| 408 |
+
|
| 409 |
+
---
|
| 410 |
+
|
| 411 |
+
## プロジェクトフォルダの構造
|
| 412 |
+
|
| 413 |
+
GUIを使用した後、プロジェクトフォルダは以下のようになります:
|
| 414 |
+
|
| 415 |
+
```
|
| 416 |
+
my_lora_project/
|
| 417 |
+
training/ # 学習画像とキャプション
|
| 418 |
+
image001.jpg
|
| 419 |
+
image001.txt
|
| 420 |
+
...
|
| 421 |
+
cache/ # 前処理済みデータ(自動作成)
|
| 422 |
+
latent_cache/
|
| 423 |
+
text_encoder_cache/
|
| 424 |
+
models/ # 学習済みLoRAファイル(自動作成)
|
| 425 |
+
my_lora.safetensors
|
| 426 |
+
my_lora_comfy.safetensors
|
| 427 |
+
sample/ # 学習中に生成されたサンプル画像
|
| 428 |
+
logs/ # TensorBoardログ(自動作成)
|
| 429 |
+
dataset_config.toml # データセット設定(自動作成)
|
| 430 |
+
musubi_project.toml # GUIプロジェクト設定(自動作成)
|
| 431 |
+
sample_prompt.txt # サンプルプロンプトファイル(有効時に自動作成)
|
| 432 |
+
```
|
| 433 |
+
|
| 434 |
+
---
|
| 435 |
+
|
| 436 |
+
## 次のステップ
|
| 437 |
+
|
| 438 |
+
LoRAを学習した後:
|
| 439 |
+
|
| 440 |
+
1. ComfyUIで使用する必要がある場合(Z-ImageのLoRAは変換が必要)は、後処理セクションで変換
|
| 441 |
+
2. 変換したLoRAをComfyUIの`models/loras`フォルダにコピー
|
| 442 |
+
3. ComfyUIでLoRA loaderノードを使用して読み込み
|
| 443 |
+
|
| 444 |
+
より高度な学習オプションやコマンドラインでの使用方法については、`docs`フォルダ内のMusubi Tunerのメインドキュメントを参照してください。
|
src/musubi_tuner/gui/gui.md
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[English](./gui.md) | [日本語](./gui.ja.md)
|
| 2 |
+
|
| 3 |
+
# Musubi Tuner GUI - User Guide
|
| 4 |
+
|
| 5 |
+
This guide will help you set up and use the Musubi Tuner GUI for training LoRA models with image generation architectures like Z-Image-Turbo and Qwen-Image.
|
| 6 |
+
|
| 7 |
+
## Table of Contents
|
| 8 |
+
|
| 9 |
+
1. [Prerequisites](#prerequisites)
|
| 10 |
+
2. [Installing Required Software](#installing-required-software)
|
| 11 |
+
3. [Installing Musubi Tuner](#installing-musubi-tuner)
|
| 12 |
+
4. [Launching the GUI](#launching-the-gui)
|
| 13 |
+
5. [Workflow Guide](#workflow-guide)
|
| 14 |
+
6. [Field Descriptions](#field-descriptions)
|
| 15 |
+
7. [Troubleshooting](#troubleshooting)
|
| 16 |
+
|
| 17 |
+
---
|
| 18 |
+
|
| 19 |
+
## Prerequisites
|
| 20 |
+
|
| 21 |
+
Before you begin, make sure you have:
|
| 22 |
+
|
| 23 |
+
- A Windows PC with an NVIDIA GPU (12GB+ VRAM recommended, 64GB+ VRAM + main RAM recommended)
|
| 24 |
+
- Internet connection
|
| 25 |
+
- ComfyUI installed with the required models (VAE, Text Encoder, DiT)
|
| 26 |
+
|
| 27 |
+
---
|
| 28 |
+
|
| 29 |
+
## Installing Required Software
|
| 30 |
+
|
| 31 |
+
### Step 1: Install Python
|
| 32 |
+
|
| 33 |
+
Musubi Tuner requires Python 3.10, 3.11, or 3.12. If you don't have it installed, follow these steps:
|
| 34 |
+
|
| 35 |
+
1. Go to the official Python website: https://www.python.org/downloads/
|
| 36 |
+
2. Download Python 3.10, 3.11, or 3.12 Windows 64-bit installer (we recommend **Python 3.12** for best compatibility)
|
| 37 |
+
3. Run the installer
|
| 38 |
+
4. **IMPORTANT**: Check the box that says **"Add Python to PATH"** before clicking "Install Now"
|
| 39 |
+
5. Complete the installation
|
| 40 |
+
|
| 41 |
+
**Verify installation**: Open Command Prompt (press `Win + R`, type `cmd`, press Enter) and run:
|
| 42 |
+
```
|
| 43 |
+
python --version
|
| 44 |
+
```
|
| 45 |
+
You should see something like `Python 3.12.x`.
|
| 46 |
+
|
| 47 |
+
### Step 2: Install Git
|
| 48 |
+
|
| 49 |
+
Git is needed to download the Musubi Tuner source code. If you don't have it installed, follow these steps:
|
| 50 |
+
|
| 51 |
+
1. Go to the Git website: https://git-scm.com/downloads/win
|
| 52 |
+
2. Download the Windows installer
|
| 53 |
+
3. Run the installer with default settings (keep clicking "Next")
|
| 54 |
+
4. Complete the installation
|
| 55 |
+
|
| 56 |
+
**Verify installation**: In Command Prompt, run:
|
| 57 |
+
```
|
| 58 |
+
git --version
|
| 59 |
+
```
|
| 60 |
+
You should see something like `git version 2.x.x`.
|
| 61 |
+
|
| 62 |
+
### Step 3: Install uv
|
| 63 |
+
|
| 64 |
+
uv is a modern Python package manager that simplifies dependency management.
|
| 65 |
+
|
| 66 |
+
1. Open Command Prompt as Administrator
|
| 67 |
+
2. Run the following command:
|
| 68 |
+
```
|
| 69 |
+
powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
|
| 70 |
+
```
|
| 71 |
+
3. **Close and reopen** Command Prompt (normal, non-administrator) for the changes to take effect
|
| 72 |
+
|
| 73 |
+
**Verify installation**: In a new Command Prompt, run:
|
| 74 |
+
```
|
| 75 |
+
uv --version
|
| 76 |
+
```
|
| 77 |
+
You should see something like `uv 0.x.x`.
|
| 78 |
+
|
| 79 |
+
---
|
| 80 |
+
|
| 81 |
+
## Installing Musubi Tuner
|
| 82 |
+
|
| 83 |
+
### Step 1: Download the Source Code
|
| 84 |
+
|
| 85 |
+
1. Open Command Prompt
|
| 86 |
+
2. Navigate to a folder where you want to install Musubi Tuner. For example:
|
| 87 |
+
```
|
| 88 |
+
cd C:\Users\YourName\Documents
|
| 89 |
+
```
|
| 90 |
+
3. Clone the repository:
|
| 91 |
+
```
|
| 92 |
+
git clone https://github.com/kohya-ss/musubi-tuner.git
|
| 93 |
+
```
|
| 94 |
+
4. Navigate into the folder:
|
| 95 |
+
```
|
| 96 |
+
cd musubi-tuner
|
| 97 |
+
```
|
| 98 |
+
|
| 99 |
+
### Step 2: First-Time Setup (Automatic)
|
| 100 |
+
|
| 101 |
+
The first time you run the GUI, uv will automatically download and install all required dependencies including PyTorch. This may take several minutes.
|
| 102 |
+
|
| 103 |
+
---
|
| 104 |
+
|
| 105 |
+
## Launching the GUI
|
| 106 |
+
|
| 107 |
+
Open Command Prompt, navigate to the musubi-tuner folder, and run one of the following commands based on your CUDA version:
|
| 108 |
+
|
| 109 |
+
### For CUDA 12.4 (Stable version)
|
| 110 |
+
|
| 111 |
+
```
|
| 112 |
+
uv run --extra cu124 --extra gui python src/musubi_tuner/gui/gui.py
|
| 113 |
+
```
|
| 114 |
+
|
| 115 |
+
### For CUDA 12.8 (Newer GPUs)
|
| 116 |
+
|
| 117 |
+
```
|
| 118 |
+
uv run --extra cu128 --extra gui python src/musubi_tuner/gui/gui.py
|
| 119 |
+
```
|
| 120 |
+
|
| 121 |
+
**Note**: If you're unsure which CUDA version to use, try `cu124` first.
|
| 122 |
+
|
| 123 |
+
After launching, the GUI will start and display a URL like:
|
| 124 |
+
```
|
| 125 |
+
Running on local URL: http://127.0.0.1:7860
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
Open this URL in your web browser to access the GUI.
|
| 129 |
+
|
| 130 |
+
**Tip**: You can create a batch file (`.bat`) to launch the GUI more easily:
|
| 131 |
+
|
| 132 |
+
1. Create a file named `launch_gui.bat` in the `musubi-tuner` folder
|
| 133 |
+
2. Add the following content:
|
| 134 |
+
```batch
|
| 135 |
+
@echo off
|
| 136 |
+
cd /d "%~dp0"
|
| 137 |
+
uv run --extra cu124 --extra gui python src/musubi_tuner/gui/gui.py
|
| 138 |
+
pause
|
| 139 |
+
```
|
| 140 |
+
3. Double-click the batch file to launch the GUI
|
| 141 |
+
|
| 142 |
+
---
|
| 143 |
+
|
| 144 |
+
## Workflow Guide
|
| 145 |
+
|
| 146 |
+
The GUI is organized from top to bottom in the order you should complete each step.
|
| 147 |
+
|
| 148 |
+
### Overview of Steps
|
| 149 |
+
|
| 150 |
+
1. **Project Setup** - Create or open a project folder
|
| 151 |
+
2. **Model Selection** - Choose the model architecture and set up model paths
|
| 152 |
+
3. **Dataset Configuration** - Configure training resolution and batch size
|
| 153 |
+
4. **Preprocessing** - Cache latents and text encoder outputs
|
| 154 |
+
5. **Training** - Configure and start the training process
|
| 155 |
+
6. **Post-Processing** - Convert LoRA for ComfyUI (if needed)
|
| 156 |
+
|
| 157 |
+
---
|
| 158 |
+
|
| 159 |
+
### Step 1: Project Setup
|
| 160 |
+
|
| 161 |
+
1. **Project Directory**: Enter the full path to your project folder (e.g., `C:\MyProjects\my_lora_project`)
|
| 162 |
+
2. Click **"Initialize / Load Project"**
|
| 163 |
+
|
| 164 |
+
This will:
|
| 165 |
+
- Create the project folder if it doesn't exist
|
| 166 |
+
- Create a `training` subfolder for your training images
|
| 167 |
+
- Load previous settings if the project was used before
|
| 168 |
+
|
| 169 |
+
**After initialization**, place your training data in the `training` folder:
|
| 170 |
+
- Image files (`.jpg`, `.png`, etc.)
|
| 171 |
+
- Caption files (same filename as the image, but with `.txt` extension)
|
| 172 |
+
|
| 173 |
+
Example:
|
| 174 |
+
```
|
| 175 |
+
my_lora_project/
|
| 176 |
+
training/
|
| 177 |
+
image001.jpg
|
| 178 |
+
image001.txt
|
| 179 |
+
image002.png
|
| 180 |
+
image002.txt
|
| 181 |
+
```
|
| 182 |
+
|
| 183 |
+
---
|
| 184 |
+
|
| 185 |
+
### Step 2: Model Selection
|
| 186 |
+
|
| 187 |
+
1. **Model Architecture**: Select the model you want to train
|
| 188 |
+
- `Z-Image-Turbo` - Faster training; LoRA training may be slightly unstable because the Base model is not released yet
|
| 189 |
+
- `Qwen-Image` - Higher quality, requires more VRAM
|
| 190 |
+
|
| 191 |
+
2. **VRAM Size**: Select your GPU's VRAM size
|
| 192 |
+
- This affects recommended settings like batch size and block swap
|
| 193 |
+
|
| 194 |
+
3. **ComfyUI Models Directory**: Enter the path to your ComfyUI `models` folder
|
| 195 |
+
- Example: `C:\ComfyUI\models`
|
| 196 |
+
- This folder should contain `vae`, `text_encoders`, and `diffusion_models` subfolders
|
| 197 |
+
- Required models can be found in the [Required Model Files](#required-model-files) section below
|
| 198 |
+
|
| 199 |
+
4. Click **"Validate ComfyUI Models Directory"** to verify the folder structure
|
| 200 |
+
|
| 201 |
+
---
|
| 202 |
+
|
| 203 |
+
### Step 3: Dataset Configuration
|
| 204 |
+
|
| 205 |
+
1. Click **"Set Recommended Resolution & Batch Size"** to auto-fill recommended values for your selected model and VRAM
|
| 206 |
+
2. Adjust if needed:
|
| 207 |
+
- **Resolution (Width/Height)**: Training image resolution
|
| 208 |
+
- **Batch Size**: Number of images processed at once (higher = faster but more VRAM)
|
| 209 |
+
3. Click **"Generate Dataset Config"** to create the configuration file
|
| 210 |
+
|
| 211 |
+
The generated configuration will appear in the preview area below the button.
|
| 212 |
+
|
| 213 |
+
---
|
| 214 |
+
|
| 215 |
+
### Step 4: Preprocessing
|
| 216 |
+
|
| 217 |
+
Before training, you need to cache the latents and text encoder outputs. This converts your images and captions into a format the model can use.
|
| 218 |
+
|
| 219 |
+
1. Click **"Set Default Paths"** to auto-fill model paths based on your ComfyUI directory
|
| 220 |
+
2. Verify the paths are correct:
|
| 221 |
+
- **VAE Path**: Path to the VAE model
|
| 222 |
+
- **Text Encoder 1 Path**: Path to the text encoder model
|
| 223 |
+
- **Text Encoder 2 Path**: (Only for some models, may be empty)
|
| 224 |
+
|
| 225 |
+
3. Click **"Cache Latents"** and wait for it to complete
|
| 226 |
+
- This encodes your images into latent space
|
| 227 |
+
- Watch the log output for progress
|
| 228 |
+
|
| 229 |
+
4. Click **"Cache Text Encoder Outputs"** and wait for it to complete
|
| 230 |
+
- This encodes your captions into embeddings
|
| 231 |
+
- This may take a while for the first run as the text encoder is loaded
|
| 232 |
+
|
| 233 |
+
---
|
| 234 |
+
|
| 235 |
+
### Step 5: Training
|
| 236 |
+
|
| 237 |
+
1. Click **"Set Recommended Parameters"** to auto-fill training settings for your model and VRAM
|
| 238 |
+
|
| 239 |
+
2. **Configure Required Settings**:
|
| 240 |
+
- **Base Model / DiT Path**: Path to the diffusion model (auto-filled if you click the recommended button)
|
| 241 |
+
- **Output Name**: Name for your LoRA file (e.g., `my_character_lora`)
|
| 242 |
+
|
| 243 |
+
3. **Basic Parameters** (can use defaults):
|
| 244 |
+
- **LoRA Dim**: LoRA rank/dimension (4-32, higher = more capacity but larger file)
|
| 245 |
+
- **Learning Rate**: How fast the model learns (default: 1e-3 (0.001), can decrease if training is unstable)
|
| 246 |
+
- **Epochs**: Number of times to train on all images. Default is adjusted based on image count; reduce if overfitting occurs.
|
| 247 |
+
- **Save Every N Epochs**: How often to save checkpoints
|
| 248 |
+
|
| 249 |
+
4. **Advanced Parameters** (expand "Advanced Parameters" accordion):
|
| 250 |
+
- **Discrete Flow Shift**: Which denoising step to emphasize (model-specific defaults recommended)
|
| 251 |
+
- **Block Swap**: Offloads model layers to CPU (use if VRAM is limited)
|
| 252 |
+
- **Mixed Precision**: Precision mode (bf16 recommended)
|
| 253 |
+
- **Gradient Checkpointing**: Reduces VRAM usage
|
| 254 |
+
- **FP8 options**: Further memory optimization
|
| 255 |
+
|
| 256 |
+
5. **Sample Image Generation** (optional):
|
| 257 |
+
- Enable **"Generate sample images during training"** to see progress
|
| 258 |
+
- Enter a sample prompt that represents what you're training
|
| 259 |
+
- Set the sample image size and frequency
|
| 260 |
+
|
| 261 |
+
6. Click **"Start Training"** to begin
|
| 262 |
+
- A new command window will open showing training progress
|
| 263 |
+
- Training progress is displayed in the new window
|
| 264 |
+
- The GUI will show a message confirming training has started
|
| 265 |
+
|
| 266 |
+
---
|
| 267 |
+
|
| 268 |
+
### Step 6: Post-Processing (Optional)
|
| 269 |
+
|
| 270 |
+
Z-Image LoRAs need to be converted for use in ComfyUI. Follow these steps:
|
| 271 |
+
|
| 272 |
+
1. Click **"Set Default Paths"** to auto-fill paths based on your output name
|
| 273 |
+
2. Verify the paths:
|
| 274 |
+
- **Input LoRA Path**: Path to your trained LoRA
|
| 275 |
+
- **Output ComfyUI LoRA Path**: Where to save the converted LoRA
|
| 276 |
+
3. Click **"Convert to ComfyUI Format"**
|
| 277 |
+
|
| 278 |
+
---
|
| 279 |
+
|
| 280 |
+
## Required Model Files
|
| 281 |
+
|
| 282 |
+
### Z-Image-Turbo
|
| 283 |
+
|
| 284 |
+
For text encoder and VAE model files, download them from the appropriate directory under `split_files` here: https://huggingface.co/Comfy-Org/z_image_turbo
|
| 285 |
+
|
| 286 |
+
| Type | Model file |
|
| 287 |
+
|------|------------|
|
| 288 |
+
| diffusion-models | Use `z_image_de_turbo_v1_bf16.safetensors` from ostris's [De-Turbo](https://huggingface.co/ostris/Z-Image-De-Turbo) |
|
| 289 |
+
| text-encoders | `qwen_3_4b.safetensors` |
|
| 290 |
+
| VAE | `ae.safetensors` |
|
| 291 |
+
|
| 292 |
+
### Qwen-Image
|
| 293 |
+
|
| 294 |
+
Download the required model files from the appropriate directory under `split_files` here: https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI
|
| 295 |
+
|
| 296 |
+
| Type | Model file |
|
| 297 |
+
|------|------------|
|
| 298 |
+
| diffusion-models | `qwen_image_bf16.safetensors` |
|
| 299 |
+
| text-encoders | `qwen_2.5_vl_7b.safetensors` |
|
| 300 |
+
| VAE | `qwen_image_vae.safetensors` |
|
| 301 |
+
|
| 302 |
+
---
|
| 303 |
+
|
| 304 |
+
## Field Descriptions
|
| 305 |
+
|
| 306 |
+
### Project Settings
|
| 307 |
+
|
| 308 |
+
| Field | Description |
|
| 309 |
+
|-------|-------------|
|
| 310 |
+
| Project Directory | Root folder for your training project. All data and outputs will be stored here. |
|
| 311 |
+
|
| 312 |
+
### Model Settings
|
| 313 |
+
|
| 314 |
+
| Field | Description |
|
| 315 |
+
|-------|-------------|
|
| 316 |
+
| Model Architecture | The base model to train a LoRA for. Z-Image-Turbo is faster; Qwen-Image produces higher quality. |
|
| 317 |
+
| VRAM Size | Your GPU's video memory. Affects recommended batch size and memory optimization settings. |
|
| 318 |
+
| ComfyUI Models Directory | Path to ComfyUI's `models` folder containing the required model files. |
|
| 319 |
+
|
| 320 |
+
### Dataset Settings
|
| 321 |
+
|
| 322 |
+
| Field | Description |
|
| 323 |
+
|-------|-------------|
|
| 324 |
+
| Resolution (W/H) | Training resolution. Images will be resized/cropped to this size. |
|
| 325 |
+
| Batch Size | Number of images processed simultaneously. Higher values train faster but use more VRAM. |
|
| 326 |
+
|
| 327 |
+
### Preprocessing
|
| 328 |
+
|
| 329 |
+
| Field | Description |
|
| 330 |
+
|-------|-------------|
|
| 331 |
+
| VAE Path | Path to the VAE model file used to encode images into latent space. |
|
| 332 |
+
| Text Encoder 1 Path | Path to the main text encoder model. |
|
| 333 |
+
| Text Encoder 2 Path | Path to secondary text encoder (if required by the model). |
|
| 334 |
+
|
| 335 |
+
### Training Parameters
|
| 336 |
+
|
| 337 |
+
| Field | Description |
|
| 338 |
+
|-------|-------------|
|
| 339 |
+
| Base Model / DiT Path | Path to the base diffusion model (DiT). |
|
| 340 |
+
| Output Name | Base name for saved LoRA files (without extension). |
|
| 341 |
+
| LoRA Dim | LoRA rank/dimension. Higher values capture more detail but create larger files. Common values: 4, 8, 16, 32. |
|
| 342 |
+
| Learning Rate | Speed of training. Higher = faster learning but may overshoot. Default: 1e-3 (0.001). |
|
| 343 |
+
| Epochs | Number of complete passes through all training images. |
|
| 344 |
+
| Save Every N Epochs | Frequency of checkpoint saves. Also controls sample image generation frequency. |
|
| 345 |
+
| Discrete Flow Shift | Flow matching parameter that affects training dynamics. Model-specific defaults are recommended. |
|
| 346 |
+
| Block Swap | Number of transformer blocks to offload to CPU. Use when VRAM is limited. Higher = less VRAM but slower. |
|
| 347 |
+
| Mixed Precision | Floating-point precision. bf16 recommended for modern GPUs. |
|
| 348 |
+
| Gradient Checkpointing | Reduces VRAM usage by recomputing some values. Slightly slower but uses less memory. |
|
| 349 |
+
| FP8 Scaled | Use FP8 precision for the base model. Reduces memory with minimal quality loss. |
|
| 350 |
+
| FP8 LLM | Use FP8 precision for the text encoder (LLM). Further reduces memory usage. |
|
| 351 |
+
| Additional Arguments | Extra command-line arguments for advanced users. |
|
| 352 |
+
|
| 353 |
+
### Sample Image Generation
|
| 354 |
+
|
| 355 |
+
| Field | Description |
|
| 356 |
+
|-------|-------------|
|
| 357 |
+
| Generate sample images | Enable to generate sample images during training. |
|
| 358 |
+
| Sample Prompt | Text prompt used to generate sample images. |
|
| 359 |
+
| Negative Prompt | What to avoid in sample images. |
|
| 360 |
+
| Sample Width/Height | Resolution for sample images. |
|
| 361 |
+
| Sample Every N Epochs | How often to generate samples. |
|
| 362 |
+
|
| 363 |
+
### Post-Processing
|
| 364 |
+
|
| 365 |
+
| Field | Description |
|
| 366 |
+
|-------|-------------|
|
| 367 |
+
| Input LoRA Path | Path to the trained LoRA file (in Musubi Tuner format). |
|
| 368 |
+
| Output ComfyUI LoRA Path | Where to save the converted LoRA (in ComfyUI format). |
|
| 369 |
+
|
| 370 |
+
---
|
| 371 |
+
|
| 372 |
+
## Troubleshooting
|
| 373 |
+
|
| 374 |
+
### "Python is not recognized"
|
| 375 |
+
- Make sure you checked "Add Python to PATH" during installation
|
| 376 |
+
- Try reinstalling Python with this option enabled
|
| 377 |
+
- Or manually add Python to your system PATH
|
| 378 |
+
|
| 379 |
+
### "uv is not recognized"
|
| 380 |
+
- Close and reopen Command Prompt after installing uv
|
| 381 |
+
- Try running the installation command again
|
| 382 |
+
|
| 383 |
+
### CUDA errors or out of memory
|
| 384 |
+
- Select a smaller VRAM size in the GUI to get more conservative settings
|
| 385 |
+
- Enable Block Swap to offload some computation to CPU
|
| 386 |
+
- Reduce batch size to 1
|
| 387 |
+
- Enable FP8 options for additional memory savings
|
| 388 |
+
|
| 389 |
+
### Training script exits with errors immediately
|
| 390 |
+
- Check the error message for clues
|
| 391 |
+
- Check if all paths are correct
|
| 392 |
+
- Make sure preprocessing (Cache Latents and Cache Text Encoder) completed successfully
|
| 393 |
+
|
| 394 |
+
### Slow training
|
| 395 |
+
- If Block Swap is enabled, training will be slower (this is expected when VRAM is limited)
|
| 396 |
+
- If VRAM is insufficient and shared VRAM is being used, performance will degrade significantly. Try reducing memory usage by using FP8 options, increasing Block Swap, or lowering batch size.
|
| 397 |
+
- Make sure you're using a GPU (not CPU)
|
| 398 |
+
- Check that your GPU drivers are up to date
|
| 399 |
+
|
| 400 |
+
### "Model not found" errors
|
| 401 |
+
- Verify that your ComfyUI models directory is correct
|
| 402 |
+
- Make sure you have downloaded the required models
|
| 403 |
+
- Check that the model filenames match what the GUI expects (see config_manager.py for exact filenames)
|
| 404 |
+
|
| 405 |
+
### GUI won't start
|
| 406 |
+
- Make sure you're in the correct directory (musubi-tuner folder)
|
| 407 |
+
- Make sure you're using the correct uv command (check your CUDA version)
|
| 408 |
+
|
| 409 |
+
---
|
| 410 |
+
|
| 411 |
+
## Project Folder Structure
|
| 412 |
+
|
| 413 |
+
After using the GUI, your project folder will look like this:
|
| 414 |
+
|
| 415 |
+
```
|
| 416 |
+
my_lora_project/
|
| 417 |
+
training/ # Your training images and captions
|
| 418 |
+
image001.jpg
|
| 419 |
+
image001.txt
|
| 420 |
+
...
|
| 421 |
+
cache/ # Preprocessed data (auto-created)
|
| 422 |
+
latent_cache/
|
| 423 |
+
text_encoder_cache/
|
| 424 |
+
models/ # Trained LoRA files (auto-created)
|
| 425 |
+
my_lora.safetensors
|
| 426 |
+
my_lora_comfy.safetensors
|
| 427 |
+
sample/ # Sample images generated during training
|
| 428 |
+
logs/ # TensorBoard logs (auto-created)
|
| 429 |
+
dataset_config.toml # Dataset configuration (auto-created)
|
| 430 |
+
musubi_project.toml # GUI Project settings (auto-created)
|
| 431 |
+
sample_prompt.txt # Sample prompt file (auto-created if enabled)
|
| 432 |
+
```
|
| 433 |
+
|
| 434 |
+
---
|
| 435 |
+
|
| 436 |
+
## Next Steps
|
| 437 |
+
|
| 438 |
+
After training your LoRA:
|
| 439 |
+
|
| 440 |
+
1. If you need to use it in ComfyUI (Z-Image LoRAs need conversion), convert it using the Post-Processing section
|
| 441 |
+
2. Copy the converted LoRA to your ComfyUI `models/loras` folder
|
| 442 |
+
3. Load it in ComfyUI using a LoRA loader node
|
| 443 |
+
|
| 444 |
+
For more advanced training options and command-line usage, refer to the main Musubi Tuner documentation in the `docs` folder.
|
src/musubi_tuner/gui/gui.py
ADDED
|
@@ -0,0 +1,1134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import glob
|
| 2 |
+
import gradio as gr
|
| 3 |
+
import os
|
| 4 |
+
import toml
|
| 5 |
+
from musubi_tuner.gui.config_manager import ConfigManager
|
| 6 |
+
from musubi_tuner.gui.i18n_data import I18N_DATA
|
| 7 |
+
|
| 8 |
+
config_manager = ConfigManager()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
i18n = gr.I18n(en=I18N_DATA["en"], ja=I18N_DATA["ja"])
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def construct_ui():
|
| 15 |
+
# I18N doesn't work for gr.Blocks title
|
| 16 |
+
# with gr.Blocks(title=i18n("app_title")) as demo:
|
| 17 |
+
with gr.Blocks(title="Musubi Tuner GUI") as demo:
|
| 18 |
+
gr.Markdown(i18n("app_header"))
|
| 19 |
+
gr.Markdown(i18n("app_desc"))
|
| 20 |
+
|
| 21 |
+
with gr.Accordion(i18n("acc_project"), open=True):
|
| 22 |
+
gr.Markdown(i18n("desc_project"))
|
| 23 |
+
with gr.Row():
|
| 24 |
+
project_dir = gr.Textbox(label=i18n("lbl_proj_dir"), placeholder=i18n("ph_proj_dir"), max_lines=1)
|
| 25 |
+
|
| 26 |
+
# Placeholder for project initialization or loading
|
| 27 |
+
init_btn = gr.Button(i18n("btn_init_project"))
|
| 28 |
+
project_status = gr.Markdown("")
|
| 29 |
+
|
| 30 |
+
with gr.Accordion(i18n("acc_model"), open=False):
|
| 31 |
+
gr.Markdown(i18n("desc_model"))
|
| 32 |
+
with gr.Row():
|
| 33 |
+
model_arch = gr.Dropdown(
|
| 34 |
+
label=i18n("lbl_model_arch"),
|
| 35 |
+
choices=[
|
| 36 |
+
"Qwen-Image",
|
| 37 |
+
"Z-Image-Turbo",
|
| 38 |
+
],
|
| 39 |
+
value="Qwen-Image",
|
| 40 |
+
)
|
| 41 |
+
vram_size = gr.Dropdown(label=i18n("lbl_vram"), choices=["12", "16", "24", "32", ">32"], value="24")
|
| 42 |
+
|
| 43 |
+
with gr.Row():
|
| 44 |
+
comfy_models_dir = gr.Textbox(label=i18n("lbl_comfy_dir"), placeholder=i18n("ph_comfy_dir"), max_lines=1)
|
| 45 |
+
|
| 46 |
+
# Validation for ComfyUI models directory
|
| 47 |
+
models_status = gr.Markdown("")
|
| 48 |
+
validate_models_btn = gr.Button(i18n("btn_validate_models"))
|
| 49 |
+
|
| 50 |
+
# Placeholder for Dataset Settings (Step 3)
|
| 51 |
+
gr.Markdown(i18n("header_dataset"))
|
| 52 |
+
gr.Markdown(i18n("desc_dataset"))
|
| 53 |
+
with gr.Row():
|
| 54 |
+
set_rec_settings_btn = gr.Button(i18n("btn_rec_res_batch"))
|
| 55 |
+
with gr.Row():
|
| 56 |
+
resolution_w = gr.Number(label=i18n("lbl_res_w"), value=1024, precision=0)
|
| 57 |
+
resolution_h = gr.Number(label=i18n("lbl_res_h"), value=1024, precision=0)
|
| 58 |
+
batch_size = gr.Number(label=i18n("lbl_batch_size"), value=1, precision=0)
|
| 59 |
+
|
| 60 |
+
gen_toml_btn = gr.Button(i18n("btn_gen_config"))
|
| 61 |
+
dataset_status = gr.Markdown("")
|
| 62 |
+
toml_preview = gr.Code(label=i18n("lbl_toml_preview"), interactive=False)
|
| 63 |
+
|
| 64 |
+
def load_project_settings(project_path):
    """Read musubi_project.toml from *project_path*; return {} when absent or unreadable."""
    loaded = {}
    try:
        settings_file = os.path.join(project_path, "musubi_project.toml")
        if os.path.exists(settings_file):
            with open(settings_file, "r", encoding="utf-8") as fp:
                loaded = toml.load(fp)
    except Exception as err:  # best-effort: a broken settings file must not crash the GUI
        print(f"Error loading project settings: {err}")
    return loaded
|
| 74 |
+
|
| 75 |
+
def load_dataset_config_content(project_path):
    """Return the raw text of dataset_config.toml, or "" when missing/unreadable."""
    text = ""
    try:
        cfg = os.path.join(project_path, "dataset_config.toml")
        if os.path.exists(cfg):
            with open(cfg, "r", encoding="utf-8") as fp:
                text = fp.read()
    except Exception as err:  # best-effort preview; never crash the UI
        print(f"Error reading dataset config: {err}")
    return text
|
| 85 |
+
|
| 86 |
+
def save_project_settings(project_path, **kwargs):
    """Merge *kwargs* into musubi_project.toml under *project_path* (best-effort).

    Existing keys not present in *kwargs* are preserved, so callers may send
    partial updates.
    """
    try:
        # Start from whatever is already on disk so partial updates work.
        current = load_project_settings(project_path)
        current.update(kwargs)

        target = os.path.join(project_path, "musubi_project.toml")
        with open(target, "w", encoding="utf-8") as fp:
            toml.dump(current, fp)
    except Exception as err:  # never let a settings write failure break the UI flow
        print(f"Error saving project settings: {err}")
|
| 98 |
+
|
| 99 |
+
def init_project(path):
    """Create the project skeleton and populate the UI from saved settings.

    Returns a 33-tuple: a status message followed by values for the 32 bound
    UI components (in the exact order the click handler wires its outputs).
    When nothing should change (missing path or failure), every component
    slot is gr.update().
    """
    NUM_COMPONENTS = 32  # components wired after the status message

    def _no_change(msg):
        # One status message plus "leave unchanged" for every component.
        # Replaces 64 copy-pasted gr.update() lines from the original.
        return (msg,) + tuple(gr.update() for _ in range(NUM_COMPONENTS))

    if not path:
        return _no_change("Please enter a project directory path.")
    try:
        os.makedirs(os.path.join(path, "training"), exist_ok=True)

        # Load settings if available
        settings = load_project_settings(path)
        new_model = settings.get("model_arch", "Qwen-Image")
        new_vram = settings.get("vram_size", "16")
        new_comfy = settings.get("comfy_models_dir", "")
        new_w = settings.get("resolution_w", 1328)
        new_h = settings.get("resolution_h", 1328)
        new_batch = settings.get("batch_size", 1)
        new_vae = settings.get("vae_path", "")
        new_te1 = settings.get("text_encoder1_path", "")
        new_te2 = settings.get("text_encoder2_path", "")

        # Training params
        new_dit = settings.get("dit_path", "")
        new_out_nm = settings.get("output_name", "my_lora")
        new_dim = settings.get("network_dim", 4)
        new_lr = settings.get("learning_rate", 1e-4)
        new_epochs = settings.get("num_epochs", 16)
        new_save_n = settings.get("save_every_n_epochs", 1)
        new_flow = settings.get("discrete_flow_shift", 2.0)
        new_swap = settings.get("block_swap", 0)
        new_use_pinned_memory_for_block_swap = settings.get("use_pinned_memory_for_block_swap", False)
        new_prec = settings.get("mixed_precision", "bf16")
        new_grad_cp = settings.get("gradient_checkpointing", True)
        new_fp8_s = settings.get("fp8_scaled", True)
        new_fp8_l = settings.get("fp8_llm", True)
        new_add_args = settings.get("additional_args", "")

        # Sample image params (fall back to the dataset resolution)
        new_sample_enable = settings.get("sample_images", False)
        new_sample_every_n = settings.get("sample_every_n_epochs", 1)
        new_sample_prompt = settings.get("sample_prompt", "")
        new_sample_negative = settings.get("sample_negative_prompt", "")
        new_sample_w = settings.get("sample_w", new_w)
        new_sample_h = settings.get("sample_h", new_h)

        # Post-processing params
        new_in_lora = settings.get("input_lora_path", "")
        new_out_comfy = settings.get("output_comfy_lora_path", "")

        # Load dataset config content for the preview box
        preview_content = load_dataset_config_content(path)

        msg = f"Project initialized at {path}. "
        if settings:
            msg += " Settings loaded."
        msg += " 'training' folder ready. Configure the dataset in the 'training' folder. Images and caption files (same name as image, extension is '.txt') should be placed in the 'training' folder."
        msg += "\n\nプロジェクトが初期化されました。"
        if settings:
            msg += "設定が読み込まれました。"
        msg += "'training' フォルダが準備されました。画像とキャプションファイル(画像と同じファイル名で拡張子は '.txt')を配置してください。"

        return (
            msg,
            new_model,
            new_vram,
            new_comfy,
            new_w,
            new_h,
            new_batch,
            preview_content,
            new_vae,
            new_te1,
            new_te2,
            new_dit,
            new_out_nm,
            new_dim,
            new_lr,
            new_epochs,
            new_save_n,
            new_flow,
            new_swap,
            new_use_pinned_memory_for_block_swap,
            new_prec,
            new_grad_cp,
            new_fp8_s,
            new_fp8_l,
            new_add_args,
            new_sample_enable,
            new_sample_every_n,
            new_sample_prompt,
            new_sample_negative,
            new_sample_w,
            new_sample_h,
            new_in_lora,
            new_out_comfy,
        )
    except Exception as e:
        return _no_change(f"Error initializing project: {str(e)}")
|
| 262 |
+
|
| 263 |
+
def generate_config(project_path, w, h, batch, model_val, vram_val, comfy_val, vae_val, te1_val, te2_val):
    """Persist the current UI settings and write dataset_config.toml for the project.

    Returns (status_message, toml_text); toml_text is "" on failure.
    """
    if not project_path:
        return "Error: Project directory not specified.\nエラー: プロジェクトディレクトリが指定されていません。", ""

    # Persist everything the user has entered so far.
    save_project_settings(
        project_path,
        model_arch=model_val,
        vram_size=vram_val,
        comfy_models_dir=comfy_val,
        resolution_w=w,
        resolution_h=h,
        batch_size=batch,
        vae_path=vae_val,
        text_encoder1_path=te1_val,
        text_encoder2_path=te2_val,
    )

    # Use forward slashes so the generated TOML is valid on Windows as well.
    project_path = os.path.abspath(project_path)
    image_dir = os.path.join(project_path, "training").replace("\\", "/")
    cache_dir = os.path.join(project_path, "cache").replace("\\", "/")

    toml_content = f"""# Auto-generated by Musubi Tuner GUI

[general]
resolution = [{int(w)}, {int(h)}]
caption_extension = ".txt"
batch_size = {int(batch)}
enable_bucket = true
bucket_no_upscale = false

[[datasets]]
image_directory = "{image_dir}"
cache_directory = "{cache_dir}"
num_repeats = 1
"""
    try:
        config_path = os.path.join(project_path, "dataset_config.toml")
        with open(config_path, "w", encoding="utf-8") as fp:
            fp.write(toml_content)
    except Exception as err:
        return f"Error generating config / 設定ファイルの生成に失敗しました: {str(err)}", ""
    return f"Successfully generated config at / 設定ファイルが作成されました: {config_path}", toml_content
|
| 307 |
+
|
| 308 |
+
with gr.Accordion(i18n("acc_preprocessing"), open=False):
|
| 309 |
+
gr.Markdown(i18n("desc_preprocessing"))
|
| 310 |
+
with gr.Row():
|
| 311 |
+
set_preprocessing_defaults_btn = gr.Button(i18n("btn_set_paths"))
|
| 312 |
+
with gr.Row():
|
| 313 |
+
vae_path = gr.Textbox(label=i18n("lbl_vae_path"), placeholder=i18n("ph_vae_path"), max_lines=1)
|
| 314 |
+
text_encoder1_path = gr.Textbox(label=i18n("lbl_te1_path"), placeholder=i18n("ph_te1_path"), max_lines=1)
|
| 315 |
+
text_encoder2_path = gr.Textbox(label=i18n("lbl_te2_path"), placeholder=i18n("ph_te2_path"), max_lines=1)
|
| 316 |
+
|
| 317 |
+
with gr.Row():
|
| 318 |
+
cache_latents_btn = gr.Button(i18n("btn_cache_latents"))
|
| 319 |
+
cache_text_btn = gr.Button(i18n("btn_cache_text"))
|
| 320 |
+
|
| 321 |
+
# Simple output area for caching logs
|
| 322 |
+
caching_output = gr.Textbox(label=i18n("lbl_cache_log"), lines=10, interactive=False)
|
| 323 |
+
|
| 324 |
+
def validate_models_dir(path):
    """Check that *path* looks like a ComfyUI ``models`` directory.

    A valid directory contains the ``diffusion_models``, ``vae`` and
    ``text_encoders`` subdirectories. Returns a bilingual status string.
    """
    if not path:
        return "Please enter a ComfyUI models directory. / ComfyUIのmodelsディレクトリを入力してください。"

    required_subdirs = ["diffusion_models", "vae", "text_encoders"]
    missing = [d for d in required_subdirs if not os.path.exists(os.path.join(path, d))]

    if missing:
        # FIX: original message contained mojibake (U+FFFD) where "が" belongs.
        return f"Error: Missing subdirectories in models folder / modelsフォルダに以下のサブディレクトリが見つかりません: {', '.join(missing)}"

    return "Valid ComfyUI models directory structure found / 有効なComfyUI modelsディレクトリ構造が見つかりました。"
|
| 338 |
+
|
| 339 |
+
def set_recommended_settings(project_path, model_arch, vram_val):
    """Look up the recommended resolution and batch size for the model/VRAM combo."""
    width, height = config_manager.get_resolution(model_arch)
    batch = config_manager.get_batch_size(model_arch, vram_val)

    # Persist only when a project is open; the UI receives the values either way.
    if project_path:
        save_project_settings(project_path, resolution_w=width, resolution_h=height, batch_size=batch)
    return width, height, batch
|
| 346 |
+
|
| 347 |
+
def set_preprocessing_defaults(project_path, comfy_models_dir, model_arch):
    """Derive default VAE / text-encoder paths from the ComfyUI models directory."""
    if not comfy_models_dir:
        # Nothing to derive from; leave the three path textboxes untouched.
        return gr.update(), gr.update(), gr.update()

    vae_default, te1_default, te2_default = config_manager.get_preprocessing_paths(model_arch, comfy_models_dir)
    # Textboxes expect a string, never None.
    te2_default = te2_default or ""

    if project_path:
        save_project_settings(
            project_path, vae_path=vae_default, text_encoder1_path=te1_default, text_encoder2_path=te2_default
        )

    return vae_default, te1_default, te2_default
|
| 361 |
+
|
| 362 |
+
def set_training_defaults(project_path, comfy_models_dir, model_arch, vram_val):
    """Fill the training form with recommended values for the chosen model/VRAM.

    The epoch count is scaled so the total number of optimizer steps lands
    near the architecture's recommended step budget.
    """
    # NOTE(review): dataset size is inferred from cached latent files, so this
    # assumes latent caching already ran; otherwise it falls back to 16 epochs.
    cache_dir = os.path.join(project_path, "cache")
    suffix = "_qi" if model_arch == "Qwen-Image" else "_zi"
    if os.path.exists(cache_dir):
        num_images = len(glob.glob(os.path.join(cache_dir, "*" + suffix + ".safetensors")))
    else:
        num_images = 0

    # Architecture/VRAM-specific recommendations.
    defaults = config_manager.get_training_defaults(model_arch, vram_val, comfy_models_dir)

    # Aim for roughly default_num_steps total steps.
    target_steps = defaults.get("default_num_steps", 1000)
    epochs = max(1, int(target_steps / num_images)) if num_images > 0 else 16
    sample_every_n_epochs = epochs // 4 if epochs >= 4 else 1

    dit_default = defaults.get("dit_path", "")
    dim = defaults.get("network_dim", 4)
    lr = defaults.get("learning_rate", 1e-4)
    save_n = defaults.get("save_every_n_epochs", 1)
    flow = defaults.get("discrete_flow_shift", 2.0)
    swap = defaults.get("block_swap", 0)
    pinned = defaults.get("use_pinned_memory_for_block_swap", False)
    prec = defaults.get("mixed_precision", "bf16")
    grad_cp = defaults.get("gradient_checkpointing", True)
    fp8_s = defaults.get("fp8_scaled", True)
    fp8_l = defaults.get("fp8_llm", True)

    sample_w_default, sample_h_default = config_manager.get_resolution(model_arch)

    if project_path:
        save_project_settings(
            project_path,
            dit_path=dit_default,
            network_dim=dim,
            learning_rate=lr,
            num_epochs=epochs,
            save_every_n_epochs=save_n,
            discrete_flow_shift=flow,
            block_swap=swap,
            use_pinned_memory_for_block_swap=pinned,
            mixed_precision=prec,
            gradient_checkpointing=grad_cp,
            fp8_scaled=fp8_s,
            fp8_llm=fp8_l,
            vram_size=vram_val,  # keep the VRAM choice in sync as well
            sample_every_n_epochs=sample_every_n_epochs,
            sample_w=sample_w_default,
            sample_h=sample_h_default,
        )

    return (
        dit_default,
        dim,
        lr,
        epochs,
        save_n,
        flow,
        swap,
        pinned,
        prec,
        grad_cp,
        fp8_s,
        fp8_l,
        sample_every_n_epochs,
        sample_w_default,
        sample_h_default,
    )
|
| 432 |
+
|
| 433 |
+
def set_post_processing_defaults(project_path, output_nm):
    """Propose input/output LoRA paths under the project's models folder."""
    if not project_path or not output_nm:
        return gr.update(), gr.update()

    base = os.path.join(project_path, "models")
    src_lora = os.path.join(base, f"{output_nm}.safetensors")
    dst_lora = os.path.join(base, f"{output_nm}_comfy.safetensors")

    save_project_settings(project_path, input_lora_path=src_lora, output_comfy_lora_path=dst_lora)

    return src_lora, dst_lora
|
| 444 |
+
|
| 445 |
+
import subprocess
|
| 446 |
+
import sys
|
| 447 |
+
|
| 448 |
+
def run_command(command):
    """Execute *command* via the shell and stream the accumulated log.

    Yields the entire log (not just the new line) after each line of output,
    then one final time with a success/failure footer appended.

    NOTE(review): *command* is a pre-joined shell string (callers build it
    with " ".join), so paths containing spaces would need quoting upstream.
    """
    try:
        proc = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True,
            text=True,
            encoding="utf-8",
            # Suppress the console window on Windows; 0 is a no-op elsewhere.
            creationflags=subprocess.CREATE_NO_WINDOW if os.name == "nt" else 0,
        )

        log = command + "\n\n"
        for chunk in proc.stdout:
            log += chunk
            yield log

        proc.wait()
        if proc.returncode == 0:
            log += "\nProcess completed successfully / プロセスが正常に完了しました"
        else:
            log += (
                f"\nError: Process exited with code / プロセスが次のコードでエラー終了しました: {proc.returncode}"
            )
        yield log

    except Exception as e:
        yield f"Error executing command / コマンドの実行中にエラーが発生しました: {str(e)}"
|
| 477 |
+
|
| 478 |
+
def cache_latents(project_path, vae_path_val, te1, te2, model, comfy, w, h, batch, vram_val):
    """Validate inputs, persist settings, then run the latent-caching script.

    Generator: yields progress/log text for the caching output box.
    """
    if not project_path:
        yield "Error: Project directory not set. / プロジェクトディレクトリが設定されていません。"
        return

    # Persist the current UI state before doing any work.
    save_project_settings(
        project_path,
        model_arch=model,
        comfy_models_dir=comfy,
        resolution_w=w,
        resolution_h=h,
        batch_size=batch,
        vae_path=vae_path_val,
        text_encoder1_path=te1,
        text_encoder2_path=te2,
    )

    if not vae_path_val:
        yield "Error: VAE path not set. / VAEのパスが設定されていません。"
        return
    if not os.path.exists(vae_path_val):
        yield f"Error: VAE model not found at / 指定されたパスにVAEモデルが見つかりません: {vae_path_val}"
        return

    config_path = os.path.join(project_path, "dataset_config.toml")
    if not os.path.exists(config_path):
        yield f"Error: dataset_config.toml not found in {project_path}. Please generate it first. / dataset_config.tomlが {project_path} に見つかりません。先に設定ファイルを生成してください。"
        return

    # Pick the architecture-specific caching script.
    script_name = "qwen_image_cache_latents.py" if model == "Qwen-Image" else "zimage_cache_latents.py"
    script_path = os.path.join("src", "musubi_tuner", script_name)

    cmd = [sys.executable, script_path, "--dataset_config", config_path, "--vae", vae_path_val]
    # No architecture-specific extra arguments are needed for latent caching yet.

    command_str = " ".join(cmd)
    yield f"Starting Latent Caching. Please wait for the first log to appear. / Latentのキャッシュを開始します。最初のログが表示されるまでにしばらくかかります。\nCommand: {command_str}\n\n"

    yield from run_command(command_str)
|
| 527 |
+
|
| 528 |
+
def cache_text_encoder(project_path, te1_path_val, te2_path_val, vae, model, comfy, w, h, batch, vram_val):
    """Validate inputs, persist settings, then run the text-encoder caching script.

    Generator: yields progress/log text for the caching output box.
    te2 is currently unused (Z-Image only needs one encoder) but kept in the
    signature for other/future models.
    """
    if not project_path:
        yield "Error: Project directory not set. / プロジェクトディレクトリが設定されていません。"
        return

    # Persist the current UI state before doing any work.
    save_project_settings(
        project_path,
        model_arch=model,
        comfy_models_dir=comfy,
        resolution_w=w,
        resolution_h=h,
        batch_size=batch,
        vae_path=vae,
        text_encoder1_path=te1_path_val,
        text_encoder2_path=te2_path_val,
    )

    if not te1_path_val:
        yield "Error: Text Encoder 1 path not set. / Text Encoder 1のパスが設定されていません。"
        return
    if not os.path.exists(te1_path_val):
        yield f"Error: Text Encoder 1 model not found at / 指定されたパスにText Encoder 1モデルが見つかりません: {te1_path_val}"
        return

    config_path = os.path.join(project_path, "dataset_config.toml")
    if not os.path.exists(config_path):
        yield f"Error: dataset_config.toml not found in {project_path}. Please generate it first. / dataset_config.tomlが {project_path} に見つかりません。先に設定ファイルを生成してください。"
        return

    # Pick the architecture-specific caching script.
    script_name = (
        "qwen_image_cache_text_encoder_outputs.py" if model == "Qwen-Image" else "zimage_cache_text_encoder_outputs.py"
    )
    script_path = os.path.join("src", "musubi_tuner", script_name)

    cmd = [
        sys.executable,
        script_path,
        "--dataset_config",
        config_path,
        "--text_encoder",
        te1_path_val,
        "--batch_size",
        "1",  # conservative default to keep VRAM usage low
    ]

    # Qwen's VL encoder is large; quantize it on low-VRAM (<=16GB) machines.
    if model == "Qwen-Image" and vram_val in ["12", "16"]:
        cmd.append("--fp8_vl")

    command_str = " ".join(cmd)
    yield f"Starting Text Encoder Caching. Please wait for the first log to appear. / Text Encoderのキャッシュを開始します。最初のログが表示されるまでにしばらくかかります。\nCommand: {command_str}\n\n"

    yield from run_command(command_str)
|
| 590 |
+
|
| 591 |
+
with gr.Accordion(i18n("acc_training"), open=False):
|
| 592 |
+
gr.Markdown(i18n("desc_training_basic"))
|
| 593 |
+
training_model_info = gr.Markdown(i18n("desc_training_zimage"))
|
| 594 |
+
|
| 595 |
+
with gr.Row():
|
| 596 |
+
set_training_defaults_btn = gr.Button(i18n("btn_rec_params"))
|
| 597 |
+
with gr.Row():
|
| 598 |
+
dit_path = gr.Textbox(label=i18n("lbl_dit_path"), placeholder=i18n("ph_dit_path"), max_lines=1)
|
| 599 |
+
|
| 600 |
+
with gr.Row():
|
| 601 |
+
output_name = gr.Textbox(label=i18n("lbl_output_name"), value="my_lora", max_lines=1)
|
| 602 |
+
|
| 603 |
+
with gr.Group():
|
| 604 |
+
gr.Markdown(i18n("header_basic_params"))
|
| 605 |
+
with gr.Row():
|
| 606 |
+
network_dim = gr.Number(label=i18n("lbl_dim"), value=4)
|
| 607 |
+
learning_rate = gr.Number(label=i18n("lbl_lr"), value=1e-4)
|
| 608 |
+
num_epochs = gr.Number(label=i18n("lbl_epochs"), value=16)
|
| 609 |
+
save_every_n_epochs = gr.Number(label=i18n("lbl_save_every"), value=1)
|
| 610 |
+
|
| 611 |
+
with gr.Group():
|
| 612 |
+
with gr.Row():
|
| 613 |
+
discrete_flow_shift = gr.Number(label=i18n("lbl_flow_shift"), value=2.0)
|
| 614 |
+
block_swap = gr.Slider(label=i18n("lbl_block_swap"), minimum=0, maximum=60, step=1, value=0)
|
| 615 |
+
use_pinned_memory_for_block_swap = gr.Checkbox(
|
| 616 |
+
label=i18n("lbl_use_pinned_memory_for_block_swap"),
|
| 617 |
+
value=False,
|
| 618 |
+
)
|
| 619 |
+
|
| 620 |
+
with gr.Accordion(i18n("accordion_advanced"), open=False):
|
| 621 |
+
gr.Markdown(i18n("desc_training_detailed"))
|
| 622 |
+
|
| 623 |
+
with gr.Row():
|
| 624 |
+
mixed_precision = gr.Dropdown(label=i18n("lbl_mixed_precision"), choices=["bf16", "fp16", "no"], value="bf16")
|
| 625 |
+
gradient_checkpointing = gr.Checkbox(label=i18n("lbl_grad_cp"), value=True)
|
| 626 |
+
|
| 627 |
+
with gr.Row():
|
| 628 |
+
fp8_scaled = gr.Checkbox(label=i18n("lbl_fp8_scaled"), value=True)
|
| 629 |
+
fp8_llm = gr.Checkbox(label=i18n("lbl_fp8_llm"), value=True)
|
| 630 |
+
|
| 631 |
+
with gr.Group():
|
| 632 |
+
gr.Markdown(i18n("header_sample_images"))
|
| 633 |
+
sample_images = gr.Checkbox(label=i18n("lbl_enable_sample"), value=False)
|
| 634 |
+
with gr.Row():
|
| 635 |
+
sample_prompt = gr.Textbox(label=i18n("lbl_sample_prompt"), placeholder=i18n("ph_sample_prompt"))
|
| 636 |
+
with gr.Row():
|
| 637 |
+
sample_negative_prompt = gr.Textbox(
|
| 638 |
+
label=i18n("lbl_sample_negative_prompt"),
|
| 639 |
+
placeholder=i18n("ph_sample_negative_prompt"),
|
| 640 |
+
)
|
| 641 |
+
with gr.Row():
|
| 642 |
+
sample_w = gr.Number(label=i18n("lbl_sample_w"), value=1024, precision=0)
|
| 643 |
+
sample_h = gr.Number(label=i18n("lbl_sample_h"), value=1024, precision=0)
|
| 644 |
+
sample_every_n = gr.Number(label=i18n("lbl_sample_every_n"), value=1, precision=0)
|
| 645 |
+
|
| 646 |
+
with gr.Accordion(i18n("accordion_additional"), open=False):
|
| 647 |
+
gr.Markdown(i18n("desc_additional_args"))
|
| 648 |
+
additional_args = gr.Textbox(label=i18n("lbl_additional_args"), placeholder=i18n("ph_additional_args"))
|
| 649 |
+
|
| 650 |
+
training_status = gr.Markdown("")
|
| 651 |
+
start_training_btn = gr.Button(i18n("btn_start_training"), variant="primary")
|
| 652 |
+
|
| 653 |
+
with gr.Accordion(i18n("acc_post_processing"), open=False):
|
| 654 |
+
gr.Markdown(i18n("desc_post_proc"))
|
| 655 |
+
with gr.Row():
|
| 656 |
+
set_post_proc_defaults_btn = gr.Button(i18n("btn_set_paths"))
|
| 657 |
+
with gr.Row():
|
| 658 |
+
input_lora = gr.Textbox(label=i18n("lbl_input_lora"), placeholder=i18n("ph_input_lora"), max_lines=1)
|
| 659 |
+
output_comfy_lora = gr.Textbox(label=i18n("lbl_output_comfy"), placeholder=i18n("ph_output_comfy"), max_lines=1)
|
| 660 |
+
|
| 661 |
+
convert_btn = gr.Button(i18n("btn_convert"))
|
| 662 |
+
conversion_log = gr.Textbox(label=i18n("lbl_conversion_log"), lines=5, interactive=False)
|
| 663 |
+
|
| 664 |
+
def convert_lora_to_comfy(project_path, input_path, output_path, model, comfy, w, h, batch, vae, te1, te2):
    """Convert a trained LoRA into ComfyUI format via the converter script.

    Generator: yields progress/log text for the conversion log box.
    """
    if not project_path:
        yield "Error: Project directory not set. / プロジェクトディレクトリが設定されていません。"
        return

    # Persist the current UI state before doing any work.
    save_project_settings(
        project_path,
        model_arch=model,
        comfy_models_dir=comfy,
        resolution_w=w,
        resolution_h=h,
        batch_size=batch,
        vae_path=vae,
        text_encoder1_path=te1,
        text_encoder2_path=te2,
        input_lora_path=input_path,
        output_comfy_lora_path=output_path,
    )

    if not input_path or not output_path:
        yield "Error: Input and Output paths must be specified. / 入力・出力パスを指定してください。"
        return
    if not os.path.exists(input_path):
        yield f"Error: Input file not found at {input_path} / 入力ファイルが見つかりません: {input_path}"
        return

    # NOTE(review): the Z-Image converter script is invoked for every model
    # architecture — confirm it is also valid for Qwen-Image LoRAs.
    script_path = os.path.join("src", "musubi_tuner", "networks", "convert_z_image_lora_to_comfy.py")
    if not os.path.exists(script_path):
        yield f"Error: Conversion script not found at {script_path} / 変換スクリプトが見つかりません: {script_path}"
        return

    command_str = " ".join([sys.executable, script_path, input_path, output_path])
    yield f"Starting Conversion. / 変換を開始します。\nCommand: {command_str}\n\n"

    yield from run_command(command_str)
|
| 704 |
+
|
| 705 |
+
def start_training(
|
| 706 |
+
project_path,
|
| 707 |
+
model,
|
| 708 |
+
dit,
|
| 709 |
+
vae,
|
| 710 |
+
te1,
|
| 711 |
+
output_nm,
|
| 712 |
+
dim,
|
| 713 |
+
lr,
|
| 714 |
+
epochs,
|
| 715 |
+
save_n,
|
| 716 |
+
flow_shift,
|
| 717 |
+
swap,
|
| 718 |
+
use_pinned_memory_for_block_swap,
|
| 719 |
+
prec,
|
| 720 |
+
grad_cp,
|
| 721 |
+
fp8_s,
|
| 722 |
+
fp8_l,
|
| 723 |
+
add_args,
|
| 724 |
+
should_sample_images,
|
| 725 |
+
sample_every_n,
|
| 726 |
+
sample_prompt_val,
|
| 727 |
+
sample_negative_prompt_val,
|
| 728 |
+
sample_w_val,
|
| 729 |
+
sample_h_val,
|
| 730 |
+
):
|
| 731 |
+
import shlex
|
| 732 |
+
|
| 733 |
+
if not project_path:
|
| 734 |
+
return "Error: Project directory not set. / プロジェクトディレクトリが設定されていません。"
|
| 735 |
+
if not dit:
|
| 736 |
+
return "Error: Base Model / DiT Path not set. / Base Model / DiTのパスが設定されていません。"
|
| 737 |
+
if not os.path.exists(dit):
|
| 738 |
+
return f"Error: Base Model / DiT file not found at {dit} / Base Model / DiTファイルが見つかりません: {dit}"
|
| 739 |
+
if not vae:
|
| 740 |
+
return "Error: VAE Path not set (configure in Preprocessing). / VAEのパスが設定されていません (Preprocessingで設定してください)。"
|
| 741 |
+
if not te1:
|
| 742 |
+
return "Error: Text Encoder 1 Path not set (configure in Preprocessing). / Text Encoder 1のパスが設定されていません (Preprocessingで設定してください)。"
|
| 743 |
+
|
| 744 |
+
dataset_config = os.path.join(project_path, "dataset_config.toml")
|
| 745 |
+
if not os.path.exists(dataset_config):
|
| 746 |
+
return "Error: dataset_config.toml not found. Please generate it. / dataset_config.toml が見つかりません。生成してください。"
|
| 747 |
+
|
| 748 |
+
output_dir = os.path.join(project_path, "models")
|
| 749 |
+
logging_dir = os.path.join(project_path, "logs")
|
| 750 |
+
|
| 751 |
+
# Save settings
|
| 752 |
+
save_project_settings(
|
| 753 |
+
project_path,
|
| 754 |
+
dit_path=dit,
|
| 755 |
+
output_name=output_nm,
|
| 756 |
+
network_dim=dim,
|
| 757 |
+
learning_rate=lr,
|
| 758 |
+
num_epochs=epochs,
|
| 759 |
+
save_every_n_epochs=save_n,
|
| 760 |
+
discrete_flow_shift=flow_shift,
|
| 761 |
+
block_swap=swap,
|
| 762 |
+
use_pinned_memory_for_block_swap=use_pinned_memory_for_block_swap,
|
| 763 |
+
mixed_precision=prec,
|
| 764 |
+
gradient_checkpointing=grad_cp,
|
| 765 |
+
fp8_scaled=fp8_s,
|
| 766 |
+
fp8_llm=fp8_l,
|
| 767 |
+
vae_path=vae,
|
| 768 |
+
text_encoder1_path=te1,
|
| 769 |
+
additional_args=add_args,
|
| 770 |
+
sample_images=should_sample_images,
|
| 771 |
+
sample_every_n_epochs=sample_every_n,
|
| 772 |
+
sample_prompt=sample_prompt_val,
|
| 773 |
+
sample_negative_prompt=sample_negative_prompt_val,
|
| 774 |
+
sample_w=sample_w_val,
|
| 775 |
+
sample_h=sample_h_val,
|
| 776 |
+
)
|
| 777 |
+
|
| 778 |
+
# Model specific command modification
|
| 779 |
+
if model == "Z-Image-Turbo":
|
| 780 |
+
arch_name = "zimage"
|
| 781 |
+
elif model == "Qwen-Image":
|
| 782 |
+
arch_name = "qwen_image"
|
| 783 |
+
|
| 784 |
+
# Construct command for cmd /c to run and then pause
|
| 785 |
+
# We assume 'accelerate' is in the PATH.
|
| 786 |
+
script_path = os.path.join("src", "musubi_tuner", f"{arch_name}_train_network.py")
|
| 787 |
+
|
| 788 |
+
# Inner command list - arguments for accelerate launch
|
| 789 |
+
inner_cmd = [
|
| 790 |
+
"accelerate",
|
| 791 |
+
"launch",
|
| 792 |
+
# accelerate args: we don't configure default_config.yaml, so we need to specify all here
|
| 793 |
+
"--num_cpu_threads_per_process",
|
| 794 |
+
"1",
|
| 795 |
+
"--mixed_precision",
|
| 796 |
+
prec,
|
| 797 |
+
"--dynamo_backend=no",
|
| 798 |
+
"--gpu_ids",
|
| 799 |
+
"all",
|
| 800 |
+
"--machine_rank",
|
| 801 |
+
"0",
|
| 802 |
+
"--main_training_function",
|
| 803 |
+
"main",
|
| 804 |
+
"--num_machines",
|
| 805 |
+
"1",
|
| 806 |
+
"--num_processes",
|
| 807 |
+
"1",
|
| 808 |
+
# script and its args
|
| 809 |
+
script_path,
|
| 810 |
+
"--dit",
|
| 811 |
+
dit,
|
| 812 |
+
"--vae",
|
| 813 |
+
vae,
|
| 814 |
+
"--text_encoder",
|
| 815 |
+
te1,
|
| 816 |
+
"--dataset_config",
|
| 817 |
+
dataset_config,
|
| 818 |
+
"--output_dir",
|
| 819 |
+
output_dir,
|
| 820 |
+
"--output_name",
|
| 821 |
+
output_nm,
|
| 822 |
+
"--network_module",
|
| 823 |
+
f"networks.lora_{arch_name}",
|
| 824 |
+
"--network_dim",
|
| 825 |
+
str(int(dim)),
|
| 826 |
+
"--optimizer_type",
|
| 827 |
+
"adamw8bit",
|
| 828 |
+
"--learning_rate",
|
| 829 |
+
str(lr),
|
| 830 |
+
"--max_train_epochs",
|
| 831 |
+
str(int(epochs)),
|
| 832 |
+
"--save_every_n_epochs",
|
| 833 |
+
str(int(save_n)),
|
| 834 |
+
"--timestep_sampling",
|
| 835 |
+
"shift",
|
| 836 |
+
"--weighting_scheme",
|
| 837 |
+
"none",
|
| 838 |
+
"--discrete_flow_shift",
|
| 839 |
+
str(flow_shift),
|
| 840 |
+
"--max_data_loader_n_workers",
|
| 841 |
+
"2",
|
| 842 |
+
"--persistent_data_loader_workers",
|
| 843 |
+
"--seed",
|
| 844 |
+
"42",
|
| 845 |
+
"--logging_dir",
|
| 846 |
+
logging_dir,
|
| 847 |
+
"--log_with",
|
| 848 |
+
"tensorboard",
|
| 849 |
+
]
|
| 850 |
+
|
| 851 |
+
# Sample image generation options
|
| 852 |
+
if should_sample_images:
|
| 853 |
+
sample_prompt_path = os.path.join(project_path, "sample_prompt.txt")
|
| 854 |
+
templates = {
|
| 855 |
+
# prompt, negative prompt, width, height, flow shift, steps, CFG scale, seed
|
| 856 |
+
"Qwen-Image": "{prompt} --n {neg} --w {w} --h {h} --fs 2.2 --s 20 --l 4.0 --d 1234",
|
| 857 |
+
"Z-Image-Turbo": "{prompt} --n {neg} --w {w} --h {h} --fs 3.0 --s 20 --l 5.0 --d 1234",
|
| 858 |
+
}
|
| 859 |
+
template = templates.get(model, templates["Z-Image-Turbo"])
|
| 860 |
+
prompt_str = (sample_prompt_val or "").replace("\n", " ").strip()
|
| 861 |
+
neg_str = (sample_negative_prompt_val or "").replace("\n", " ").strip()
|
| 862 |
+
try:
|
| 863 |
+
w_int = int(sample_w_val)
|
| 864 |
+
h_int = int(sample_h_val)
|
| 865 |
+
except Exception:
|
| 866 |
+
return "Error: Sample width/height must be integers. / サンプル画像の幅と高さは整数で指定してください。"
|
| 867 |
+
|
| 868 |
+
line = template.format(prompt=prompt_str, neg=neg_str, w=w_int, h=h_int)
|
| 869 |
+
try:
|
| 870 |
+
with open(sample_prompt_path, "w", encoding="utf-8") as f:
|
| 871 |
+
f.write(line + "\n")
|
| 872 |
+
except Exception as e:
|
| 873 |
+
return f"Error writing sample_prompt.txt / sample_prompt.txt の作成に失敗しました: {str(e)}"
|
| 874 |
+
|
| 875 |
+
inner_cmd.extend(
|
| 876 |
+
[
|
| 877 |
+
"--sample_prompts",
|
| 878 |
+
sample_prompt_path,
|
| 879 |
+
"--sample_at_first",
|
| 880 |
+
"--sample_every_n_epochs",
|
| 881 |
+
str(int(sample_every_n)),
|
| 882 |
+
]
|
| 883 |
+
)
|
| 884 |
+
|
| 885 |
+
if prec != "no":
|
| 886 |
+
inner_cmd.extend(["--mixed_precision", prec])
|
| 887 |
+
|
| 888 |
+
if grad_cp:
|
| 889 |
+
inner_cmd.append("--gradient_checkpointing")
|
| 890 |
+
|
| 891 |
+
if fp8_s:
|
| 892 |
+
inner_cmd.append("--fp8_base")
|
| 893 |
+
inner_cmd.append("--fp8_scaled")
|
| 894 |
+
|
| 895 |
+
if fp8_l:
|
| 896 |
+
if model == "Z-Image-Turbo":
|
| 897 |
+
inner_cmd.append("--fp8_llm")
|
| 898 |
+
elif model == "Qwen-Image":
|
| 899 |
+
inner_cmd.append("--fp8_vl")
|
| 900 |
+
|
| 901 |
+
if swap > 0:
|
| 902 |
+
inner_cmd.extend(["--blocks_to_swap", str(int(swap))])
|
| 903 |
+
if use_pinned_memory_for_block_swap:
|
| 904 |
+
inner_cmd.append("--use_pinned_memory_for_block_swap")
|
| 905 |
+
|
| 906 |
+
inner_cmd.append("--sdpa")
|
| 907 |
+
inner_cmd.append("--split_attn")
|
| 908 |
+
|
| 909 |
+
# Model specific command modification
|
| 910 |
+
if model == "Z-Image-Turbo":
|
| 911 |
+
pass
|
| 912 |
+
elif model == "Qwen-Image":
|
| 913 |
+
pass
|
| 914 |
+
|
| 915 |
+
# Parse and append additional args
|
| 916 |
+
if add_args:
|
| 917 |
+
try:
|
| 918 |
+
split_args = shlex.split(add_args)
|
| 919 |
+
inner_cmd.extend(split_args)
|
| 920 |
+
except Exception as e:
|
| 921 |
+
return f"Error parsing additional arguments / 追加引数の解析に失敗しました: {str(e)}"
|
| 922 |
+
|
| 923 |
+
# Construct the full command string for cmd /c
|
| 924 |
+
# list2cmdline will quote arguments as needed for Windows
|
| 925 |
+
inner_cmd_str = subprocess.list2cmdline(inner_cmd)
|
| 926 |
+
|
| 927 |
+
# Chain commands: Run training -> echo message -> pause >nul (hides default message)
|
| 928 |
+
final_cmd_str = f"{inner_cmd_str} & echo. & echo Training finished. Press any key to close this window... 学習が完了しました。このウィンドウを閉じるには任意のキーを押してください。 & pause >nul"
|
| 929 |
+
|
| 930 |
+
try:
|
| 931 |
+
# Open in new console window
|
| 932 |
+
flags = subprocess.CREATE_NEW_CONSOLE if os.name == "nt" else 0
|
| 933 |
+
# Pass explicit 'cmd', '/c', string to ensure proper execution
|
| 934 |
+
subprocess.Popen(["cmd", "/c", final_cmd_str], creationflags=flags, shell=False)
|
| 935 |
+
return f"Training started in a new window! / 新しいウィンドウで学習が開始されました!\nCommand: {inner_cmd_str}"
|
| 936 |
+
except Exception as e:
|
| 937 |
+
return f"Error starting training / 学習の開始に失敗しました: {str(e)}"
|
| 938 |
+
|
| 939 |
+
def update_model_info(model):
|
| 940 |
+
if model == "Z-Image-Turbo":
|
| 941 |
+
return i18n("desc_training_zimage")
|
| 942 |
+
elif model == "Qwen-Image":
|
| 943 |
+
return i18n("desc_qwen_notes")
|
| 944 |
+
return ""
|
| 945 |
+
|
| 946 |
+
# Event wiring moved to end to prevent UnboundLocalError
|
| 947 |
+
init_btn.click(
|
| 948 |
+
fn=init_project,
|
| 949 |
+
inputs=[project_dir],
|
| 950 |
+
outputs=[
|
| 951 |
+
project_status,
|
| 952 |
+
model_arch,
|
| 953 |
+
vram_size,
|
| 954 |
+
comfy_models_dir,
|
| 955 |
+
resolution_w,
|
| 956 |
+
resolution_h,
|
| 957 |
+
batch_size,
|
| 958 |
+
toml_preview,
|
| 959 |
+
vae_path,
|
| 960 |
+
text_encoder1_path,
|
| 961 |
+
text_encoder2_path,
|
| 962 |
+
dit_path,
|
| 963 |
+
output_name,
|
| 964 |
+
network_dim,
|
| 965 |
+
learning_rate,
|
| 966 |
+
num_epochs,
|
| 967 |
+
save_every_n_epochs,
|
| 968 |
+
discrete_flow_shift,
|
| 969 |
+
block_swap,
|
| 970 |
+
use_pinned_memory_for_block_swap,
|
| 971 |
+
mixed_precision,
|
| 972 |
+
gradient_checkpointing,
|
| 973 |
+
fp8_scaled,
|
| 974 |
+
fp8_llm,
|
| 975 |
+
additional_args,
|
| 976 |
+
sample_images,
|
| 977 |
+
sample_every_n,
|
| 978 |
+
sample_prompt,
|
| 979 |
+
sample_negative_prompt,
|
| 980 |
+
sample_w,
|
| 981 |
+
sample_h,
|
| 982 |
+
input_lora,
|
| 983 |
+
output_comfy_lora,
|
| 984 |
+
],
|
| 985 |
+
)
|
| 986 |
+
|
| 987 |
+
model_arch.change(fn=update_model_info, inputs=[model_arch], outputs=[training_model_info])
|
| 988 |
+
|
| 989 |
+
gen_toml_btn.click(
|
| 990 |
+
fn=generate_config,
|
| 991 |
+
inputs=[
|
| 992 |
+
project_dir,
|
| 993 |
+
resolution_w,
|
| 994 |
+
resolution_h,
|
| 995 |
+
batch_size,
|
| 996 |
+
model_arch,
|
| 997 |
+
vram_size,
|
| 998 |
+
comfy_models_dir,
|
| 999 |
+
vae_path,
|
| 1000 |
+
text_encoder1_path,
|
| 1001 |
+
text_encoder2_path,
|
| 1002 |
+
],
|
| 1003 |
+
outputs=[dataset_status, toml_preview],
|
| 1004 |
+
)
|
| 1005 |
+
|
| 1006 |
+
validate_models_btn.click(fn=validate_models_dir, inputs=[comfy_models_dir], outputs=[models_status])
|
| 1007 |
+
|
| 1008 |
+
set_rec_settings_btn.click(
|
| 1009 |
+
fn=set_recommended_settings,
|
| 1010 |
+
inputs=[project_dir, model_arch, vram_size],
|
| 1011 |
+
outputs=[resolution_w, resolution_h, batch_size],
|
| 1012 |
+
)
|
| 1013 |
+
|
| 1014 |
+
set_preprocessing_defaults_btn.click(
|
| 1015 |
+
fn=set_preprocessing_defaults,
|
| 1016 |
+
inputs=[project_dir, comfy_models_dir, model_arch],
|
| 1017 |
+
outputs=[vae_path, text_encoder1_path, text_encoder2_path],
|
| 1018 |
+
)
|
| 1019 |
+
|
| 1020 |
+
set_post_proc_defaults_btn.click(
|
| 1021 |
+
fn=set_post_processing_defaults, inputs=[project_dir, output_name], outputs=[input_lora, output_comfy_lora]
|
| 1022 |
+
)
|
| 1023 |
+
|
| 1024 |
+
set_training_defaults_btn.click(
|
| 1025 |
+
fn=set_training_defaults,
|
| 1026 |
+
inputs=[project_dir, comfy_models_dir, model_arch, vram_size],
|
| 1027 |
+
outputs=[
|
| 1028 |
+
dit_path,
|
| 1029 |
+
network_dim,
|
| 1030 |
+
learning_rate,
|
| 1031 |
+
num_epochs,
|
| 1032 |
+
save_every_n_epochs,
|
| 1033 |
+
discrete_flow_shift,
|
| 1034 |
+
block_swap,
|
| 1035 |
+
use_pinned_memory_for_block_swap,
|
| 1036 |
+
mixed_precision,
|
| 1037 |
+
gradient_checkpointing,
|
| 1038 |
+
fp8_scaled,
|
| 1039 |
+
fp8_llm,
|
| 1040 |
+
sample_every_n,
|
| 1041 |
+
sample_w,
|
| 1042 |
+
sample_h,
|
| 1043 |
+
],
|
| 1044 |
+
)
|
| 1045 |
+
|
| 1046 |
+
cache_latents_btn.click(
|
| 1047 |
+
fn=cache_latents,
|
| 1048 |
+
inputs=[
|
| 1049 |
+
project_dir,
|
| 1050 |
+
vae_path,
|
| 1051 |
+
text_encoder1_path,
|
| 1052 |
+
text_encoder2_path,
|
| 1053 |
+
model_arch,
|
| 1054 |
+
comfy_models_dir,
|
| 1055 |
+
resolution_w,
|
| 1056 |
+
resolution_h,
|
| 1057 |
+
batch_size,
|
| 1058 |
+
vram_size,
|
| 1059 |
+
],
|
| 1060 |
+
outputs=[caching_output],
|
| 1061 |
+
)
|
| 1062 |
+
|
| 1063 |
+
cache_text_btn.click(
|
| 1064 |
+
fn=cache_text_encoder,
|
| 1065 |
+
inputs=[
|
| 1066 |
+
project_dir,
|
| 1067 |
+
text_encoder1_path,
|
| 1068 |
+
text_encoder2_path,
|
| 1069 |
+
vae_path,
|
| 1070 |
+
model_arch,
|
| 1071 |
+
comfy_models_dir,
|
| 1072 |
+
resolution_w,
|
| 1073 |
+
resolution_h,
|
| 1074 |
+
batch_size,
|
| 1075 |
+
vram_size,
|
| 1076 |
+
],
|
| 1077 |
+
outputs=[caching_output],
|
| 1078 |
+
)
|
| 1079 |
+
|
| 1080 |
+
start_training_btn.click(
|
| 1081 |
+
fn=start_training,
|
| 1082 |
+
inputs=[
|
| 1083 |
+
project_dir,
|
| 1084 |
+
model_arch,
|
| 1085 |
+
dit_path,
|
| 1086 |
+
vae_path,
|
| 1087 |
+
text_encoder1_path,
|
| 1088 |
+
output_name,
|
| 1089 |
+
network_dim,
|
| 1090 |
+
learning_rate,
|
| 1091 |
+
num_epochs,
|
| 1092 |
+
save_every_n_epochs,
|
| 1093 |
+
discrete_flow_shift,
|
| 1094 |
+
block_swap,
|
| 1095 |
+
use_pinned_memory_for_block_swap,
|
| 1096 |
+
mixed_precision,
|
| 1097 |
+
gradient_checkpointing,
|
| 1098 |
+
fp8_scaled,
|
| 1099 |
+
fp8_llm,
|
| 1100 |
+
additional_args,
|
| 1101 |
+
sample_images,
|
| 1102 |
+
sample_every_n,
|
| 1103 |
+
sample_prompt,
|
| 1104 |
+
sample_negative_prompt,
|
| 1105 |
+
sample_w,
|
| 1106 |
+
sample_h,
|
| 1107 |
+
],
|
| 1108 |
+
outputs=[training_status],
|
| 1109 |
+
)
|
| 1110 |
+
|
| 1111 |
+
convert_btn.click(
|
| 1112 |
+
fn=convert_lora_to_comfy,
|
| 1113 |
+
inputs=[
|
| 1114 |
+
project_dir,
|
| 1115 |
+
input_lora,
|
| 1116 |
+
output_comfy_lora,
|
| 1117 |
+
model_arch,
|
| 1118 |
+
comfy_models_dir,
|
| 1119 |
+
resolution_w,
|
| 1120 |
+
resolution_h,
|
| 1121 |
+
batch_size,
|
| 1122 |
+
vae_path,
|
| 1123 |
+
text_encoder1_path,
|
| 1124 |
+
text_encoder2_path,
|
| 1125 |
+
],
|
| 1126 |
+
outputs=[conversion_log],
|
| 1127 |
+
)
|
| 1128 |
+
|
| 1129 |
+
return demo
|
| 1130 |
+
|
| 1131 |
+
|
| 1132 |
+
if __name__ == "__main__":
|
| 1133 |
+
demo = construct_ui()
|
| 1134 |
+
demo.launch(i18n=i18n)
|
src/musubi_tuner/gui/gui_implementation_plan.md
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
※このファイルはGUIの実装方針をAI Agentに説明するためのドキュメントです。
|
| 2 |
+
|
| 3 |
+
## GUIの実装方針
|
| 4 |
+
|
| 5 |
+
Musubi Tunerの学習スクリプトの、非常にシンプルなGUIを実装します。対象ユーザーのレベルは、Python、gitとuvはインストール、起動できるが、仮想環境 (venv) の作成、起動はちょっと難しい、くらいを想定します。
|
| 6 |
+
|
| 7 |
+
### GUIの実装
|
| 8 |
+
|
| 9 |
+
`src/musubi_tuner/gui` フォルダに `gui.py` を配置します。Gradioを使用します。
|
| 10 |
+
|
| 11 |
+
起動は `uv run gui` 等で行います(PyTorchやCUDAバージョンの考慮など、オプション等を別途検討)。
|
| 12 |
+
|
| 13 |
+
### GUIの構成
|
| 14 |
+
|
| 15 |
+
単純なひとつの画面のみで構成し、上から下に、順に作業の手順にあわせて配置します。
|
| 16 |
+
|
| 17 |
+
1. プロジェクト設定
|
| 18 |
+
|
| 19 |
+
プロジェクトフォルダを指定します。プロジェクトフォルダについては後述。
|
| 20 |
+
|
| 21 |
+
この時点で`training`フォルダを(もし存在しなければ)自動的に作成します。
|
| 22 |
+
|
| 23 |
+
2. モデルアーキテクチャの選択とモデルディレクトリ選択
|
| 24 |
+
|
| 25 |
+
Qwen-Image、Z-Image-Turboなどのモデルアーキテクチャを選択します。最初は画像生成モデルのみ対応を予定し、Z-Image-TurboとQwen-Imageを対象に実装します。
|
| 26 |
+
|
| 27 |
+
また学習時の推奨パラメータ等を適切に設定するため、VRAMサイズを選択します。
|
| 28 |
+
|
| 29 |
+
モデルディレクトリは、ComfyUI の `models` フォルダをユーザーに指定してもらいます。
|
| 30 |
+
|
| 31 |
+
3. データセット設定
|
| 32 |
+
|
| 33 |
+
学習解像度等を指定しデータセット設定ファイルを作成します。
|
| 34 |
+
|
| 35 |
+
学習解像度はモデルに応じた推奨解像度を自動設定するボタンを用意します。解像度以外は、バッチサイズのみ指定可能とします。
|
| 36 |
+
|
| 37 |
+
4. 前処理
|
| 38 |
+
|
| 39 |
+
学習データの前処理、具体的にはlatentの事前キャッシュと、text encoderの事前キャッシュを行います。モデルに応じたデフォルトのパスを指定するボタンを用意します。
|
| 40 |
+
|
| 41 |
+
5. 学習
|
| 42 |
+
|
| 43 |
+
いくつかのパラメータを指定し、学習を開始します。
|
| 44 |
+
|
| 45 |
+
- LoRA出力名
|
| 46 |
+
- 学習率
|
| 47 |
+
- 学習エポック数
|
| 48 |
+
- 何エポックごとに保存するか(サンプル画像生成も同じタイミング)
|
| 49 |
+
- Discrete Flow Shiftの値
|
| 50 |
+
- Block Swapを有効にするか、有効にする場合のブロック数
|
| 51 |
+
- 任意指定のコマンドライン引数
|
| 52 |
+
|
| 53 |
+
※ 学習のバッチサイズはデータセット設定で指定済み。
|
| 54 |
+
|
| 55 |
+
各パラメータについて、モデルとVRAMサイズに応じた推奨値を自動設定するボタンを用意します。
|
| 56 |
+
|
| 57 |
+
進捗管理は行わず、プロセスの死活監視だけ行います。進捗の確認はユーザーにコマンドプロンプトのウィンドウで確認してもらいます。
|
| 58 |
+
|
| 59 |
+
6. ポスト処理
|
| 60 |
+
|
| 61 |
+
ComfyUI用へのLoRA変換などを必要に応じて行います。デフォルトのパスを指定するボタンを用意します。
|
| 62 |
+
|
| 63 |
+
### プロジェクトフォルダの構成
|
| 64 |
+
|
| 65 |
+
簡単にするため、プロジェクトフォルダ内の設定ファイル名、サブフォルダ名はすべて固定します。
|
| 66 |
+
|
| 67 |
+
```
|
| 68 |
+
+ training : 学習用画像を配置するフォルダ、GUIスクリプトが自動作成します。
|
| 69 |
+
+ cache : latentとtext encoderの事前キャッシュを配置するフォルダ(キャッシュ処理で自動的に作成されます)
|
| 70 |
+
+ models : 学習結果のLoRAモデルが配置されるフォルダ(学習開始で自動的に作成されます)
|
| 71 |
+
+ sample : Musubi Tunerが学習中のサンプル生成画像を出力するフォルダ
|
| 72 |
+
+ dataset_config.toml : データセット設定ファイル、GUIスクリプトが自動作成します。
|
| 73 |
+
+ musubi_project.toml : GUIのプロジェクト設定(選択したモデルや解像度など)を保存するファイル。
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
`training`フォルダには、Musubi Tunerの形式、つまり画像 (\*.jpg/pngなど) + キャプションファイル(basenameが同じで、拡張子が.txt)を、ユーザーが配置します。データセット設定のステップで存在確認を行い、なければエラー表示します。
|
| 77 |
+
|
| 78 |
+
### プロジェクト設定の保存
|
| 79 |
+
|
| 80 |
+
`musubi_project.toml` に以下の項目等を保存し、次回読み込み時に復元します。
|
| 81 |
+
|
| 82 |
+
- 選択したモデルアーキテクチャ (`model_arch`)
|
| 83 |
+
- ComfyUIモデルディレクトリ (`comfy_models_dir`)
|
| 84 |
+
- 解像度 (`resolution_w`, `resolution_h`)
|
| 85 |
+
- バッチサイズ (`batch_size`)
|
| 86 |
+
|
| 87 |
+
これにより、プロジェクトを開き直した際もスムーズに作業を再開できます。
|
| 88 |
+
|
| 89 |
+
### モデル、VRAMサイズに応じた推奨値等の管理
|
| 90 |
+
|
| 91 |
+
`config_manager.py` に、モデル、VRAMサイズに応じた推奨値等を管理するクラスを実装します。
|
| 92 |
+
|
| 93 |
+
### 国際化 (Internationalization / i18n)
|
| 94 |
+
|
| 95 |
+
GradioのI18n機能 (`gr.I18n`) を使用して、GUIのテキストを多言語対応(英語・日本語)します。
|
| 96 |
+
`i18n` 辞書に言語ごとのテキストを定義し、GUI構築時に参照するようにします。
|
| 97 |
+
ユーザーのブラウザ設定などに基づいて言語が切り替わる想定です。
|
src/musubi_tuner/gui/i18n_data.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UI Text Dictionary for potential i18n
|
| 2 |
+
# I18N Configuration
|
| 3 |
+
I18N_DATA = {
|
| 4 |
+
"en": {
|
| 5 |
+
"app_title": "Musubi Tuner GUI",
|
| 6 |
+
"app_header": "# Musubi Tuner GUI",
|
| 7 |
+
"app_desc": "A simple frontend for training LoRA models with Musubi Tuner.",
|
| 8 |
+
"acc_project": "1. Project Settings",
|
| 9 |
+
"desc_project": "All working files will be created under this directory.",
|
| 10 |
+
"lbl_proj_dir": "Project Working Directory",
|
| 11 |
+
"ph_proj_dir": "Absolute path to your project folder",
|
| 12 |
+
"btn_init_project": "Initialize/Load Project",
|
| 13 |
+
"acc_model": "2. Model & Dataset Configuration",
|
| 14 |
+
"desc_model": "Choose the model architecture and specify the ComfyUI models directory.",
|
| 15 |
+
"lbl_model_arch": "Model Architecture",
|
| 16 |
+
"lbl_vram": "VRAM Size (GB)",
|
| 17 |
+
"lbl_comfy_dir": "ComfyUI Models Directory",
|
| 18 |
+
"ph_comfy_dir": "Absolute path to ComfyUI/models",
|
| 19 |
+
"btn_validate_models": "Validate Models Directory",
|
| 20 |
+
"header_dataset": "### 3. Dataset Settings",
|
| 21 |
+
"desc_dataset": "Configure the resolution and batch size for the dataset. Regenerate the dataset config if you change resolution or batch size.",
|
| 22 |
+
"btn_rec_res_batch": "Set Recommended Resolution & Batch Size",
|
| 23 |
+
"lbl_res_w": "Resolution Width",
|
| 24 |
+
"lbl_res_h": "Resolution Height",
|
| 25 |
+
"lbl_batch_size": "Batch Size",
|
| 26 |
+
"btn_gen_config": "Generate Dataset Config",
|
| 27 |
+
"lbl_toml_preview": "TOML Preview",
|
| 28 |
+
"acc_preprocessing": "4. Preprocessing",
|
| 29 |
+
"desc_preprocessing": "Pre-calculate latents and text encoder outputs required for training.",
|
| 30 |
+
"btn_set_paths": "Set Default Paths",
|
| 31 |
+
"lbl_vae_path": "VAE Path",
|
| 32 |
+
"ph_vae_path": "Path to VAE model",
|
| 33 |
+
"lbl_te1_path": "Text Encoder 1 Path",
|
| 34 |
+
"ph_te1_path": "Path to Text Encoder 1",
|
| 35 |
+
"lbl_te2_path": "Text Encoder 2 Path",
|
| 36 |
+
"ph_te2_path": "Path to Text Encoder 2 (Optional)",
|
| 37 |
+
"btn_cache_latents": "Cache Latents",
|
| 38 |
+
"btn_cache_text": "Cache Text Encoder Outputs",
|
| 39 |
+
"lbl_cache_log": "Caching Log Output",
|
| 40 |
+
"acc_training": "5. Training",
|
| 41 |
+
"desc_training_basic": "Configure the training parameters. If you train with the same name again, the previous LoRA will be overwritten.",
|
| 42 |
+
"desc_training_zimage": "Recommended: Use **bf16** for mixed precision. Because the base model has not been released yet, please use `z_image_de_turbo_v1_bf16.safetensors` as the base model.",
|
| 43 |
+
"btn_rec_params": "Set Recommended Parameters",
|
| 44 |
+
"lbl_dit_path": "Base Model / DiT Path",
|
| 45 |
+
"ph_dit_path": "Path to DiT model",
|
| 46 |
+
"lbl_output_name": "Output LoRA Name",
|
| 47 |
+
"header_basic_params": "### Basic Parameters",
|
| 48 |
+
"lbl_dim": "LoRA Rank (Dim)",
|
| 49 |
+
"lbl_lr": "Learning Rate",
|
| 50 |
+
"lbl_epochs": "Epochs",
|
| 51 |
+
"lbl_save_every": "Save Every N Epochs",
|
| 52 |
+
"accordion_advanced": "Advanced Parameters",
|
| 53 |
+
"desc_training_detailed": """
|
| 54 |
+
### Detailed Explanation
|
| 55 |
+
- **Learning Rate**: Controls how much the model weights are updated during training. Lower values are safer but slower.
|
| 56 |
+
- **Epochs**: One complete pass through the entire training dataset.
|
| 57 |
+
- **Save Every N Epochs**: How often to save the model and generate sample images.
|
| 58 |
+
- **Discrete Flow Shift**: A parameter specific to flow matching models.
|
| 59 |
+
- **Block Swap**: Offloads model blocks to CPU to save VRAM. Higher values save more VRAM but slow down training. Using pinned memory can speed up Block Swap (64GB+ system RAM recommended).
|
| 60 |
+
- **Mixed Precision**: fp16 and bf16 are both supported; which is better depends on the model architecture. For bf16, RTX30xx or higher is required.
|
| 61 |
+
- **Gradient Checkpointing**: Saves VRAM by recomputing activations during backward pass.
|
| 62 |
+
- **FP8**: Further reduces memory usage by using 8-bit floating point arithmetic.
|
| 63 |
+
""",
|
| 64 |
+
"lbl_flow_shift": "Discrete Flow Shift",
|
| 65 |
+
"lbl_block_swap": "Block Swap (Z-Image: 0-28, Qwen: 0-58)",
|
| 66 |
+
"lbl_use_pinned_memory_for_block_swap": "Use Pinned Memory for Block Swap",
|
| 67 |
+
"lbl_mixed_precision": "Mixed Precision",
|
| 68 |
+
"lbl_grad_cp": "Gradient Checkpointing",
|
| 69 |
+
"lbl_fp8_scaled": "FP8 Scaled (DiT) - Enables --fp8_base and --fp8_scaled",
|
| 70 |
+
"lbl_fp8_llm": "FP8 LLM/VLM (Text Encoder)",
|
| 71 |
+
"header_sample_images": "### Sample Image Generation",
|
| 72 |
+
"lbl_enable_sample": "Generate Sample Images During Training",
|
| 73 |
+
"lbl_sample_every_n": "Generate Sample Every N Epochs",
|
| 74 |
+
"lbl_sample_prompt": "Sample Prompt",
|
| 75 |
+
"ph_sample_prompt": "Prompt for sample generation",
|
| 76 |
+
"lbl_sample_negative_prompt": "Sample Negative Prompt",
|
| 77 |
+
"ph_sample_negative_prompt": "Negative prompt for sample generation",
|
| 78 |
+
"lbl_sample_w": "Sample Width",
|
| 79 |
+
"lbl_sample_h": "Sample Height",
|
| 80 |
+
"accordion_additional": "Additional Options",
|
| 81 |
+
"desc_additional_args": "Enter any additional command line arguments here. They will be appended to the training command.",
|
| 82 |
+
"lbl_additional_args": "Additional Optional Arguments",
|
| 83 |
+
"ph_additional_args": "--arg value --flag",
|
| 84 |
+
"btn_start_training": "Start Training (New Window)",
|
| 85 |
+
"acc_post_processing": "6. Post-Processing",
|
| 86 |
+
"desc_post_proc": "Convert Z-Image LoRA to ComfyUI format.",
|
| 87 |
+
"lbl_input_lora": "Input LoRA Path",
|
| 88 |
+
"ph_input_lora": "Path to trained .safetensors file",
|
| 89 |
+
"lbl_output_comfy": "Output ComfyUI LoRA Path",
|
| 90 |
+
"ph_output_comfy": "Path to save converted model",
|
| 91 |
+
"btn_convert": "Convert to ComfyUI Format",
|
| 92 |
+
"lbl_conversion_log": "Conversion Log",
|
| 93 |
+
"desc_qwen_notes": "Qwen-Image specific notes here.",
|
| 94 |
+
},
|
| 95 |
+
"ja": {
|
| 96 |
+
"app_title": "Musubi Tuner GUI",
|
| 97 |
+
"app_header": "# Musubi Tuner GUI",
|
| 98 |
+
"app_desc": "Musubi TunerでLoRAモデルを学習するためのシンプルなフロントエンドです。",
|
| 99 |
+
"acc_project": "1. プロジェクト設定",
|
| 100 |
+
"desc_project": "すべての作業ファイルはこのディレクトリ下に作成されます。",
|
| 101 |
+
"lbl_proj_dir": "プロジェクト作業ディレクトリ",
|
| 102 |
+
"ph_proj_dir": "プロジェクトフォルダへの絶対パス",
|
| 103 |
+
"btn_init_project": "プロジェクトを初期化/読み込み",
|
| 104 |
+
"acc_model": "2. モデル&データセット設定",
|
| 105 |
+
"desc_model": "モデルアーキテクチャを選択し、ComfyUIのモデルディレクトリを指定してください。",
|
| 106 |
+
"lbl_model_arch": "モデルアーキテクチャ",
|
| 107 |
+
"lbl_vram": "VRAMサイズ (GB)",
|
| 108 |
+
"lbl_comfy_dir": "ComfyUI モデルディレクトリ",
|
| 109 |
+
"ph_comfy_dir": "ComfyUI/models への絶対パス",
|
| 110 |
+
"btn_validate_models": "モデルディレクトリを検証",
|
| 111 |
+
"header_dataset": "### 3. データセット設定",
|
| 112 |
+
"desc_dataset": "データセットの解像度とバッチサイズを設定してください。解像度やバッチサイズを変えた場合は、データセット設定を再生成してください。",
|
| 113 |
+
"btn_rec_res_batch": "推奨解像度とバッチサイズを設定",
|
| 114 |
+
"lbl_res_w": "解像度 幅",
|
| 115 |
+
"lbl_res_h": "解像度 高さ",
|
| 116 |
+
"lbl_batch_size": "バッチサイズ",
|
| 117 |
+
"btn_gen_config": "データセット設定(TOML)を生成",
|
| 118 |
+
"lbl_toml_preview": "TOML プレビュー",
|
| 119 |
+
"acc_preprocessing": "4. 前処理 (Preprocessing)",
|
| 120 |
+
"desc_preprocessing": "学習に必要となるLatentsとテキストエンコーダーの出力を事前計算します。",
|
| 121 |
+
"btn_set_paths": "デフォルトパスを設定",
|
| 122 |
+
"lbl_vae_path": "VAE パス",
|
| 123 |
+
"ph_vae_path": "VAEモデルへのパス",
|
| 124 |
+
"lbl_te1_path": "テキストエンコーダー1 パス",
|
| 125 |
+
"ph_te1_path": "テキストエンコーダー1へのパス",
|
| 126 |
+
"lbl_te2_path": "テキストエンコーダー2 パス",
|
| 127 |
+
"ph_te2_path": "テキストエンコーダー2へのパス (オプション)",
|
| 128 |
+
"btn_cache_latents": "Latentsをキャッシュ",
|
| 129 |
+
"btn_cache_text": "テキストエンコーダー出力をキャッシュ",
|
| 130 |
+
"lbl_cache_log": "キャッシュログ出力",
|
| 131 |
+
"acc_training": "5. 学習 (Training)",
|
| 132 |
+
"desc_training_basic": "学習パラメータを設定してください。学習後、同じ名前で学習すると前のLoRAが上書きされます。",
|
| 133 |
+
"desc_training_zimage": "推奨: 混合精度には **bf16** を使用してください。Baseモデルがリリースされていないため、ostris氏の `z_image_de_turbo_v1_bf16.safetensors` を使用してください。",
|
| 134 |
+
"btn_rec_params": "推奨パラメータを設定",
|
| 135 |
+
"lbl_dit_path": "ベースモデル / DiT パス",
|
| 136 |
+
"ph_dit_path": "DiTモデルへのパス",
|
| 137 |
+
"lbl_output_name": "出力 LoRA 名",
|
| 138 |
+
"header_basic_params": "### 基本パラメータ",
|
| 139 |
+
"lbl_dim": "LoRAランク (Dim)",
|
| 140 |
+
"lbl_lr": "学習率 (Learning Rate)",
|
| 141 |
+
"lbl_epochs": "エポック数 (Epochs)",
|
| 142 |
+
"lbl_save_every": "Nエポックごとに保存",
|
| 143 |
+
"accordion_advanced": "詳細パラメータ",
|
| 144 |
+
"desc_training_detailed": """
|
| 145 |
+
### 詳細説明
|
| 146 |
+
- **学習率 (Learning Rate)**: 学習中にモデルの重みをどれくらい更新するかを制御します。低い値の方が安全ですが、学習が遅くなります。
|
| 147 |
+
- **エポック数 (Epochs)**: 学習データセット全体を通す回数です。
|
| 148 |
+
- **保存頻度 (Save Every N Epochs)**: モデルの保存とサンプル生成を行う頻度です。
|
| 149 |
+
- **Discrete Flow Shift**: Flow Matchingモデル特有のパラメータです。
|
| 150 |
+
- **Block Swap**: VRAMを節約するためにモデルブロックをCPUにオフロードします。値を大きくするとVRAMを節約できますが、学習が遅くなります。共有メモリを使うとBlock Swapが高速化されます(64GB以上のメインRAMを推奨)。
|
| 151 |
+
- **混合精度 (Mixed Precision)**: モデルアーキテクチャによりfp16とbf16のどちらが適しているかは異なります。bf16はRTX30xx以降のGPUが必要です。
|
| 152 |
+
- **Gradient Checkpointing**: Backwardパス中にアクティベーションを再計算することでVRAMを節約します。
|
| 153 |
+
- **FP8**: 8ビット浮動小数点演算を使用することでメモリ使用量をさらに削減します。
|
| 154 |
+
""",
|
| 155 |
+
"lbl_flow_shift": "Discrete Flow Shift",
|
| 156 |
+
"lbl_block_swap": "Block Swap (Z-Image: 0-28, Qwen: 0-58)",
|
| 157 |
+
"lbl_use_pinned_memory_for_block_swap": "Block Swapに共有メモリを使う",
|
| 158 |
+
"lbl_mixed_precision": "混合精度 (Mixed Precision)",
|
| 159 |
+
"lbl_grad_cp": "Gradient Checkpointing",
|
| 160 |
+
"lbl_fp8_scaled": "FP8 Scaled (DiT) - --fp8_base と --fp8_scaled を有効化",
|
| 161 |
+
"lbl_fp8_llm": "FP8 LLM/VLM (テキストエンコーダー)",
|
| 162 |
+
"header_sample_images": "### サンプル画像生成",
|
| 163 |
+
"lbl_enable_sample": "学習中にサンプル画像を生成する",
|
| 164 |
+
"lbl_sample_every_n": "Nエポックごとにサンプルを生成",
|
| 165 |
+
"lbl_sample_prompt": "サンプル画像プロンプト",
|
| 166 |
+
"ph_sample_prompt": "サンプル生成用のプロンプト",
|
| 167 |
+
"lbl_sample_negative_prompt": "サンプル画像ネガティブプロンプト",
|
| 168 |
+
"ph_sample_negative_prompt": "サンプル生成用のネガティブプロンプト",
|
| 169 |
+
"lbl_sample_w": "サンプル画像 幅",
|
| 170 |
+
"lbl_sample_h": "サンプル画像 高さ",
|
| 171 |
+
"accordion_additional": "追加オプション",
|
| 172 |
+
"desc_additional_args": "追加のコマンドライン引数を入力してください。これらは学習コマンドに追加されます。",
|
| 173 |
+
"lbl_additional_args": "追加のオプション引数",
|
| 174 |
+
"ph_additional_args": "--arg value --flag",
|
| 175 |
+
"btn_start_training": "学習を開始 (新しいウィンドウが開きます)",
|
| 176 |
+
"acc_post_processing": "6. 後処理 (Post-Processing)",
|
| 177 |
+
"desc_post_proc": "Z-Image LoRAをComfyUI形式に変換します。",
|
| 178 |
+
"lbl_input_lora": "入力 LoRA パス",
|
| 179 |
+
"ph_input_lora": "学習済み .safetensors ファイルへのパス",
|
| 180 |
+
"lbl_output_comfy": "出力 ComfyUI LoRA パス",
|
| 181 |
+
"ph_output_comfy": "変換後のモデルの保存先パス",
|
| 182 |
+
"btn_convert": "ComfyUI形式に変換",
|
| 183 |
+
"lbl_conversion_log": "変換ログ",
|
| 184 |
+
"desc_qwen_notes": "Qwen-Image 特有の注意点。",
|
| 185 |
+
},
|
| 186 |
+
}
|
src/musubi_tuner/gui_dashboard/__init__.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from musubi_tuner.gui_dashboard.metrics_writer import MetricsWriter
|
| 2 |
+
from musubi_tuner.gui_dashboard.server import start_server
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def create_metrics_writer(run_dir: str, flush_every: int = 10) -> MetricsWriter:
|
| 6 |
+
return MetricsWriter(run_dir, flush_every=flush_every)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def start_gui_server(run_dir: str, host: str = "0.0.0.0", port: int = 7860):
|
| 10 |
+
return start_server(run_dir, host=host, port=port)
|
src/musubi_tuner/gui_dashboard/command_builder.py
ADDED
|
@@ -0,0 +1,881 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Convert ProjectConfig into CLI argument lists for subprocess launch."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import sys
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
from musubi_tuner.gui_dashboard.project_schema import ProjectConfig
|
| 9 |
+
from musubi_tuner.gui_dashboard.toml_export import (
|
| 10 |
+
_write_slider_toml,
|
| 11 |
+
build_slider_toml_path,
|
| 12 |
+
export_dataset_toml,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _find_script(name: str) -> str:
    """Return the absolute path of *name* inside the installed musubi_tuner package.

    Args:
        name: Script file name, e.g. ``"ltx2_cache_latents.py"``.

    Returns:
        The script path as a string, suitable for a subprocess argv list.

    Raises:
        FileNotFoundError: If the package directory does not contain *name*.
    """
    import musubi_tuner

    candidate = Path(musubi_tuner.__file__).parent / name
    if not candidate.exists():
        raise FileNotFoundError(f"Script not found: {name}")
    return str(candidate)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def build_cache_latents_cmd(config: ProjectConfig) -> list[str]:
    """Assemble the subprocess argv list for ltx2_cache_latents.py.

    Exports the dataset TOML first so the child process can read it from
    disk, then appends optional flags only when the caching settings
    deviate from the script defaults.

    Args:
        config: Project configuration; only ``config.caching`` is consulted.

    Returns:
        Full argv list starting with ``sys.executable``.
    """
    caching = config.caching
    dataset_toml = export_dataset_toml(config)

    argv: list[str] = [
        sys.executable,
        _find_script("ltx2_cache_latents.py"),
        "--dataset_config", str(dataset_toml),
        "--ltx2_checkpoint", caching.ltx2_checkpoint,
        "--ltx2_mode", caching.ltx2_mode,
    ]

    if caching.vae_dtype:
        argv.extend(("--vae_dtype", caching.vae_dtype))
    if caching.device:
        argv.extend(("--device", caching.device))
    if caching.skip_existing:
        argv.append("--skip_existing")
    if caching.keep_cache:
        argv.append("--keep_cache")
    if caching.num_workers is not None:
        argv.extend(("--num_workers", str(caching.num_workers)))
    if caching.vae_chunk_size is not None:
        argv.extend(("--vae_chunk_size", str(caching.vae_chunk_size)))
    if caching.vae_spatial_tile_size is not None:
        argv.extend(("--vae_spatial_tile_size", str(caching.vae_spatial_tile_size)))
    if caching.vae_spatial_tile_overlap is not None:
        argv.extend(("--vae_spatial_tile_overlap", str(caching.vae_spatial_tile_overlap)))
    if caching.vae_temporal_tile_size is not None:
        argv.extend(("--vae_temporal_tile_size", str(caching.vae_temporal_tile_size)))
    if caching.vae_temporal_tile_overlap is not None:
        argv.extend(("--vae_temporal_tile_overlap", str(caching.vae_temporal_tile_overlap)))

    # Reference (V2V) — only emitted when they differ from the default of 1.
    if caching.reference_frames != 1:
        argv.extend(("--reference_frames", str(caching.reference_frames)))
    if caching.reference_downscale != 1:
        argv.extend(("--reference_downscale", str(caching.reference_downscale)))

    # Audio source options.
    # NOTE(review): nesting reconstructed — these appear to apply only in
    # "av"/"audio" modes; confirm against the original indentation.
    if caching.ltx2_mode in ("av", "audio"):
        argv.extend(("--ltx2_audio_source", caching.ltx2_audio_source))
        if caching.ltx2_audio_source == "audio_files" and caching.ltx2_audio_dir:
            argv.extend(("--ltx2_audio_dir", caching.ltx2_audio_dir))
        if caching.ltx2_audio_ext:
            argv.extend(("--ltx2_audio_ext", caching.ltx2_audio_ext))
        if caching.ltx2_audio_dtype:
            argv.extend(("--ltx2_audio_dtype", caching.ltx2_audio_dtype))
        if caching.audio_only_sequence_resolution != 64:
            argv.extend(("--audio_only_sequence_resolution", str(caching.audio_only_sequence_resolution)))

    # I2V latent precaching — requires both the toggle and a prompts file.
    if caching.precache_sample_latents and caching.sample_prompts:
        argv.append("--precache_sample_latents")
        argv.extend(("--sample_prompts", caching.sample_prompts))
        if caching.sample_latents_cache:
            argv.extend(("--sample_latents_cache", caching.sample_latents_cache))

    if caching.quantize_device:
        argv.extend(("--quantize_device", caching.quantize_device))
    if caching.save_dataset_manifest:
        argv.extend(("--save_dataset_manifest", caching.save_dataset_manifest))

    return argv
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def build_cache_text_cmd(config: ProjectConfig) -> list[str]:
    """Assemble the subprocess argv list for ltx2_cache_text_encoder_outputs.py.

    Args:
        config: Project configuration; only ``config.caching`` is consulted.

    Returns:
        Full argv list starting with ``sys.executable``.
    """
    caching = config.caching
    dataset_toml = export_dataset_toml(config)

    argv: list[str] = [
        sys.executable,
        _find_script("ltx2_cache_text_encoder_outputs.py"),
        "--dataset_config", str(dataset_toml),
        "--ltx2_checkpoint", caching.ltx2_checkpoint,
        "--gemma_root", caching.gemma_root,
        "--ltx2_mode", caching.ltx2_mode,
    ]

    if caching.gemma_safetensors:
        argv.extend(("--gemma_safetensors", caching.gemma_safetensors))
    if caching.ltx2_text_encoder_checkpoint:
        argv.extend(("--ltx2_text_encoder_checkpoint", caching.ltx2_text_encoder_checkpoint))
    if caching.mixed_precision != "no":
        argv.extend(("--mixed_precision", caching.mixed_precision))
    if caching.skip_existing:
        argv.append("--skip_existing")
    if caching.keep_cache:
        argv.append("--keep_cache")
    if caching.num_workers is not None:
        argv.extend(("--num_workers", str(caching.num_workers)))
    if caching.gemma_load_in_8bit:
        argv.append("--gemma_load_in_8bit")
    # NOTE(review): nesting reconstructed — the bnb_4bit_* options look like
    # they are only forwarded when 4-bit loading is enabled (the quant-type
    # line has no guard of its own); confirm against the original indentation.
    if caching.gemma_load_in_4bit:
        argv.append("--gemma_load_in_4bit")
        argv.extend(("--gemma_bnb_4bit_quant_type", caching.gemma_bnb_4bit_quant_type))
        if caching.gemma_bnb_4bit_disable_double_quant:
            argv.append("--gemma_bnb_4bit_disable_double_quant")
        if caching.gemma_bnb_4bit_compute_dtype != "auto":
            argv.extend(("--gemma_bnb_4bit_compute_dtype", caching.gemma_bnb_4bit_compute_dtype))

    # Precaching of sample prompts (needs both toggle and prompts file).
    if caching.precache_sample_prompts and caching.sample_prompts:
        argv.append("--precache_sample_prompts")
        argv.extend(("--sample_prompts", caching.sample_prompts))
        if caching.sample_prompts_cache:
            argv.extend(("--sample_prompts_cache", caching.sample_prompts_cache))
    # NOTE(review): nesting reconstructed — the preservation-related options
    # appear meaningful only when preservation precaching is requested;
    # confirm against the original indentation.
    if caching.precache_preservation_prompts:
        argv.append("--precache_preservation_prompts")
        if caching.preservation_prompts_cache:
            argv.extend(("--preservation_prompts_cache", caching.preservation_prompts_cache))
        if caching.blank_preservation:
            argv.append("--blank_preservation")
        if caching.dop:
            argv.append("--dop")
        if caching.dop_class_prompt:
            argv.extend(("--dop_class_prompt", caching.dop_class_prompt))

    return argv
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def build_inference_cmd(config: ProjectConfig) -> list[str]:
    """Assemble the subprocess argv list for ltx2_generate_video.py.

    Args:
        config: Project configuration; only ``config.inference`` is consulted.

    Returns:
        Full argv list starting with ``sys.executable``.
    """
    inf = config.inference

    args: list[str] = [
        sys.executable,
        _find_script("ltx2_generate_video.py"),
        "--ltx2_checkpoint", inf.ltx2_checkpoint,
        "--gemma_root", inf.gemma_root,
        "--ltx2_mode", inf.ltx2_mode,
    ]

    # LoRA.
    # NOTE(review): nesting reconstructed — the multiplier line carries no
    # guard of its own, so it is assumed to be emitted only when a LoRA
    # weight is set; confirm against the original indentation.
    if inf.lora_weight:
        args += ("--lora_weight", inf.lora_weight)
        args += ("--lora_multiplier", str(inf.lora_multiplier))

    # Prompt sources.
    if inf.prompt:
        args += ("--prompt", inf.prompt)
    if inf.negative_prompt:
        args += ("--negative_prompt", inf.negative_prompt)
    if inf.from_file:
        args += ("--from_file", inf.from_file)

    # Sampling parameters — always forwarded.
    args += ("--height", str(inf.height))
    args += ("--width", str(inf.width))
    args += ("--frame_count", str(inf.frame_count))
    args += ("--frame_rate", str(inf.frame_rate))
    args += ("--sample_steps", str(inf.sample_steps))
    args += ("--guidance_scale", str(inf.guidance_scale))
    if inf.cfg_scale is not None:
        args += ("--cfg_scale", str(inf.cfg_scale))
    args += ("--discrete_flow_shift", str(inf.discrete_flow_shift))
    if inf.seed is not None:
        args += ("--seed", str(inf.seed))

    # Precision / attention.
    if inf.mixed_precision != "no":
        args += ("--mixed_precision", inf.mixed_precision)
    args += ("--attn_mode", inf.attn_mode)
    if inf.fp8_base:
        args.append("--fp8_base")
    if inf.fp8_scaled:
        args.append("--fp8_scaled")

    # Gemma quantization flags.
    if inf.gemma_load_in_8bit:
        args.append("--gemma_load_in_8bit")
    if inf.gemma_load_in_4bit:
        args.append("--gemma_load_in_4bit")

    # Memory management.
    if inf.offloading:
        args.append("--offloading")
    if inf.blocks_to_swap is not None:
        args += ("--blocks_to_swap", str(inf.blocks_to_swap))

    # Output location.
    if inf.output_dir:
        args += ("--output_dir", inf.output_dir)
    if inf.output_name:
        args += ("--output_name", inf.output_name)

    return args
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def build_training_cmd(config: ProjectConfig) -> list[str]:
|
| 218 |
+
"""Build CLI args for training via accelerate launch."""
|
| 219 |
+
toml_path = export_dataset_toml(config)
|
| 220 |
+
t = config.training
|
| 221 |
+
|
| 222 |
+
# Use accelerate launch
|
| 223 |
+
cmd = [
|
| 224 |
+
sys.executable, "-m", "accelerate.commands.launch",
|
| 225 |
+
"--mixed_precision", t.mixed_precision,
|
| 226 |
+
"--num_processes", "1",
|
| 227 |
+
"--num_machines", "1",
|
| 228 |
+
_find_script("ltx2_train_network.py"),
|
| 229 |
+
]
|
| 230 |
+
|
| 231 |
+
# Dataset
|
| 232 |
+
if t.dataset_manifest:
|
| 233 |
+
cmd += ["--dataset_manifest", t.dataset_manifest]
|
| 234 |
+
else:
|
| 235 |
+
cmd += ["--dataset_config", str(toml_path)]
|
| 236 |
+
|
| 237 |
+
# Model
|
| 238 |
+
cmd += ["--ltx2_checkpoint", t.ltx2_checkpoint]
|
| 239 |
+
if t.gemma_root:
|
| 240 |
+
cmd += ["--gemma_root", t.gemma_root]
|
| 241 |
+
if t.gemma_safetensors:
|
| 242 |
+
cmd += ["--gemma_safetensors", t.gemma_safetensors]
|
| 243 |
+
cmd += ["--ltx2_mode", t.ltx2_mode]
|
| 244 |
+
if t.ltx_version != "2.0":
|
| 245 |
+
cmd += ["--ltx_version", t.ltx_version]
|
| 246 |
+
if t.ltx_version_check_mode != "warn":
|
| 247 |
+
cmd += ["--ltx_version_check_mode", t.ltx_version_check_mode]
|
| 248 |
+
if t.fp8_base:
|
| 249 |
+
cmd.append("--fp8_base")
|
| 250 |
+
if t.fp8_scaled:
|
| 251 |
+
cmd.append("--fp8_scaled")
|
| 252 |
+
if t.flash_attn:
|
| 253 |
+
cmd.append("--flash_attn")
|
| 254 |
+
if t.sdpa:
|
| 255 |
+
cmd.append("--sdpa")
|
| 256 |
+
if t.sage_attn:
|
| 257 |
+
cmd.append("--sage_attn")
|
| 258 |
+
if t.xformers:
|
| 259 |
+
cmd.append("--xformers")
|
| 260 |
+
if t.gemma_load_in_8bit:
|
| 261 |
+
cmd.append("--gemma_load_in_8bit")
|
| 262 |
+
if t.gemma_load_in_4bit:
|
| 263 |
+
cmd.append("--gemma_load_in_4bit")
|
| 264 |
+
if t.gemma_bnb_4bit_disable_double_quant:
|
| 265 |
+
cmd.append("--gemma_bnb_4bit_disable_double_quant")
|
| 266 |
+
if t.ltx2_audio_only_model:
|
| 267 |
+
cmd.append("--ltx2_audio_only_model")
|
| 268 |
+
|
| 269 |
+
# Quantization
|
| 270 |
+
if t.nf4_base:
|
| 271 |
+
cmd.append("--nf4_base")
|
| 272 |
+
if t.nf4_block_size != 32:
|
| 273 |
+
cmd += ["--nf4_block_size", str(t.nf4_block_size)]
|
| 274 |
+
if t.loftq_init:
|
| 275 |
+
cmd.append("--loftq_init")
|
| 276 |
+
if t.loftq_iters != 2:
|
| 277 |
+
cmd += ["--loftq_iters", str(t.loftq_iters)]
|
| 278 |
+
if t.fp8_w8a8:
|
| 279 |
+
cmd.append("--fp8_w8a8")
|
| 280 |
+
if t.w8a8_mode != "int8":
|
| 281 |
+
cmd += ["--w8a8_mode", t.w8a8_mode]
|
| 282 |
+
if t.awq_calibration:
|
| 283 |
+
cmd.append("--awq_calibration")
|
| 284 |
+
if t.awq_alpha != 0.25:
|
| 285 |
+
cmd += ["--awq_alpha", str(t.awq_alpha)]
|
| 286 |
+
if t.awq_num_batches != 8:
|
| 287 |
+
cmd += ["--awq_num_batches", str(t.awq_num_batches)]
|
| 288 |
+
if t.quantize_device:
|
| 289 |
+
cmd += ["--quantize_device", t.quantize_device]
|
| 290 |
+
|
| 291 |
+
# LoRA / Network
|
| 292 |
+
if t.network_module:
|
| 293 |
+
cmd += ["--network_module", t.network_module]
|
| 294 |
+
cmd += ["--network_dim", str(t.network_dim)]
|
| 295 |
+
cmd += ["--network_alpha", str(t.network_alpha)]
|
| 296 |
+
cmd += ["--lora_target_preset", t.lora_target_preset]
|
| 297 |
+
if t.network_args:
|
| 298 |
+
cmd += ["--network_args"] + t.network_args.split()
|
| 299 |
+
if t.network_weights:
|
| 300 |
+
cmd += ["--network_weights", t.network_weights]
|
| 301 |
+
if t.network_dropout is not None:
|
| 302 |
+
cmd += ["--network_dropout", str(t.network_dropout)]
|
| 303 |
+
if t.scale_weight_norms is not None:
|
| 304 |
+
cmd += ["--scale_weight_norms", str(t.scale_weight_norms)]
|
| 305 |
+
if t.dim_from_weights:
|
| 306 |
+
cmd.append("--dim_from_weights")
|
| 307 |
+
if t.base_weights:
|
| 308 |
+
cmd += ["--base_weights"] + t.base_weights.split()
|
| 309 |
+
if t.base_weights_multiplier:
|
| 310 |
+
cmd += ["--base_weights_multiplier"] + t.base_weights_multiplier.split()
|
| 311 |
+
if t.lycoris_config:
|
| 312 |
+
cmd += ["--lycoris_config", t.lycoris_config]
|
| 313 |
+
if t.lycoris_quantized_base_check_mode != "warn":
|
| 314 |
+
cmd += ["--lycoris_quantized_base_check_mode", t.lycoris_quantized_base_check_mode]
|
| 315 |
+
if t.init_lokr_norm is not None:
|
| 316 |
+
cmd += ["--init_lokr_norm", str(t.init_lokr_norm)]
|
| 317 |
+
if t.caption_dropout_rate > 0:
|
| 318 |
+
cmd += ["--caption_dropout_rate", str(t.caption_dropout_rate)]
|
| 319 |
+
if not t.save_original_lora:
|
| 320 |
+
cmd.append("--no-save_original_lora")
|
| 321 |
+
if t.ic_lora_strategy != "auto":
|
| 322 |
+
cmd += ["--ic_lora_strategy", t.ic_lora_strategy]
|
| 323 |
+
if t.audio_ref_use_negative_positions:
|
| 324 |
+
cmd.append("--audio_ref_use_negative_positions")
|
| 325 |
+
if t.audio_ref_mask_cross_attention_to_reference:
|
| 326 |
+
cmd.append("--audio_ref_mask_cross_attention_to_reference")
|
| 327 |
+
if t.audio_ref_mask_reference_from_text_attention:
|
| 328 |
+
cmd.append("--audio_ref_mask_reference_from_text_attention")
|
| 329 |
+
if t.audio_ref_identity_guidance_scale != 0.0:
|
| 330 |
+
cmd += ["--audio_ref_identity_guidance_scale", str(t.audio_ref_identity_guidance_scale)]
|
| 331 |
+
|
| 332 |
+
# Optimizer
|
| 333 |
+
cmd += ["--learning_rate", str(t.learning_rate)]
|
| 334 |
+
cmd += ["--optimizer_type", t.optimizer_type]
|
| 335 |
+
if t.optimizer_args:
|
| 336 |
+
cmd += ["--optimizer_args"] + t.optimizer_args.split()
|
| 337 |
+
cmd += ["--lr_scheduler", t.lr_scheduler]
|
| 338 |
+
cmd += ["--lr_warmup_steps", str(t.lr_warmup_steps)]
|
| 339 |
+
if t.lr_decay_steps is not None:
|
| 340 |
+
cmd += ["--lr_decay_steps", str(t.lr_decay_steps)]
|
| 341 |
+
if t.lr_scheduler_num_cycles is not None:
|
| 342 |
+
cmd += ["--lr_scheduler_num_cycles", str(t.lr_scheduler_num_cycles)]
|
| 343 |
+
if t.lr_scheduler_power is not None:
|
| 344 |
+
cmd += ["--lr_scheduler_power", str(t.lr_scheduler_power)]
|
| 345 |
+
if t.lr_scheduler_min_lr_ratio is not None:
|
| 346 |
+
cmd += ["--lr_scheduler_min_lr_ratio", str(t.lr_scheduler_min_lr_ratio)]
|
| 347 |
+
if t.lr_scheduler_type:
|
| 348 |
+
cmd += ["--lr_scheduler_type", t.lr_scheduler_type]
|
| 349 |
+
if t.lr_scheduler_args:
|
| 350 |
+
cmd += ["--lr_scheduler_args"] + t.lr_scheduler_args.split()
|
| 351 |
+
if t.lr_scheduler_timescale is not None:
|
| 352 |
+
cmd += ["--lr_scheduler_timescale", str(t.lr_scheduler_timescale)]
|
| 353 |
+
cmd += ["--gradient_accumulation_steps", str(t.gradient_accumulation_steps)]
|
| 354 |
+
cmd += ["--max_grad_norm", str(t.max_grad_norm)]
|
| 355 |
+
if t.audio_lr is not None:
|
| 356 |
+
cmd += ["--audio_lr", str(t.audio_lr)]
|
| 357 |
+
if t.lr_args:
|
| 358 |
+
cmd += ["--lr_args"] + t.lr_args.split()
|
| 359 |
+
|
| 360 |
+
# Schedule
|
| 361 |
+
if t.max_train_epochs is not None:
|
| 362 |
+
cmd += ["--max_train_epochs", str(t.max_train_epochs)]
|
| 363 |
+
else:
|
| 364 |
+
cmd += ["--max_train_steps", str(t.max_train_steps)]
|
| 365 |
+
cmd += ["--timestep_sampling", t.timestep_sampling]
|
| 366 |
+
cmd += ["--discrete_flow_shift", str(t.discrete_flow_shift)]
|
| 367 |
+
cmd += ["--weighting_scheme", t.weighting_scheme]
|
| 368 |
+
if t.seed is not None:
|
| 369 |
+
cmd += ["--seed", str(t.seed)]
|
| 370 |
+
if t.guidance_scale is not None:
|
| 371 |
+
cmd += ["--guidance_scale", str(t.guidance_scale)]
|
| 372 |
+
if t.sigmoid_scale is not None:
|
| 373 |
+
cmd += ["--sigmoid_scale", str(t.sigmoid_scale)]
|
| 374 |
+
if t.logit_mean is not None:
|
| 375 |
+
cmd += ["--logit_mean", str(t.logit_mean)]
|
| 376 |
+
if t.logit_std is not None:
|
| 377 |
+
cmd += ["--logit_std", str(t.logit_std)]
|
| 378 |
+
if t.mode_scale is not None:
|
| 379 |
+
cmd += ["--mode_scale", str(t.mode_scale)]
|
| 380 |
+
if t.min_timestep is not None:
|
| 381 |
+
cmd += ["--min_timestep", str(t.min_timestep)]
|
| 382 |
+
if t.max_timestep is not None:
|
| 383 |
+
cmd += ["--max_timestep", str(t.max_timestep)]
|
| 384 |
+
|
| 385 |
+
# Advanced timestep
|
| 386 |
+
if t.shifted_logit_mode:
|
| 387 |
+
cmd += ["--shifted_logit_mode", t.shifted_logit_mode]
|
| 388 |
+
if t.shifted_logit_eps != 1e-3:
|
| 389 |
+
cmd += ["--shifted_logit_eps", str(t.shifted_logit_eps)]
|
| 390 |
+
if t.shifted_logit_uniform_prob != 0.1:
|
| 391 |
+
cmd += ["--shifted_logit_uniform_prob", str(t.shifted_logit_uniform_prob)]
|
| 392 |
+
if t.shifted_logit_shift is not None:
|
| 393 |
+
cmd += ["--shifted_logit_shift", str(t.shifted_logit_shift)]
|
| 394 |
+
if t.preserve_distribution_shape:
|
| 395 |
+
cmd.append("--preserve_distribution_shape")
|
| 396 |
+
if t.num_timestep_buckets is not None:
|
| 397 |
+
cmd += ["--num_timestep_buckets", str(t.num_timestep_buckets)]
|
| 398 |
+
|
| 399 |
+
# Memory
|
| 400 |
+
if t.blocks_to_swap is not None:
|
| 401 |
+
cmd += ["--blocks_to_swap", str(t.blocks_to_swap)]
|
| 402 |
+
if t.gradient_checkpointing:
|
| 403 |
+
cmd.append("--gradient_checkpointing")
|
| 404 |
+
if t.gradient_checkpointing_cpu_offload:
|
| 405 |
+
cmd.append("--gradient_checkpointing_cpu_offload")
|
| 406 |
+
if t.split_attn_target:
|
| 407 |
+
cmd += ["--split_attn_target", t.split_attn_target]
|
| 408 |
+
if t.split_attn_mode:
|
| 409 |
+
cmd += ["--split_attn_mode", t.split_attn_mode]
|
| 410 |
+
if t.split_attn_chunk_size is not None:
|
| 411 |
+
cmd += ["--split_attn_chunk_size", str(t.split_attn_chunk_size)]
|
| 412 |
+
if t.blockwise_checkpointing:
|
| 413 |
+
cmd.append("--blockwise_checkpointing")
|
| 414 |
+
if t.blocks_to_checkpoint is not None:
|
| 415 |
+
cmd += ["--blocks_to_checkpoint", str(t.blocks_to_checkpoint)]
|
| 416 |
+
if t.full_fp16:
|
| 417 |
+
cmd.append("--full_fp16")
|
| 418 |
+
if t.full_bf16:
|
| 419 |
+
cmd.append("--full_bf16")
|
| 420 |
+
if t.ffn_chunk_target:
|
| 421 |
+
cmd += ["--ffn_chunk_target", t.ffn_chunk_target]
|
| 422 |
+
if t.ffn_chunk_size:
|
| 423 |
+
cmd += ["--ffn_chunk_size", str(t.ffn_chunk_size)]
|
| 424 |
+
if t.use_pinned_memory_for_block_swap:
|
| 425 |
+
cmd.append("--use_pinned_memory_for_block_swap")
|
| 426 |
+
if t.img_in_txt_in_offloading:
|
| 427 |
+
cmd.append("--img_in_txt_in_offloading")
|
| 428 |
+
|
| 429 |
+
# Compile
|
| 430 |
+
if t.compile:
|
| 431 |
+
cmd.append("--compile")
|
| 432 |
+
if t.compile_backend:
|
| 433 |
+
cmd += ["--compile_backend", t.compile_backend]
|
| 434 |
+
if t.compile_mode:
|
| 435 |
+
cmd += ["--compile_mode", t.compile_mode]
|
| 436 |
+
if t.compile_dynamic:
|
| 437 |
+
cmd.append("--compile_dynamic")
|
| 438 |
+
if t.compile_fullgraph:
|
| 439 |
+
cmd.append("--compile_fullgraph")
|
| 440 |
+
if t.compile_cache_size_limit is not None:
|
| 441 |
+
cmd += ["--compile_cache_size_limit", str(t.compile_cache_size_limit)]
|
| 442 |
+
|
| 443 |
+
# CUDA
|
| 444 |
+
if t.cuda_allow_tf32:
|
| 445 |
+
cmd.append("--cuda_allow_tf32")
|
| 446 |
+
if t.cuda_cudnn_benchmark:
|
| 447 |
+
cmd.append("--cuda_cudnn_benchmark")
|
| 448 |
+
if t.cuda_memory_fraction is not None:
|
| 449 |
+
cmd += ["--cuda_memory_fraction", str(t.cuda_memory_fraction)]
|
| 450 |
+
|
| 451 |
+
# Sampling
|
| 452 |
+
if t.sample_every_n_steps:
|
| 453 |
+
cmd += ["--sample_every_n_steps", str(t.sample_every_n_steps)]
|
| 454 |
+
if t.sample_every_n_epochs:
|
| 455 |
+
cmd += ["--sample_every_n_epochs", str(t.sample_every_n_epochs)]
|
| 456 |
+
if t.sample_prompts:
|
| 457 |
+
cmd += ["--sample_prompts", t.sample_prompts]
|
| 458 |
+
if t.use_precached_sample_prompts:
|
| 459 |
+
cmd.append("--use_precached_sample_prompts")
|
| 460 |
+
if t.sample_prompts_cache:
|
| 461 |
+
cmd += ["--sample_prompts_cache", t.sample_prompts_cache]
|
| 462 |
+
if t.use_precached_sample_latents:
|
| 463 |
+
cmd.append("--use_precached_sample_latents")
|
| 464 |
+
if t.sample_latents_cache:
|
| 465 |
+
cmd += ["--sample_latents_cache", t.sample_latents_cache]
|
| 466 |
+
cmd += ["--height", str(t.height)]
|
| 467 |
+
cmd += ["--width", str(t.width)]
|
| 468 |
+
cmd += ["--sample_num_frames", str(t.sample_num_frames)]
|
| 469 |
+
if t.sample_with_offloading:
|
| 470 |
+
cmd.append("--sample_with_offloading")
|
| 471 |
+
if t.sample_merge_audio:
|
| 472 |
+
cmd.append("--sample_merge_audio")
|
| 473 |
+
if t.sample_disable_audio:
|
| 474 |
+
cmd.append("--sample_disable_audio")
|
| 475 |
+
if t.sample_at_first:
|
| 476 |
+
cmd.append("--sample_at_first")
|
| 477 |
+
if t.sample_tiled_vae:
|
| 478 |
+
cmd.append("--sample_tiled_vae")
|
| 479 |
+
if t.sample_vae_tile_size is not None:
|
| 480 |
+
cmd += ["--sample_vae_tile_size", str(t.sample_vae_tile_size)]
|
| 481 |
+
if t.sample_vae_tile_overlap is not None:
|
| 482 |
+
cmd += ["--sample_vae_tile_overlap", str(t.sample_vae_tile_overlap)]
|
| 483 |
+
if t.sample_vae_temporal_tile_size is not None:
|
| 484 |
+
cmd += ["--sample_vae_temporal_tile_size", str(t.sample_vae_temporal_tile_size)]
|
| 485 |
+
if t.sample_vae_temporal_tile_overlap is not None:
|
| 486 |
+
cmd += ["--sample_vae_temporal_tile_overlap", str(t.sample_vae_temporal_tile_overlap)]
|
| 487 |
+
if t.sample_two_stage:
|
| 488 |
+
cmd.append("--sample_two_stage")
|
| 489 |
+
if t.spatial_upsampler_path:
|
| 490 |
+
cmd += ["--spatial_upsampler_path", t.spatial_upsampler_path]
|
| 491 |
+
if t.distilled_lora_path:
|
| 492 |
+
cmd += ["--distilled_lora_path", t.distilled_lora_path]
|
| 493 |
+
if t.sample_stage2_steps != 3:
|
| 494 |
+
cmd += ["--sample_stage2_steps", str(t.sample_stage2_steps)]
|
| 495 |
+
if t.sample_audio_only:
|
| 496 |
+
cmd.append("--sample_audio_only")
|
| 497 |
+
if t.sample_disable_flash_attn:
|
| 498 |
+
cmd.append("--sample_disable_flash_attn")
|
| 499 |
+
if not t.sample_i2v_token_timestep_mask:
|
| 500 |
+
cmd.append("--no-sample_i2v_token_timestep_mask")
|
| 501 |
+
if not t.sample_audio_subprocess:
|
| 502 |
+
cmd.append("--no-sample_audio_subprocess")
|
| 503 |
+
if t.sample_include_reference:
|
| 504 |
+
cmd.append("--sample_include_reference")
|
| 505 |
+
if t.reference_downscale != 1:
|
| 506 |
+
cmd += ["--reference_downscale", str(t.reference_downscale)]
|
| 507 |
+
if t.reference_frames != 1:
|
| 508 |
+
cmd += ["--reference_frames", str(t.reference_frames)]
|
| 509 |
+
|
| 510 |
+
# Validation
|
| 511 |
+
if t.validate_every_n_steps is not None:
|
| 512 |
+
cmd += ["--validate_every_n_steps", str(t.validate_every_n_steps)]
|
| 513 |
+
if t.validate_every_n_epochs is not None:
|
| 514 |
+
cmd += ["--validate_every_n_epochs", str(t.validate_every_n_epochs)]
|
| 515 |
+
|
| 516 |
+
# Output
|
| 517 |
+
if t.output_dir:
|
| 518 |
+
cmd += ["--output_dir", t.output_dir]
|
| 519 |
+
if t.output_name:
|
| 520 |
+
cmd += ["--output_name", t.output_name]
|
| 521 |
+
if t.save_every_n_epochs:
|
| 522 |
+
cmd += ["--save_every_n_epochs", str(t.save_every_n_epochs)]
|
| 523 |
+
if t.save_every_n_steps:
|
| 524 |
+
cmd += ["--save_every_n_steps", str(t.save_every_n_steps)]
|
| 525 |
+
if t.save_last_n_epochs is not None:
|
| 526 |
+
cmd += ["--save_last_n_epochs", str(t.save_last_n_epochs)]
|
| 527 |
+
if t.save_last_n_steps is not None:
|
| 528 |
+
cmd += ["--save_last_n_steps", str(t.save_last_n_steps)]
|
| 529 |
+
if t.save_last_n_epochs_state is not None:
|
| 530 |
+
cmd += ["--save_last_n_epochs_state", str(t.save_last_n_epochs_state)]
|
| 531 |
+
if t.save_last_n_steps_state is not None:
|
| 532 |
+
cmd += ["--save_last_n_steps_state", str(t.save_last_n_steps_state)]
|
| 533 |
+
if t.save_state:
|
| 534 |
+
cmd.append("--save_state")
|
| 535 |
+
if t.save_state_on_train_end:
|
| 536 |
+
cmd.append("--save_state_on_train_end")
|
| 537 |
+
if t.save_checkpoint_metadata:
|
| 538 |
+
cmd.append("--save_checkpoint_metadata")
|
| 539 |
+
if t.no_metadata:
|
| 540 |
+
cmd.append("--no_metadata")
|
| 541 |
+
if t.no_convert_to_comfy:
|
| 542 |
+
cmd.append("--no_convert_to_comfy")
|
| 543 |
+
if t.log_with:
|
| 544 |
+
cmd += ["--log_with", t.log_with]
|
| 545 |
+
if t.logging_dir:
|
| 546 |
+
cmd += ["--logging_dir", t.logging_dir]
|
| 547 |
+
if t.log_prefix:
|
| 548 |
+
cmd += ["--log_prefix", t.log_prefix]
|
| 549 |
+
if t.log_tracker_name:
|
| 550 |
+
cmd += ["--log_tracker_name", t.log_tracker_name]
|
| 551 |
+
if t.wandb_run_name:
|
| 552 |
+
cmd += ["--wandb_run_name", t.wandb_run_name]
|
| 553 |
+
if t.wandb_api_key:
|
| 554 |
+
cmd += ["--wandb_api_key", t.wandb_api_key]
|
| 555 |
+
if t.log_cuda_memory_every_n_steps is not None:
|
| 556 |
+
cmd += ["--log_cuda_memory_every_n_steps", str(t.log_cuda_memory_every_n_steps)]
|
| 557 |
+
if t.resume:
|
| 558 |
+
cmd += ["--resume", t.resume]
|
| 559 |
+
if t.training_comment:
|
| 560 |
+
cmd += ["--training_comment", t.training_comment]
|
| 561 |
+
if t.loss_type != "mse":
|
| 562 |
+
cmd += ["--loss_type", t.loss_type]
|
| 563 |
+
if t.loss_type in ("huber", "smooth_l1") and t.huber_delta != 1.0:
|
| 564 |
+
cmd += ["--huber_delta", str(t.huber_delta)]
|
| 565 |
+
|
| 566 |
+
# Metadata
|
| 567 |
+
if t.metadata_title:
|
| 568 |
+
cmd += ["--metadata_title", t.metadata_title]
|
| 569 |
+
if t.metadata_author:
|
| 570 |
+
cmd += ["--metadata_author", t.metadata_author]
|
| 571 |
+
if t.metadata_description:
|
| 572 |
+
cmd += ["--metadata_description", t.metadata_description]
|
| 573 |
+
if t.metadata_license:
|
| 574 |
+
cmd += ["--metadata_license", t.metadata_license]
|
| 575 |
+
if t.metadata_tags:
|
| 576 |
+
cmd += ["--metadata_tags", t.metadata_tags]
|
| 577 |
+
|
| 578 |
+
# HuggingFace upload
|
| 579 |
+
if t.huggingface_repo_id:
|
| 580 |
+
cmd += ["--huggingface_repo_id", t.huggingface_repo_id]
|
| 581 |
+
if t.huggingface_repo_type:
|
| 582 |
+
cmd += ["--huggingface_repo_type", t.huggingface_repo_type]
|
| 583 |
+
if t.huggingface_path_in_repo:
|
| 584 |
+
cmd += ["--huggingface_path_in_repo", t.huggingface_path_in_repo]
|
| 585 |
+
if t.huggingface_token:
|
| 586 |
+
cmd += ["--huggingface_token", t.huggingface_token]
|
| 587 |
+
if t.huggingface_repo_visibility:
|
| 588 |
+
cmd += ["--huggingface_repo_visibility", t.huggingface_repo_visibility]
|
| 589 |
+
if t.save_state_to_huggingface:
|
| 590 |
+
cmd.append("--save_state_to_huggingface")
|
| 591 |
+
if t.resume_from_huggingface:
|
| 592 |
+
cmd.append("--resume_from_huggingface")
|
| 593 |
+
if t.async_upload:
|
| 594 |
+
cmd.append("--async_upload")
|
| 595 |
+
|
| 596 |
+
# CREPA
|
| 597 |
+
if t.crepa:
|
| 598 |
+
cmd.append("--crepa")
|
| 599 |
+
args_parts = []
|
| 600 |
+
if t.crepa_mode != "backbone":
|
| 601 |
+
args_parts.append(f"mode={t.crepa_mode}")
|
| 602 |
+
if t.crepa_student_block_idx != 16:
|
| 603 |
+
args_parts.append(f"student_block_idx={t.crepa_student_block_idx}")
|
| 604 |
+
if t.crepa_mode == "backbone" and t.crepa_teacher_block_idx != 32:
|
| 605 |
+
args_parts.append(f"teacher_block_idx={t.crepa_teacher_block_idx}")
|
| 606 |
+
if t.crepa_mode == "dino" and t.crepa_dino_model != "dinov2_vitb14":
|
| 607 |
+
args_parts.append(f"dino_model={t.crepa_dino_model}")
|
| 608 |
+
if t.crepa_lambda != 0.1:
|
| 609 |
+
args_parts.append(f"lambda_crepa={t.crepa_lambda}")
|
| 610 |
+
if t.crepa_tau != 1.0:
|
| 611 |
+
args_parts.append(f"tau={t.crepa_tau}")
|
| 612 |
+
if t.crepa_num_neighbors != 2:
|
| 613 |
+
args_parts.append(f"num_neighbors={t.crepa_num_neighbors}")
|
| 614 |
+
if t.crepa_schedule != "constant":
|
| 615 |
+
args_parts.append(f"schedule={t.crepa_schedule}")
|
| 616 |
+
if t.crepa_warmup_steps != 0:
|
| 617 |
+
args_parts.append(f"warmup_steps={t.crepa_warmup_steps}")
|
| 618 |
+
if not t.crepa_normalize:
|
| 619 |
+
args_parts.append("normalize=false")
|
| 620 |
+
if args_parts:
|
| 621 |
+
cmd += ["--crepa_args"] + args_parts
|
| 622 |
+
|
| 623 |
+
# Self-Flow
|
| 624 |
+
if t.self_flow:
|
| 625 |
+
cmd.append("--self_flow")
|
| 626 |
+
args_parts = []
|
| 627 |
+
if t.self_flow_teacher_mode != "base":
|
| 628 |
+
args_parts.append(f"teacher_mode={t.self_flow_teacher_mode}")
|
| 629 |
+
if t.self_flow_student_block_idx != 16:
|
| 630 |
+
args_parts.append(f"student_block_idx={t.self_flow_student_block_idx}")
|
| 631 |
+
if t.self_flow_teacher_block_idx != 32:
|
| 632 |
+
args_parts.append(f"teacher_block_idx={t.self_flow_teacher_block_idx}")
|
| 633 |
+
if t.self_flow_student_block_ratio != 0.3:
|
| 634 |
+
args_parts.append(f"student_block_ratio={t.self_flow_student_block_ratio}")
|
| 635 |
+
if t.self_flow_teacher_block_ratio != 0.7:
|
| 636 |
+
args_parts.append(f"teacher_block_ratio={t.self_flow_teacher_block_ratio}")
|
| 637 |
+
if t.self_flow_student_block_stochastic_range != 0:
|
| 638 |
+
args_parts.append(f"student_block_stochastic_range={t.self_flow_student_block_stochastic_range}")
|
| 639 |
+
if t.self_flow_lambda != 0.1:
|
| 640 |
+
args_parts.append(f"lambda_self_flow={t.self_flow_lambda}")
|
| 641 |
+
if t.self_flow_mask_ratio != 0.1:
|
| 642 |
+
args_parts.append(f"mask_ratio={t.self_flow_mask_ratio}")
|
| 643 |
+
if t.self_flow_frame_level_mask:
|
| 644 |
+
args_parts.append("frame_level_mask=true")
|
| 645 |
+
if t.self_flow_mask_focus_loss:
|
| 646 |
+
args_parts.append("mask_focus_loss=true")
|
| 647 |
+
if t.self_flow_max_loss != 0.0:
|
| 648 |
+
args_parts.append(f"max_loss={t.self_flow_max_loss}")
|
| 649 |
+
if t.self_flow_teacher_momentum != 0.999:
|
| 650 |
+
args_parts.append(f"teacher_momentum={t.self_flow_teacher_momentum}")
|
| 651 |
+
if not t.self_flow_dual_timestep:
|
| 652 |
+
args_parts.append("dual_timestep=false")
|
| 653 |
+
if t.self_flow_projector_lr is not None:
|
| 654 |
+
args_parts.append(f"projector_lr={t.self_flow_projector_lr}")
|
| 655 |
+
if getattr(t, "self_flow_temporal_mode", "off") != "off":
|
| 656 |
+
args_parts.append(f"temporal_mode={t.self_flow_temporal_mode}")
|
| 657 |
+
if getattr(t, "self_flow_lambda_temporal", 0.0) != 0.0:
|
| 658 |
+
args_parts.append(f"lambda_temporal={t.self_flow_lambda_temporal}")
|
| 659 |
+
if getattr(t, "self_flow_lambda_delta", 0.0) != 0.0:
|
| 660 |
+
args_parts.append(f"lambda_delta={t.self_flow_lambda_delta}")
|
| 661 |
+
if getattr(t, "self_flow_temporal_tau", 1.0) != 1.0:
|
| 662 |
+
args_parts.append(f"temporal_tau={t.self_flow_temporal_tau}")
|
| 663 |
+
if getattr(t, "self_flow_num_neighbors", 2) != 2:
|
| 664 |
+
args_parts.append(f"num_neighbors={t.self_flow_num_neighbors}")
|
| 665 |
+
if getattr(t, "self_flow_temporal_granularity", "frame") != "frame":
|
| 666 |
+
args_parts.append(f"temporal_granularity={t.self_flow_temporal_granularity}")
|
| 667 |
+
if getattr(t, "self_flow_patch_spatial_radius", 0) != 0:
|
| 668 |
+
args_parts.append(f"patch_spatial_radius={t.self_flow_patch_spatial_radius}")
|
| 669 |
+
if getattr(t, "self_flow_patch_match_mode", "hard") != "hard":
|
| 670 |
+
args_parts.append(f"patch_match_mode={t.self_flow_patch_match_mode}")
|
| 671 |
+
if getattr(t, "self_flow_delta_num_steps", 1) != 1:
|
| 672 |
+
args_parts.append(f"delta_num_steps={t.self_flow_delta_num_steps}")
|
| 673 |
+
if getattr(t, "self_flow_motion_weighting", "none") != "none":
|
| 674 |
+
args_parts.append(f"motion_weighting={t.self_flow_motion_weighting}")
|
| 675 |
+
if getattr(t, "self_flow_motion_weight_strength", 0.0) != 0.0:
|
| 676 |
+
args_parts.append(f"motion_weight_strength={t.self_flow_motion_weight_strength}")
|
| 677 |
+
if getattr(t, "self_flow_temporal_schedule", "constant") != "constant":
|
| 678 |
+
args_parts.append(f"temporal_schedule={t.self_flow_temporal_schedule}")
|
| 679 |
+
if getattr(t, "self_flow_temporal_warmup_steps", 0) != 0:
|
| 680 |
+
args_parts.append(f"temporal_warmup_steps={t.self_flow_temporal_warmup_steps}")
|
| 681 |
+
if getattr(t, "self_flow_temporal_max_steps", 0) != 0:
|
| 682 |
+
args_parts.append(f"temporal_max_steps={t.self_flow_temporal_max_steps}")
|
| 683 |
+
if getattr(t, "self_flow_offload_teacher_features", False):
|
| 684 |
+
args_parts.append("offload_teacher_features=true")
|
| 685 |
+
if args_parts:
|
| 686 |
+
cmd += ["--self_flow_args"] + args_parts
|
| 687 |
+
|
| 688 |
+
# Preservation
|
| 689 |
+
if t.blank_preservation:
|
| 690 |
+
cmd.append("--blank_preservation")
|
| 691 |
+
args_parts = []
|
| 692 |
+
if t.blank_preservation_multiplier != 1.0:
|
| 693 |
+
args_parts.append(f"multiplier={t.blank_preservation_multiplier}")
|
| 694 |
+
if args_parts:
|
| 695 |
+
cmd += ["--blank_preservation_args"] + args_parts
|
| 696 |
+
if t.dop:
|
| 697 |
+
cmd.append("--dop")
|
| 698 |
+
args_parts = []
|
| 699 |
+
if t.dop_class:
|
| 700 |
+
args_parts.append(f"class={t.dop_class}")
|
| 701 |
+
if t.dop_multiplier != 1.0:
|
| 702 |
+
args_parts.append(f"multiplier={t.dop_multiplier}")
|
| 703 |
+
if args_parts:
|
| 704 |
+
cmd += ["--dop_args"] + args_parts
|
| 705 |
+
if t.prior_divergence:
|
| 706 |
+
cmd.append("--prior_divergence")
|
| 707 |
+
args_parts = []
|
| 708 |
+
if t.prior_divergence_multiplier != 0.1:
|
| 709 |
+
args_parts.append(f"multiplier={t.prior_divergence_multiplier}")
|
| 710 |
+
if args_parts:
|
| 711 |
+
cmd += ["--prior_divergence_args"] + args_parts
|
| 712 |
+
if t.use_precached_preservation:
|
| 713 |
+
cmd.append("--use_precached_preservation")
|
| 714 |
+
if t.preservation_prompts_cache:
|
| 715 |
+
cmd += ["--preservation_prompts_cache", t.preservation_prompts_cache]
|
| 716 |
+
|
| 717 |
+
# Audio features
|
| 718 |
+
if t.audio_loss_balance_mode != "none":
|
| 719 |
+
cmd += ["--audio_loss_balance_mode", t.audio_loss_balance_mode]
|
| 720 |
+
if t.audio_loss_balance_mode == "inv_freq":
|
| 721 |
+
if t.audio_loss_balance_beta != 0.01:
|
| 722 |
+
cmd += ["--audio_loss_balance_beta", str(t.audio_loss_balance_beta)]
|
| 723 |
+
if t.audio_loss_balance_eps != 0.05:
|
| 724 |
+
cmd += ["--audio_loss_balance_eps", str(t.audio_loss_balance_eps)]
|
| 725 |
+
if t.audio_loss_balance_min != 0.05:
|
| 726 |
+
cmd += ["--audio_loss_balance_min", str(t.audio_loss_balance_min)]
|
| 727 |
+
if t.audio_loss_balance_max != 4.0:
|
| 728 |
+
cmd += ["--audio_loss_balance_max", str(t.audio_loss_balance_max)]
|
| 729 |
+
if t.audio_loss_balance_ema_init != 1.0:
|
| 730 |
+
cmd += ["--audio_loss_balance_ema_init", str(t.audio_loss_balance_ema_init)]
|
| 731 |
+
if t.audio_loss_balance_mode == "ema_mag":
|
| 732 |
+
if t.audio_loss_balance_target_ratio != 0.33:
|
| 733 |
+
cmd += ["--audio_loss_balance_target_ratio", str(t.audio_loss_balance_target_ratio)]
|
| 734 |
+
if t.audio_loss_balance_ema_decay != 0.99:
|
| 735 |
+
cmd += ["--audio_loss_balance_ema_decay", str(t.audio_loss_balance_ema_decay)]
|
| 736 |
+
if t.independent_audio_timestep:
|
| 737 |
+
cmd.append("--independent_audio_timestep")
|
| 738 |
+
if t.audio_silence_regularizer:
|
| 739 |
+
cmd.append("--audio_silence_regularizer")
|
| 740 |
+
if t.audio_silence_regularizer_weight != 1.0:
|
| 741 |
+
cmd += ["--audio_silence_regularizer_weight", str(t.audio_silence_regularizer_weight)]
|
| 742 |
+
if t.audio_supervision_mode != "off":
|
| 743 |
+
cmd += ["--audio_supervision_mode", t.audio_supervision_mode]
|
| 744 |
+
if t.audio_supervision_warmup_steps != 50:
|
| 745 |
+
cmd += ["--audio_supervision_warmup_steps", str(t.audio_supervision_warmup_steps)]
|
| 746 |
+
if t.audio_supervision_check_interval != 50:
|
| 747 |
+
cmd += ["--audio_supervision_check_interval", str(t.audio_supervision_check_interval)]
|
| 748 |
+
if t.audio_supervision_min_ratio != 0.9:
|
| 749 |
+
cmd += ["--audio_supervision_min_ratio", str(t.audio_supervision_min_ratio)]
|
| 750 |
+
if t.audio_dop:
|
| 751 |
+
cmd.append("--audio_dop")
|
| 752 |
+
if t.audio_dop_multiplier != 0.5:
|
| 753 |
+
cmd += ["--audio_dop_args", f"multiplier={t.audio_dop_multiplier}"]
|
| 754 |
+
if t.audio_bucket_strategy:
|
| 755 |
+
cmd += ["--audio_bucket_strategy", t.audio_bucket_strategy]
|
| 756 |
+
if t.audio_bucket_interval is not None:
|
| 757 |
+
cmd += ["--audio_bucket_interval", str(t.audio_bucket_interval)]
|
| 758 |
+
if t.audio_only_sequence_resolution != 64:
|
| 759 |
+
cmd += ["--audio_only_sequence_resolution", str(t.audio_only_sequence_resolution)]
|
| 760 |
+
if t.min_audio_batches_per_accum > 0:
|
| 761 |
+
cmd += ["--min_audio_batches_per_accum", str(t.min_audio_batches_per_accum)]
|
| 762 |
+
if t.audio_batch_probability is not None:
|
| 763 |
+
cmd += ["--audio_batch_probability", str(t.audio_batch_probability)]
|
| 764 |
+
|
| 765 |
+
# Loss weighting
|
| 766 |
+
if t.video_loss_weight != 1.0:
|
| 767 |
+
cmd += ["--video_loss_weight", str(t.video_loss_weight)]
|
| 768 |
+
if t.audio_loss_weight != 1.0:
|
| 769 |
+
cmd += ["--audio_loss_weight", str(t.audio_loss_weight)]
|
| 770 |
+
|
| 771 |
+
# Misc
|
| 772 |
+
if t.separate_audio_buckets:
|
| 773 |
+
cmd.append("--separate_audio_buckets")
|
| 774 |
+
cmd += ["--max_data_loader_n_workers", str(t.max_data_loader_n_workers)]
|
| 775 |
+
if t.persistent_data_loader_workers:
|
| 776 |
+
cmd.append("--persistent_data_loader_workers")
|
| 777 |
+
cmd += ["--ltx2_first_frame_conditioning_p", str(t.ltx2_first_frame_conditioning_p)]
|
| 778 |
+
|
| 779 |
+
# GUI dashboard
|
| 780 |
+
cmd.append("--gui")
|
| 781 |
+
|
| 782 |
+
return cmd
|
| 783 |
+
|
| 784 |
+
|
| 785 |
+
def build_slider_training_cmd(config: ProjectConfig) -> list[str]:
    """Build CLI args for slider LoRA training via accelerate launch.

    Shared settings (model, LoRA, optimizer, memory, output) are inherited
    from the training config. Only slider-specific values (steps, output name,
    slider config, latent dims) come from ``config.slider``.
    """
    slider = config.slider
    train = config.training
    slider_toml = _write_slider_toml(config, build_slider_toml_path(config))

    argv: list[str] = [
        sys.executable,
        "-m",
        "accelerate.commands.launch",
        "--mixed_precision",
        train.mixed_precision,
        "--num_processes",
        "1",
        "--num_machines",
        "1",
        _find_script("ltx2_train_slider.py"),
    ]

    def opt(flag: str, value) -> None:
        # Flag followed by its (stringified) value.
        argv.extend([flag, str(value)])

    def switch(flag: str, enabled) -> None:
        # Boolean flag: appended only when enabled.
        if enabled:
            argv.append(flag)

    # Slider config file
    opt("--slider_config", slider_toml)

    # Model settings — from the shared training config
    opt("--ltx2_checkpoint", train.ltx2_checkpoint)
    if train.gemma_root:
        opt("--gemma_root", train.gemma_root)
    switch("--fp8_base", train.fp8_base)
    switch("--fp8_scaled", train.fp8_scaled)
    switch("--flash_attn", train.flash_attn)
    switch("--gemma_load_in_8bit", train.gemma_load_in_8bit)
    switch("--gemma_load_in_4bit", train.gemma_load_in_4bit)

    # Latent dimensions only apply in text mode — slider-specific
    if slider.mode == "text":
        opt("--latent_frames", slider.latent_frames)
        opt("--latent_height", slider.latent_height)
        opt("--latent_width", slider.latent_width)

    # LoRA hyper-parameters — from training config
    opt("--network_dim", train.network_dim)
    opt("--network_alpha", train.network_alpha)

    # Optimizer — from training config
    opt("--learning_rate", train.learning_rate)
    opt("--optimizer_type", train.optimizer_type)
    if train.optimizer_args:
        argv.append("--optimizer_args")
        argv.extend(train.optimizer_args.split())
    opt("--gradient_accumulation_steps", train.gradient_accumulation_steps)
    opt("--max_grad_norm", train.max_grad_norm)

    # Schedule — step count is the slider-specific override
    opt("--max_train_steps", slider.max_train_steps)
    if train.seed is not None:
        opt("--seed", train.seed)

    # Memory — from training config
    if train.blocks_to_swap is not None:
        opt("--blocks_to_swap", train.blocks_to_swap)
    switch("--gradient_checkpointing", train.gradient_checkpointing)

    # Output — directory from training config, file name from slider config
    if train.output_dir:
        opt("--output_dir", train.output_dir)
    if slider.output_name:
        opt("--output_name", slider.output_name)
    if train.save_every_n_steps:
        opt("--save_every_n_steps", train.save_every_n_steps)

    return argv
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
def build_cache_dino_cmd(config: ProjectConfig) -> list[str]:
    """Build CLI args for ltx2_cache_dino_features.py.

    The DINO model name intentionally comes from the training config
    (``crepa_dino_model``) rather than the caching config, so the cached
    features always match what training expects.
    """
    dataset_toml = export_dataset_toml(config)
    caching = config.caching
    training = config.training

    argv = [
        sys.executable,
        _find_script("ltx2_cache_dino_features.py"),
        "--dataset_config",
        str(dataset_toml),
        "--dino_model",
        training.crepa_dino_model,
        "--dino_batch_size",
        str(caching.dino_batch_size),
    ]

    if caching.device:
        argv.extend(["--device", caching.device])
    if caching.skip_existing:
        argv.append("--skip_existing")

    return argv
|
src/musubi_tuner/gui_dashboard/management_server.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Standalone management FastAPI app for the full training dashboard."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
import mimetypes
|
| 7 |
+
import os
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
# Windows registry often maps .js to text/plain, which breaks module-script
# loading in browsers — register the correct MIME types before any response
# is served. Must run before FastAPI/Starlette first resolves a static file.
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")
|
| 14 |
+
|
| 15 |
+
from fastapi import FastAPI
|
| 16 |
+
from fastapi.responses import FileResponse, HTMLResponse, Response
|
| 17 |
+
|
| 18 |
+
from musubi_tuner.gui_dashboard.process_manager import ProcessManager
|
| 19 |
+
from musubi_tuner.gui_dashboard.project_schema import ProjectConfig
|
| 20 |
+
from musubi_tuner.gui_dashboard.routers import datasets, filesystem, processes, projects, stats, system
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
FRONTEND_DIST = os.path.join(os.path.dirname(__file__), "frontend", "dist")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def create_management_app(project_path: Optional[str] = None) -> FastAPI:
    """Create the management FastAPI application.

    Args:
        project_path: Optional path to a project ``.json`` file, or a project
            directory containing ``project.json``. When provided and loadable,
            the config is attached to ``app.state.project_config``.

    Returns:
        A FastAPI app serving the management API routers, the dashboard data
        endpoints, an SSE update stream, and the built frontend (if present).
    """
    app = FastAPI(title="LTX-2 Training Manager")

    # App state shared with the routers.
    app.state.process_manager = ProcessManager()
    app.state.project_config = None
    app.state.project_path = None  # Path to the actual .json file

    # Load project if path provided.
    if project_path:
        p = Path(project_path)
        if p.is_dir():
            p = p / "project.json"
        if p.exists():
            try:
                app.state.project_config = ProjectConfig.load(p)
                app.state.project_path = p
                logger.info(f"Loaded project: {p}")
            except Exception as e:
                # Non-fatal: the server still starts; a project can be loaded later.
                logger.warning(f"Failed to load project {p}: {e}")

    # Mount API routers
    app.include_router(projects.router)
    app.include_router(datasets.router)
    app.include_router(processes.router)
    app.include_router(filesystem.router)
    app.include_router(system.router)
    app.include_router(stats.router)

    def _resolve_within(base_dir: str, relative: str) -> Optional[str]:
        """Join *relative* onto *base_dir*, rejecting path-traversal escapes.

        Returns the resolved absolute path when it stays inside base_dir,
        otherwise None. Prevents "../" in URL path parameters from reading
        arbitrary files on disk.
        """
        base = os.path.realpath(base_dir)
        candidate = os.path.realpath(os.path.join(base, relative))
        if candidate == base or candidate.startswith(base + os.sep):
            return candidate
        return None

    # Data endpoints — dynamically bound to the training output_dir.
    @app.get("/data/metrics.parquet")
    async def get_metrics():
        run_dir = _get_run_dir(app)
        if not run_dir:
            return Response(status_code=204)
        path = os.path.join(run_dir, "dashboard", "metrics.parquet")
        if not os.path.exists(path):
            return Response(status_code=204)
        return FileResponse(path, media_type="application/octet-stream")

    @app.get("/data/status.json")
    async def get_status():
        run_dir = _get_run_dir(app)
        if not run_dir:
            return Response(status_code=204)
        path = os.path.join(run_dir, "dashboard", "status.json")
        if not os.path.exists(path):
            return Response(status_code=204)
        return FileResponse(path, media_type="application/json")

    @app.get("/data/events.json")
    async def get_events():
        run_dir = _get_run_dir(app)
        if not run_dir:
            return Response(status_code=204)
        path = os.path.join(run_dir, "dashboard", "events.json")
        if not os.path.exists(path):
            return Response(status_code=204)
        return FileResponse(path, media_type="application/json")

    @app.get("/data/samples/{file_path:path}")
    async def get_sample(file_path: str):
        run_dir = _get_run_dir(app)
        if not run_dir:
            return HTMLResponse("no training output configured", status_code=404)
        # file_path comes straight from the URL: resolve it safely under the
        # sample dir so "../" sequences cannot escape the output directory.
        full_path = _resolve_within(os.path.join(run_dir, "sample"), file_path)
        if not full_path or not os.path.exists(full_path):
            return HTMLResponse("not found", status_code=404)
        return FileResponse(full_path)

    # SSE for metrics updates
    import asyncio
    from sse_starlette.sse import EventSourceResponse

    @app.get("/sse")
    async def sse_stream():
        async def event_generator():
            # Poll the metrics file mtime and notify clients on change.
            last_mtime = 0.0
            while True:
                await asyncio.sleep(2)
                run_dir = _get_run_dir(app)
                if not run_dir:
                    continue
                metrics_path = os.path.join(run_dir, "dashboard", "metrics.parquet")
                try:
                    mtime = os.path.getmtime(metrics_path) if os.path.exists(metrics_path) else 0.0
                except OSError:
                    mtime = 0.0
                if mtime > last_mtime:
                    last_mtime = mtime
                    yield {"event": "update", "data": f'{{"mtime": {mtime}}}'}

        return EventSourceResponse(event_generator())

    # SvelteKit frontend (must be registered last — catches all routes)
    if os.path.isdir(FRONTEND_DIST):

        @app.get("/{full_path:path}")
        async def serve_frontend(full_path: str):
            # Resolve inside the dist dir only; traversal attempts fall
            # through to index.html (SPA routing) instead of leaking files.
            file_path = _resolve_within(FRONTEND_DIST, full_path) if full_path else None
            if file_path and os.path.isfile(file_path):
                return FileResponse(file_path)
            return FileResponse(os.path.join(FRONTEND_DIST, "index.html"))

    else:

        @app.get("/")
        async def no_frontend():
            return HTMLResponse(
                "<h2>LTX-2 Training Manager</h2>"
                "<p>Frontend not built. Run <code>npm run build</code> in <code>gui_dashboard/frontend/</code></p>"
            )

    return app
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _get_run_dir(app: FastAPI) -> Optional[str]:
|
| 142 |
+
"""Get the current training output directory from project config."""
|
| 143 |
+
config: ProjectConfig | None = app.state.project_config
|
| 144 |
+
if config and config.training.output_dir:
|
| 145 |
+
return config.training.output_dir
|
| 146 |
+
return None
|
src/musubi_tuner/gui_dashboard/metrics_writer.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import threading
|
| 4 |
+
import time
|
| 5 |
+
from typing import Any, Optional
|
| 6 |
+
|
| 7 |
+
import pyarrow as pa
|
| 8 |
+
import pyarrow.parquet as pq
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Arrow schema for one training-metrics row. Column names/order are fixed:
# _do_flush builds tables by iterating SCHEMA.names.
SCHEMA = pa.schema(
    [
        ("step", pa.int64()),
        ("epoch", pa.int32()),
        ("loss", pa.float32()),
        ("avr_loss", pa.float32()),
        ("loss_v", pa.float32()),   # per-modality losses; NaN when not logged
        ("loss_a", pa.float32()),
        ("lr", pa.float64()),
        ("step_time", pa.float32()),
    ]
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class MetricsWriter:
    """Buffered Parquet writer for training metrics.

    Accumulates rows in memory and flushes to a Parquet file periodically
    via background daemon threads. Also manages status.json and events.json.

    Thread-safety: ``_lock`` guards the in-memory row buffer; ``_flush_lock``
    serializes the read-append-write cycle on metrics.parquet (background
    flush threads would otherwise race and silently drop rows);
    ``_events_lock`` does the same for events.json.
    """

    def __init__(self, run_dir: str, flush_every: int = 10):
        """
        Args:
            run_dir: Training output directory; files are written under
                ``run_dir/dashboard/``.
            flush_every: Number of buffered rows that triggers a flush.
        """
        self.run_dir = run_dir
        self.flush_every = flush_every

        self.metrics_path = os.path.join(run_dir, "dashboard", "metrics.parquet")
        self.status_path = os.path.join(run_dir, "dashboard", "status.json")
        self.events_path = os.path.join(run_dir, "dashboard", "events.json")

        os.makedirs(os.path.join(run_dir, "dashboard"), exist_ok=True)

        self._buffer: list[dict[str, Any]] = []
        self._lock = threading.Lock()  # guards _buffer and _step_count
        # Serializes the read-append-write cycle on metrics.parquet: two
        # concurrent flush threads could read the same base table and the
        # later write would drop the earlier thread's rows.
        self._flush_lock = threading.Lock()
        # Serializes the read-modify-write of events.json for the same reason.
        self._events_lock = threading.Lock()
        self._start_time = time.monotonic()
        self._step_count = 0

        # Initialize events file
        if not os.path.exists(self.events_path):
            self._write_json(self.events_path, [])

        # Initialize status
        self.update_status(step=0, max_steps=0, epoch=0, max_epochs=0, status="initializing")

    # -- public API --

    def log(
        self,
        step: int,
        epoch: int = 0,
        loss: float = 0.0,
        avr_loss: float = 0.0,
        loss_v: Optional[float] = None,
        loss_a: Optional[float] = None,
        lr: float = 0.0,
        step_time: float = 0.0,
    ):
        """Buffer one metrics row; triggers a background flush when full.

        Missing per-modality losses are stored as NaN so the Parquet column
        stays float-typed.
        """
        row = {
            "step": step,
            "epoch": epoch,
            "loss": loss,
            "avr_loss": avr_loss,
            "loss_v": loss_v if loss_v is not None else float("nan"),
            "loss_a": loss_a if loss_a is not None else float("nan"),
            "lr": lr,
            "step_time": step_time,
        }
        with self._lock:
            self._buffer.append(row)
            self._step_count += 1
            if len(self._buffer) >= self.flush_every:
                self._flush_background()

    def log_event(self, event_type: str, step: int, **extra):
        """Append an event entry to events.json on a background thread."""
        entry = {"type": event_type, "step": step, "time": time.time(), **extra}
        t = threading.Thread(target=self._append_event, args=(entry,), daemon=True)
        t.start()

    def update_status(self, **kw):
        """Write status.json (elapsed/speed plus caller-supplied fields)."""
        elapsed = time.monotonic() - self._start_time
        speed = self._step_count / elapsed if elapsed > 0 and self._step_count > 0 else 0.0
        status = {
            "elapsed_sec": round(elapsed, 1),
            "speed_steps_per_sec": round(speed, 4),
            "time": time.time(),
        }
        status.update(kw)
        # Atomic replace in _write_json makes last-writer-wins safe here.
        t = threading.Thread(target=self._write_json, args=(self.status_path, status), daemon=True)
        t.start()

    def flush(self):
        """Synchronously flush any buffered rows to disk."""
        with self._lock:
            if self._buffer:
                rows = list(self._buffer)
                self._buffer.clear()
            else:
                rows = []
        if rows:
            self._do_flush(rows)

    def close(self):
        """Flush remaining rows; call once at shutdown."""
        self.flush()

    # -- internals --

    def _flush_background(self):
        # Caller holds self._lock, so snapshot-and-clear here is safe.
        rows = list(self._buffer)
        self._buffer.clear()
        t = threading.Thread(target=self._do_flush, args=(rows,), daemon=True)
        t.start()

    def _do_flush(self, rows: list[dict]):
        """Append *rows* to metrics.parquet (whole-file rewrite)."""
        table = pa.table({col: [r[col] for r in rows] for col in SCHEMA.names}, schema=SCHEMA)
        with self._flush_lock:
            try:
                if os.path.exists(self.metrics_path):
                    existing = pq.read_table(self.metrics_path, schema=SCHEMA)
                    table = pa.concat_tables([existing, table])
                pq.write_table(table, self.metrics_path)
            except Exception:
                # If read fails (corrupt file), overwrite with the new rows
                pq.write_table(table, self.metrics_path)

    def _append_event(self, entry: dict):
        # Best-effort: event logging must never crash training.
        with self._events_lock:
            try:
                events = []
                if os.path.exists(self.events_path):
                    with open(self.events_path, "r") as f:
                        events = json.load(f)
                events.append(entry)
                self._write_json(self.events_path, events)
            except Exception:
                pass

    @staticmethod
    def _write_json(path: str, data):
        """Atomically write JSON via a temp file + os.replace."""
        tmp = path + ".tmp"
        with open(tmp, "w") as f:
            json.dump(data, f)
        os.replace(tmp, path)
|
src/musubi_tuner/gui_dashboard/process_manager.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Subprocess manager for caching and training processes."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import logging
|
| 6 |
+
import subprocess
|
| 7 |
+
import sys
|
| 8 |
+
import threading
|
| 9 |
+
from collections import deque
|
| 10 |
+
from enum import Enum
|
| 11 |
+
from typing import Literal, Optional
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)

# Names of the managed subprocess slots — one concurrent process per type.
ProcessType = Literal["cache_latents", "cache_text", "cache_dino", "training", "inference", "slider_training"]

# Windows-specific flags for clean subprocess shutdown: starting children in
# a new process group keeps signals to them from hitting the server itself.
_CREATION_FLAGS = 0
if sys.platform == "win32":
    _CREATION_FLAGS = subprocess.CREATE_NEW_PROCESS_GROUP
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class ProcessState(str, Enum):
    """Lifecycle states of a managed subprocess (str values for JSON APIs)."""

    IDLE = "idle"          # never started / slot empty
    RUNNING = "running"    # subprocess is alive
    STOPPING = "stopping"  # terminate requested, waiting for exit
    FINISHED = "finished"  # exited cleanly or was stopped on request
    ERROR = "error"        # exited with a non-zero code
+
|
| 30 |
+
|
| 31 |
+
class ManagedProcess:
    """Wraps a subprocess with state tracking and log buffering.

    stdout+stderr are merged and streamed into a bounded deque by a daemon
    reader thread; state transitions are guarded by ``_lock``.
    """

    def __init__(self, cmd: list[str], cwd: Optional[str] = None):
        # Command argv and working directory used by start().
        self.cmd = cmd
        self.cwd = cwd
        self.state = ProcessState.IDLE
        self.exit_code: Optional[int] = None
        # Bounded log buffer: oldest lines are dropped past 5000 entries.
        self.logs: deque[str] = deque(maxlen=5000)
        self._proc: Optional[subprocess.Popen] = None
        self._reader_thread: Optional[threading.Thread] = None
        self._lock = threading.Lock()

    def start(self):
        """Spawn the subprocess and the log-reader thread.

        Raises:
            RuntimeError: if the process is already running.
        """
        with self._lock:
            if self.state == ProcessState.RUNNING:
                raise RuntimeError("Process already running")

            self.state = ProcessState.RUNNING
            self.exit_code = None
            self.logs.clear()
            # First log line echoes the command being run.
            self.logs.append(f"$ {' '.join(self.cmd)}\n")

            self._proc = subprocess.Popen(
                self.cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,  # merge stderr into the stdout stream
                cwd=self.cwd,
                creationflags=_CREATION_FLAGS,
                bufsize=1,  # line-buffered (valid with text=True)
                text=True,
                encoding="utf-8",
                errors="replace",  # never crash the reader on bad bytes
            )

            self._reader_thread = threading.Thread(
                target=self._read_output, daemon=True
            )
            self._reader_thread.start()

    def _read_output(self):
        """Reader-thread body: stream output, then record the final state."""
        try:
            assert self._proc and self._proc.stdout
            for line in self._proc.stdout:
                self.logs.append(line)
            self._proc.wait()
        except Exception as e:
            self.logs.append(f"\n[Process reader error: {e}]\n")

        with self._lock:
            self.exit_code = self._proc.returncode if self._proc else -1
            if self.state == ProcessState.STOPPING:
                # A requested stop counts as FINISHED regardless of exit code.
                self.state = ProcessState.FINISHED
            elif self.exit_code == 0:
                self.state = ProcessState.FINISHED
            else:
                self.state = ProcessState.ERROR
            self.logs.append(f"\n[Process exited with code {self.exit_code}]\n")

    def terminate(self):
        """Request termination; escalates to kill after a grace period."""
        with self._lock:
            if self.state != ProcessState.RUNNING:
                return
            self.state = ProcessState.STOPPING
            self.logs.append("\n[Stopping process...]\n")

        if self._proc:
            self._proc.terminate()
            # Wait up to 10s then force kill (done off-thread so terminate()
            # returns immediately).
            t = threading.Thread(target=self._force_kill, daemon=True)
            t.start()

    def _force_kill(self):
        """Background escalation: kill the process if it ignores terminate()."""
        if self._proc:
            try:
                self._proc.wait(timeout=10)
            except subprocess.TimeoutExpired:
                self.logs.append("\n[Force killing process...]\n")
                self._proc.kill()

    def get_status(self) -> dict:
        """Return the current state and exit code as a JSON-ready dict."""
        return {
            "state": self.state.value,
            "exit_code": self.exit_code,
        }

    def get_logs(self, last_n: Optional[int] = None) -> list[str]:
        """Return buffered log lines (all, or only the last *last_n*)."""
        if last_n is None:
            return list(self.logs)
        return list(self.logs)[-last_n:]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class ProcessManager:
    """Tracks one ManagedProcess slot per process type.

    Each type ("training", "cache_latents", ...) may have at most one
    running subprocess; starting a new one replaces a finished entry.
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._processes: dict[str, ManagedProcess] = {}

    def start(self, proc_type: ProcessType, cmd: list[str], cwd: Optional[str] = None):
        """Launch *cmd* in the slot for *proc_type*.

        Raises:
            RuntimeError: if a process of this type is still running.
        """
        with self._lock:
            current = self._processes.get(proc_type)
            if current is not None and current.state == ProcessState.RUNNING:
                raise RuntimeError(f"{proc_type} is already running")

            managed = ManagedProcess(cmd, cwd=cwd)
            self._processes[proc_type] = managed

            managed.start()
            logger.info(f"Started {proc_type}: {' '.join(cmd[:5])}...")

    def stop(self, proc_type: ProcessType):
        """Request termination of the slot's process; no-op when empty."""
        with self._lock:
            managed = self._processes.get(proc_type)
            if managed is not None:
                managed.terminate()

    def get_status(self, proc_type: ProcessType) -> dict:
        """Return state/exit_code for the slot (idle defaults when empty)."""
        managed = self._processes.get(proc_type)
        if managed is None:
            return {"state": ProcessState.IDLE.value, "exit_code": None}
        return managed.get_status()

    def get_logs(self, proc_type: ProcessType, last_n: Optional[int] = None) -> list[str]:
        """Return buffered log lines for the slot ([] when empty)."""
        managed = self._processes.get(proc_type)
        return managed.get_logs(last_n) if managed is not None else []

    def get_all_statuses(self) -> dict[str, dict]:
        """Return a status dict for every known process type."""
        slots = ("cache_latents", "cache_text", "cache_dino", "training", "inference", "slider_training")
        return {name: self.get_status(name) for name in slots}
|
src/musubi_tuner/gui_dashboard/project_schema.py
ADDED
|
@@ -0,0 +1,481 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pydantic v2 models for GUI dashboard project configuration."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import Literal, Optional
|
| 7 |
+
|
| 8 |
+
from pydantic import BaseModel, ConfigDict, Field, model_validator
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class GeneralConfig(BaseModel):
    """Dataset-wide bucketing options (the ``[general]`` table of dataset_config.toml)."""

    enable_bucket: bool = True
    bucket_no_upscale: bool = True
class DatasetEntry(BaseModel):
    """One dataset entry (video, image, or audio source) for training or validation.

    Either ``directory`` or ``jsonl_file`` identifies the media source; the
    frame-related fields are only exported for ``type == "video"`` (see
    toml_export._dataset_entry_to_dict).
    """

    type: Literal["video", "image", "audio"] = "video"
    directory: str = ""
    cache_directory: str = ""
    reference_cache_directory: str = ""
    control_directory: str = ""
    jsonl_file: str = ""
    resolution_w: int = 768
    resolution_h: int = 512
    batch_size: int = 1
    num_repeats: int = 1
    caption_extension: str = ".txt"
    # video-specific
    target_frames: int = 33
    frame_extraction: Literal["head", "chunk", "slide", "uniform", "full"] = "head"
    frame_sample: Optional[int] = None
    max_frames: Optional[int] = None
    frame_stride: Optional[int] = None
    source_fps: Optional[float] = None
    target_fps: Optional[float] = None
class DatasetConfig(BaseModel):
    """Mirror of dataset_config.toml: general bucketing options plus dataset lists."""

    general: GeneralConfig = Field(default_factory=GeneralConfig)
    datasets: list[DatasetEntry] = Field(default_factory=list)
    validation_datasets: list[DatasetEntry] = Field(default_factory=list)
class CachingConfig(BaseModel):
    """Settings for the pre-training caching stage (latents, text-encoder outputs).

    Paths are plain strings; empty string means "not set". Field groups are
    marked by the section comments below.
    """

    ltx2_checkpoint: str = ""
    gemma_root: str = ""
    gemma_safetensors: str = ""
    ltx2_text_encoder_checkpoint: str = ""
    ltx2_mode: Literal["video", "av", "audio"] = "video"
    vae_dtype: Literal["float16", "bfloat16", "float32"] = "bfloat16"
    device: str = "cuda"
    skip_existing: bool = True
    keep_cache: bool = False
    num_workers: Optional[int] = None
    # VAE tiling (None = tiling disabled / tool defaults)
    vae_chunk_size: Optional[int] = None
    vae_spatial_tile_size: Optional[int] = None
    vae_spatial_tile_overlap: Optional[int] = None
    vae_temporal_tile_size: Optional[int] = None
    vae_temporal_tile_overlap: Optional[int] = None
    # Gemma quantization
    mixed_precision: Literal["no", "fp16", "bf16"] = "bf16"
    gemma_load_in_8bit: bool = False
    gemma_load_in_4bit: bool = False
    gemma_bnb_4bit_quant_type: Literal["nf4", "fp4"] = "nf4"
    gemma_bnb_4bit_disable_double_quant: bool = False
    gemma_bnb_4bit_compute_dtype: Literal["auto", "fp16", "bf16", "fp32"] = "auto"
    # Text encoder precaching
    precache_sample_prompts: bool = False
    sample_prompts: str = ""
    sample_prompts_cache: str = ""
    precache_preservation_prompts: bool = False
    preservation_prompts_cache: str = ""
    # VAE I2V latent precaching
    precache_sample_latents: bool = False
    sample_latents_cache: str = ""
    blank_preservation: bool = False
    dop: bool = False
    dop_class_prompt: str = ""
    # Reference (V2V)
    reference_frames: int = 1
    reference_downscale: int = 1
    # Audio
    ltx2_audio_source: Literal["video", "audio_files"] = "video"
    ltx2_audio_dir: str = ""
    ltx2_audio_ext: str = ".wav"
    ltx2_audio_dtype: str = ""
    audio_only_sequence_resolution: int = 64
    # DINOv2 feature caching (for CREPA dino mode - model selection in training.crepa_dino_model)
    dino_batch_size: int = 16
    # Quantization device
    quantize_device: Optional[str] = None
    # Dataset manifest
    save_dataset_manifest: str = ""
class TrainingConfig(BaseModel):
    """All LoRA training options for an LTX2 run, grouped by the section comments.

    These fields are persisted in project.json and translated to trainer CLI
    arguments elsewhere; ``None`` generally means "use the trainer's default".
    """

    # Model
    ltx2_checkpoint: str = ""
    gemma_root: str = ""
    gemma_safetensors: str = ""
    ltx2_mode: Literal["video", "av", "audio"] = "video"
    ltx_version: Literal["2.0", "2.3"] = "2.0"
    ltx_version_check_mode: Literal["off", "warn", "error"] = "warn"
    fp8_base: bool = False
    fp8_scaled: bool = False
    flash_attn: bool = True
    sdpa: bool = False
    sage_attn: bool = False
    xformers: bool = False
    gemma_load_in_8bit: bool = False
    gemma_load_in_4bit: bool = False
    gemma_bnb_4bit_disable_double_quant: bool = False
    ltx2_audio_only_model: bool = False

    # Quantization
    nf4_base: bool = False
    nf4_block_size: int = 32
    loftq_init: bool = False
    loftq_iters: int = 2
    fp8_w8a8: bool = False
    w8a8_mode: Literal["int8", "fp8"] = "int8"
    awq_calibration: bool = False
    awq_alpha: float = 0.25
    awq_num_batches: int = 8
    quantize_device: Optional[str] = None

    # LoRA / Network
    network_module: Optional[str] = None
    network_dim: int = 16
    network_alpha: int = 16
    lora_target_preset: Literal["t2v", "v2v", "audio", "full"] = "t2v"
    network_args: str = ""
    network_weights: str = ""
    network_dropout: Optional[float] = None
    scale_weight_norms: Optional[float] = None
    dim_from_weights: bool = False
    base_weights: str = ""
    base_weights_multiplier: str = ""
    lycoris_config: str = ""
    lycoris_quantized_base_check_mode: Literal["off", "warn", "error"] = "warn"
    init_lokr_norm: Optional[float] = None
    caption_dropout_rate: float = 0.0
    save_original_lora: bool = True
    ic_lora_strategy: Literal["auto", "none", "v2v", "audio_ref_only_ic"] = "auto"
    audio_ref_use_negative_positions: bool = False
    audio_ref_mask_cross_attention_to_reference: bool = False
    audio_ref_mask_reference_from_text_attention: bool = False
    audio_ref_identity_guidance_scale: float = 0.0

    # Optimizer
    learning_rate: float = 1e-4
    optimizer_type: str = "adamw8bit"
    optimizer_args: str = ""
    lr_scheduler: str = "constant_with_warmup"
    lr_warmup_steps: int = 100
    lr_decay_steps: Optional[int] = None
    lr_scheduler_num_cycles: Optional[int] = None
    lr_scheduler_power: Optional[float] = None
    lr_scheduler_min_lr_ratio: Optional[float] = None
    lr_scheduler_type: str = ""
    lr_scheduler_args: str = ""
    lr_scheduler_timescale: Optional[int] = None
    gradient_accumulation_steps: int = 1
    max_grad_norm: float = 1.0
    audio_lr: Optional[float] = None
    lr_args: str = ""

    # Schedule
    max_train_steps: int = 1600
    max_train_epochs: Optional[int] = None
    timestep_sampling: str = "shifted_logit_normal"
    discrete_flow_shift: float = 1.0
    weighting_scheme: str = "none"
    seed: Optional[int] = None
    guidance_scale: Optional[float] = None
    sigmoid_scale: Optional[float] = None
    logit_mean: Optional[float] = None
    logit_std: Optional[float] = None
    mode_scale: Optional[float] = None
    min_timestep: Optional[float] = None
    max_timestep: Optional[float] = None

    # Advanced timestep
    shifted_logit_mode: Optional[str] = None
    shifted_logit_eps: float = 1e-3
    shifted_logit_uniform_prob: float = 0.1
    shifted_logit_shift: Optional[float] = None
    preserve_distribution_shape: bool = False
    num_timestep_buckets: Optional[int] = None

    # Memory
    blocks_to_swap: Optional[int] = None
    gradient_checkpointing: bool = True
    gradient_checkpointing_cpu_offload: bool = False
    split_attn_target: Optional[str] = None
    split_attn_mode: Optional[str] = None
    split_attn_chunk_size: Optional[int] = None
    blockwise_checkpointing: bool = False
    blocks_to_checkpoint: Optional[int] = None
    mixed_precision: str = "bf16"
    full_fp16: bool = False
    full_bf16: bool = False
    ffn_chunk_target: Optional[str] = None
    ffn_chunk_size: int = 0
    use_pinned_memory_for_block_swap: bool = False
    img_in_txt_in_offloading: bool = False

    # Compile (torch.compile settings)
    compile: bool = False
    compile_backend: str = "inductor"
    compile_mode: str = ""
    compile_dynamic: bool = False
    compile_fullgraph: bool = False
    compile_cache_size_limit: Optional[int] = None

    # CUDA
    cuda_allow_tf32: bool = False
    cuda_cudnn_benchmark: bool = False
    cuda_memory_fraction: Optional[float] = None

    # Sampling
    sample_every_n_steps: Optional[int] = None
    sample_every_n_epochs: Optional[int] = None
    sample_prompts: str = ""
    use_precached_sample_prompts: bool = False
    sample_prompts_cache: str = ""
    use_precached_sample_latents: bool = False
    sample_latents_cache: str = ""
    height: int = 512
    width: int = 768
    sample_num_frames: int = 45
    sample_with_offloading: bool = False
    sample_merge_audio: bool = False
    sample_disable_audio: bool = False
    sample_at_first: bool = False
    sample_tiled_vae: bool = False
    sample_vae_tile_size: Optional[int] = None
    sample_vae_tile_overlap: Optional[int] = None
    sample_vae_temporal_tile_size: Optional[int] = None
    sample_vae_temporal_tile_overlap: Optional[int] = None
    sample_two_stage: bool = False
    spatial_upsampler_path: str = ""
    distilled_lora_path: str = ""
    sample_stage2_steps: int = 3
    sample_audio_only: bool = False
    sample_disable_flash_attn: bool = False
    sample_i2v_token_timestep_mask: bool = True
    sample_audio_subprocess: bool = True
    sample_include_reference: bool = False
    reference_downscale: int = 1
    reference_frames: int = 1

    # Validation
    validate_every_n_steps: Optional[int] = None
    validate_every_n_epochs: Optional[int] = None

    # Output
    output_dir: str = ""
    output_name: str = "ltx2_lora"
    save_every_n_epochs: Optional[int] = None
    save_every_n_steps: Optional[int] = None
    save_last_n_epochs: Optional[int] = None
    save_last_n_steps: Optional[int] = None
    save_last_n_epochs_state: Optional[int] = None
    save_last_n_steps_state: Optional[int] = None
    save_state: bool = False
    save_state_on_train_end: bool = False
    save_checkpoint_metadata: bool = False
    no_metadata: bool = False
    no_convert_to_comfy: bool = False
    log_with: Optional[str] = None
    logging_dir: str = ""
    log_prefix: str = ""
    log_tracker_name: str = ""
    wandb_run_name: str = ""
    wandb_api_key: str = ""
    log_cuda_memory_every_n_steps: Optional[int] = None
    resume: str = ""
    training_comment: str = ""
    loss_type: Literal["mse", "mae", "l1", "huber", "smooth_l1"] = "mse"
    huber_delta: float = 1.0

    # Metadata
    metadata_title: str = ""
    metadata_author: str = ""
    metadata_description: str = ""
    metadata_license: str = ""
    metadata_tags: str = ""

    # HuggingFace upload
    huggingface_repo_id: str = ""
    huggingface_repo_type: str = ""
    huggingface_path_in_repo: str = ""
    huggingface_token: str = ""
    huggingface_repo_visibility: str = ""
    save_state_to_huggingface: bool = False
    resume_from_huggingface: bool = False
    async_upload: bool = False

    # Dataset
    dataset_manifest: str = ""

    # Preservation
    blank_preservation: bool = False
    blank_preservation_multiplier: float = 1.0
    dop: bool = False
    dop_class: str = ""
    dop_multiplier: float = 1.0
    prior_divergence: bool = False
    prior_divergence_multiplier: float = 0.1
    use_precached_preservation: bool = False
    preservation_prompts_cache: str = ""

    # CREPA
    crepa: bool = False
    crepa_mode: Literal["backbone", "dino"] = "backbone"
    crepa_student_block_idx: int = 16
    crepa_teacher_block_idx: int = 32
    crepa_dino_model: Literal["dinov2_vits14", "dinov2_vitb14", "dinov2_vitl14", "dinov2_vitg14"] = "dinov2_vitb14"
    crepa_lambda: float = 0.1
    crepa_tau: float = 1.0
    crepa_num_neighbors: int = 2
    crepa_schedule: Literal["constant", "linear", "cosine"] = "constant"
    crepa_warmup_steps: int = 0
    crepa_normalize: bool = True

    # Self-Flow
    self_flow: bool = False
    self_flow_teacher_mode: Literal["base", "ema", "partial_ema"] = "base"
    self_flow_student_block_idx: int = 16
    self_flow_teacher_block_idx: int = 32
    self_flow_student_block_ratio: float = 0.3
    self_flow_teacher_block_ratio: float = 0.7
    self_flow_student_block_stochastic_range: int = 0
    self_flow_lambda: float = 0.1
    self_flow_mask_ratio: float = 0.1
    self_flow_frame_level_mask: bool = False
    self_flow_mask_focus_loss: bool = False
    self_flow_max_loss: float = 0.0
    self_flow_teacher_momentum: float = 0.999
    self_flow_dual_timestep: bool = True
    self_flow_projector_lr: Optional[float] = None
    self_flow_temporal_mode: Literal["off", "frame", "delta", "hybrid"] = "off"
    self_flow_lambda_temporal: float = 0.0
    self_flow_lambda_delta: float = 0.0
    self_flow_temporal_tau: float = 1.0
    self_flow_num_neighbors: int = 2
    self_flow_temporal_granularity: Literal["frame", "patch"] = "frame"
    self_flow_patch_spatial_radius: int = 0
    self_flow_patch_match_mode: Literal["hard", "soft"] = "hard"
    self_flow_delta_num_steps: int = 1
    self_flow_motion_weighting: Literal["none", "teacher_delta"] = "none"
    self_flow_motion_weight_strength: float = 0.0
    self_flow_temporal_schedule: Literal["constant", "linear", "cosine"] = "constant"
    self_flow_temporal_warmup_steps: int = 0
    self_flow_temporal_max_steps: int = 0
    self_flow_offload_teacher_features: bool = False

    # Audio features
    audio_loss_balance_mode: Literal["none", "inv_freq", "ema_mag"] = "none"
    audio_loss_balance_beta: float = 0.01
    audio_loss_balance_eps: float = 0.05
    audio_loss_balance_min: float = 0.05
    audio_loss_balance_max: float = 4.0
    audio_loss_balance_ema_init: float = 1.0
    audio_loss_balance_target_ratio: float = 0.33
    audio_loss_balance_ema_decay: float = 0.99
    independent_audio_timestep: bool = False
    audio_silence_regularizer: bool = False
    audio_silence_regularizer_weight: float = 1.0
    audio_supervision_mode: Literal["off", "warn", "error"] = "off"
    audio_supervision_warmup_steps: int = 50
    audio_supervision_check_interval: int = 50
    audio_supervision_min_ratio: float = 0.9
    audio_dop: bool = False
    audio_dop_multiplier: float = 0.5
    audio_bucket_strategy: Optional[str] = None
    audio_bucket_interval: Optional[float] = None
    audio_only_sequence_resolution: int = 64
    min_audio_batches_per_accum: int = 0
    audio_batch_probability: Optional[float] = None

    # Loss weighting
    video_loss_weight: float = 1.0
    audio_loss_weight: float = 1.0

    # Misc
    separate_audio_buckets: bool = True
    max_data_loader_n_workers: int = 2
    persistent_data_loader_workers: bool = True
    ltx2_first_frame_conditioning_p: float = 0.1
class InferenceConfig(BaseModel):
    """Options for generating samples with a trained LoRA (the GUI inference tab)."""

    ltx2_checkpoint: str = ""
    gemma_root: str = ""
    lora_weight: str = ""
    lora_multiplier: float = 1.0
    prompt: str = ""
    negative_prompt: str = ""
    from_file: str = ""
    output_dir: str = "output"
    output_name: str = "ltx2_sample"
    height: int = 512
    width: int = 768
    frame_count: int = 45
    frame_rate: float = 25.0
    sample_steps: int = 20
    guidance_scale: float = 1.0
    cfg_scale: Optional[float] = None
    discrete_flow_shift: float = 5.0
    seed: Optional[int] = None
    mixed_precision: Literal["no", "fp16", "bf16"] = "bf16"
    ltx2_mode: Literal["video", "av", "audio"] = "video"
    attn_mode: str = "torch"
    fp8_base: bool = False
    fp8_scaled: bool = False
    offloading: bool = False
    blocks_to_swap: Optional[int] = None
    gemma_load_in_8bit: bool = False
    gemma_load_in_4bit: bool = False
class SliderTargetConfig(BaseModel):
    """A single slider target: a positive/negative prompt pair with a weight."""

    positive: str = ""
    negative: str = ""
    target_class: str = ""
    weight: float = 1.0
class SliderConfig(BaseModel):
    """Slider-LoRA training options; settings not listed here come from TrainingConfig."""

    model_config = ConfigDict(extra="ignore")  # old projects may have fields that moved to TrainingConfig

    # Mode
    mode: Literal["text", "reference"] = "text"

    # Targets (text-only mode)
    targets: list[SliderTargetConfig] = Field(default_factory=lambda: [SliderTargetConfig()])

    # Text mode settings
    guidance_strength: float = 1.0
    latent_frames: int = 1
    latent_height: int = 512
    latent_width: int = 768

    # Sampling; comma-separated slider strengths used at sample time
    sample_slider_range: str = "-2,-1,0,1,2"

    # Slider-specific overrides (empty = inherit from training config)
    max_train_steps: int = 500
    output_name: str = "ltx2_slider"
class ProjectConfig(BaseModel):
    """Root configuration for a GUI dashboard project, persisted as project.json."""

    version: int = 1
    name: str = "New Project"
    project_dir: str = ""
    model_dir: str = ""  # directory where downloaded models are stored
    dataset: DatasetConfig = Field(default_factory=DatasetConfig)
    caching: CachingConfig = Field(default_factory=CachingConfig)
    training: TrainingConfig = Field(default_factory=TrainingConfig)
    inference: InferenceConfig = Field(default_factory=InferenceConfig)
    slider: SliderConfig = Field(default_factory=SliderConfig)

    @model_validator(mode='before')
    @classmethod
    def _migrate_sampling_key(cls, data):
        """Backward compat: rename old 'sampling' key to 'inference'."""
        if not isinstance(data, dict):
            return data
        if 'sampling' in data and 'inference' not in data:
            data['inference'] = data.pop('sampling')
        return data

    def save(self, path: Optional[Path] = None):
        """Write this config as pretty-printed JSON (default: <project_dir>/project.json)."""
        target = path or Path(self.project_dir) / "project.json"
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(self.model_dump_json(indent=2), encoding="utf-8")

    @classmethod
    def load(cls, path: Path) -> "ProjectConfig":
        """Read and validate a ProjectConfig from a JSON file at *path*."""
        return cls.model_validate_json(path.read_text(encoding="utf-8"))
src/musubi_tuner/gui_dashboard/server.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import logging
|
| 3 |
+
import mimetypes
|
| 4 |
+
import os
|
| 5 |
+
import threading
|
| 6 |
+
|
| 7 |
+
# Windows registry often maps .js to text/plain — fix it
|
| 8 |
+
mimetypes.add_type("application/javascript", ".js")
|
| 9 |
+
mimetypes.add_type("text/css", ".css")
|
| 10 |
+
|
| 11 |
+
from fastapi import FastAPI
|
| 12 |
+
from fastapi.responses import FileResponse, HTMLResponse, Response
|
| 13 |
+
from fastapi.staticfiles import StaticFiles
|
| 14 |
+
from sse_starlette.sse import EventSourceResponse
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
FRONTEND_DIST = os.path.join(os.path.dirname(__file__), "frontend", "dist")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def create_app(run_dir: str) -> FastAPI:
    """Build the dashboard FastAPI app for a training run directory.

    Serves metrics/status/event artifacts from ``<run_dir>/dashboard``, sample
    media from ``<run_dir>/sample``, an SSE change-notification endpoint, and
    (when built) the SvelteKit SPA frontend as a catch-all route.
    """
    app = FastAPI(title="Training Dashboard")
    dashboard_dir = os.path.join(run_dir, "dashboard")
    sample_dir = os.path.join(run_dir, "sample")

    def _dashboard_file(name: str, media_type: str):
        # Artifacts appear only after training writes them; 204 keeps the
        # frontend's polling loop quiet instead of surfacing 404 errors.
        path = os.path.join(dashboard_dir, name)
        if not os.path.exists(path):
            return Response(status_code=204)
        return FileResponse(path, media_type=media_type)

    # -- data endpoints --

    @app.get("/data/metrics.parquet")
    async def get_metrics():
        return _dashboard_file("metrics.parquet", "application/octet-stream")

    @app.get("/data/status.json")
    async def get_status():
        return _dashboard_file("status.json", "application/json")

    @app.get("/data/events.json")
    async def get_events():
        return _dashboard_file("events.json", "application/json")

    # -- SSE --

    @app.get("/sse")
    async def sse_stream():
        metrics_path = os.path.join(dashboard_dir, "metrics.parquet")

        async def event_generator():
            # Poll the metrics file's mtime every 2s; notify only on change.
            last_mtime = 0.0
            while True:
                await asyncio.sleep(2)
                try:
                    mtime = os.path.getmtime(metrics_path) if os.path.exists(metrics_path) else 0.0
                except OSError:
                    # File may vanish between exists() and getmtime().
                    mtime = 0.0
                if mtime > last_mtime:
                    last_mtime = mtime
                    yield {"event": "update", "data": f'{{"mtime": {mtime}}}'}

        return EventSourceResponse(event_generator())

    # -- samples static files --

    # Create the dir up front so the mount never fails; samples appear later.
    os.makedirs(sample_dir, exist_ok=True)
    app.mount("/data/samples", StaticFiles(directory=sample_dir), name="samples")

    # -- SvelteKit frontend (must be last - catches all other routes) --

    if os.path.isdir(FRONTEND_DIST):
        frontend_root = os.path.realpath(FRONTEND_DIST)

        # Serve index.html for SPA routing
        @app.get("/{full_path:path}")
        async def serve_frontend(full_path: str):
            # realpath + prefix check confines lookups to the dist directory,
            # blocking "../" path traversal from the untrusted URL path.
            candidate = os.path.realpath(os.path.join(frontend_root, full_path))
            if full_path and candidate.startswith(frontend_root + os.sep) and os.path.isfile(candidate):
                return FileResponse(candidate)
            return FileResponse(os.path.join(frontend_root, "index.html"))
    else:

        @app.get("/")
        async def no_frontend():
            return HTMLResponse(
                "<h2>Training Dashboard</h2>"
                "<p>Frontend not built. Run <code>npm run build</code> in <code>gui_dashboard/frontend/</code></p>"
                "<p>API available at <code>/data/metrics.parquet</code>, <code>/data/status.json</code>, <code>/data/events.json</code></p>"
            )

    return app
def start_server(run_dir: str, host: str = "0.0.0.0", port: int = 7860):
    """Serve the dashboard app for *run_dir* on a daemon thread and return it."""
    import uvicorn

    application = create_app(run_dir)

    def _serve():
        uvicorn.run(application, host=host, port=port, log_level="warning")

    server_thread = threading.Thread(target=_serve, daemon=True, name="dashboard-server")
    server_thread.start()
    logger.info(f"Training dashboard started at http://{host}:{port}")
    return server_thread
src/musubi_tuner/gui_dashboard/toml_export.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""TOML serialization for dataset and slider configs."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
import tomli_w
|
| 9 |
+
except ImportError:
|
| 10 |
+
tomli_w = None
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
import tomllib
|
| 14 |
+
except ImportError:
|
| 15 |
+
try:
|
| 16 |
+
import tomli as tomllib
|
| 17 |
+
except ImportError:
|
| 18 |
+
tomllib = None
|
| 19 |
+
|
| 20 |
+
from musubi_tuner.gui_dashboard.project_schema import ProjectConfig
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _toml_value(v) -> str:
|
| 24 |
+
if isinstance(v, bool):
|
| 25 |
+
return "true" if v else "false"
|
| 26 |
+
if isinstance(v, int):
|
| 27 |
+
return str(v)
|
| 28 |
+
if isinstance(v, float):
|
| 29 |
+
return str(v)
|
| 30 |
+
if isinstance(v, str):
|
| 31 |
+
return f'"{v}"'
|
| 32 |
+
if isinstance(v, list):
|
| 33 |
+
items = ", ".join(_toml_value(i) for i in v)
|
| 34 |
+
return f"[{items}]"
|
| 35 |
+
return str(v)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _write_toml_fallback(doc: dict, path: Path):
    """Minimal TOML writer used when tomli_w is not installed.

    Emits an optional [general] table followed by [[datasets]] and
    [[validation_datasets]] arrays-of-tables, then writes the file as UTF-8.
    """
    out: list = []

    if "general" in doc:
        out.append("[general]")
        out.extend(f"{key} = {_toml_value(val)}" for key, val in doc["general"].items())
        out.append("")

    for section in ("datasets", "validation_datasets"):
        for entry in doc.get(section, ()):
            out.append(f"[[{section}]]")
            out.extend(f"{key} = {_toml_value(val)}" for key, val in entry.items())
            out.append("")

    path.write_text("\n".join(out), encoding="utf-8")
def _dataset_entry_to_dict(entry) -> dict:
|
| 61 |
+
"""Convert a DatasetEntry to a TOML-ready dict."""
|
| 62 |
+
d: dict = {}
|
| 63 |
+
# Directory or JSONL source
|
| 64 |
+
if entry.jsonl_file:
|
| 65 |
+
key_prefix = {"video": "video", "image": "image", "audio": "audio"}[entry.type]
|
| 66 |
+
d[f"{key_prefix}_jsonl_file"] = entry.jsonl_file
|
| 67 |
+
else:
|
| 68 |
+
if entry.type == "video":
|
| 69 |
+
d["video_directory"] = entry.directory
|
| 70 |
+
elif entry.type == "image":
|
| 71 |
+
d["image_directory"] = entry.directory
|
| 72 |
+
elif entry.type == "audio":
|
| 73 |
+
d["audio_directory"] = entry.directory
|
| 74 |
+
|
| 75 |
+
d["cache_directory"] = entry.cache_directory
|
| 76 |
+
if entry.reference_cache_directory:
|
| 77 |
+
d["reference_cache_directory"] = entry.reference_cache_directory
|
| 78 |
+
if entry.control_directory:
|
| 79 |
+
d["control_directory"] = entry.control_directory
|
| 80 |
+
if entry.type != "audio":
|
| 81 |
+
d["resolution"] = [entry.resolution_w, entry.resolution_h]
|
| 82 |
+
d["batch_size"] = entry.batch_size
|
| 83 |
+
d["num_repeats"] = entry.num_repeats
|
| 84 |
+
d["caption_extension"] = entry.caption_extension
|
| 85 |
+
|
| 86 |
+
if entry.type == "video":
|
| 87 |
+
d["target_frames"] = [entry.target_frames]
|
| 88 |
+
d["frame_extraction"] = entry.frame_extraction
|
| 89 |
+
if entry.frame_sample is not None:
|
| 90 |
+
d["frame_sample"] = entry.frame_sample
|
| 91 |
+
if entry.max_frames is not None:
|
| 92 |
+
d["max_frames"] = entry.max_frames
|
| 93 |
+
if entry.frame_stride is not None:
|
| 94 |
+
d["frame_stride"] = entry.frame_stride
|
| 95 |
+
if entry.source_fps is not None:
|
| 96 |
+
d["source_fps"] = entry.source_fps
|
| 97 |
+
if entry.target_fps is not None:
|
| 98 |
+
d["target_fps"] = entry.target_fps
|
| 99 |
+
|
| 100 |
+
return d
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _write_dataset_toml(config: ProjectConfig, output_path: Path) -> Path:
    """Serialize the project's dataset configuration as TOML and return the path."""
    doc: dict = {
        # [general] table
        "general": {
            "enable_bucket": config.dataset.general.enable_bucket,
            "bucket_no_upscale": config.dataset.general.bucket_no_upscale,
        },
        # [[datasets]] array-of-tables
        "datasets": [_dataset_entry_to_dict(e) for e in config.dataset.datasets],
    }

    # [[validation_datasets]] only when any are configured
    if config.dataset.validation_datasets:
        doc["validation_datasets"] = [
            _dataset_entry_to_dict(e) for e in config.dataset.validation_datasets
        ]

    output_path.parent.mkdir(parents=True, exist_ok=True)

    # Prefer the real TOML writer; fall back to the minimal in-house one.
    if tomli_w is None:
        _write_toml_fallback(doc, output_path)
    else:
        output_path.write_bytes(tomli_w.dumps(doc).encode("utf-8"))

    return output_path
def build_dataset_toml_path(config: ProjectConfig) -> Path:
    """Location of the generated dataset_config.toml inside the project directory."""
    project_root = Path(config.project_dir)
    return project_root / "dataset_config.toml"
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def export_dataset_toml(config: ProjectConfig) -> Path:
    """Write dataset_config.toml for *config* and return the path it was written to."""
    target = build_dataset_toml_path(config)
    return _write_dataset_toml(config, target)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def _write_slider_toml(config: ProjectConfig, output_path: Path) -> Path:
    """Generate slider_config.toml from project config and return path.

    Top-level keys (``mode``, ``guidance_strength``, ``sample_slider_range``)
    are emitted first, followed by one ``[[targets]]`` table per slider target.
    """
    s = config.slider
    doc: dict = {
        "mode": s.mode,
        "guidance_strength": s.guidance_strength,
    }

    # Parse sample_slider_range ("a, b, c" -> [a, b, c]); fall back to a
    # sensible default when the field is missing or malformed.
    try:
        doc["sample_slider_range"] = [float(x.strip()) for x in s.sample_slider_range.split(",")]
    except (ValueError, AttributeError):
        doc["sample_slider_range"] = [-2.0, -1.0, 0.0, 1.0, 2.0]

    output_path.parent.mkdir(parents=True, exist_ok=True)

    def quote(value: str) -> str:
        # Fix: TOML basic strings require backslashes and double quotes to be
        # escaped. Without this, a prompt containing `"` or `\` produced an
        # unparsable file. Unchanged output for strings without those chars.
        escaped = value.replace("\\", "\\\\").replace('"', '\\"')
        return f'"{escaped}"'

    # Write TOML with targets as [[targets]] array
    lines = []
    for k, v in doc.items():
        lines.append(f"{k} = {_toml_value(v)}")
    lines.append("")

    for target in s.targets:
        lines.append("[[targets]]")
        lines.append(f"positive = {quote(target.positive)}")
        lines.append(f"negative = {quote(target.negative)}")
        if target.target_class:
            lines.append(f"target_class = {quote(target.target_class)}")
        lines.append(f"weight = {target.weight}")
        lines.append("")

    output_path.write_text("\n".join(lines), encoding="utf-8")
    return output_path
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def build_slider_toml_path(config: ProjectConfig) -> Path:
    """Location of the generated slider_config.toml inside the project directory."""
    project_root = Path(config.project_dir)
    return project_root / "slider_config.toml"
|
src/musubi_tuner/hunyuan_model/__init__.py
ADDED
|
File without changes
|
src/musubi_tuner/hunyuan_model/activation_layers.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def get_activation_layer(act_type):
    """Look up an activation-layer factory by name.

    Args:
        act_type (str): one of "gelu", "gelu_tanh", "relu", "silu".

    Returns:
        A zero-argument callable that constructs the activation module.

    Raises:
        ValueError: if *act_type* is not a known activation name.
    """
    factories = {
        "gelu": lambda: nn.GELU(),
        # approximate="tanh" requires torch >= 1.13
        "gelu_tanh": lambda: nn.GELU(approximate="tanh"),
        "relu": nn.ReLU,
        "silu": nn.SiLU,
    }
    if act_type not in factories:
        raise ValueError(f"Unknown activation type: {act_type}")
    return factories[act_type]
|
src/musubi_tuner/hunyuan_model/attention.py
ADDED
|
@@ -0,0 +1,309 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
import flash_attn
|
| 8 |
+
from flash_attn.flash_attn_interface import _flash_attn_forward
|
| 9 |
+
from flash_attn.flash_attn_interface import flash_attn_varlen_func
|
| 10 |
+
from flash_attn.flash_attn_interface import flash_attn_func
|
| 11 |
+
except ImportError:
|
| 12 |
+
flash_attn = None
|
| 13 |
+
flash_attn_varlen_func = None
|
| 14 |
+
_flash_attn_forward = None
|
| 15 |
+
flash_attn_func = None
|
| 16 |
+
|
| 17 |
+
try:
|
| 18 |
+
print("Trying to import sageattention")
|
| 19 |
+
from sageattention import sageattn_varlen, sageattn
|
| 20 |
+
|
| 21 |
+
print("Successfully imported sageattention")
|
| 22 |
+
except ImportError:
|
| 23 |
+
print("Failed to import sageattention")
|
| 24 |
+
sageattn_varlen = None
|
| 25 |
+
sageattn = None
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import xformers.ops as xops
|
| 29 |
+
except ImportError:
|
| 30 |
+
xops = None
|
| 31 |
+
|
| 32 |
+
def _flatten_batch(x):
    """Merge batch and sequence dims: [b, s, ...] -> [b*s, ...] (varlen packed layout)."""
    return x.view(x.shape[0] * x.shape[1], *x.shape[2:])


def _identity(x):
    """Leave the tensor layout unchanged."""
    return x


def _swap_seq_heads(x):
    """Swap dims 1 and 2: [b, s, a, d] <-> [b, a, s, d]."""
    return x.transpose(1, 2)


# mode -> (pre-attention layout transform, post-attention layout transform)
MEMORY_LAYOUT = {
    "flash": (_flatten_batch, _identity),
    "flash_fixlen": (_identity, _identity),
    "sageattn": (_flatten_batch, _identity),
    "sageattn_fixlen": (_swap_seq_heads, _swap_seq_heads),
    "torch": (_swap_seq_heads, _swap_seq_heads),
    "xformers": (_identity, _identity),
    "vanilla": (_swap_seq_heads, _swap_seq_heads),
}
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def get_cu_seqlens(text_mask, img_len):
    """Calculate cu_seqlens_q, cu_seqlens_kv using text_mask and img_len.

    Each batch element occupies a fixed slot of ``text_mask.shape[1] + img_len``
    tokens; the first segment of each slot holds the real (image + valid text)
    tokens and the remainder is padding, so two cumulative entries are produced
    per sample.

    Args:
        text_mask (torch.Tensor): [batch, text_len] mask of valid text tokens
            (1 = valid, 0 = padding).
        img_len (int): the number of image tokens per sample.

    Returns:
        torch.Tensor: int32 tensor of shape [2 * batch + 1] with the cumulative
        sequence lengths expected by flash attention varlen kernels.
    """
    batch_size = text_mask.shape[0]
    text_len = text_mask.sum(dim=1)
    max_len = text_mask.shape[1] + img_len

    # Fix: allocate on the mask's device instead of hard-coding "cuda", so the
    # function also works with CPU tensors and respects the caller's CUDA device.
    cu_seqlens = torch.zeros([2 * batch_size + 1], dtype=torch.int32, device=text_mask.device)

    for i in range(batch_size):
        s = text_len[i] + img_len          # real tokens for sample i
        s1 = i * max_len + s               # end of the real segment
        s2 = (i + 1) * max_len             # end of the padded slot
        cu_seqlens[2 * i + 1] = s1
        cu_seqlens[2 * i + 2] = s2

    return cu_seqlens
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def attention(
    q_or_qkv_list,
    k=None,
    v=None,
    mode="flash",
    drop_rate=0,
    attn_mask=None,
    total_len=None,
    causal=False,
    cu_seqlens_q=None,
    cu_seqlens_kv=None,
    max_seqlen_q=None,
    max_seqlen_kv=None,
    batch_size=1,
):
    """
    Perform QKV self attention with a pluggable backend.

    Args:
        q_or_qkv_list: Query tensor with shape [b, s, a, d] (a = number of heads),
            or a list [q, k, v]; when a list is passed it is cleared in place so
            the caller's references are released as early as possible.
        k (torch.Tensor): Key tensor with shape [b, s1, a, d]
        v (torch.Tensor): Value tensor with shape [b, s1, a, d]
        mode (str): Attention backend. One of "flash", "flash_fixlen", "torch",
            "xformers", "sageattn", "sageattn_fixlen", "vanilla" (see
            MEMORY_LAYOUT). "flash"/"sageattn" are remapped to their
            "*_fixlen" variants when no cu_seqlens_q is given or split
            attention is requested.
        drop_rate (float): Dropout rate in attention map. (default: 0)
        attn_mask (torch.Tensor): Attention mask with shape [b, s1] (cross_attn), or [b, a, s, s1] (torch or vanilla).
            (default: None)
        total_len: per-sample valid lengths; when given, each sample is trimmed
            to its valid length, attended separately, and padded back
            ("split attention"). (default: None)
        causal (bool): Whether to use causal attention. (default: False)
        cu_seqlens_q (torch.Tensor): dtype torch.int32. The cumulative sequence lengths of the sequences in the batch,
            used to index into q.
        cu_seqlens_kv (torch.Tensor): dtype torch.int32. The cumulative sequence lengths of the sequences in the batch,
            used to index into kv.
        max_seqlen_q (int): The maximum sequence length in the batch of q.
        max_seqlen_kv (int): The maximum sequence length in the batch of k and v.
        batch_size (int): used to un-flatten the varlen output of the "flash"
            and "sageattn" modes back to [b, s, a, d]. (default: 1)

    Returns:
        torch.Tensor: Output tensor after self attention with shape [b, s, ad]
    """
    # Accept either (q, k, v) or a single [q, k, v] list; the list form lets us
    # drop the caller's references immediately (memory saving for large tensors).
    q, k, v = q_or_qkv_list if type(q_or_qkv_list) == list else (q_or_qkv_list, k, v)
    if type(q_or_qkv_list) == list:
        q_or_qkv_list.clear()
    split_attn = total_len is not None
    # Varlen kernels need cu_seqlens; fall back to fixed-length variants otherwise.
    if (split_attn or cu_seqlens_q is None) and mode == "sageattn":
        mode = "sageattn_fixlen"
    elif (split_attn or cu_seqlens_q is None) and mode == "flash":
        mode = "flash_fixlen"
    # print(f"Attention mode: {mode}, split_attn: {split_attn}")
    pre_attn_layout, post_attn_layout = MEMORY_LAYOUT[mode]

    # trim the sequence length to the actual length instead of attn_mask
    if split_attn:
        # Per-sample padding amounts, needed to pad the outputs back afterwards.
        trimmed_len = q.shape[1] - total_len
        q = [q[i : i + 1, : total_len[i]] for i in range(len(q))]
        k = [k[i : i + 1, : total_len[i]] for i in range(len(k))]
        v = [v[i : i + 1, : total_len[i]] for i in range(len(v))]
        q = [pre_attn_layout(q_i) for q_i in q]
        k = [pre_attn_layout(k_i) for k_i in k]
        v = [pre_attn_layout(v_i) for v_i in v]
        # print(
        #     f"Trimming the sequence length to {total_len},trimmed_len: {trimmed_len}, q.shape: {[q_i.shape for q_i in q]}, mode: {mode}"
        # )
    else:
        q = pre_attn_layout(q)
        k = pre_attn_layout(k)
        v = pre_attn_layout(v)

    if mode == "torch":
        if split_attn:
            x = []
            for i in range(len(q)):
                x_i = F.scaled_dot_product_attention(q[i], k[i], v[i], dropout_p=drop_rate, is_causal=causal)
                # Release each sample's q/k/v as soon as it is consumed.
                q[i], k[i], v[i] = None, None, None
                x.append(x_i)
            del q, k, v
        else:
            # SDPA expects a bool mask or an additive mask in q's dtype.
            if attn_mask is not None and attn_mask.dtype != torch.bool:
                attn_mask = attn_mask.to(q.dtype)
            x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=drop_rate, is_causal=causal)
            del q, k, v
        del attn_mask

    elif mode == "xformers":
        # B, M, H, K: M is the sequence length, H is the number of heads, K is the dimension of the heads -> it is same as input dimension
        # currently only support batch_size = 1
        assert split_attn or cu_seqlens_q is None, "Xformers only supports splitting"
        if split_attn:
            x = []
            for i in range(len(q)):
                x_i = xops.memory_efficient_attention(q[i], k[i], v[i], p=drop_rate)  # , causal=causal)
                q[i], k[i], v[i] = None, None, None
                x.append(x_i)
            del q, k, v
        else:
            x = xops.memory_efficient_attention(q, k, v, p=drop_rate)
            del q, k, v

    elif mode == "flash":
        x = flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
        del q, k, v
        # x with shape [(bxs), a, d]
        x = x.view(batch_size, max_seqlen_q, x.shape[-2], x.shape[-1])  # reshape x to [b, s, a, d]

    elif mode == "flash_fixlen":
        if split_attn:
            x = []
            for i in range(len(q)):
                # q: (batch_size, seqlen, nheads, headdim), k: (batch_size, seqlen, nheads_k, headdim), v: (batch_size, seqlen, nheads_k, headdim)
                x_i = flash_attn_func(q[i], k[i], v[i], dropout_p=drop_rate, causal=causal)
                q[i], k[i], v[i] = None, None, None
                x.append(x_i)
            del q, k, v
        else:
            x = flash_attn_func(q, k, v, dropout_p=drop_rate, causal=causal)
            del q, k, v  # this causes error in compiled mode with fullgraph=True

    elif mode == "sageattn":
        x = sageattn_varlen(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
        del q, k, v
        # x with shape [(bxs), a, d]
        x = x.view(batch_size, max_seqlen_q, x.shape[-2], x.shape[-1])  # reshape x to [b, s, a, d]

    elif mode == "sageattn_fixlen":
        if split_attn:
            x = []
            for i in range(len(q)):
                # HND seems to cause an error
                x_i = sageattn(q[i], k[i], v[i])  # (batch_size, seq_len, head_num, head_dim)
                q[i], k[i], v[i] = None, None, None
                x.append(x_i)
            del q, k, v
        else:
            x = sageattn(q, k, v)
            del q, k, v

    elif mode == "vanilla":
        # Reference implementation: explicit softmax(QK^T / sqrt(d) + bias) @ V.
        assert not split_attn, "Vanilla attention does not support trimming"
        scale_factor = 1 / math.sqrt(q.size(-1))

        b, a, s, _ = q.shape
        s1 = k.size(2)
        attn_bias = torch.zeros(b, a, s, s1, dtype=q.dtype, device=q.device)
        if causal:
            # Only applied to self attention
            assert attn_mask is None, "Causal mask and attn_mask cannot be used together"
            temp_mask = torch.ones(b, a, s, s, dtype=torch.bool, device=q.device).tril(diagonal=0)
            attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
            # NOTE(review): .to() is not in-place and the result is discarded;
            # attn_bias already has q's dtype, so this line looks like a no-op.
            attn_bias.to(q.dtype)

        if attn_mask is not None:
            if attn_mask.dtype == torch.bool:
                attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
            else:
                attn_bias += attn_mask

        # TODO: Maybe force q and k to be float32 to avoid numerical overflow
        attn = (q @ k.transpose(-2, -1)) * scale_factor
        attn += attn_bias
        attn = attn.softmax(dim=-1)
        attn = torch.dropout(attn, p=drop_rate, train=True)
        x = attn @ v
    else:
        raise NotImplementedError(f"Unsupported attention mode: {mode}")

    if split_attn:
        # Undo the per-sample trimming: restore layout, zero-pad each sample
        # back to the full sequence length, then re-stack the batch.
        x = [post_attn_layout(x_i) for x_i in x]
        for i in range(len(x)):
            x[i] = F.pad(x[i], (0, 0, 0, 0, 0, trimmed_len[i]))
        x = torch.cat(x, dim=0)
    else:
        x = post_attn_layout(x)

    # Fold the head dimension into the feature dimension: [b, s, a, d] -> [b, s, a*d].
    b, s, a, d = x.shape
    x = x.reshape(b, s, -1)
    return x
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def parallel_attention(hybrid_seq_parallel_attn, q, k, v, img_q_len, img_kv_len, cu_seqlens_q, cu_seqlens_kv):
    """Hybrid sequence-parallel attention over packed image+text tokens.

    The image tokens plus the first text segment (up to ``cu_seqlens_*[1]``)
    are attended via the sequence-parallel implementation with the text part
    attached as a "rear" joint tensor; the remaining segment is attended
    locally with flash attention, and the two outputs are concatenated along
    the sequence dimension.

    Args:
        hybrid_seq_parallel_attn: callable implementing sequence-parallel
            attention; invoked with joint query/key/value tensors and
            ``joint_strategy="rear"``.
        q, k, v (torch.Tensor): [b, s, a, d] query/key/value tensors.
        img_q_len, img_kv_len (int): number of image tokens in q and in k/v.
        cu_seqlens_q, cu_seqlens_kv: cumulative sequence lengths; index 1
            marks the end of the first (real) sequence in the packed batch.

    Returns:
        torch.Tensor: [b, s, a*d] attention output.
    """
    attn1 = hybrid_seq_parallel_attn(
        None,
        q[:, :img_q_len, :, :],
        k[:, :img_kv_len, :, :],
        v[:, :img_kv_len, :, :],
        dropout_p=0.0,
        causal=False,
        joint_tensor_query=q[:, img_q_len : cu_seqlens_q[1]],
        joint_tensor_key=k[:, img_kv_len : cu_seqlens_kv[1]],
        joint_tensor_value=v[:, img_kv_len : cu_seqlens_kv[1]],
        joint_strategy="rear",
    )

    def _version_tuple(version: str) -> tuple:
        # Leading numeric components only: "2.10.0.post1" -> (2, 10, 0).
        parts = []
        for piece in version.split("."):
            digits = ""
            for ch in piece:
                if ch.isdigit():
                    digits += ch
                else:
                    break
            if not digits:
                break
            parts.append(int(digits))
        return tuple(parts)

    # Fix: compare versions numerically. The previous lexicographic string
    # comparison (`"2.10.0" >= "2.7.0"` is False) would select the legacy
    # keyword names for flash-attn >= 2.10, even though the split
    # window_size_left/window_size_right API was introduced in 2.7.0.
    if _version_tuple(flash_attn.__version__) >= (2, 7, 0):
        attn2, *_ = _flash_attn_forward(
            q[:, cu_seqlens_q[1] :],
            k[:, cu_seqlens_kv[1] :],
            v[:, cu_seqlens_kv[1] :],
            dropout_p=0.0,
            softmax_scale=q.shape[-1] ** (-0.5),
            causal=False,
            window_size_left=-1,
            window_size_right=-1,
            softcap=0.0,
            alibi_slopes=None,
            return_softmax=False,
        )
    else:
        attn2, *_ = _flash_attn_forward(
            q[:, cu_seqlens_q[1] :],
            k[:, cu_seqlens_kv[1] :],
            v[:, cu_seqlens_kv[1] :],
            dropout_p=0.0,
            softmax_scale=q.shape[-1] ** (-0.5),
            causal=False,
            window_size=(-1, -1),
            softcap=0.0,
            alibi_slopes=None,
            return_softmax=False,
        )

    attn = torch.cat([attn1, attn2], dim=1)
    # Fold heads into the feature dim: [b, s, a, d] -> [b, s, a*d].
    b, s, a, d = attn.shape
    attn = attn.reshape(b, s, -1)

    return attn
|
src/musubi_tuner/hunyuan_model/embed_layers.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
from musubi_tuner.hunyuan_model.helpers import to_2tuple
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class PatchEmbed(nn.Module):
    """Image/video to Patch Embedding.

    Patchifies the input with a strided ``nn.Conv3d`` (kernel size == stride ==
    patch_size) and projects each patch to ``embed_dim``. Note: despite the
    historical 2D name, the projection here is three-dimensional.

    Based on the 2D implementation in
    https://github.com/google-research/vision_transformer
    (hacked together by / Copyright 2020 Ross Wightman).

    The shape ``_assert`` of the original implementation was removed from
    forward() to be compatible with multi-resolution images.
    """

    def __init__(
        self,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        norm_layer=None,  # optional normalization constructor (e.g. nn.LayerNorm); Identity when None
        flatten=True,  # flatten the patch grid into a single token axis in forward()
        bias=True,
        dtype=None,
        device=None,
    ):
        factory_kwargs = {"dtype": dtype, "device": device}
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.flatten = flatten

        # NOTE(review): patch_size goes through to_2tuple but is passed to a
        # Conv3d, which needs an int or a 3-element kernel/stride; presumably
        # callers already pass a 3-tuple (t, h, w) -- confirm against call sites.
        self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias, **factory_kwargs)
        # Xavier-uniform init over the flattened (out_channels, in*kt*kh*kw) weight view.
        nn.init.xavier_uniform_(self.proj.weight.view(self.proj.weight.size(0), -1))
        if bias:
            nn.init.zeros_(self.proj.bias)

        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        # x: (B, C, T, H, W) -> (B, embed_dim, T', H', W') via strided conv.
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # (B, embed_dim, T'*H'*W') -> (B, N, embed_dim)
        x = self.norm(x)
        return x
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class TextProjection(nn.Module):
    """Two-layer MLP (Linear -> activation -> Linear) projecting text embeddings.

    Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
    """

    def __init__(self, in_channels, hidden_size, act_layer, dtype=None, device=None):
        factory_kwargs = {"dtype": dtype, "device": device}
        super().__init__()
        # Submodule creation order (linear_1 before linear_2) is significant:
        # it fixes both the state_dict parameter names and the RNG consumption
        # order during weight initialization.
        self.linear_1 = nn.Linear(in_features=in_channels, out_features=hidden_size, bias=True, **factory_kwargs)
        self.act_1 = act_layer()
        self.linear_2 = nn.Linear(in_features=hidden_size, out_features=hidden_size, bias=True, **factory_kwargs)

    def forward(self, caption):
        return self.linear_2(self.act_1(self.linear_1(caption)))
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def timestep_embedding(t, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.

    Args:
        t (torch.Tensor): a 1-D Tensor of N indices, one per batch element. These may be fractional.
        dim (int): the dimension of the output.
        max_period (int): controls the minimum frequency of the embeddings.

    Returns:
        embedding (torch.Tensor): An (N, D) Tensor of positional embeddings.

    .. ref_link: https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
    """
    half_dim = dim // 2
    # Geometric frequency ladder from 1 down to ~1/max_period, computed in
    # float32 and moved onto t's device.
    scaled_indices = -math.log(max_period) * torch.arange(start=0, end=half_dim, dtype=torch.float32) / half_dim
    freqs = torch.exp(scaled_indices).to(device=t.device)
    phases = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1)
    if dim % 2:
        # Odd output width: append a zero column so the result is exactly `dim` wide.
        pad = torch.zeros_like(embedding[:, :1])
        embedding = torch.cat([embedding, pad], dim=-1)
    return embedding
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.

    Timesteps are first expanded to sinusoidal features of width
    ``frequency_embedding_size`` (via ``timestep_embedding``) and then
    projected through a Linear -> activation -> Linear MLP.
    """

    def __init__(
        self,
        hidden_size,
        act_layer,  # activation constructor, e.g. nn.SiLU
        frequency_embedding_size=256,  # width of the sinusoidal features fed to the MLP
        max_period=10000,  # minimum-frequency control, forwarded to timestep_embedding
        out_size=None,  # MLP output width; defaults to hidden_size
        dtype=None,
        device=None,
    ):
        factory_kwargs = {"dtype": dtype, "device": device}
        super().__init__()
        self.frequency_embedding_size = frequency_embedding_size
        self.max_period = max_period
        if out_size is None:
            out_size = hidden_size

        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True, **factory_kwargs),
            act_layer(),
            nn.Linear(hidden_size, out_size, bias=True, **factory_kwargs),
        )
        # Weights get N(0, 0.02) init; biases keep nn.Linear's default init.
        nn.init.normal_(self.mlp[0].weight, std=0.02)
        nn.init.normal_(self.mlp[2].weight, std=0.02)

    def forward(self, t):
        # Cast the float32 sinusoidal features to the MLP's parameter dtype
        # (e.g. fp16/bf16) before projecting.
        t_freq = timestep_embedding(t, self.frequency_embedding_size, self.max_period).type(self.mlp[0].weight.dtype)
        t_emb = self.mlp(t_freq)
        return t_emb
|